Skip to content

Commit d1b33cb

Browse files
authored
Remove quantize_io from compile_spec (#7647)
quantize_io was only used in arm_partitioner and is no longer needed there now that the delegate runs in the graph. Signed-off-by: Erik Lundell <[email protected]>
1 parent 0dba025 commit d1b33cb

File tree

5 files changed

+11
-55
lines changed

5 files changed

+11
-55
lines changed

backends/arm/arm_backend.py

+1-20
Original file line number | Diff line number | Diff line change
@@ -49,8 +49,7 @@ def __init__(self):
4949
self.compiler_flags = []
5050
self.output_format = None
5151
self.path_for_intermediates = None
52-
self.quantize_io = False
53-
self.tosa_spec = None
52+
self.tosa_version = None
5453
self.input_order = None
5554

5655
def ethosu_compile_spec(
@@ -123,14 +122,6 @@ def dump_intermediate_artifacts_to(
123122
self.path_for_intermediates = output_path
124123
return self
125124

126-
def set_quantize_io(self, quantize_io: bool = False) -> "ArmCompileSpecBuilder":
127-
"""
128-
Quantization of inputs and dequantization of outputs for cases where
129-
whole graph is quantized and method signature is not of quantized type.
130-
"""
131-
self.quantize_io = quantize_io
132-
return self
133-
134125
def set_input_order(
135126
self, input_order: Optional[str] = None
136127
) -> "ArmCompileSpecBuilder":
@@ -170,9 +161,6 @@ def build(self) -> List[CompileSpec]:
170161
)
171162
)
172163

173-
if self.quantize_io:
174-
self.compile_spec.append(CompileSpec("quantize_io", "True".encode()))
175-
176164
return self.compile_spec
177165

178166

@@ -183,13 +171,6 @@ def is_tosa(compile_spec: List[CompileSpec]) -> bool:
183171
return False
184172

185173

186-
def is_quantize_io(compile_specs: List[CompileSpec]) -> bool:
187-
for spec in compile_specs:
188-
if spec.key == "quantize_io" and spec.value.decode() == "True":
189-
return True
190-
return False
191-
192-
193174
def get_tosa_version(compile_spec: List[CompileSpec]) -> TosaSpecification:
194175
for spec in compile_spec:
195176
if spec.key == "tosa_version":

backends/arm/arm_partitioner.py

-4
Original file line number | Diff line number | Diff line change
@@ -12,7 +12,6 @@
1212
import torch
1313
from executorch.backends.arm.arm_backend import (
1414
ArmBackend,
15-
is_quantize_io,
1615
) # usort: skip
1716
from executorch.backends.arm.operator_support.tosa_supported_operators import (
1817
TOSASupportedOperators,
@@ -89,9 +88,6 @@ def is_partitioned(node: torch.fx.Node, tag=tag) -> bool:
8988
node.meta["delegation_tag"] = tag
9089
partition_tags[tag] = self.delegation_spec
9190

92-
if not is_quantize_io(self.delegation_spec.compile_specs):
93-
continue
94-
9591
# De-tag outmost q-nodes upwards and dq-nodes downwards.
9692
# De-tag if at least one input/ output is not part of partition.
9793
for node in partition.nodes:

backends/arm/test/common.py

-9
Original file line number | Diff line number | Diff line change
@@ -78,44 +78,38 @@ def get_tosa_compile_spec_unbuilt(
7878
ArmCompileSpecBuilder()
7979
.tosa_compile_spec(tosa_spec)
8080
.dump_intermediate_artifacts_to(custom_path)
81-
.set_quantize_io(True)
8281
)
8382

8483
return compile_spec_builder
8584

8685

8786
def get_u55_compile_spec(
88-
quantize_io=True,
8987
custom_path=None,
9088
reorder_inputs=None,
9189
) -> list[CompileSpec]:
9290
"""
9391
Default compile spec for Ethos-U55 tests.
9492
"""
9593
return get_u55_compile_spec_unbuilt(
96-
quantize_io=quantize_io,
9794
custom_path=custom_path,
9895
reorder_inputs=reorder_inputs,
9996
).build()
10097

10198

10299
def get_u85_compile_spec(
103-
quantize_io=True,
104100
custom_path=None,
105101
reorder_inputs=None,
106102
) -> list[CompileSpec]:
107103
"""
108104
Default compile spec for Ethos-U85 tests.
109105
"""
110106
return get_u85_compile_spec_unbuilt(
111-
quantize_io=quantize_io,
112107
custom_path=custom_path,
113108
reorder_inputs=reorder_inputs,
114109
).build()
115110

116111

117112
def get_u55_compile_spec_unbuilt(
118-
quantize_io=True,
119113
custom_path=None,
120114
reorder_inputs=None,
121115
) -> ArmCompileSpecBuilder:
@@ -133,15 +127,13 @@ def get_u55_compile_spec_unbuilt(
133127
memory_mode="Shared_Sram",
134128
extra_flags="--debug-force-regor --output-format=raw",
135129
)
136-
.set_quantize_io(quantize_io)
137130
.dump_intermediate_artifacts_to(artifact_path)
138131
.set_input_order(reorder_inputs)
139132
)
140133
return compile_spec
141134

142135

143136
def get_u85_compile_spec_unbuilt(
144-
quantize_io=True,
145137
custom_path=None,
146138
reorder_inputs=None,
147139
) -> list[CompileSpec]:
@@ -157,7 +149,6 @@ def get_u85_compile_spec_unbuilt(
157149
memory_mode="Shared_Sram",
158150
extra_flags="--output-format=raw",
159151
)
160-
.set_quantize_io(quantize_io)
161152
.dump_intermediate_artifacts_to(artifact_path)
162153
.set_input_order(reorder_inputs)
163154
)

backends/arm/test/ops/test_depthwise_conv.py

+10-20
Original file line number | Diff line number | Diff line change
@@ -259,58 +259,48 @@ def test_dw_conv_tosa_BI(self, test_name: str, model: torch.nn.Module):
259259

260260
@parameterized.expand(testsuite_conv2d[:4], skip_on_empty=True)
261261
@pytest.mark.corstone_fvp
262-
def test_dw_conv2d_u55_BI(
263-
self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
264-
):
262+
def test_dw_conv2d_u55_BI(self, test_name: str, model: torch.nn.Module):
265263
self._test_dw_conv_ethos_BI_pipeline(
266264
model,
267-
common.get_u55_compile_spec(quantize_io=set_quantize_io),
265+
common.get_u55_compile_spec(),
268266
model.get_inputs(),
269267
)
270268

271269
@parameterized.expand(testsuite_conv2d[4:], skip_on_empty=True)
272270
@pytest.mark.corstone_fvp
273271
@conftest.expectedFailureOnFVP # TODO: MLETORCH-516
274-
def test_dw_conv2d_u55_BI_xfails(
275-
self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = False
276-
):
272+
def test_dw_conv2d_u55_BI_xfails(self, test_name: str, model: torch.nn.Module):
277273
self._test_dw_conv_ethos_BI_pipeline(
278274
model,
279-
common.get_u55_compile_spec(quantize_io=set_quantize_io),
275+
common.get_u55_compile_spec(),
280276
model.get_inputs(),
281277
)
282278

283279
@parameterized.expand(testsuite_conv1d, skip_on_empty=True)
284280
@pytest.mark.corstone_fvp
285-
def test_dw_conv1d_u55_BI(
286-
self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
287-
):
281+
def test_dw_conv1d_u55_BI(self, test_name: str, model: torch.nn.Module):
288282
self._test_dw_conv_ethos_BI_pipeline(
289283
model,
290-
common.get_u55_compile_spec(quantize_io=set_quantize_io),
284+
common.get_u55_compile_spec(),
291285
model.get_inputs(),
292286
)
293287

294288
@parameterized.expand(testsuite_conv1d + testsuite_conv2d_u85)
295289
@pytest.mark.corstone_fvp
296-
def test_dw_conv_u85_BI(
297-
self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
298-
):
290+
def test_dw_conv_u85_BI(self, test_name: str, model: torch.nn.Module):
299291
self._test_dw_conv_ethos_BI_pipeline(
300292
model,
301-
common.get_u85_compile_spec(quantize_io=set_quantize_io),
293+
common.get_u85_compile_spec(),
302294
model.get_inputs(),
303295
)
304296

305297
# All test cases except 3x3_1x3x256x256_gp3_st1 have numerical issues on FVP. MLETORCH-520
306298
@parameterized.expand(testsuite_conv2d_u85_xfails)
307299
@pytest.mark.corstone_fvp
308300
@conftest.expectedFailureOnFVP
309-
def test_dw_conv_u85_BI_xfails(
310-
self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = True
311-
):
301+
def test_dw_conv_u85_BI_xfails(self, test_name: str, model: torch.nn.Module):
312302
self._test_dw_conv_ethos_BI_pipeline(
313303
model,
314-
common.get_u85_compile_spec(quantize_io=set_quantize_io),
304+
common.get_u85_compile_spec(),
315305
model.get_inputs(),
316306
)

examples/arm/aot_arm_compiler.py

-2
Original file line number | Diff line number | Diff line change
@@ -275,7 +275,6 @@ def get_compile_spec(
275275
memory_mode=memory_mode,
276276
extra_flags="--debug-force-regor --output-format=raw --verbose-operators --verbose-cycle-estimate",
277277
)
278-
.set_quantize_io(True)
279278
.set_input_order(reorder_inputs)
280279
)
281280
elif "ethos-u85" in target:
@@ -287,7 +286,6 @@ def get_compile_spec(
287286
memory_mode=memory_mode,
288287
extra_flags="--output-format=raw --verbose-operators --verbose-cycle-estimate",
289288
)
290-
.set_quantize_io(True)
291289
.set_input_order(reorder_inputs)
292290
)
293291

0 commit comments

Comments (0)