
Commit faea42a

Bump ruff to 0.3.2 and black to 24 (microsoft#19878)
### Motivation and Context

Routing updates
1 parent 9e0a0f0 commit faea42a


103 files changed: +702 −764 lines changed


cgmanifests/generate_cgmanifest.py (+2 −2)

@@ -115,8 +115,8 @@ def normalize_path_separators(path):
 submodule_lines = proc.stdout.splitlines()
 for submodule_line in submodule_lines:
     (absolute_path, url, commit) = submodule_line.split(" ")
-    git_deps[GitDep(commit, url)] = "git submodule at {}".format(
-        normalize_path_separators(os.path.relpath(absolute_path, REPO_DIR))
+    git_deps[GitDep(commit, url)] = (
+        f"git submodule at {normalize_path_separators(os.path.relpath(absolute_path, REPO_DIR))}"
     )

 with open(os.path.join(SCRIPT_DIR, "..", "cmake", "deps.txt")) as f:
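Most of the churn in this commit is mechanical: ruff's pyupgrade rule UP032 rewrites str.format calls as f-strings, and black 24 then parenthesizes any assignment whose value no longer fits on one line, which is exactly the shape of the hunk above. A minimal standalone sketch of the before/after (the paths here are hypothetical stand-ins, not the script's real data):

import os

REPO_DIR = os.path.abspath(".")  # hypothetical stand-in for the repo root
absolute_path = os.path.join(REPO_DIR, "cmake", "external", "onnx")

# Before: str.format, flagged by ruff as UP032.
old = "git submodule at {}".format(os.path.relpath(absolute_path, REPO_DIR))

# After: an f-string; black wraps the long value in parentheses when needed.
new = f"git submodule at {os.path.relpath(absolute_path, REPO_DIR)}"

assert old == new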

docs/python/examples/plot_train_convert_predict.py (+1 −1)

@@ -134,7 +134,7 @@ def loop(X_test, fct, n=None):
     nrow = X_test.shape[0]
     if n is None:
         n = nrow
-    for i in range(0, n):
+    for i in range(n):
         im = i % nrow
         fct(X_test[im : im + 1])

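The range(0, n) → range(n) rewrites that recur throughout this commit come from ruff's flake8-pie rule PIE808 (unnecessary start argument): range starts at 0 by default, so the two spellings are equivalent. A one-line check:

assert list(range(0, 5)) == list(range(5)) == [0, 1, 2, 3, 4]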

onnxruntime/python/tools/microbench/benchmark.py (+3 −6)

@@ -147,20 +147,17 @@ def __init__(self, args):

     @classmethod
     @abstractmethod
-    def create_inputs_outputs(cls, op_param):
-        ...
+    def create_inputs_outputs(cls, op_param): ...

     def add_case(self, op_param, model):
         self.cases += [(op_param, model)]

     @abstractmethod
-    def create_cases(self):
-        ...
+    def create_cases(self): ...

     @classmethod
     @abstractmethod
-    def case_profile(cls, op_param, time):
-        ...
+    def case_profile(cls, op_param, time): ...

     def benchmark(self):
         self.create_cases()
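black 24's stable style collapses function bodies that consist solely of ... onto the def line, which is why each abstract stub above loses a line. A minimal sketch of the resulting shape (a stripped-down, hypothetical version of the class being edited):

from abc import ABC, abstractmethod


class BenchmarkOp(ABC):
    @classmethod
    @abstractmethod
    def create_inputs_outputs(cls, op_param): ...  # `...` body stays on the def line under black 24

    @abstractmethod
    def create_cases(self): ...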

onnxruntime/python/tools/quantization/base_quantizer.py (+4 −8)

@@ -187,27 +187,23 @@ def check_opset_version(self):

         if opset_version == 10:
             logging.warning(
-                "The original model opset version is {}, which does not support node fusions. Please update the model to opset >= 11 for better performance.".format(
-                    opset_version
-                )
+                f"The original model opset version is {opset_version}, which does not support node fusions. Please update the model to opset >= 11 for better performance."
             )
             return 10

         if opset_version < 10:
             logging.warning(
-                "The original model opset version is {}, which does not support quantization. Please update the model to opset >= 11. Updating the model automatically to opset 11. Please verify the quantized model.".format(
-                    opset_version
-                )
+                f"The original model opset version is {opset_version}, which does not support quantization. Please update the model to opset >= 11. Updating the model automatically to opset 11. Please verify the quantized model."
             )
             self.model.model.opset_import.remove(ai_onnx_domain[0])
             self.model.model.opset_import.extend([onnx.helper.make_opsetid("", 11)])
             opset_version = 11

         if opset_version < 19 and self.weight_qType == onnx.TensorProto.FLOAT8E4M3FN:
             logging.warning(
-                "The original model opset version is {}, which does not support quantization to float 8. "
+                f"The original model opset version is {opset_version}, which does not support quantization to float 8. "
                 "Please update the model to opset >= 19. Updating the model automatically to opset 19. "
-                "Please verify the quantized model.".format(opset_version)
+                "Please verify the quantized model."
             )
             self.model.model.opset_import.remove(ai_onnx_domain[0])
             self.model.model.opset_import.extend([onnx.helper.make_opsetid("", 19)])
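Note the third warning: the message is three implicitly concatenated string literals, and only the fragment containing the {opset_version} placeholder becomes an f-string; the other two literals stay plain, since an f prefix would add nothing there. A standalone sketch with a hypothetical value:

opset_version = 18  # hypothetical value

message = (
    f"The original model opset version is {opset_version}, which does not support quantization to float 8. "
    "Please update the model to opset >= 19. Updating the model automatically to opset 19. "
    "Please verify the quantized model."
)
assert "18" in message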

onnxruntime/python/tools/quantization/calibrate.py (+1 −5)

@@ -918,11 +918,7 @@ def compute_entropy(self):
         thresholds_dict = {}  # per tensor thresholds

         print(f"Number of tensors : {len(histogram_dict)}")
-        print(
-            "Number of histogram bins : {} (The number may increase depends on the data it collects)".format(
-                self.num_bins
-            )
-        )
+        print(f"Number of histogram bins : {self.num_bins} (The number may increase depends on the data it collects)")
         print(f"Number of quantized bins : {self.num_quantized_bins}")

         for tensor, histogram in histogram_dict.items():

onnxruntime/python/tools/quantization/matmul_4bits_quantizer.py (+1 −1)

@@ -216,7 +216,7 @@ def pack_on_row_fast_248bit(pack_tensor, ori_int_tensor, bits):
         pack_tensor = pack_tensor.T
     if bits in [2, 4, 8]:
         compress_ratio = pack_tensor.element_size() * 8 // bits
-        for j in range(0, compress_ratio):
+        for j in range(compress_ratio):
             pack_tensor[0:] |= ori_int_tensor[j::compress_ratio] << (bits * (j))
     else:
         raise NotImplementedError("Only 2,4,8 bits are supported.")
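For context on the loop being touched: it packs compress_ratio low-bit values into each element of a wider integer tensor, striding through the source so that value j of every group lands at bit offset bits * j. A numpy sketch of the same idea for 4-bit values in uint8 storage (illustrative only, not the module's API):

import numpy as np

bits = 4
ori = np.arange(8, dtype=np.uint8) % 16  # eight 4-bit values
compress_ratio = 8 // bits  # two 4-bit values per uint8
packed = np.zeros(ori.size // compress_ratio, dtype=np.uint8)
for j in range(compress_ratio):
    packed |= ori[j::compress_ratio] << (bits * j)

# Round-trip: extract each 4-bit field and interleave back.
unpacked = np.stack([(packed >> (bits * j)) & 0xF for j in range(compress_ratio)], axis=1).ravel()
assert (unpacked == ori).all()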

onnxruntime/python/tools/quantization/onnx_model.py (+1 −5)

@@ -79,11 +79,7 @@ def _clean_initializers_helper(graph, model):
             graph.input.remove(name_to_input[initializer.name])
         except StopIteration:
             if model.ir_version < 4:
-                print(
-                    "Warning: invalid weight name {} found in the graph (not a graph input)".format(
-                        initializer.name
-                    )
-                )
+                print(f"Warning: invalid weight name {initializer.name} found in the graph (not a graph input)")

     requesting_tensor_names.difference_update(input.name for input in graph.input)


onnxruntime/python/tools/quantization/operators/concat.py (+2 −2)

@@ -30,7 +30,7 @@ def quantize(self):
             zero_point_names,
             scale_names,
             nodes,
-        ) = self.quantizer.quantize_activation(node, [*range(0, len(node.input))])
+        ) = self.quantizer.quantize_activation(node, [*range(len(node.input))])
         if not data_found or q_input_names is None:
             return super().quantize()

@@ -52,7 +52,7 @@ def quantize(self):
         qnode_name = node.name + "_quant" if node.name else ""

         qlconcat_inputs = [output_scale_name, output_zp_name]
-        for i in range(0, len(q_input_names)):
+        for i in range(len(q_input_names)):
             qlconcat_inputs.extend([q_input_names[i], scale_names[i], zero_point_names[i]])
         qlconcat_node = onnx.helper.make_node(
             "QLinearConcat", qlconcat_inputs, [quantized_output_value.q_name], qnode_name, **kwargs

onnxruntime/python/tools/quantization/operators/gemm.py (+1 −3)

@@ -157,7 +157,5 @@ def quantize(self):
             set_default_beta(self.node)
         else:
             logging.warning(
-                "Bias of Gemm node '{}' is not constant. Please exclude this node for better performance.".format(
-                    self.node.name
-                )
+                f"Bias of Gemm node '{self.node.name}' is not constant. Please exclude this node for better performance."
             )

onnxruntime/python/tools/quantization/qdq_quantizer.py (+1 −3)

@@ -153,9 +153,7 @@ def _is_tensor_quantizable(self, tensor_name):
                 return True
         else:
             logging.warning(
-                "failed to infer the type of tensor: {}. Skip to quantize it. Please check if it is expected.".format(
-                    tensor_name
-                )
+                f"failed to infer the type of tensor: {tensor_name}. Skip to quantize it. Please check if it is expected."
             )

         return False

onnxruntime/python/tools/quantization/quant_utils.py (+2 −2)

@@ -276,7 +276,7 @@ def compute_scale_zp_float8(element_type, std):
         from onnx.reference.custom_element_types import float8e4m3fn

         zp_dtype = float8e4m3fn
-        all_values = [float8e4m3_to_float32(i) for i in range(0, 256)]
+        all_values = [float8e4m3_to_float32(i) for i in range(256)]
         values = numpy.array(
             [f for f in all_values if not numpy.isnan(f) and not numpy.isinf(f)], dtype=numpy.float32
         )

@@ -530,7 +530,7 @@ def get_elem_index(elem_name, elem_list):
     Helper function to return index of an item in a node list
     """
     elem_idx = -1
-    for i in range(0, len(elem_list)):
+    for i in range(len(elem_list)):
         if elem_list[i] == elem_name:
             elem_idx = i
     return elem_idx

onnxruntime/python/tools/symbolic_shape_infer.py (+17 −41)

@@ -282,7 +282,7 @@ def _add_suggested_merge(self, symbols, apply=False):
         # when nothing to map to, use the shorter one
         if map_to is None:
             if self.verbose_ > 0:
-                logger.warning("Potential unsafe merge between symbolic expressions: ({})".format(",".join(symbols)))
+                logger.warning("Potential unsafe merge between symbolic expressions: (%s)", ",".join(symbols))
             symbols_list = list(symbols)
             lens = [len(s) for s in symbols_list]
             map_to = symbols_list[lens.index(min(lens))]

@@ -335,10 +335,7 @@ def _merge_symbols(self, dims):
             int_dim = is_int.index(1)
             if self.verbose_ > 0:
                 logger.debug(
-                    "dim {} has been merged with value {}".format(
-                        unique_dims[:int_dim] + unique_dims[int_dim + 1 :],
-                        unique_dims[int_dim],
-                    )
+                    f"dim {unique_dims[:int_dim] + unique_dims[int_dim + 1 :]} has been merged with value {unique_dims[int_dim]}"
                 )
             self._check_merged_dims(unique_dims, allow_broadcast=False)
             return unique_dims[int_dim]

@@ -379,7 +376,7 @@ def _broadcast_shapes(self, shape1, shape2):
                 if self.auto_merge_:
                     self._add_suggested_merge([dim1, dim2], apply=True)
                 else:
-                    logger.warning("unsupported broadcast between " + str(dim1) + " " + str(dim2))
+                    logger.warning("unsupported broadcast between " + str(dim1) + " " + str(dim2))  # noqa: G003
             new_shape = [new_dim, *new_shape]
         return new_shape

@@ -663,12 +660,7 @@ def _new_symbolic_dim(self, prefix, dim):

     def _new_symbolic_dim_from_output(self, node, out_idx=0, dim=0):
         return self._new_symbolic_dim(
-            "{}{}_{}_o{}_".format(
-                node.op_type,
-                self.prefix_,
-                list(self.out_mp_.graph.node).index(node),
-                out_idx,
-            ),
+            f"{node.op_type}{self.prefix_}_{list(self.out_mp_.graph.node).index(node)}_o{out_idx}_",
             dim,
         )

@@ -1216,9 +1208,7 @@ def _infer_Loop(self, node):  # noqa: N802
         if need_second_infer:
             if self.verbose_ > 2:
                 logger.debug(
-                    "Rerun Loop: {}({}...), because of sequence in loop carried variables".format(
-                        node.name, node.output[0]
-                    )
+                    f"Rerun Loop: {node.name}({node.output[0]}...), because of sequence in loop carried variables"
                 )
             self._onnx_infer_subgraph(node, subgraph, inc_subgraph_id=False)

@@ -1843,7 +1833,7 @@ def handle_negative_index(index, bound):
         axes = self._try_get_value(node, 3)
         steps = self._try_get_value(node, 4)
         if axes is None and not (starts is None and ends is None):
-            axes = list(range(0, len(starts if starts is not None else ends)))
+            axes = list(range(len(starts if starts is not None else ends)))
         if steps is None and not (starts is None and ends is None):
             steps = [1] * len(starts if starts is not None else ends)
         axes = as_list(axes, keep_none=True)

@@ -2669,11 +2659,9 @@ def get_prereq(node):
                     break

             if self.verbose_ > 2:
-                logger.debug(node.op_type + ": " + node.name)
+                logger.debug(node.op_type + ": " + node.name)  # noqa: G003
                 for i, name in enumerate(node.input):
-                    logger.debug(
-                        " Input {}: {} {}".format(i, name, "initializer" if name in self.initializers_ else "")
-                    )
+                    logger.debug(" Input %s: %s %s", i, name, "initializer" if name in self.initializers_ else "")

             # onnx automatically merge dims with value, i.e. Mul(['aaa', 'bbb'], [1000, 1]) -> [1000, 'bbb']
             # symbolic shape inference needs to apply merge of 'aaa' -> 1000 in this case

@@ -2722,7 +2710,7 @@ def get_prereq(node):
                 seq_cls_type = out_type.sequence_type.elem_type.WhichOneof("value")
                 if seq_cls_type == "tensor_type":
                     logger.debug(
-                        " {}: sequence of {} {}".format(
+                        " {}: sequence of {} {}".format(  # noqa: G001
                             node.output[i_o],
                             str(get_shape_from_value_info(vi)),
                             onnx.TensorProto.DataType.Name(

@@ -2740,14 +2728,10 @@ def get_prereq(node):
                 out_type_undefined = out_type.tensor_type.elem_type == onnx.TensorProto.UNDEFINED
                 if self.verbose_ > 2:
                     logger.debug(
-                        " {}: {} {}".format(
-                            node.output[i_o],
-                            str(out_shape),
-                            onnx.TensorProto.DataType.Name(vi.type.tensor_type.elem_type),
-                        )
+                        f" {node.output[i_o]}: {out_shape!s} {onnx.TensorProto.DataType.Name(vi.type.tensor_type.elem_type)}"
                     )
                     if node.output[i_o] in self.sympy_data_:
-                        logger.debug(" Sympy Data: " + str(self.sympy_data_[node.output[i_o]]))
+                        logger.debug(" Sympy Data: " + str(self.sympy_data_[node.output[i_o]]))  # noqa: G003

                 # onnx >= 1.11.0, use unk__#index instead of None when the shape dim is uncertain
                 if (

@@ -2848,24 +2832,16 @@ def get_prereq(node):
                 if self.verbose_ > 0:
                     if is_unknown_op:
                         logger.debug(
-                            "Possible unknown op: {} node: {}, guessing {} shape".format(
-                                node.op_type, node.name, vi.name
-                            )
+                            f"Possible unknown op: {node.op_type} node: {node.name}, guessing {vi.name} shape"
                         )
                         if self.verbose_ > 2:
-                            logger.debug(
-                                " {}: {} {}".format(
-                                    node.output[i_o],
-                                    str(new_shape),
-                                    vi.type.tensor_type.elem_type,
-                                )
-                            )
+                            logger.debug(f" {node.output[i_o]}: {new_shape!s} {vi.type.tensor_type.elem_type}")

                 self.run_ = True
                 continue  # continue the inference after guess, no need to stop as no merge is needed

             if self.verbose_ > 0 or not self.auto_merge_ or out_type_undefined:
-                logger.debug("Stopping at incomplete shape inference at " + node.op_type + ": " + node.name)
+                logger.debug("Stopping at incomplete shape inference at %s: %s", node.op_type, node.name)
                 logger.debug("node inputs:")
                 for i in node.input:
                     if i in self.known_vi_:

@@ -2879,7 +2855,7 @@ def get_prereq(node):
                     else:
                         logger.debug(f"not in known_vi_ for {o}")
                 if self.auto_merge_ and not out_type_undefined:
-                    logger.debug("Merging: " + str(self.suggested_merge_))
+                    logger.debug("Merging: " + str(self.suggested_merge_))  # noqa: G003
                 return False

         self.run_ = False

@@ -2964,9 +2940,9 @@ def parse_arguments():

 if __name__ == "__main__":
     args = parse_arguments()
-    logger.info("input model: " + args.input)
+    logger.info("input model: " + args.input)  # noqa: G003
     if args.output:
-        logger.info("output model " + args.output)
+        logger.info("output model " + args.output)  # noqa: G003
     logger.info("Doing symbolic shape inference...")
     out_mp = SymbolicShapeInference.infer_shapes(
         onnx.load(args.input),
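This file shows both halves of the flake8-logging-format (G) cleanup applied across the commit: where a call is easy to rewrite, the message switches to lazy %-style arguments so that interpolation only happens if the record is actually emitted; where string concatenation or str.format is deliberately kept, a # noqa: G003 (concatenation) or # noqa: G001 (str.format) marker suppresses the warning instead. A sketch of the preferred lazy form:

import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
symbols = ["N", "batch"]

# Eager: the message string is fully built before logging sees it (ruff flags this as G003).
logger.warning("Potential unsafe merge between symbolic expressions: (" + ",".join(symbols) + ")")

# Lazy: logging performs the %-interpolation only when the record is emitted.
logger.warning("Potential unsafe merge between symbolic expressions: (%s)", ",".join(symbols))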

onnxruntime/python/tools/tensorrt/perf/benchmark.py (+2 −2)

@@ -790,7 +790,7 @@ def skip_ep(model_name, ep, model_to_fail_ep):

     # if ep in fail_ep_list and fail_ep_list[ep] == "runtime error":
     if ep in fail_ep_list:
-        logger.info("Skip testing " + model_name + " using " + ep + " since it has some issues.")
+        logger.info("Skip testing " + model_name + " using " + ep + " since it has some issues.")  # noqa: G003
         return True

     return False

@@ -925,7 +925,7 @@ def find_model_path(path):

     logger.info(target_model_path)
     if len(target_model_path) > 1:
-        logger.error("We expect to find only one model in " + path)
+        logger.error("We expect to find only one model in " + path)  # noqa: G003
         raise

     return target_model_path[0]

onnxruntime/python/tools/tensorrt/perf/benchmark_wrapper.py (+3 −3)

@@ -80,9 +80,9 @@ def main():
     benchmark = is_benchmark_mode(args.running_mode)  # noqa: F405

     for model, model_info in models.items():
-        logger.info("\n" + "=" * 40 + "=" * len(model))  # noqa: F405
-        logger.info("=" * 20 + model + "=" * 20)  # noqa: F405
-        logger.info("=" * 40 + "=" * len(model))  # noqa: F405
+        logger.info("\n" + "=" * 40 + "=" * len(model))  # noqa: F405, G003
+        logger.info("=" * 20 + model + "=" * 20)  # noqa: F405, G003
+        logger.info("=" * 40 + "=" * len(model))  # noqa: F405, G003

         model_info["model_name"] = model


onnxruntime/python/tools/transformers/benchmark.py (+2 −2)

@@ -802,7 +802,7 @@ def main():
         try:
             os.mkdir(args.cache_dir)
         except OSError:
-            logger.error("Creation of the directory %s failed" % args.cache_dir)
+            logger.error("Creation of the directory %s failed" % args.cache_dir)  # noqa: G002

     enable_torch = "torch" in args.engines
     enable_torch2 = "torch2" in args.engines

@@ -921,7 +921,7 @@ def main():
                 args,
             )
         except Exception:
-            logger.error("Exception", exc_info=True)
+            logger.exception("Exception")

     time_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
     if model_fusion_statistics:
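The second hunk is the G201 fix: inside an except block, logger.error(..., exc_info=True) is equivalent to logger.exception(...), which logs at ERROR level and attaches the active traceback automatically. (benchmark_helper.py below keeps the old spelling and suppresses the warning with # noqa: G201 instead.) A sketch:

import logging

logging.basicConfig()
logger = logging.getLogger(__name__)

try:
    1 / 0  # provoke an error
except Exception:
    logger.exception("Exception")  # same as logger.error("Exception", exc_info=True)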

onnxruntime/python/tools/transformers/benchmark_helper.py (+1 −1)

@@ -142,7 +142,7 @@ def create_onnxruntime_session(

         session = onnxruntime.InferenceSession(onnx_model_path, sess_options, providers=providers)
     except Exception:
-        logger.error("Exception", exc_info=True)
+        logger.error("Exception", exc_info=True)  # noqa: G201

     return session


onnxruntime/python/tools/transformers/bert_perf_test.py (+3 −3)

@@ -232,9 +232,9 @@ def onnxruntime_inference(session, all_inputs, output_names):
 def to_string(model_path, session, test_setting):
     sess_options = session.get_session_options()
     option = f"model={os.path.basename(model_path)},"
-    option += "graph_optimization_level={},intra_op_num_threads={},".format(
-        sess_options.graph_optimization_level, sess_options.intra_op_num_threads
-    ).replace("GraphOptimizationLevel.ORT_", "")
+    option += f"graph_optimization_level={sess_options.graph_optimization_level},intra_op_num_threads={sess_options.intra_op_num_threads},".replace(
+        "GraphOptimizationLevel.ORT_", ""
+    )

     option += f"batch_size={test_setting.batch_size},sequence_length={test_setting.sequence_length},"
     option += f"test_cases={test_setting.test_cases},test_times={test_setting.test_times},"
