Skip to content

Commit ad312d9

Browse files
authored
Enable comprehension simplification in ruff rules (#23414)
Enable comprehension simplification rules (C4) for ruff and apply autofix.
1 parent a350040 commit ad312d9

File tree

68 files changed

+176
-180
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

68 files changed

+176
-180
lines changed

docs/python/_common/onnx_sphinx.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -282,7 +282,7 @@ def get_domain_list():
282282
"""
283283
Returns the list of available domains.
284284
"""
285-
return list(sorted(set(map(lambda s: s.domain, get_all_schemas_with_history()))))
285+
return sorted({s.domain for s in get_all_schemas_with_history()})
286286

287287

288288
def get_operator_schemas(op_name, version=None, domain=None):
@@ -779,9 +779,9 @@ def render(self, indent=""):
779779
name = op["name"]
780780
dom = self.domain.replace(".", "-")
781781
table_dom.append(f" * - :ref:`l-onnx-doc{dom}-{name}`")
782-
versions = list(reversed(sorted((k, v) for k, v in op["links"].items() if isinstance(k, int))))
782+
versions = sorted(((k, v) for k, v in op["links"].items() if isinstance(k, int)), reverse=True)
783783
col1 = ", ".join(f":ref:`{k} <{v}>`" for k, v in versions)
784-
diffs = list(reversed(sorted((k, v) for k, v in op["links"].items() if isinstance(k, tuple))))
784+
diffs = sorted(((k, v) for k, v in op["links"].items() if isinstance(k, tuple)), reverse=True)
785785
col2 = ", ".join(f":ref:`{k[1]}/{k[0]} <{v}>`" for k, v in diffs)
786786
table_dom.append(f" - {col1}")
787787
table_dom.append(f" - {col2}")

onnxruntime/python/onnxruntime_inference_collection.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -138,10 +138,10 @@ def set_provider_options(name, options):
138138
if len(providers) != len(provider_options):
139139
raise ValueError("'providers' and 'provider_options' should be the same length if both are given.")
140140

141-
if not all([isinstance(provider, str) for provider in providers]):
141+
if not all(isinstance(provider, str) for provider in providers):
142142
raise ValueError("Only string values for 'providers' are supported if 'provider_options' is given.")
143143

144-
if not all([isinstance(options_for_provider, dict) for options_for_provider in provider_options]):
144+
if not all(isinstance(options_for_provider, dict) for options_for_provider in provider_options):
145145
raise ValueError("'provider_options' values must be dicts.")
146146

147147
for name, options in zip(providers, provider_options, strict=False):
@@ -150,7 +150,7 @@ def set_provider_options(name, options):
150150
else:
151151
for provider in providers:
152152
if isinstance(provider, str):
153-
set_provider_options(provider, dict())
153+
set_provider_options(provider, {})
154154
elif (
155155
isinstance(provider, tuple)
156156
and len(provider) == 2

onnxruntime/python/tools/kernel_explorer/kernels/kernel_explorer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ def set_dispatch(name):
220220
from difflib import SequenceMatcher as Matcher
221221

222222
valid_names = list(_ke_context.dispatchable.keys())
223-
scored_names = list(reversed(sorted([(Matcher(None, name, a).ratio(), a) for a in valid_names])))
223+
scored_names = sorted([(Matcher(None, name, a).ratio(), a) for a in valid_names], reverse=True)
224224
top10 = "\n ".join([a for _, a in scored_names[:10]])
225225
msg = f"'{name}' is not registered for dispatch. Top 10 matches are:\n {top10}"
226226
print(msg)

onnxruntime/python/tools/profile_explorer/profile_explorer.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -200,7 +200,7 @@ def _print_op_kernel_mapping_info(cpu_df, gpu_df, num_runs, csv=None):
200200
# Count op occurrences in the selected runs
201201
op_counts = defaultdict(int)
202202
for op in cpu_df.T.to_dict().values():
203-
identifiers = tuple([op["name"], op["input_type_shape"]])
203+
identifiers = (op["name"], op["input_type_shape"])
204204
op_counts[identifiers] += 1
205205

206206
# Collect kernel stats: count/duration
@@ -212,15 +212,15 @@ def _print_op_kernel_mapping_info(cpu_df, gpu_df, num_runs, csv=None):
212212
input_type_shape = kernel["input_type_shape"]
213213
kernel_name = kernel["name"]
214214
dimensions = kernel["dimensions"]
215-
identifiers = tuple([op_name, input_type_shape, kernel_name, dimensions])
215+
identifiers = (op_name, input_type_shape, kernel_name, dimensions)
216216
stat_dict[identifiers]["count"] += 1
217217
stat_dict[identifiers]["duration"] += kernel["duration"]
218218

219219
# Create the DataFrame for kernel entries with op correlation info
220220
kernel_list = []
221221
for identifiers, stat in stat_dict.items():
222222
op_name, input_type_shape, kernel_name, dimensions = identifiers
223-
op_count = op_counts.get(tuple([op_name, input_type_shape]))
223+
op_count = op_counts.get((op_name, input_type_shape))
224224
if op_count is None:
225225
continue
226226
kernel_list.append(

onnxruntime/python/tools/quantization/base_quantizer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -118,9 +118,9 @@ def __init__(
118118
'Conv_4:0': [np.float32(1), np.float32(3.5)]
119119
}
120120
"""
121-
if tensors_range is not None and any(map(lambda t: not isinstance(t, TensorData), tensors_range.values())):
121+
if tensors_range is not None and any(not isinstance(t, TensorData) for t in tensors_range.values()):
122122
raise TypeError(
123-
f"tensors_range contains unexpected types {set(type(v) for v in tensors_range.values())}, not TensorData."
123+
f"tensors_range contains unexpected types { {type(v) for v in tensors_range.values()} }, not TensorData."
124124
)
125125
self.tensors_range = tensors_range
126126
self.nodes_to_quantize = nodes_to_quantize # specific nodes to quantize

onnxruntime/python/tools/quantization/calibrate.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -504,9 +504,9 @@ def compute_data(self) -> TensorsData:
504504

505505
if self.symmetric:
506506
max_absolute_value = np.max([np.abs(min_value_array), np.abs(max_value_array)], axis=0)
507-
pairs.append(tuple([-max_absolute_value, max_absolute_value]))
507+
pairs.append((-max_absolute_value, max_absolute_value))
508508
else:
509-
pairs.append(tuple([min_value_array, max_value_array]))
509+
pairs.append((min_value_array, max_value_array))
510510

511511
new_calibrate_tensors_range = TensorsData(
512512
CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs, strict=False))
@@ -823,7 +823,7 @@ def collect_absolute_value(self, name_to_arr):
823823
if isinstance(data_arr, list):
824824
for arr in data_arr:
825825
assert isinstance(arr, np.ndarray), f"Unexpected type {type(arr)} for tensor={tensor!r}"
826-
dtypes = set(a.dtype for a in data_arr)
826+
dtypes = {a.dtype for a in data_arr}
827827
assert len(dtypes) == 1, (
828828
f"The calibration expects only one element type but got {dtypes} for tensor={tensor!r}"
829829
)

onnxruntime/python/tools/quantization/execution_providers/qnn/mixed_precision_overrides_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ def apply(
178178
# Use type requests to "fix" tensor quantization overrides by adding
179179
# quantization type conversions where necessary.
180180
for tensor_name, type_req in type_requests.items():
181-
all_consumers = set([node.name for node in self.consumers.get(tensor_name, [])])
181+
all_consumers = {node.name for node in self.consumers.get(tensor_name, [])}
182182
has_producer_req = type_req.producer is not None
183183
has_consumer_req = bool(type_req.consumers)
184184

onnxruntime/python/tools/quantization/matmul_4bits_quantizer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1186,7 +1186,7 @@ def _generate_q4_node_config(self):
11861186
}
11871187
for node in self.model.model.graph.node:
11881188
if node.op_type in ["MatMul"]:
1189-
if not all([self.model.get_initializer(i) is None for i in node.input]):
1189+
if not all(self.model.get_initializer(i) is None for i in node.input):
11901190
q4_node_config[node.name] = template_config_q4
11911191
return q4_node_config
11921192

onnxruntime/python/tools/quantization/onnx_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -576,7 +576,7 @@ def _check_init(self, init, test=None):
576576
if init.data_type == onnx.TensorProto.FLOAT8E4M3FN:
577577
if init.HasField("raw_data"):
578578
b = list(init.raw_data)
579-
if any(map(lambda i: (i & 127) == 127, b)):
579+
if any((i & 127) == 127 for i in b):
580580
raise ValueError(f"Initializer {init.name!r} has nan.")
581581
return init
582582

onnxruntime/python/tools/quantization/qdq_loss_debug.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -316,8 +316,8 @@ def create_weight_matching(float_model_path: str, qdq_model_path: str) -> dict[s
316316
# Perform dequantization:
317317
if weight_scale.size == weight_zp.size == 1:
318318
# Avoids the confusion between a scaler and a tensor of one element.
319-
weight_scale = weight_scale.reshape(tuple())
320-
weight_zp = weight_zp.reshape(tuple())
319+
weight_scale = weight_scale.reshape(())
320+
weight_zp = weight_zp.reshape(())
321321
if weight_scale.shape != weight_zp.shape:
322322
raise RuntimeError(
323323
f"scale and zero_point must have the same shape but {weight_scale.shape} != {weight_zp.shape}"

0 commit comments

Comments (0)