Skip to content

Commit b599a99

Browse files
cshiels-ieclaude
and committed
Fix SonarCloud MINOR CODE_SMELL issues: S7508, S6353, S7504, S7500, S8521, S7519, S7494
- Fix python:S7508 (redundant list() before sorted()) in anonymized_rollups.py, credentials_anonymized_rollup.py, events_modules_anonymized_rollup.py, test_jobs_anonymized_rollups.py, test_multiple_files.py, report/base.py: Removed unnecessary list() wrapping since sorted() accepts any iterable. - Fix python:S6353 (use \w instead of [A-Za-z0-9_]) in events_modules_anonymized_rollup.py: Simplified regex patterns to use the \w shorthand. - Fix python:S7504 (unnecessary list() on iterable) in renewal_guidance.py: Removed redundant list() in list comprehension iteration. - Fix python:S7500 (replace comprehension with collection constructor) in conftest.py and report_saver_s3.py: Simplified unnecessary comprehension wrappers. - Fix python:S8521 (unnecessary .keys() call) in test_gathering.py and test_slicing.py: Removed .keys() calls since dict membership testing checks keys implicitly. - Fix python:S7519 (use dict.fromkeys) in management_utility.py: Replaced a dict comprehension with a dict.fromkeys() call. - Fix python:S7494 (use set comprehension) in main_jobevent_service.py: Replaced a set() constructor wrapping a generator expression with a set comprehension literal. Co-Authored-By: Claude Sonnet 4.6 (1M context) <noreply@anthropic.com>
1 parent 1254f38 commit b599a99

13 files changed

Lines changed: 57 additions & 57 deletions

File tree

metrics_utility/anonymized_rollups/anonymized_rollups.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ def _merge_ansible_versions(jobs_by_job_type: List[Dict[str, Any]]) -> List[str]
275275
ansible_versions = job.get('ansible_versions', [])
276276
if isinstance(ansible_versions, list):
277277
ansible_versions_set.update(ansible_versions)
278-
return sorted(list(ansible_versions_set)) if ansible_versions_set else []
278+
return sorted(ansible_versions_set) if ansible_versions_set else []
279279

280280

281281
def _calculate_execution_environments_total(execution_environments: Dict[str, Any]) -> Any:

metrics_utility/anonymized_rollups/credentials_anonymized_rollup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ def merge(self, data_all, data_new):
6767
credential_types_new = set(data_new.get('credential_types', []))
6868

6969
# Union the sets and convert back to sorted list
70-
credential_types_merged = sorted(list(credential_types_all.union(credential_types_new)))
70+
credential_types_merged = sorted(credential_types_all.union(credential_types_new))
7171

7272
return {
7373
'credential_types': credential_types_merged,

metrics_utility/anonymized_rollups/events_modules_anonymized_rollup.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,8 @@
1111

1212
# Regex pattern to match collection names (e.g., namespace.collection.role or namespace.collection.role.task)
1313
# Pattern is safe from reDOS: uses non-capturing groups and non-nested quantifiers
14-
_COLLECTION_RE = re.compile(r'^([A-Za-z0-9_]+)\.([A-Za-z0-9_]+)\.[A-Za-z0-9_]+(?:\.[A-Za-z0-9_]+)*$')
15-
_COLLECTION_PATTERN = r'^([A-Za-z0-9_]+\.[A-Za-z0-9_]+)\.[A-Za-z0-9_]+(?:\.[A-Za-z0-9_]+)*$'
14+
_COLLECTION_RE = re.compile(r'^(\w+)\.(\w+)\.\w+(?:\.\w+)*$')
15+
_COLLECTION_PATTERN = r'^(\w+\.\w+)\.\w+(?:\.\w+)*$'
1616

1717

1818
def extract_collection_name(x: str | None) -> str | None:
@@ -138,7 +138,7 @@ def _merge_list_columns(self, item_all, item_new, merged_item):
138138
list_new = item_new.get(col) if item_new.get(col) is not None else []
139139
set_all = set(list_all) if isinstance(list_all, list) else set()
140140
set_new = set(list_new) if isinstance(list_new, list) else set()
141-
merged_item[col] = sorted(list(set_all.union(set_new)))
141+
merged_item[col] = sorted(set_all.union(set_new))
142142

143143
def _merge_single_item(self, item_all, item_new):
144144
"""Merge a single item from all and new data."""
@@ -184,7 +184,7 @@ def _merge_unique_modules(self, data_all, data_new):
184184
"""Merge unique_modules lists (union and sort)."""
185185
unique_modules_all = set(data_all.get('unique_modules', []))
186186
unique_modules_new = set(data_new.get('unique_modules', []))
187-
return sorted(list(unique_modules_all.union(unique_modules_new)))
187+
return sorted(unique_modules_all.union(unique_modules_new))
188188

189189
def _merge_modules_per_playbook(self, data_all, data_new):
190190
"""Merge modules_per_playbook dicts (union lists per playbook)."""
@@ -197,14 +197,14 @@ def _merge_modules_per_playbook(self, data_all, data_new):
197197
list_new = modules_per_playbook_new.get(playbook, []) or []
198198
set_all = set(list_all) if isinstance(list_all, list) else set()
199199
set_new = set(list_new) if isinstance(list_new, list) else set()
200-
modules_per_playbook[playbook] = sorted(list(set_all.union(set_new)))
200+
modules_per_playbook[playbook] = sorted(set_all.union(set_new))
201201
return modules_per_playbook
202202

203203
def _merge_unique_hosts(self, data_all, data_new):
204204
"""Merge unique_hosts lists (union and sort)."""
205205
unique_hosts_all = set(data_all.get('unique_hosts', []))
206206
unique_hosts_new = set(data_new.get('unique_hosts', []))
207-
return sorted(list(unique_hosts_all.union(unique_hosts_new)))
207+
return sorted(unique_hosts_all.union(unique_hosts_new))
208208

209209
def merge(self, data_all, data_new):
210210
"""
@@ -515,7 +515,7 @@ def _compute_all_stats(self, task_summary):
515515
def _convert_set_or_list_to_sorted_list(value):
516516
"""Convert set or list to sorted list, return empty list for other types."""
517517
if isinstance(value, set):
518-
return sorted(list(value))
518+
return sorted(value)
519519
if isinstance(value, list):
520520
return value
521521
return []
@@ -557,15 +557,15 @@ def _convert_stats_to_json(self, module_stats, collection_stats, role_stats):
557557

558558
def _compute_unique_metadata(self, task_summary):
559559
"""Compute unique_modules, modules_per_playbook, and unique_hosts."""
560-
unique_modules = sorted(list(set(task_summary['module_name'].dropna().unique())))
560+
unique_modules = sorted(set(task_summary['module_name'].dropna().unique()))
561561

562562
modules_per_playbook = {}
563563
for playbook in task_summary['playbook'].dropna().unique():
564-
modules_in_playbook = sorted(list(set(task_summary[task_summary['playbook'] == playbook]['module_name'].dropna().unique())))
564+
modules_in_playbook = sorted(set(task_summary[task_summary['playbook'] == playbook]['module_name'].dropna().unique()))
565565
modules_per_playbook[playbook] = modules_in_playbook
566566

567567
host_sets = [s for s in task_summary['host_ids'].dropna() if isinstance(s, set)]
568-
unique_hosts = sorted(list(set().union(*host_sets))) if host_sets else []
568+
unique_hosts = sorted(set().union(*host_sets)) if host_sets else []
569569

570570
return unique_modules, modules_per_playbook, unique_hosts
571571

metrics_utility/automation_controller_billing/dedup/renewal_guidance.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def _build_deduped_record(self, dupes, latest_hostname, dupes_clean=None):
4747

4848
def stringify(self, value):
4949
"""Convert a set of values to a comma-separated string, filtering out None."""
50-
return ', '.join([v for v in list(value) if v is not None])
50+
return ', '.join([v for v in value if v is not None])
5151

5252
def run(self):
5353
"""Abstract method to be implemented by subclasses."""

metrics_utility/automation_controller_billing/report/base.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -86,14 +86,14 @@ def add_dedup_labels_if_needed(self, labels, column_names):
8686
def convert_cell(self, cell):
8787
# If the cell is a dictionary, convert each set value to a sorted list, then dump as a JSON string.
8888
if isinstance(cell, dict):
89-
new_cell = {k: sorted(list(v)) if isinstance(v, set) else v for k, v in cell.items()}
89+
new_cell = {k: sorted(v) if isinstance(v, set) else v for k, v in cell.items()}
9090
return json.dumps(new_cell)
9191
# If the cell itself is a set, convert it to a sorted list and then to a JSON string.
9292
elif isinstance(cell, set):
93-
return json.dumps(sorted(list(cell)))
93+
return json.dumps(sorted(cell))
9494
# If the cell is a list, convert any set elements inside to sorted lists and dump as a JSON string.
9595
elif isinstance(cell, list):
96-
new_cell = [sorted(list(item)) if isinstance(item, set) else item for item in cell]
96+
new_cell = [sorted(item) if isinstance(item, set) else item for item in cell]
9797
# Sort the list itself if it contains strings
9898
if new_cell and all(isinstance(item, str) for item in new_cell):
9999
new_cell = sorted(new_cell)

metrics_utility/automation_controller_billing/report_saver/report_saver_s3.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ def __init__(self, extra_params):
1616
self.s3_handler = S3Handler(params=self.extra_params)
1717

1818
def report_exist(self):
19-
return len([file for file in self.s3_handler.list_files(self.report_spreadsheet_destination_path)]) > 0
19+
return len(list(self.s3_handler.list_files(self.report_spreadsheet_destination_path))) > 0
2020

2121
def save(self, report_spreadsheet):
2222
with tempfile.TemporaryDirectory(prefix='report_saver_billing_data_') as temp_dir:

metrics_utility/library/collectors/controller/main_jobevent_service.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ def main_jobevent_service(*, db=None, since=None, until=None, output=DataframeOu
3131
# We are loading the finished jobs then we are filtering
3232
# for the job_created, this cannot be done by simple joins because
3333
# job_created is partitioned and partitions pruning dont work with joins
34-
job_ids_set = set(job_id for job_id, _ in jobs)
34+
job_ids_set = {job_id for job_id, _ in jobs}
3535

3636
# Extract unique hour boundaries from job_created timestamps
3737
# This reduces potentially 100K timestamps down to ~100-1000 hourly ranges

metrics_utility/management_utility.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ def fetch_command(self, subcommand):
7777
def get_commands():
7878
commands = {}
7979
path = os.path.join(os.path.dirname(__file__), 'management')
80-
commands.update({name: 'metrics_utility' for name in management.find_commands(path)})
80+
commands.update(dict.fromkeys(management.find_commands(path), 'metrics_utility'))
8181
return commands
8282

8383
def run_subcommand(self, subcommand, argv):

metrics_utility/test/base/functional/test_gathering.py

Lines changed: 34 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -43,8 +43,8 @@ def test_json_collections(collector):
4343
files[member.name] = archive.extractfile(member)
4444

4545
assert_common_files(files)
46-
assert './json_collection_1.json' in files.keys()
47-
assert './json_collection_2.json' in files.keys()
46+
assert './json_collection_1.json' in files
47+
assert './json_collection_2.json' in files
4848

4949
assert json.loads(files['./config.json'].read()) == {'version': '1.0'}
5050
assert json.loads(files['./json_collection_1.json'].read()) == {'json1': 'True'}
@@ -64,9 +64,9 @@ def test_small_csvs(collector):
6464
files[member.name] = archive.extractfile(member)
6565

6666
assert_common_files(files)
67-
assert './csv_collection_1.csv' in files.keys()
68-
assert './csv_collection_2.csv' in files.keys()
69-
assert './csv_collection_3.csv' in files.keys()
67+
assert './csv_collection_1.csv' in files
68+
assert './csv_collection_2.csv' in files
69+
assert './csv_collection_3.csv' in files
7070

7171
# length defined by @registered function
7272
assert len(files['./csv_collection_1.csv'].read()) == 100
@@ -97,13 +97,13 @@ def test_jsons_with_csvs_with_slicing(collector):
9797

9898
assert_common_files(files)
9999
if i == 0:
100-
assert './json_collection_1.json' in files.keys()
101-
assert './json_collection_2.json' in files.keys()
102-
assert './csv_slicing_1.csv' in files.keys()
100+
assert './json_collection_1.json' in files
101+
assert './json_collection_2.json' in files
102+
assert './csv_slicing_1.csv' in files
103103
if i == 1:
104-
assert './csv_slicing_2.csv' in files.keys()
104+
assert './csv_slicing_2.csv' in files
105105
if i == 2:
106-
assert './csv_slicing_2.csv' in files.keys()
106+
assert './csv_slicing_2.csv' in files
107107

108108

109109
def test_one_csv_collection_splitted_by_size(collector):
@@ -119,7 +119,7 @@ def test_one_csv_collection_splitted_by_size(collector):
119119

120120
assert_common_files(files)
121121
assert len(files.keys()) == 1 + _common_files_count()
122-
assert './big_table.csv' in files.keys()
122+
assert './big_table.csv' in files
123123
assert len(files['./big_table.csv'].read()) == 1000
124124

125125
collector._gather_cleanup()
@@ -141,15 +141,15 @@ def test_multiple_collections_multiple_tarballs(mocker, collector):
141141
assert_common_files(files)
142142
if i == 0:
143143
assert len(files.keys()) == 2 + _common_files_count()
144-
assert './big_table_2.csv' in files.keys()
145-
assert './csv_collection_1.csv' in files.keys()
144+
assert './big_table_2.csv' in files
145+
assert './csv_collection_1.csv' in files
146146
elif i == 1:
147147
assert len(files.keys()) == 2 + _common_files_count()
148-
assert './big_table_2.csv' in files.keys()
149-
assert './csv_collection_2.csv' in files.keys()
148+
assert './big_table_2.csv' in files
149+
assert './csv_collection_2.csv' in files
150150
elif i == 2:
151151
assert len(files.keys()) == 1 + _common_files_count()
152-
assert './big_table_2.csv' in files.keys()
152+
assert './big_table_2.csv' in files
153153

154154
collector._gather_cleanup()
155155

@@ -173,51 +173,51 @@ def test_multiple_collections_and_distributions(collector):
173173

174174
assert_common_files(files)
175175
if i == 0:
176-
assert './simple_json1.json' in files.keys()
176+
assert './simple_json1.json' in files
177177
else:
178-
assert './simple_json1.json' not in files.keys()
178+
assert './simple_json1.json' not in files
179179

180180
# CSVs with no slicing start at index 0
181181
if 0 <= i <= 1:
182-
assert './csv_no_slicing_1-2x.csv' in files.keys()
182+
assert './csv_no_slicing_1-2x.csv' in files
183183
else:
184-
assert './csv_no_slicing_1-2x.csv' not in files.keys()
184+
assert './csv_no_slicing_1-2x.csv' not in files
185185

186186
if i == 0:
187-
assert './csv_no_slicing_2-1x.csv' in files.keys()
187+
assert './csv_no_slicing_2-1x.csv' in files
188188
else:
189-
assert './csv_no_slicing_2-1x.csv' not in files.keys()
189+
assert './csv_no_slicing_2-1x.csv' not in files
190190

191191
if 0 <= i <= 9:
192-
assert './csv_no_slicing_3-10x.csv' in files.keys()
192+
assert './csv_no_slicing_3-10x.csv' in files
193193
else:
194-
assert './csv_no_slicing_3-10x.csv' not in files.keys()
194+
assert './csv_no_slicing_3-10x.csv' not in files
195195

196196
if 0 <= i <= 11:
197-
assert './csv_no_slicing_4-12x.csv' in files.keys()
197+
assert './csv_no_slicing_4-12x.csv' in files
198198
else:
199-
assert './csv_no_slicing_4-12x.csv' not in files.keys()
199+
assert './csv_no_slicing_4-12x.csv' not in files
200200

201201
# CSVs with slicing start after index next to previous slice
202202
if 0 <= i <= 4:
203-
assert './csv_with_slicing_1-5x.csv' in files.keys()
203+
assert './csv_with_slicing_1-5x.csv' in files
204204
else:
205-
assert './csv_with_slicing_1-5x.csv' not in files.keys()
205+
assert './csv_with_slicing_1-5x.csv' not in files
206206

207207
if 5 <= i <= 7:
208-
assert './csv_with_slicing_2-3x.csv' in files.keys()
208+
assert './csv_with_slicing_2-3x.csv' in files
209209
else:
210-
assert './csv_with_slicing_2-3x.csv' not in files.keys()
210+
assert './csv_with_slicing_2-3x.csv' not in files
211211

212212
if 8 <= i <= 9:
213-
assert './csv_with_slicing_3-2x.csv' in files.keys()
213+
assert './csv_with_slicing_3-2x.csv' in files
214214
else:
215-
assert './csv_with_slicing_3-2x.csv' not in files.keys()
215+
assert './csv_with_slicing_3-2x.csv' not in files
216216

217217
if 10 <= i <= 12:
218-
assert './csv_with_slicing_4-3x.csv' in files.keys()
218+
assert './csv_with_slicing_4-3x.csv' in files
219219
else:
220-
assert './csv_with_slicing_4-3x.csv' not in files.keys()
220+
assert './csv_with_slicing_4-3x.csv' not in files
221221

222222

223223
def test_manifest_and_status(collector):

metrics_utility/test/base/functional/test_slicing.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def test_slices_by_date(collector):
4747
files[member.name] = archive.extractfile(member)
4848

4949
assert_common_files(files)
50-
assert './csv_one_day_slicing_1.csv' in files.keys()
50+
assert './csv_one_day_slicing_1.csv' in files
5151

5252
lines = files['./csv_one_day_slicing_1.csv'].readlines()
5353
_header = lines.pop(0)

0 commit comments

Comments
 (0)