
Commit 2831f5a

fix linter errors
1 parent 00ef452 commit 2831f5a

13 files changed: +87 −257 lines changed


cubids/__about__.py

Lines changed: 1 addition & 3 deletions
@@ -34,6 +34,4 @@
 )
 __url__ = "https://github.com/PennLINC/CuBIDS"
 
-DOWNLOAD_URL = (
-    f"https://github.com/PennLINC/{__packagename__}/archive/{__version__}.tar.gz"
-)
+DOWNLOAD_URL = f"https://github.com/PennLINC/{__packagename__}/archive/{__version__}.tar.gz"

cubids/cli.py

Lines changed: 5 additions & 15 deletions
@@ -289,9 +289,7 @@ def _parse_bids_sidecar_merge():
     The `IsFile` partial function is used to validate that the provided file paths exist.
     """
     parser = argparse.ArgumentParser(
-        description=(
-            "bids-sidecar-merge: merge critical keys from one sidecar to another"
-        ),
+        description=("bids-sidecar-merge: merge critical keys from one sidecar to another"),
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         allow_abbrev=False,
     )
@@ -382,9 +380,7 @@ def _parse_group():
         default="subject",
         choices=["subject", "session"],
         action="store",
-        help=(
-            "Level at which acquisition groups are created options: 'subject' or 'session'"
-        ),
+        help=("Level at which acquisition groups are created options: 'subject' or 'session'"),
     )
     parser.add_argument(
         "--config",
@@ -446,9 +442,7 @@ def _parse_apply():
        The argument parser with the defined arguments.
     """
     parser = argparse.ArgumentParser(
-        description=(
-            "cubids apply: apply the changes specified in a tsv to a BIDS directory"
-        ),
+        description=("cubids apply: apply the changes specified in a tsv to a BIDS directory"),
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         allow_abbrev=False,
     )
@@ -512,9 +506,7 @@ def _parse_apply():
         default="subject",
         choices=["subject", "session"],
         action="store",
-        help=(
-            "Level at which acquisition groups are created options: 'subject' or 'session'"
-        ),
+        help=("Level at which acquisition groups are created options: 'subject' or 'session'"),
     )
     parser.add_argument(
         "--config",
@@ -1072,9 +1064,7 @@ def _get_parser():
     from cubids import __version__
 
     parser = argparse.ArgumentParser(prog="cubids", allow_abbrev=False)
-    parser.add_argument(
-        "-v", "--version", action="version", version=f"cubids v{__version__}"
-    )
+    parser.add_argument("-v", "--version", action="version", version=f"cubids v{__version__}")
     subparsers = parser.add_subparsers(help="CuBIDS commands")
 
     for command, parser_func, run_func in COMMANDS:

cubids/cubids.py

Lines changed: 17 additions & 51 deletions
@@ -123,9 +123,7 @@ def __init__(
         self.data_dict = {}  # data dictionary for TSV outputs
         self.use_datalad = use_datalad  # True if flag set, False if flag unset
         self.schema = load_schema(schema_json)
-        self.is_longitudinal = (
-            self._infer_longitudinal()
-        )  # inferred from dataset structure
+        self.is_longitudinal = self._infer_longitudinal()  # inferred from dataset structure
 
         if self.use_datalad:
             self.init_datalad()
@@ -188,9 +186,7 @@ def reset_bids_layout(self, validate=False):
             re.compile(r"/\."),
         ]
 
-        indexer = bids.BIDSLayoutIndexer(
-            validate=validate, ignore=ignores, index_metadata=False
-        )
+        indexer = bids.BIDSLayoutIndexer(validate=validate, ignore=ignores, index_metadata=False)
 
         self._layout = bids.BIDSLayout(self.path, validate=validate, indexer=indexer)
 
@@ -297,9 +293,7 @@ def datalad_undo_last_commit(self):
            If there are untracked changes in the datalad dataset.
         """
         if not self.is_datalad_clean():
-            raise Exception(
-                "Untracked changes present. Run clear_untracked_changes first"
-            )
+            raise Exception("Untracked changes present. Run clear_untracked_changes first")
         reset_proc = subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
         reset_proc.check_returncode()
 
@@ -423,9 +417,7 @@ def add_file_collections(self):
                 continue
 
             # Add file collection metadata to the sidecar
-            files, collection_metadata = utils.collect_file_collections(
-                self.layout, path
-            )
+            files, collection_metadata = utils.collect_file_collections(self.layout, path)
             filepaths = [f.path for f in files]
             checked_files.extend(filepaths)
 
@@ -447,9 +439,7 @@ def add_file_collections(self):
 
        self.reset_bids_layout()
 
-    def apply_tsv_changes(
-        self, summary_tsv, files_tsv, new_prefix, raise_on_error=True
-    ):
+    def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=True):
        """Apply changes documented in the edited summary tsv and generate the new tsv files.
 
        This function looks at the RenameEntitySet and MergeInto
@@ -485,15 +475,11 @@ def apply_tsv_changes(
        files_df = pd.read_table(files_tsv)
 
        # Check that the MergeInto column only contains valid merges
-        ok_merges, deletions = check_merging_operations(
-            summary_tsv, raise_on_error=raise_on_error
-        )
+        ok_merges, deletions = check_merging_operations(summary_tsv, raise_on_error=raise_on_error)
 
        merge_commands = []
        for source_id, dest_id in ok_merges:
-            dest_files = files_df.loc[
-                (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)
-            ]
+            dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
            source_files = files_df.loc[
                (files_df[["ParamGroup", "EntitySet"]] == source_id).all(1)
            ]
@@ -504,16 +490,12 @@ def apply_tsv_changes(
            for dest_nii in dest_files.FilePath:
                dest_json = utils.img_to_new_ext(self.path + dest_nii, ".json")
                if Path(dest_json).exists() and Path(source_json).exists():
-                    merge_commands.append(
-                        f"cubids bids-sidecar-merge {source_json} {dest_json}"
-                    )
+                    merge_commands.append(f"cubids bids-sidecar-merge {source_json} {dest_json}")
 
        # Get the delete commands
        to_remove = []
        for rm_id in deletions:
-            files_to_rm = files_df.loc[
-                (files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)
-            ]
+            files_to_rm = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
 
            for rm_me in files_to_rm.FilePath:
                if Path(self.path + rm_me).exists():
@@ -748,17 +730,13 @@ def change_filename(self, filepath, entities):
                    # remove old filename
                    data["IntendedFor"].remove(item)
                    # add new filename
-                    data["IntendedFor"].append(
-                        utils._get_participant_relative_path(new_path)
-                    )
+                    data["IntendedFor"].append(utils._get_participant_relative_path(new_path))
 
                if item == utils._get_bidsuri(filepath, self.path):
                    # remove old filename
                    data["IntendedFor"].remove(item)
                    # add new filename
-                    data["IntendedFor"].append(
-                        utils._get_bidsuri(new_path, self.path)
-                    )
+                    data["IntendedFor"].append(utils._get_bidsuri(new_path, self.path))
 
            # update the json with the new data dictionary
            utils._update_json(filename_with_if, data)
@@ -935,9 +913,7 @@ def _purge_associations(self, scans):
 
            if "/func/" in str(path):
                # add tsvs
-                tsv = utils.img_to_new_ext(str(path), ".tsv").replace(
-                    "_bold", "_events"
-                )
+                tsv = utils.img_to_new_ext(str(path), ".tsv").replace("_bold", "_events")
                if Path(tsv).exists():
                    to_remove.append(tsv)
                # add tsv json (if exists)
@@ -1292,23 +1268,17 @@ def get_param_groups_dataframes(self):
            long_name = big_df.loc[row, "FilePath"]
            big_df.loc[row, "FilePath"] = long_name.replace(self.path, "")
 
-        summary = utils._order_columns(
-            pd.concat(param_group_summaries, ignore_index=True)
-        )
+        summary = utils._order_columns(pd.concat(param_group_summaries, ignore_index=True))
 
        # create new col that strings key and param group together
-        summary["KeyParamGroup"] = (
-            summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
-        )
+        summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
 
        # move this column to the front of the dataframe
        key_param_col = summary.pop("KeyParamGroup")
        summary.insert(0, "KeyParamGroup", key_param_col)
 
        # do the same for the files df
-        big_df["KeyParamGroup"] = (
-            big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
-        )
+        big_df["KeyParamGroup"] = big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
 
        # move this column to the front of the dataframe
        key_param_col = big_df.pop("KeyParamGroup")
@@ -1383,12 +1353,8 @@ def get_tsvs(self, path_prefix):
 
        big_df, summary = self.get_param_groups_dataframes()
 
-        summary = summary.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False]
-        )
-        big_df = big_df.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False]
-        )
+        summary = summary.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
+        big_df = big_df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
 
        # Create json dictionaries for summary and files tsvs
        self.create_data_dictionary()

cubids/metadata_merge.py

Lines changed: 5 additions & 15 deletions
@@ -54,18 +54,14 @@ def check_merging_operations(action_tsv, raise_on_error=False):
    )
 
    def _check_sdc_cols(meta1, meta2):
-        return {key: meta1[key] for key in sdc_cols} == {
-            key: meta2[key] for key in sdc_cols
-        }
+        return {key: meta1[key] for key in sdc_cols} == {key: meta2[key] for key in sdc_cols}
 
    needs_merge = actions[np.isfinite(actions["MergeInto"])]
    for _, row_needs_merge in needs_merge.iterrows():
        source_param_key = tuple(row_needs_merge[["MergeInto", "EntitySet"]])
        dest_param_key = tuple(row_needs_merge[["ParamGroup", "EntitySet"]])
        dest_metadata = row_needs_merge.to_dict()
-        source_row = actions.loc[
-            (actions[["ParamGroup", "EntitySet"]] == source_param_key).all(1)
-        ]
+        source_row = actions.loc[(actions[["ParamGroup", "EntitySet"]] == source_param_key).all(1)]
 
        if source_param_key[0] == 0:
            print("going to delete ", dest_param_key)
@@ -304,9 +300,7 @@ def get_acq_dictionary(is_longitudinal=False):
    return acq_dict
 
 
-def group_by_acquisition_sets(
-    files_tsv, output_prefix, acq_group_level, is_longitudinal=False
-):
+def group_by_acquisition_sets(files_tsv, output_prefix, acq_group_level, is_longitudinal=False):
    """Find unique sets of Key/Param groups across subjects.
 
    This writes out the following files:
@@ -371,9 +365,7 @@ def group_by_acquisition_sets(
    acq_group_info = []
    for groupnum, content_id_row in enumerate(descending_order, start=1):
        content_id = content_ids[content_id_row]
-        acq_group_info.append(
-            (groupnum, content_id_counts[content_id_row]) + content_id
-        )
+        acq_group_info.append((groupnum, content_id_counts[content_id_row]) + content_id)
        if is_longitudinal:
            for subject, session in contents_to_subjects[content_id]:
                grouped_sub_sess.append(
@@ -385,9 +377,7 @@ def group_by_acquisition_sets(
                )
        elif not is_longitudinal:
            for subject in contents_to_subjects[content_id]:
-                grouped_sub_sess.append(
-                    {"subject": "sub-" + subject, "AcqGroup": groupnum}
-                )
+                grouped_sub_sess.append({"subject": "sub-" + subject, "AcqGroup": groupnum})
 
    # Write the mapping of subject/session to
    acq_group_df = pd.DataFrame(grouped_sub_sess)

cubids/tests/test_apply.py

Lines changed: 4 additions & 12 deletions
@@ -14,18 +14,14 @@
        "dir": "AP",
        "suffix": "epi",
        "metadata": {
-            "IntendedFor": [
-                "ses-01/dwi/sub-01_ses-01_dir-AP_run-01_dwi.nii.gz"
-            ],
+            "IntendedFor": ["ses-01/dwi/sub-01_ses-01_dir-AP_run-01_dwi.nii.gz"],
        },
    },
    {
        "dir": "PA",
        "suffix": "epi",
        "metadata": {
-            "IntendedFor": [
-                "ses-01/dwi/sub-01_ses-01_dir-AP_run-01_dwi.nii.gz"
-            ],
+            "IntendedFor": ["ses-01/dwi/sub-01_ses-01_dir-AP_run-01_dwi.nii.gz"],
        },
    },
 ],
@@ -122,18 +118,14 @@
        "dir": "AP",
        "suffix": "epi",
        "metadata": {
-            "IntendedFor": [
-                "bids::sub-01/dwi/sub-01_dir-AP_run-01_dwi.nii.gz"
-            ],
+            "IntendedFor": ["bids::sub-01/dwi/sub-01_dir-AP_run-01_dwi.nii.gz"],
        },
    },
    {
        "dir": "PA",
        "suffix": "epi",
        "metadata": {
-            "IntendedFor": [
-                "bids::sub-01/dwi/sub-01_dir-AP_run-01_dwi.nii.gz"
-            ],
+            "IntendedFor": ["bids::sub-01/dwi/sub-01_dir-AP_run-01_dwi.nii.gz"],
        },
    },
 ],
