
Commit 75bf86e

Still fixing lint issues
1 parent 6f85a1f commit 75bf86e

9 files changed: +99 additions, -198 deletions


cubids/cli.py

Lines changed: 9 additions & 18 deletions
@@ -27,8 +27,7 @@ def _is_file(path, parser):
     """Ensure a given path exists and it is a file."""
     path = _path_exists(path, parser)
     if not path.is_file():
-        raise parser.error(
-            f"Path should point to a file (or symlink of file): <{path}>.")
+        raise parser.error(f"Path should point to a file (or symlink of file): <{path}>.")
     return path


@@ -145,8 +144,7 @@ def _enter_bids_version(argv=None):

 def _parse_bids_sidecar_merge():
     parser = argparse.ArgumentParser(
-        description=(
-            "bids-sidecar-merge: merge critical keys from one sidecar to another"),
+        description=("bids-sidecar-merge: merge critical keys from one sidecar to another"),
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
     IsFile = partial(_is_file, parser=parser)
@@ -218,8 +216,7 @@ def _parse_group():
         default="subject",
         choices=["subject", "session"],
         action="store",
-        help=(
-            "Level at which acquisition groups are created options: 'subject' or 'session'"),
+        help=("Level at which acquisition groups are created options: 'subject' or 'session'"),
     )
     parser.add_argument(
         "--config",
@@ -247,8 +244,7 @@ def _enter_group(argv=None):

 def _parse_apply():
     parser = argparse.ArgumentParser(
-        description=(
-            "cubids-apply: apply the changes specified in a tsv to a BIDS directory"),
+        description=("cubids-apply: apply the changes specified in a tsv to a BIDS directory"),
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
     PathExists = partial(_path_exists, parser=parser)
@@ -316,8 +312,7 @@ def _parse_apply():
         default="subject",
         choices=["subject", "session"],
         action="store",
-        help=(
-            "Level at which acquisition groups are created options: 'subject' or 'session'"),
+        help=("Level at which acquisition groups are created options: 'subject' or 'session'"),
     )
     parser.add_argument(
         "--config",
@@ -346,8 +341,7 @@ def _enter_apply(argv=None):

 def _parse_datalad_save():
     parser = argparse.ArgumentParser(
-        description=(
-            "cubids-datalad-save: perform a DataLad save on a BIDS directory"),
+        description=("cubids-datalad-save: perform a DataLad save on a BIDS directory"),
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
     PathExists = partial(_path_exists, parser=parser)
@@ -705,10 +699,8 @@ def _enter_print_metadata_fields(argv=None):
     ("copy-exemplars", _parse_copy_exemplars, workflows.copy_exemplars),
     ("undo", _parse_undo, workflows.undo),
     ("datalad-save", _parse_datalad_save, workflows.datalad_save),
-    ("print-metadata-fields", _parse_print_metadata_fields,
-     workflows.print_metadata_fields),
-    ("remove-metadata-fields", _parse_remove_metadata_fields,
-     workflows.remove_metadata_fields),
+    ("print-metadata-fields", _parse_print_metadata_fields, workflows.print_metadata_fields),
+    ("remove-metadata-fields", _parse_remove_metadata_fields, workflows.remove_metadata_fields),
 ]


@@ -717,8 +709,7 @@ def _get_parser():
     from cubids import __version__

     parser = argparse.ArgumentParser(prog="cubids")
-    parser.add_argument("-v", "--version",
-                        action="version", version=__version__)
+    parser.add_argument("-v", "--version", action="version", version=__version__)
     subparsers = parser.add_subparsers(help="CuBIDS commands")

     for command, parser_func, run_func in COMMANDS:
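For context on the last two hunks: COMMANDS is a list of (command name, parser builder, runner) triples that _get_parser wires into argparse subparsers. The following is a minimal, standalone sketch of that dispatch pattern, not CuBIDS's actual code; the hello/goodbye commands and their handlers are hypothetical placeholders, and the wiring is simplified (cli.py's builders construct full parsers per command).

import argparse

# Hypothetical runners standing in for workflow functions such as
# workflows.print_metadata_fields.
def _run_hello(args):
    print(f"hello, {args.name}")


def _run_goodbye(args):
    print(f"goodbye, {args.name}")


# Hypothetical argument builders standing in for the _parse_* functions.
def _build_hello(parser):
    parser.add_argument("name", help="who to greet")


def _build_goodbye(parser):
    parser.add_argument("name", help="who to send off")


# (command name, argument builder, runner) triples, same shape as COMMANDS above.
COMMANDS = [
    ("hello", _build_hello, _run_hello),
    ("goodbye", _build_goodbye, _run_goodbye),
]


def _get_parser():
    parser = argparse.ArgumentParser(prog="demo")
    parser.add_argument("-v", "--version", action="version", version="0.1.0")
    subparsers = parser.add_subparsers(help="demo commands", required=True)
    for command, build_func, run_func in COMMANDS:
        subparser = subparsers.add_parser(command)
        build_func(subparser)
        # Stash the runner on the namespace so the entry point can dispatch.
        subparser.set_defaults(func=run_func)
    return parser


if __name__ == "__main__":
    args = _get_parser().parse_args()
    args.func(args)

Saved as demo.py, running "python demo.py hello world" would print "hello, world".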

cubids/cubids.py

Lines changed: 35 additions & 70 deletions
@@ -149,11 +149,9 @@ def reset_bids_layout(self, validate=False):
             re.compile(r"/\."),
         ]

-        indexer = bids.BIDSLayoutIndexer(
-            validate=validate, ignore=ignores, index_metadata=False)
+        indexer = bids.BIDSLayoutIndexer(validate=validate, ignore=ignores, index_metadata=False)

-        self._layout = bids.BIDSLayout(
-            self.path, validate=validate, indexer=indexer)
+        self._layout = bids.BIDSLayout(self.path, validate=validate, indexer=indexer)

     def create_cubids_code_dir(self):
         """Create CuBIDS code directory.
@@ -203,8 +201,7 @@ def datalad_save(self, message=None):
             Commit message to use with datalad save.
         """
         if not self.datalad_ready:
-            raise Exception(
-                "DataLad has not been initialized. use datalad_init()")
+            raise Exception("DataLad has not been initialized. use datalad_init()")

         statuses = self.datalad_handle.save(message=message or "CuBIDS Save")
         saved_status = set([status["status"] for status in statuses])
@@ -226,8 +223,7 @@ def is_datalad_clean(self):
         """
         if not self.datalad_ready:
             raise Exception("Datalad not initialized, can't determine status")
-        statuses = set([status["state"]
-                        for status in self.datalad_handle.status()])
+        statuses = set([status["state"] for status in self.datalad_handle.status()])
         return statuses == set(["clean"])

     def datalad_undo_last_commit(self):
@@ -241,10 +237,8 @@ def datalad_undo_last_commit(self):
             If there are untracked changes in the datalad dataset.
         """
         if not self.is_datalad_clean():
-            raise Exception(
-                "Untracked changes present. Run clear_untracked_changes first")
-        reset_proc = subprocess.run(
-            ["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
+            raise Exception("Untracked changes present. Run clear_untracked_changes first")
+        reset_proc = subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
         reset_proc.check_returncode()

     def add_nifti_info(self):
@@ -348,13 +342,11 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
         files_df = pd.read_table(files_tsv)

         # Check that the MergeInto column only contains valid merges
-        ok_merges, deletions = check_merging_operations(
-            summary_tsv, raise_on_error=raise_on_error)
+        ok_merges, deletions = check_merging_operations(summary_tsv, raise_on_error=raise_on_error)

         merge_commands = []
         for source_id, dest_id in ok_merges:
-            dest_files = files_df.loc[(
-                files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
+            dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
             source_files = files_df.loc[
                 (files_df[["ParamGroup", "EntitySet"]] == source_id).all(1)
             ]
@@ -365,15 +357,13 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
             for dest_nii in dest_files.FilePath:
                 dest_json = img_to_new_ext(self.path + dest_nii, ".json")
                 if Path(dest_json).exists() and Path(source_json).exists():
-                    merge_commands.append(
-                        f"bids-sidecar-merge {source_json} {dest_json}")
+                    merge_commands.append(f"bids-sidecar-merge {source_json} {dest_json}")

         # Get the delete commands
         # delete_commands = []
         to_remove = []
         for rm_id in deletions:
-            files_to_rm = files_df.loc[(
-                files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
+            files_to_rm = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]

             for rm_me in files_to_rm.FilePath:
                 if Path(self.path + rm_me).exists():
@@ -446,8 +436,7 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T

             rename_commit = s1 + s2

-            self.datalad_handle.run(
-                cmd=["bash", renames], message=rename_commit)
+            self.datalad_handle.run(cmd=["bash", renames], message=rename_commit)
         else:
             subprocess.run(
                 ["bash", renames],
@@ -487,8 +476,7 @@ def change_filename(self, filepath, entities):
         entity_file_keys = []

         # Entities that may be in the filename?
-        file_keys = ["task", "acquisition",
-                     "direction", "reconstruction", "run"]
+        file_keys = ["task", "acquisition", "direction", "reconstruction", "run"]

         for key in file_keys:
             if key in list(entities.keys()):
@@ -502,8 +490,7 @@ def change_filename(self, filepath, entities):
                 # XXX: This adds an extra leading zero to run.
                 entities["run"] = "0" + str(entities["run"])

-        filename = "_".join(
-            [f"{key}-{entities[key]}" for key in entity_file_keys])
+        filename = "_".join([f"{key}-{entities[key]}" for key in entity_file_keys])
         filename = (
             filename.replace("acquisition", "acq")
             .replace("direction", "dir")
@@ -512,8 +499,7 @@ def change_filename(self, filepath, entities):
         if len(filename) > 0:
             filename = sub_ses + "_" + filename + "_" + suffix + old_ext
         else:
-            raise ValueError(
-                f"Could not construct new filename for {filepath}")
+            raise ValueError(f"Could not construct new filename for {filepath}")

         # CHECK TO SEE IF DATATYPE CHANGED
         # datatype may be overridden/changed if the original file is located in the wrong folder.
@@ -531,8 +517,7 @@ def change_filename(self, filepath, entities):
             dtype_new = dtype_orig

         # Construct the new filename
-        new_path = str(self.path) + "/" + sub + "/" + \
-            ses + "/" + dtype_new + "/" + filename
+        new_path = str(self.path) + "/" + sub + "/" + ses + "/" + dtype_new + "/" + filename

         # Add the scan path + new path to the lists of old, new filenames
         self.old_filenames.append(filepath)
@@ -551,8 +536,7 @@ def change_filename(self, filepath, entities):
             # ensure assoc not an IntendedFor reference
             if ".nii" not in str(assoc_path):
                 self.old_filenames.append(assoc_path)
-                new_ext_path = img_to_new_ext(
-                    new_path, "".join(Path(assoc_path).suffixes))
+                new_ext_path = img_to_new_ext(new_path, "".join(Path(assoc_path).suffixes))
                 self.new_filenames.append(new_ext_path)

         # MAKE SURE THESE AREN'T COVERED BY get_associations!!!
@@ -625,8 +609,7 @@ def change_filename(self, filepath, entities):
             if Path(old_labeling).exists():
                 self.old_filenames.append(old_labeling)
                 new_scan_end = "_" + suffix + old_ext
-                new_labeling = new_path.replace(
-                    new_scan_end, "_asllabeling.jpg")
+                new_labeling = new_path.replace(new_scan_end, "_asllabeling.jpg")
                 self.new_filenames.append(new_labeling)

         # RENAME INTENDED FORS!
@@ -652,8 +635,7 @@ def change_filename(self, filepath, entities):
                         # remove old filename
                         data["IntendedFor"].remove(item)
                         # add new filename
-                        data["IntendedFor"].append(
-                            _get_intended_for_reference(new_path))
+                        data["IntendedFor"].append(_get_intended_for_reference(new_path))

             # update the json with the new data dictionary
             _update_json(filename_with_if, data)
@@ -826,8 +808,7 @@ def _purge_associations(self, scans):

             if "/func/" in str(path):
                 # add tsvs
-                tsv = img_to_new_ext(str(path), ".tsv").replace(
-                    "_bold", "_events")
+                tsv = img_to_new_ext(str(path), ".tsv").replace("_bold", "_events")
                 if Path(tsv).exists():
                     to_remove.append(tsv)
                 # add tsv json (if exists)
@@ -941,8 +922,7 @@ def get_param_groups_from_entity_set(self, entity_set):
             2. A data frame with param group summaries
         """
         if not self.fieldmaps_cached:
-            raise Exception(
-                "Fieldmaps must be cached to find parameter groups.")
+            raise Exception("Fieldmaps must be cached to find parameter groups.")
         key_entities = _entity_set_to_entities(entity_set)
         key_entities["extension"] = ".nii[.gz]*"

@@ -995,8 +975,7 @@ def create_data_dictionary(self):
             mod_dict = sidecar_params[mod]
             for s_param in mod_dict.keys():
                 if s_param not in self.data_dict.keys():
-                    self.data_dict[s_param] = {
-                        "Description": "Scanning Parameter"}
+                    self.data_dict[s_param] = {"Description": "Scanning Parameter"}

         relational_params = self.grouping_config.get("relational_params")
         for r_param in relational_params.keys():
@@ -1008,8 +987,7 @@ def create_data_dictionary(self):
             mod_dict = derived_params[mod]
             for d_param in mod_dict.keys():
                 if d_param not in self.data_dict.keys():
-                    self.data_dict[d_param] = {
-                        "Description": "NIfTI Header Parameter"}
+                    self.data_dict[d_param] = {"Description": "NIfTI Header Parameter"}

         # Manually add non-sidecar columns/descriptions to data_dict
         desc1 = "Column where users mark groups to manually check"
@@ -1116,20 +1094,17 @@ def get_param_groups_dataframes(self):
             long_name = big_df.loc[row, "FilePath"]
             big_df.loc[row, "FilePath"] = long_name.replace(self.path, "")

-        summary = _order_columns(
-            pd.concat(param_group_summaries, ignore_index=True))
+        summary = _order_columns(pd.concat(param_group_summaries, ignore_index=True))

         # create new col that strings key and param group together
-        summary["KeyParamGroup"] = summary["EntitySet"] + \
-            "__" + summary["ParamGroup"].map(str)
+        summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)

         # move this column to the front of the dataframe
         key_param_col = summary.pop("KeyParamGroup")
         summary.insert(0, "KeyParamGroup", key_param_col)

         # do the same for the files df
-        big_df["KeyParamGroup"] = big_df["EntitySet"] + \
-            "__" + big_df["ParamGroup"].map(str)
+        big_df["KeyParamGroup"] = big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)

         # move this column to the front of the dataframe
         key_param_col = big_df.pop("KeyParamGroup")
@@ -1278,10 +1253,8 @@ def get_tsvs(self, path_prefix):

         big_df, summary = self.get_param_groups_dataframes()

-        summary = summary.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False])
-        big_df = big_df.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False])
+        summary = summary.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
+        big_df = big_df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])

         # Create json dictionaries for summary and files tsvs
         self.create_data_dictionary()
@@ -1300,8 +1273,7 @@ def get_tsvs(self, path_prefix):
         summary.to_csv(f"{path_prefix}_summary.tsv", sep="\t", index=False)

         # Calculate the acq groups
-        group_by_acquisition_sets(
-            f"{path_prefix}_files.tsv", path_prefix, self.acq_group_level)
+        group_by_acquisition_sets(f"{path_prefix}_files.tsv", path_prefix, self.acq_group_level)

         print(f"CuBIDS detected {len(summary)} Parameter Groups.")

@@ -1520,8 +1492,7 @@ def _get_param_groups(
         # Get the fieldmaps out and add their types
         if "FieldmapKey" in relational_params:
             fieldmap_types = sorted(
-                [_file_to_entity_set(fmap.path)
-                 for fmap in fieldmap_lookup[path]]
+                [_file_to_entity_set(fmap.path) for fmap in fieldmap_lookup[path]]
             )

             # check if config says columns or bool
@@ -1543,8 +1514,7 @@ def _get_param_groups(
         # If it's a fieldmap, see what entity set it's intended to correct
         if "IntendedForKey" in relational_params:
             intended_entity_sets = sorted(
-                [_file_to_entity_set(intention)
-                 for intention in intentions]
+                [_file_to_entity_set(intention) for intention in intentions]
             )

             # check if config says columns or bool
@@ -1598,30 +1568,25 @@ def _get_param_groups(
         {"Counts": value_counts.to_numpy(), "ParamGroup": value_counts.index.to_numpy()}
     )

-    param_groups_with_counts = pd.merge(
-        deduped, param_group_counts, on=["ParamGroup"])
+    param_groups_with_counts = pd.merge(deduped, param_group_counts, on=["ParamGroup"])

     # Sort by counts and relabel the param groups
-    param_groups_with_counts.sort_values(
-        by=["Counts"], inplace=True, ascending=False)
-    param_groups_with_counts["ParamGroup"] = np.arange(
-        param_groups_with_counts.shape[0]) + 1
+    param_groups_with_counts.sort_values(by=["Counts"], inplace=True, ascending=False)
+    param_groups_with_counts["ParamGroup"] = np.arange(param_groups_with_counts.shape[0]) + 1

     # Send the new, ordered param group ids to the files list
     ordered_labeled_files = pd.merge(
         df, param_groups_with_counts, on=check_cols, suffixes=("_x", "")
     )

     # sort ordered_labeled_files by param group
-    ordered_labeled_files.sort_values(
-        by=["Counts"], inplace=True, ascending=False)
+    ordered_labeled_files.sort_values(by=["Counts"], inplace=True, ascending=False)

     # now get rid of cluster cols from deduped and df
     for col in list(ordered_labeled_files.columns):
         if col.startswith("Cluster_"):
             ordered_labeled_files = ordered_labeled_files.drop(col, axis=1)
-            param_groups_with_counts = param_groups_with_counts.drop(
-                col, axis=1)
+            param_groups_with_counts = param_groups_with_counts.drop(col, axis=1)
         if col.endswith("_x"):
             ordered_labeled_files = ordered_labeled_files.drop(col, axis=1)

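Several of the collapsed lines in apply_tsv_changes select the rows of the files table whose (ParamGroup, EntitySet) pair equals a given id, using the (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1) idiom. Below is a minimal sketch of that selection on made-up data; the toy DataFrame, entity sets, and paths are hypothetical, not taken from a real CuBIDS _files.tsv.

import pandas as pd

# Toy stand-in for the table read from <prefix>_files.tsv.
files_df = pd.DataFrame(
    {
        "ParamGroup": [1, 2, 1],
        "EntitySet": [
            "datatype-anat_suffix-T1w",
            "datatype-anat_suffix-T1w",
            "datatype-func_suffix-bold",
        ],
        "FilePath": [
            "/sub-01/anat/a.nii.gz",
            "/sub-02/anat/b.nii.gz",
            "/sub-01/func/c.nii.gz",
        ],
    }
)

# A merge or delete id is a (ParamGroup, EntitySet) pair, as in ok_merges/deletions.
dest_id = (1, "datatype-anat_suffix-T1w")

# Compare both columns against the pair element-wise, then keep only the rows
# where every comparison is True -- the same .all(1) idiom as in the diff.
mask = (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(axis=1)
dest_files = files_df.loc[mask]
print(dest_files.FilePath.tolist())  # ['/sub-01/anat/a.nii.gz']

Comparing the two-column frame against the pair broadcasts column-wise, and .all(axis=1) keeps only rows where both the ParamGroup and the EntitySet match.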
