@@ -149,9 +149,11 @@ def reset_bids_layout(self, validate=False):
             re.compile(r"/\."),
         ]

-        indexer = bids.BIDSLayoutIndexer(validate=validate, ignore=ignores, index_metadata=False)
+        indexer = bids.BIDSLayoutIndexer(
+            validate=validate, ignore=ignores, index_metadata=False)

-        self._layout = bids.BIDSLayout(self.path, validate=validate, indexer=indexer)
+        self._layout = bids.BIDSLayout(
+            self.path, validate=validate, indexer=indexer)

     def create_cubids_code_dir(self):
         """Create CuBIDS code directory.
@@ -201,7 +203,8 @@ def datalad_save(self, message=None):
             Commit message to use with datalad save.
         """
         if not self.datalad_ready:
-            raise Exception("DataLad has not been initialized. use datalad_init()")
+            raise Exception(
+                "DataLad has not been initialized. use datalad_init()")

         statuses = self.datalad_handle.save(message=message or "CuBIDS Save")
         saved_status = set([status["status"] for status in statuses])
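
The save-and-verify pattern is datalad's: save() returns one result record per processed path, each with a "status" field. A hedged sketch outside the class, using a hypothetical dataset path and success states that may differ from what CuBIDS actually accepts:

import datalad.api as dlapi

ds = dlapi.Dataset("/data/ds")  # hypothetical dataset path
# save() returns a list of result records, one per processed path
statuses = ds.save(message="CuBIDS Save")
saved_status = set([status["status"] for status in statuses])
# "ok" and "notneeded" are assumed here as the success states
if not saved_status <= set(["ok", "notneeded"]):
    raise Exception("Problem saving")
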
@@ -223,7 +226,8 @@ def is_datalad_clean(self):
         """
         if not self.datalad_ready:
             raise Exception("Datalad not initialized, can't determine status")
-        statuses = set([status["state"] for status in self.datalad_handle.status()])
+        statuses = set([status["state"]
+                        for status in self.datalad_handle.status()])
         return statuses == set(["clean"])

     def datalad_undo_last_commit(self):
@@ -237,8 +241,10 @@ def datalad_undo_last_commit(self):
         If there are untracked changes in the datalad dataset.
         """
         if not self.is_datalad_clean():
-            raise Exception("Untracked changes present. Run clear_untracked_changes first")
-        reset_proc = subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
+            raise Exception(
+                "Untracked changes present. Run clear_untracked_changes first")
+        reset_proc = subprocess.run(
+            ["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
         reset_proc.check_returncode()

     def add_nifti_info(self):
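
The undo itself is plain git run through subprocess; check_returncode() raises subprocess.CalledProcessError on a nonzero exit, so a failed reset cannot pass silently. The same call outside the class, with a hypothetical repository path:

import subprocess

# Hypothetical repository path; HEAD~1 drops the most recent commit
reset_proc = subprocess.run(
    ["git", "reset", "--hard", "HEAD~1"], cwd="/data/ds")
reset_proc.check_returncode()  # raises CalledProcessError on failure
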
@@ -342,11 +348,13 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
         files_df = pd.read_table(files_tsv)

         # Check that the MergeInto column only contains valid merges
-        ok_merges, deletions = check_merging_operations(summary_tsv, raise_on_error=raise_on_error)
+        ok_merges, deletions = check_merging_operations(
+            summary_tsv, raise_on_error=raise_on_error)

         merge_commands = []
         for source_id, dest_id in ok_merges:
-            dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
+            dest_files = files_df.loc[(
+                files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
             source_files = files_df.loc[
                 (files_df[["ParamGroup", "EntitySet"]] == source_id).all(1)
             ]
@@ -357,13 +365,15 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
             for dest_nii in dest_files.FilePath:
                 dest_json = img_to_new_ext(self.path + dest_nii, ".json")
                 if Path(dest_json).exists() and Path(source_json).exists():
-                    merge_commands.append(f"bids-sidecar-merge {source_json} {dest_json}")
+                    merge_commands.append(
+                        f"bids-sidecar-merge {source_json} {dest_json}")

         # Get the delete commands
         # delete_commands = []
         to_remove = []
         for rm_id in deletions:
-            files_to_rm = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
+            files_to_rm = files_df.loc[(
+                files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]

             for rm_me in files_to_rm.FilePath:
                 if Path(self.path + rm_me).exists():
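
The (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1) idiom keeps rows whose two columns both match a (ParamGroup, EntitySet) pair; .all(1) is shorthand for .all(axis=1). A toy illustration with made-up rows:

import pandas as pd

files_df = pd.DataFrame({
    "FilePath": ["/sub-1_bold.nii.gz", "/sub-2_bold.nii.gz"],
    "ParamGroup": [1, 2],
    "EntitySet": ["task-rest", "task-rest"],
})
dest_id = (2, "task-rest")  # (ParamGroup, EntitySet)

# Compare both columns at once; keep rows where every comparison holds
mask = (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)
dest_files = files_df.loc[mask]
print(dest_files.FilePath.tolist())  # ['/sub-2_bold.nii.gz']
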
@@ -436,7 +446,8 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T

             rename_commit = s1 + s2

-            self.datalad_handle.run(cmd=["bash", renames], message=rename_commit)
+            self.datalad_handle.run(
+                cmd=["bash", renames], message=rename_commit)
         else:
             subprocess.run(
                 ["bash", renames],
@@ -476,7 +487,8 @@ def change_filename(self, filepath, entities):
         entity_file_keys = []

         # Entities that may be in the filename?
-        file_keys = ["task", "acquisition", "direction", "reconstruction", "run"]
+        file_keys = ["task", "acquisition",
+                     "direction", "reconstruction", "run"]

         for key in file_keys:
             if key in list(entities.keys()):
@@ -490,7 +502,8 @@ def change_filename(self, filepath, entities):
             # XXX: This adds an extra leading zero to run.
             entities["run"] = "0" + str(entities["run"])

-        filename = "_".join([f"{key}-{entities[key]}" for key in entity_file_keys])
+        filename = "_".join(
+            [f"{key}-{entities[key]}" for key in entity_file_keys])
         filename = (
             filename.replace("acquisition", "acq")
             .replace("direction", "dir")
@@ -499,7 +512,8 @@ def change_filename(self, filepath, entities):
         if len(filename) > 0:
             filename = sub_ses + "_" + filename + "_" + suffix + old_ext
         else:
-            raise ValueError(f"Could not construct new filename for {filepath}")
+            raise ValueError(
+                f"Could not construct new filename for {filepath}")

         # CHECK TO SEE IF DATATYPE CHANGED
         # datatype may be overridden/changed if the original file is located in the wrong folder.
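
The join-then-replace chain assembles the entity portion of a BIDS filename and then shortens the long entity names to their BIDS abbreviations. A toy run-through with assumed entity values:

entities = {"task": "rest", "acquisition": "highres", "run": "01"}
entity_file_keys = ["task", "acquisition", "run"]

filename = "_".join(
    [f"{key}-{entities[key]}" for key in entity_file_keys])
# BIDS filenames abbreviate the long entity names
filename = (
    filename.replace("acquisition", "acq")
    .replace("direction", "dir")
)
print(filename)  # task-rest_acq-highres_run-01
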
@@ -517,7 +531,8 @@ def change_filename(self, filepath, entities):
             dtype_new = dtype_orig

         # Construct the new filename
-        new_path = str(self.path) + "/" + sub + "/" + ses + "/" + dtype_new + "/" + filename
+        new_path = str(self.path) + "/" + sub + "/" + \
+            ses + "/" + dtype_new + "/" + filename

         # Add the scan path + new path to the lists of old, new filenames
         self.old_filenames.append(filepath)
@@ -536,7 +551,8 @@ def change_filename(self, filepath, entities):
             # ensure assoc not an IntendedFor reference
             if ".nii" not in str(assoc_path):
                 self.old_filenames.append(assoc_path)
-                new_ext_path = img_to_new_ext(new_path, "".join(Path(assoc_path).suffixes))
+                new_ext_path = img_to_new_ext(
+                    new_path, "".join(Path(assoc_path).suffixes))
                 self.new_filenames.append(new_ext_path)

         # MAKE SURE THESE AREN'T COVERED BY get_associations!!!
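
Joining Path(...).suffixes preserves compound extensions such as .nii.gz when the associated file's extension is re-applied to the new path. For example:

from pathlib import Path

assoc_path = "sub-01/func/sub-01_task-rest_bold.nii.gz"
# .suffixes returns every extension component, in order
print(Path(assoc_path).suffixes)           # ['.nii', '.gz']
print("".join(Path(assoc_path).suffixes))  # .nii.gz
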
@@ -609,7 +625,8 @@ def change_filename(self, filepath, entities):
             if Path(old_labeling).exists():
                 self.old_filenames.append(old_labeling)
                 new_scan_end = "_" + suffix + old_ext
-                new_labeling = new_path.replace(new_scan_end, "_asllabeling.jpg")
+                new_labeling = new_path.replace(
+                    new_scan_end, "_asllabeling.jpg")
                 self.new_filenames.append(new_labeling)

         # RENAME INTENDED FORS!
@@ -635,7 +652,8 @@ def change_filename(self, filepath, entities):
                     # remove old filename
                     data["IntendedFor"].remove(item)
                     # add new filename
-                    data["IntendedFor"].append(_get_intended_for_reference(new_path))
+                    data["IntendedFor"].append(
+                        _get_intended_for_reference(new_path))

                 # update the json with the new data dictionary
                 _update_json(filename_with_if, data)
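
_get_intended_for_reference and _update_json are CuBIDS helpers; underneath, this is a remove-then-append edit on the sidecar's IntendedFor list. A minimal sketch with plain json and illustrative paths:

import json

fmap_json = "sub-01/fmap/sub-01_phasediff.json"  # illustrative path
old_ref = "func/sub-01_task-rest_bold.nii.gz"
new_ref = "func/sub-01_task-rest_acq-vari_bold.nii.gz"

with open(fmap_json) as f:
    data = json.load(f)

# Swap the renamed scan's reference inside the IntendedFor list
if old_ref in data.get("IntendedFor", []):
    data["IntendedFor"].remove(old_ref)
    data["IntendedFor"].append(new_ref)

with open(fmap_json, "w") as f:
    json.dump(data, f, indent=4)
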
@@ -808,7 +826,8 @@ def _purge_associations(self, scans):

         if "/func/" in str(path):
             # add tsvs
-            tsv = img_to_new_ext(str(path), ".tsv").replace("_bold", "_events")
+            tsv = img_to_new_ext(str(path), ".tsv").replace(
+                "_bold", "_events")
             if Path(tsv).exists():
                 to_remove.append(tsv)
             # add tsv json (if exists)
@@ -922,7 +941,8 @@ def get_param_groups_from_entity_set(self, entity_set):
         2. A data frame with param group summaries
         """
         if not self.fieldmaps_cached:
-            raise Exception("Fieldmaps must be cached to find parameter groups.")
+            raise Exception(
+                "Fieldmaps must be cached to find parameter groups.")
         key_entities = _entity_set_to_entities(entity_set)
         key_entities["extension"] = ".nii[.gz]*"
@@ -975,7 +995,8 @@ def create_data_dictionary(self):
             mod_dict = sidecar_params[mod]
             for s_param in mod_dict.keys():
                 if s_param not in self.data_dict.keys():
-                    self.data_dict[s_param] = {"Description": "Scanning Parameter"}
+                    self.data_dict[s_param] = {
+                        "Description": "Scanning Parameter"}

         relational_params = self.grouping_config.get("relational_params")
         for r_param in relational_params.keys():
@@ -987,7 +1008,8 @@ def create_data_dictionary(self):
             mod_dict = derived_params[mod]
             for d_param in mod_dict.keys():
                 if d_param not in self.data_dict.keys():
-                    self.data_dict[d_param] = {"Description": "NIfTI Header Parameter"}
+                    self.data_dict[d_param] = {
+                        "Description": "NIfTI Header Parameter"}

         # Manually add non-sidecar columns/descriptions to data_dict
         desc1 = "Column where users mark groups to manually check"
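
These loops seed self.data_dict, a mapping from column name to a {"Description": ...} entry that is later written out as the TSVs' JSON data dictionary. A reduced sketch with assumed config contents:

data_dict = {}
# Assumed stand-ins for the grouping config's sidecar/derived sections
sidecar_params = {"func": {"RepetitionTime": {}, "EchoTime": {}}}
derived_params = {"func": {"Obliquity": {}, "VoxelSizeDim1": {}}}

for mod_dict in sidecar_params.values():
    for s_param in mod_dict.keys():
        if s_param not in data_dict.keys():
            data_dict[s_param] = {"Description": "Scanning Parameter"}

for mod_dict in derived_params.values():
    for d_param in mod_dict.keys():
        if d_param not in data_dict.keys():
            data_dict[d_param] = {"Description": "NIfTI Header Parameter"}

print(data_dict["RepetitionTime"])  # {'Description': 'Scanning Parameter'}
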
@@ -1094,17 +1116,20 @@ def get_param_groups_dataframes(self):
             long_name = big_df.loc[row, "FilePath"]
             big_df.loc[row, "FilePath"] = long_name.replace(self.path, "")

-        summary = _order_columns(pd.concat(param_group_summaries, ignore_index=True))
+        summary = _order_columns(
+            pd.concat(param_group_summaries, ignore_index=True))

         # create new col that strings key and param group together
-        summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
+        summary["KeyParamGroup"] = summary["EntitySet"] + \
+            "__" + summary["ParamGroup"].map(str)

         # move this column to the front of the dataframe
         key_param_col = summary.pop("KeyParamGroup")
         summary.insert(0, "KeyParamGroup", key_param_col)

         # do the same for the files df
-        big_df["KeyParamGroup"] = big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
+        big_df["KeyParamGroup"] = big_df["EntitySet"] + \
+            "__" + big_df["ParamGroup"].map(str)

         # move this column to the front of the dataframe
         key_param_col = big_df.pop("KeyParamGroup")
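
pop plus insert is the standard pandas idiom for moving a column to the front: pop removes and returns it, insert places it at position 0. A toy example:

import pandas as pd

summary = pd.DataFrame({"EntitySet": ["task-rest"], "ParamGroup": [1]})
summary["KeyParamGroup"] = (
    summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
)

# Move KeyParamGroup from the last column position to the first
key_param_col = summary.pop("KeyParamGroup")
summary.insert(0, "KeyParamGroup", key_param_col)
print(summary.columns.tolist())
# ['KeyParamGroup', 'EntitySet', 'ParamGroup']
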
@@ -1253,8 +1278,10 @@ def get_tsvs(self, path_prefix):

         big_df, summary = self.get_param_groups_dataframes()

-        summary = summary.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
-        big_df = big_df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
+        summary = summary.sort_values(
+            by=["Modality", "EntitySetCount"], ascending=[True, False])
+        big_df = big_df.sort_values(
+            by=["Modality", "EntitySetCount"], ascending=[True, False])

         # Create json dictionaries for summary and files tsvs
         self.create_data_dictionary()
@@ -1273,7 +1300,8 @@ def get_tsvs(self, path_prefix):
         summary.to_csv(f"{path_prefix}_summary.tsv", sep="\t", index=False)

         # Calculate the acq groups
-        group_by_acquisition_sets(f"{path_prefix}_files.tsv", path_prefix, self.acq_group_level)
+        group_by_acquisition_sets(
+            f"{path_prefix}_files.tsv", path_prefix, self.acq_group_level)

         print(f"CuBIDS detected {len(summary)} Parameter Groups.")

@@ -1492,7 +1520,8 @@ def _get_param_groups(
         # Get the fieldmaps out and add their types
         if "FieldmapKey" in relational_params:
             fieldmap_types = sorted(
-                [_file_to_entity_set(fmap.path) for fmap in fieldmap_lookup[path]]
+                [_file_to_entity_set(fmap.path)
+                 for fmap in fieldmap_lookup[path]]
             )

             # check if config says columns or bool
@@ -1514,7 +1543,8 @@ def _get_param_groups(
         # If it's a fieldmap, see what entity set it's intended to correct
         if "IntendedForKey" in relational_params:
             intended_entity_sets = sorted(
-                [_file_to_entity_set(intention) for intention in intentions]
+                [_file_to_entity_set(intention)
+                 for intention in intentions]
             )

             # check if config says columns or bool
@@ -1568,25 +1598,30 @@ def _get_param_groups(
         {"Counts": value_counts.to_numpy(), "ParamGroup": value_counts.index.to_numpy()}
     )

-    param_groups_with_counts = pd.merge(deduped, param_group_counts, on=["ParamGroup"])
+    param_groups_with_counts = pd.merge(
+        deduped, param_group_counts, on=["ParamGroup"])

     # Sort by counts and relabel the param groups
-    param_groups_with_counts.sort_values(by=["Counts"], inplace=True, ascending=False)
-    param_groups_with_counts["ParamGroup"] = np.arange(param_groups_with_counts.shape[0]) + 1
+    param_groups_with_counts.sort_values(
+        by=["Counts"], inplace=True, ascending=False)
+    param_groups_with_counts["ParamGroup"] = np.arange(
+        param_groups_with_counts.shape[0]) + 1

     # Send the new, ordered param group ids to the files list
     ordered_labeled_files = pd.merge(
         df, param_groups_with_counts, on=check_cols, suffixes=("_x", "")
     )

     # sort ordered_labeled_files by param group
-    ordered_labeled_files.sort_values(by=["Counts"], inplace=True, ascending=False)
+    ordered_labeled_files.sort_values(
+        by=["Counts"], inplace=True, ascending=False)

     # now get rid of cluster cols from deduped and df
     for col in list(ordered_labeled_files.columns):
         if col.startswith("Cluster_"):
             ordered_labeled_files = ordered_labeled_files.drop(col, axis=1)
-            param_groups_with_counts = param_groups_with_counts.drop(col, axis=1)
+            param_groups_with_counts = param_groups_with_counts.drop(
+                col, axis=1)
         if col.endswith("_x"):
             ordered_labeled_files = ordered_labeled_files.drop(col, axis=1)

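The counting-and-relabeling step is worth a standalone illustration: groups are counted, the counts are merged back, rows are sorted by Counts descending, and ParamGroup is renumbered 1..N so that group 1 is always the most populous. A toy version with made-up values:

import numpy as np
import pandas as pd

# Two deduplicated parameter groups and per-file group labels (made up)
deduped = pd.DataFrame({"ParamGroup": [1, 2], "EchoTime": [0.03, 0.05]})
labels = pd.Series([2, 2, 2, 1])  # group 2 is the most common

value_counts = labels.value_counts()
param_group_counts = pd.DataFrame(
    {"Counts": value_counts.to_numpy(), "ParamGroup": value_counts.index.to_numpy()}
)
param_groups_with_counts = pd.merge(
    deduped, param_group_counts, on=["ParamGroup"])

# Most populous group becomes ParamGroup 1, the next becomes 2, etc.
param_groups_with_counts.sort_values(
    by=["Counts"], inplace=True, ascending=False)
param_groups_with_counts["ParamGroup"] = np.arange(
    param_groups_with_counts.shape[0]) + 1
print(param_groups_with_counts)
# EchoTime 0.05 (3 files) is now ParamGroup 1; 0.03 (1 file) is ParamGroup 2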