@@ -149,11 +149,9 @@ def reset_bids_layout(self, validate=False):
             re.compile(r"/\."),
         ]
 
-        indexer = bids.BIDSLayoutIndexer(
-            validate=validate, ignore=ignores, index_metadata=False)
+        indexer = bids.BIDSLayoutIndexer(validate=validate, ignore=ignores, index_metadata=False)
 
-        self._layout = bids.BIDSLayout(
-            self.path, validate=validate, indexer=indexer)
+        self._layout = bids.BIDSLayout(self.path, validate=validate, indexer=indexer)
 
     def create_cubids_code_dir(self):
         """Create CuBIDS code directory.
@@ -203,8 +201,7 @@ def datalad_save(self, message=None):
             Commit message to use with datalad save.
         """
         if not self.datalad_ready:
-            raise Exception(
-                "DataLad has not been initialized. use datalad_init()")
+            raise Exception("DataLad has not been initialized. use datalad_init()")
 
         statuses = self.datalad_handle.save(message=message or "CuBIDS Save")
         saved_status = set([status["status"] for status in statuses])
@@ -226,8 +223,7 @@ def is_datalad_clean(self):
         """
         if not self.datalad_ready:
             raise Exception("Datalad not initialized, can't determine status")
-        statuses = set([status["state"]
-                        for status in self.datalad_handle.status()])
+        statuses = set([status["state"] for status in self.datalad_handle.status()])
         return statuses == set(["clean"])
 
     def datalad_undo_last_commit(self):
@@ -241,10 +237,8 @@ def datalad_undo_last_commit(self):
             If there are untracked changes in the datalad dataset.
         """
         if not self.is_datalad_clean():
-            raise Exception(
-                "Untracked changes present. Run clear_untracked_changes first")
-        reset_proc = subprocess.run(
-            ["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
+            raise Exception("Untracked changes present. Run clear_untracked_changes first")
+        reset_proc = subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
         reset_proc.check_returncode()
 
     def add_nifti_info(self):
@@ -348,13 +342,11 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
         files_df = pd.read_table(files_tsv)
 
         # Check that the MergeInto column only contains valid merges
-        ok_merges, deletions = check_merging_operations(
-            summary_tsv, raise_on_error=raise_on_error)
+        ok_merges, deletions = check_merging_operations(summary_tsv, raise_on_error=raise_on_error)
 
         merge_commands = []
         for source_id, dest_id in ok_merges:
-            dest_files = files_df.loc[(
-                files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
+            dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
             source_files = files_df.loc[
                 (files_df[["ParamGroup", "EntitySet"]] == source_id).all(1)
             ]
@@ -365,15 +357,13 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
             for dest_nii in dest_files.FilePath:
                 dest_json = img_to_new_ext(self.path + dest_nii, ".json")
                 if Path(dest_json).exists() and Path(source_json).exists():
-                    merge_commands.append(
-                        f"bids-sidecar-merge {source_json} {dest_json}")
+                    merge_commands.append(f"bids-sidecar-merge {source_json} {dest_json}")
 
         # Get the delete commands
         # delete_commands = []
         to_remove = []
         for rm_id in deletions:
-            files_to_rm = files_df.loc[(
-                files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
+            files_to_rm = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
 
             for rm_me in files_to_rm.FilePath:
                 if Path(self.path + rm_me).exists():
@@ -446,8 +436,7 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
 
             rename_commit = s1 + s2
 
-            self.datalad_handle.run(
-                cmd=["bash", renames], message=rename_commit)
+            self.datalad_handle.run(cmd=["bash", renames], message=rename_commit)
         else:
             subprocess.run(
                 ["bash", renames],
@@ -487,8 +476,7 @@ def change_filename(self, filepath, entities):
         entity_file_keys = []
 
         # Entities that may be in the filename?
-        file_keys = ["task", "acquisition",
-                     "direction", "reconstruction", "run"]
+        file_keys = ["task", "acquisition", "direction", "reconstruction", "run"]
 
         for key in file_keys:
             if key in list(entities.keys()):
@@ -502,8 +490,7 @@ def change_filename(self, filepath, entities):
             # XXX: This adds an extra leading zero to run.
             entities["run"] = "0" + str(entities["run"])
 
-        filename = "_".join(
-            [f"{key}-{entities[key]}" for key in entity_file_keys])
+        filename = "_".join([f"{key}-{entities[key]}" for key in entity_file_keys])
         filename = (
             filename.replace("acquisition", "acq")
             .replace("direction", "dir")
@@ -512,8 +499,7 @@ def change_filename(self, filepath, entities):
         if len(filename) > 0:
             filename = sub_ses + "_" + filename + "_" + suffix + old_ext
         else:
-            raise ValueError(
-                f"Could not construct new filename for {filepath}")
+            raise ValueError(f"Could not construct new filename for {filepath}")
 
         # CHECK TO SEE IF DATATYPE CHANGED
         # datatype may be overridden/changed if the original file is located in the wrong folder.
@@ -531,8 +517,7 @@ def change_filename(self, filepath, entities):
             dtype_new = dtype_orig
 
         # Construct the new filename
-        new_path = str(self.path) + "/" + sub + "/" + \
-            ses + "/" + dtype_new + "/" + filename
+        new_path = str(self.path) + "/" + sub + "/" + ses + "/" + dtype_new + "/" + filename
 
         # Add the scan path + new path to the lists of old, new filenames
         self.old_filenames.append(filepath)
@@ -551,8 +536,7 @@ def change_filename(self, filepath, entities):
                 # ensure assoc not an IntendedFor reference
                 if ".nii" not in str(assoc_path):
                     self.old_filenames.append(assoc_path)
-                    new_ext_path = img_to_new_ext(
-                        new_path, "".join(Path(assoc_path).suffixes))
+                    new_ext_path = img_to_new_ext(new_path, "".join(Path(assoc_path).suffixes))
                     self.new_filenames.append(new_ext_path)
 
         # MAKE SURE THESE AREN'T COVERED BY get_associations!!!
@@ -625,8 +609,7 @@ def change_filename(self, filepath, entities):
             if Path(old_labeling).exists():
                 self.old_filenames.append(old_labeling)
                 new_scan_end = "_" + suffix + old_ext
-                new_labeling = new_path.replace(
-                    new_scan_end, "_asllabeling.jpg")
+                new_labeling = new_path.replace(new_scan_end, "_asllabeling.jpg")
                 self.new_filenames.append(new_labeling)
 
         # RENAME INTENDED FORS!
@@ -652,8 +635,7 @@ def change_filename(self, filepath, entities):
                     # remove old filename
                     data["IntendedFor"].remove(item)
                     # add new filename
-                    data["IntendedFor"].append(
-                        _get_intended_for_reference(new_path))
+                    data["IntendedFor"].append(_get_intended_for_reference(new_path))
 
             # update the json with the new data dictionary
             _update_json(filename_with_if, data)
@@ -826,8 +808,7 @@ def _purge_associations(self, scans):
 
             if "/func/" in str(path):
                 # add tsvs
-                tsv = img_to_new_ext(str(path), ".tsv").replace(
-                    "_bold", "_events")
+                tsv = img_to_new_ext(str(path), ".tsv").replace("_bold", "_events")
                 if Path(tsv).exists():
                     to_remove.append(tsv)
                     # add tsv json (if exists)
@@ -941,8 +922,7 @@ def get_param_groups_from_entity_set(self, entity_set):
             2. A data frame with param group summaries
         """
         if not self.fieldmaps_cached:
-            raise Exception(
-                "Fieldmaps must be cached to find parameter groups.")
+            raise Exception("Fieldmaps must be cached to find parameter groups.")
         key_entities = _entity_set_to_entities(entity_set)
         key_entities["extension"] = ".nii[.gz]*"
 
@@ -995,8 +975,7 @@ def create_data_dictionary(self):
             mod_dict = sidecar_params[mod]
             for s_param in mod_dict.keys():
                 if s_param not in self.data_dict.keys():
-                    self.data_dict[s_param] = {
-                        "Description": "Scanning Parameter"}
+                    self.data_dict[s_param] = {"Description": "Scanning Parameter"}
 
         relational_params = self.grouping_config.get("relational_params")
         for r_param in relational_params.keys():
@@ -1008,8 +987,7 @@ def create_data_dictionary(self):
             mod_dict = derived_params[mod]
             for d_param in mod_dict.keys():
                 if d_param not in self.data_dict.keys():
-                    self.data_dict[d_param] = {
-                        "Description": "NIfTI Header Parameter"}
+                    self.data_dict[d_param] = {"Description": "NIfTI Header Parameter"}
 
         # Manually add non-sidecar columns/descriptions to data_dict
         desc1 = "Column where users mark groups to manually check"
@@ -1116,20 +1094,17 @@ def get_param_groups_dataframes(self):
             long_name = big_df.loc[row, "FilePath"]
             big_df.loc[row, "FilePath"] = long_name.replace(self.path, "")
 
-        summary = _order_columns(
-            pd.concat(param_group_summaries, ignore_index=True))
+        summary = _order_columns(pd.concat(param_group_summaries, ignore_index=True))
 
         # create new col that strings key and param group together
-        summary["KeyParamGroup"] = summary["EntitySet"] + \
-            "__" + summary["ParamGroup"].map(str)
+        summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
 
         # move this column to the front of the dataframe
         key_param_col = summary.pop("KeyParamGroup")
         summary.insert(0, "KeyParamGroup", key_param_col)
 
         # do the same for the files df
-        big_df["KeyParamGroup"] = big_df["EntitySet"] + \
-            "__" + big_df["ParamGroup"].map(str)
+        big_df["KeyParamGroup"] = big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
 
         # move this column to the front of the dataframe
         key_param_col = big_df.pop("KeyParamGroup")
@@ -1278,10 +1253,8 @@ def get_tsvs(self, path_prefix):
 
         big_df, summary = self.get_param_groups_dataframes()
 
-        summary = summary.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False])
-        big_df = big_df.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False])
+        summary = summary.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
+        big_df = big_df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
 
         # Create json dictionaries for summary and files tsvs
         self.create_data_dictionary()
@@ -1300,8 +1273,7 @@ def get_tsvs(self, path_prefix):
         summary.to_csv(f"{path_prefix}_summary.tsv", sep="\t", index=False)
 
         # Calculate the acq groups
-        group_by_acquisition_sets(
-            f"{path_prefix}_files.tsv", path_prefix, self.acq_group_level)
+        group_by_acquisition_sets(f"{path_prefix}_files.tsv", path_prefix, self.acq_group_level)
 
         print(f"CuBIDS detected {len(summary)} Parameter Groups.")
 
@@ -1520,8 +1492,7 @@ def _get_param_groups(
         # Get the fieldmaps out and add their types
         if "FieldmapKey" in relational_params:
             fieldmap_types = sorted(
-                [_file_to_entity_set(fmap.path)
-                 for fmap in fieldmap_lookup[path]]
+                [_file_to_entity_set(fmap.path) for fmap in fieldmap_lookup[path]]
             )
 
             # check if config says columns or bool
@@ -1543,8 +1514,7 @@ def _get_param_groups(
         # If it's a fieldmap, see what entity set it's intended to correct
         if "IntendedForKey" in relational_params:
             intended_entity_sets = sorted(
-                [_file_to_entity_set(intention)
-                 for intention in intentions]
+                [_file_to_entity_set(intention) for intention in intentions]
            )
 
             # check if config says columns or bool
@@ -1598,30 +1568,25 @@ def _get_param_groups(
         {"Counts": value_counts.to_numpy(), "ParamGroup": value_counts.index.to_numpy()}
     )
 
-    param_groups_with_counts = pd.merge(
-        deduped, param_group_counts, on=["ParamGroup"])
+    param_groups_with_counts = pd.merge(deduped, param_group_counts, on=["ParamGroup"])
 
     # Sort by counts and relabel the param groups
-    param_groups_with_counts.sort_values(
-        by=["Counts"], inplace=True, ascending=False)
-    param_groups_with_counts["ParamGroup"] = np.arange(
-        param_groups_with_counts.shape[0]) + 1
+    param_groups_with_counts.sort_values(by=["Counts"], inplace=True, ascending=False)
+    param_groups_with_counts["ParamGroup"] = np.arange(param_groups_with_counts.shape[0]) + 1
 
     # Send the new, ordered param group ids to the files list
     ordered_labeled_files = pd.merge(
         df, param_groups_with_counts, on=check_cols, suffixes=("_x", "")
     )
 
     # sort ordered_labeled_files by param group
-    ordered_labeled_files.sort_values(
-        by=["Counts"], inplace=True, ascending=False)
+    ordered_labeled_files.sort_values(by=["Counts"], inplace=True, ascending=False)
 
     # now get rid of cluster cols from deduped and df
     for col in list(ordered_labeled_files.columns):
         if col.startswith("Cluster_"):
             ordered_labeled_files = ordered_labeled_files.drop(col, axis=1)
-            param_groups_with_counts = param_groups_with_counts.drop(
-                col, axis=1)
+            param_groups_with_counts = param_groups_with_counts.drop(col, axis=1)
 
         if col.endswith("_x"):
             ordered_labeled_files = ordered_labeled_files.drop(col, axis=1)