@@ -123,7 +123,9 @@ def __init__(
         self.data_dict = {}  # data dictionary for TSV outputs
         self.use_datalad = use_datalad  # True if flag set, False if flag unset
         self.schema = load_schema(schema_json)
-        self.is_longitudinal = self._infer_longitudinal()  # inferred from dataset structure
+        self.is_longitudinal = (
+            self._infer_longitudinal()
+        )  # inferred from dataset structure
 
         if self.use_datalad:
             self.init_datalad()
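Note: `_infer_longitudinal` is CuBIDS-internal and not shown in this diff. As a rough illustration of the idea, a BIDS dataset can be treated as longitudinal when any subject directory contains session subdirectories; the helper below is a hypothetical sketch, not the actual implementation.

from pathlib import Path

def infer_longitudinal(dataset_path):
    # Hypothetical sketch: treat the dataset as longitudinal if any
    # subject folder contains a session-level directory (sub-*/ses-*).
    return any(Path(dataset_path).glob("sub-*/ses-*"))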
@@ -186,7 +188,9 @@ def reset_bids_layout(self, validate=False):
             re.compile(r"/\."),
         ]
 
-        indexer = bids.BIDSLayoutIndexer(validate=validate, ignore=ignores, index_metadata=False)
+        indexer = bids.BIDSLayoutIndexer(
+            validate=validate, ignore=ignores, index_metadata=False
+        )
 
         self._layout = bids.BIDSLayout(self.path, validate=validate, indexer=indexer)
 
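For context, `BIDSLayoutIndexer` is the pybids class that controls which files get indexed, and `index_metadata=False` skips sidecar parsing, which speeds up layout construction considerably on large datasets. A minimal usage sketch; the ignore list here is illustrative, since the diff only shows its final regex entry:

import re
import bids

# Illustrative ignore list; only the hidden-file regex appears in the diff above.
ignores = ["code", "sourcedata", re.compile(r"/\.")]
indexer = bids.BIDSLayoutIndexer(validate=False, ignore=ignores, index_metadata=False)
layout = bids.BIDSLayout("/path/to/bids", validate=False, indexer=indexer)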
@@ -293,7 +297,9 @@ def datalad_undo_last_commit(self):
             If there are untracked changes in the datalad dataset.
         """
         if not self.is_datalad_clean():
-            raise Exception("Untracked changes present. Run clear_untracked_changes first")
+            raise Exception(
+                "Untracked changes present. Run clear_untracked_changes first"
+            )
         reset_proc = subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
         reset_proc.check_returncode()
 
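The guard-then-reset pattern matters here because `git reset --hard` is destructive. A standalone sketch of the same flow (hypothetical helper, using `git status --porcelain` as the cleanliness check rather than datalad's own `is_datalad_clean`):

import subprocess

def undo_last_commit(dataset_path):
    # Refuse to reset if the working tree has any uncommitted changes.
    status = subprocess.run(
        ["git", "status", "--porcelain"],
        cwd=dataset_path, capture_output=True, text=True, check=True,
    )
    if status.stdout.strip():
        raise RuntimeError("Untracked changes present. Run clear_untracked_changes first")
    # Drop the most recent commit and its changes.
    subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=dataset_path, check=True)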
@@ -417,7 +423,9 @@ def add_file_collections(self):
                 continue
 
             # Add file collection metadata to the sidecar
-            files, collection_metadata = utils.collect_file_collections(self.layout, path)
+            files, collection_metadata = utils.collect_file_collections(
+                self.layout, path
+            )
             filepaths = [f.path for f in files]
             checked_files.extend(filepaths)
@@ -439,7 +447,9 @@ def add_file_collections(self):
 
         self.reset_bids_layout()
 
-    def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=True):
+    def apply_tsv_changes(
+        self, summary_tsv, files_tsv, new_prefix, raise_on_error=True
+    ):
         """Apply changes documented in the edited summary tsv and generate the new tsv files.
 
         This function looks at the RenameEntitySet and MergeInto
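Wrapping the signature leaves the calling convention unchanged. A hypothetical call, assuming an existing `CuBIDS` instance and edited TSVs produced by an earlier grouping run (the filenames are made up for illustration):

cubids_obj.apply_tsv_changes(
    summary_tsv="v0_edited_summary.tsv",
    files_tsv="v0_files.tsv",
    new_prefix="v1",
)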
@@ -475,11 +485,15 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=True):
         files_df = pd.read_table(files_tsv)
 
         # Check that the MergeInto column only contains valid merges
-        ok_merges, deletions = check_merging_operations(summary_tsv, raise_on_error=raise_on_error)
+        ok_merges, deletions = check_merging_operations(
+            summary_tsv, raise_on_error=raise_on_error
+        )
 
         merge_commands = []
         for source_id, dest_id in ok_merges:
-            dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
+            dest_files = files_df.loc[
+                (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)
+            ]
             source_files = files_df.loc[
                 (files_df[["ParamGroup", "EntitySet"]] == source_id).all(1)
             ]
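The `(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)` idiom selects rows whose (ParamGroup, EntitySet) pair matches the identifier tuple: the comparison broadcasts the tuple across the two columns, and `.all(1)` keeps rows where both columns match. A self-contained illustration with made-up values:

import pandas as pd

files_df = pd.DataFrame({
    "ParamGroup": [1, 2, 1],
    "EntitySet": ["task-rest", "task-rest", "task-nback"],
    "FilePath": ["a.nii.gz", "b.nii.gz", "c.nii.gz"],
})
dest_id = (1, "task-rest")  # tuple order must mirror the column list
mask = (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)
print(files_df.loc[mask, "FilePath"].tolist())  # ['a.nii.gz']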
@@ -490,12 +504,16 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=True):
             for dest_nii in dest_files.FilePath:
                 dest_json = utils.img_to_new_ext(self.path + dest_nii, ".json")
                 if Path(dest_json).exists() and Path(source_json).exists():
-                    merge_commands.append(f"cubids bids-sidecar-merge {source_json} {dest_json}")
+                    merge_commands.append(
+                        f"cubids bids-sidecar-merge {source_json} {dest_json}"
+                    )
 
         # Get the delete commands
         to_remove = []
         for rm_id in deletions:
-            files_to_rm = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
+            files_to_rm = files_df.loc[
+                (files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)
+            ]
 
             for rm_me in files_to_rm.FilePath:
                 if Path(self.path + rm_me).exists():
@@ -692,19 +710,10 @@ def change_filename(self, filepath, entities):
             new_context = new_path.replace(new_scan_end, "_aslcontext.tsv")
             self.new_filenames.append(new_context)
 
-        old_m0scan = filepath.replace(scan_end, "_m0scan.nii.gz")
-        if Path(old_m0scan).exists():
-            self.old_filenames.append(old_m0scan)
-            new_scan_end = "_" + suffix + old_ext
-            new_m0scan = new_path.replace(new_scan_end, "_m0scan.nii.gz")
-            self.new_filenames.append(new_m0scan)
-
-        old_mjson = filepath.replace(scan_end, "_m0scan.json")
-        if Path(old_mjson).exists():
-            self.old_filenames.append(old_mjson)
-            new_scan_end = "_" + suffix + old_ext
-            new_mjson = new_path.replace(new_scan_end, "_m0scan.json")
-            self.new_filenames.append(new_mjson)
+        # Do NOT rename M0 scans or their JSON sidecars. M0 files should
+        # retain their original filenames to preserve independent variability.
+        # The IntendedFor field in M0 JSONs will be updated below to point
+        # to the newly renamed ASL files.
 
         old_labeling = filepath.replace(scan_end, "_asllabeling.jpg")
         if Path(old_labeling).exists():
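The effect of this hunk: companion ASL files such as `_aslcontext.tsv` still follow the renamed image, while `_m0scan.nii.gz` and `_m0scan.json` now stay put. A toy illustration of the string replacement, with made-up paths and entities:

filepath = "/bids/sub-01/perf/sub-01_asl.nii.gz"
new_path = "/bids/sub-01/perf/sub-01_acq-highres_asl.nii.gz"
scan_end = new_scan_end = "_asl.nii.gz"  # "_" + suffix + old_ext

# The aslcontext companion is renamed alongside the image...
print(filepath.replace(scan_end, "_aslcontext.tsv"))
# /bids/sub-01/perf/sub-01_aslcontext.tsv
print(new_path.replace(new_scan_end, "_aslcontext.tsv"))
# /bids/sub-01/perf/sub-01_acq-highres_aslcontext.tsv
# ...while sub-01_m0scan.nii.gz and sub-01_m0scan.json keep their names.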
@@ -739,13 +748,17 @@ def change_filename(self, filepath, entities):
                     # remove old filename
                     data["IntendedFor"].remove(item)
                     # add new filename
-                    data["IntendedFor"].append(utils._get_participant_relative_path(new_path))
+                    data["IntendedFor"].append(
+                        utils._get_participant_relative_path(new_path)
+                    )
 
                 if item == utils._get_bidsuri(filepath, self.path):
                     # remove old filename
                     data["IntendedFor"].remove(item)
                     # add new filename
-                    data["IntendedFor"].append(utils._get_bidsuri(new_path, self.path))
+                    data["IntendedFor"].append(
+                        utils._get_bidsuri(new_path, self.path)
+                    )
 
             # update the json with the new data dictionary
             utils._update_json(filename_with_if, data)
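This is the "updated below" step the new M0 comment refers to: any sidecar whose IntendedFor lists the old filename is rewritten to point at the renamed file, in both participant-relative and BIDS-URI forms. A minimal standalone sketch of the same idea (hypothetical helper; CuBIDS uses its own `_get_participant_relative_path`, `_get_bidsuri`, and `_update_json` utilities):

import json
from pathlib import Path

def update_intended_for(sidecar_path, old_target, new_target):
    # Rewrite IntendedFor entries matching the old filename. Rebuilding the
    # list avoids the pitfall of removing items from a list while iterating it.
    data = json.loads(Path(sidecar_path).read_text())
    if "IntendedFor" not in data:
        return
    data["IntendedFor"] = [
        new_target if item == old_target else item for item in data["IntendedFor"]
    ]
    Path(sidecar_path).write_text(json.dumps(data, indent=4))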
@@ -922,7 +935,9 @@ def _purge_associations(self, scans):
 
             if "/func/" in str(path):
                 # add tsvs
-                tsv = utils.img_to_new_ext(str(path), ".tsv").replace("_bold", "_events")
+                tsv = utils.img_to_new_ext(str(path), ".tsv").replace(
+                    "_bold", "_events"
+                )
                 if Path(tsv).exists():
                     to_remove.append(tsv)
                 # add tsv json (if exists)
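`utils.img_to_new_ext` is CuBIDS-internal; from its call sites it maps an image path to a sibling file with a different extension, after which the `_bold` to `_events` replacement yields the BIDS events filename. A hypothetical re-implementation for illustration only:

def img_to_new_ext(img_path: str, new_ext: str) -> str:
    # Hypothetical sketch: swap .nii/.nii.gz for the requested extension.
    for old_ext in (".nii.gz", ".nii"):
        if img_path.endswith(old_ext):
            return img_path[: -len(old_ext)] + new_ext
    return img_path

print(img_to_new_ext("sub-01_task-rest_bold.nii.gz", ".tsv").replace("_bold", "_events"))
# sub-01_task-rest_events.tsv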
@@ -1277,17 +1292,23 @@ def get_param_groups_dataframes(self):
             long_name = big_df.loc[row, "FilePath"]
             big_df.loc[row, "FilePath"] = long_name.replace(self.path, "")
 
-        summary = utils._order_columns(pd.concat(param_group_summaries, ignore_index=True))
+        summary = utils._order_columns(
+            pd.concat(param_group_summaries, ignore_index=True)
+        )
 
         # create new col that strings key and param group together
-        summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
+        summary["KeyParamGroup"] = (
+            summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
+        )
 
         # move this column to the front of the dataframe
         key_param_col = summary.pop("KeyParamGroup")
         summary.insert(0, "KeyParamGroup", key_param_col)
 
         # do the same for the files df
-        big_df["KeyParamGroup"] = big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
+        big_df["KeyParamGroup"] = (
+            big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
+        )
 
         # move this column to the front of the dataframe
         key_param_col = big_df.pop("KeyParamGroup")
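The pop/insert pair is the standard pandas idiom for moving a column to the front without copying the frame. A self-contained example with made-up values:

import pandas as pd

summary = pd.DataFrame({"EntitySet": ["task-rest", "task-nback"], "ParamGroup": [1, 2]})
# Join entity set and parameter group into one key, then move it to the front.
summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
summary.insert(0, "KeyParamGroup", summary.pop("KeyParamGroup"))
print(summary.columns.tolist())
# ['KeyParamGroup', 'EntitySet', 'ParamGroup']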
@@ -1362,8 +1383,12 @@ def get_tsvs(self, path_prefix):
 
         big_df, summary = self.get_param_groups_dataframes()
 
-        summary = summary.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
-        big_df = big_df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
+        summary = summary.sort_values(
+            by=["Modality", "EntitySetCount"], ascending=[True, False]
+        )
+        big_df = big_df.sort_values(
+            by=["Modality", "EntitySetCount"], ascending=[True, False]
+        )
 
         # Create json dictionaries for summary and files tsvs
         self.create_data_dictionary()
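The per-column `ascending` list sorts modalities alphabetically while ranking the most common entity sets first within each modality. A quick illustration with made-up counts:

import pandas as pd

df = pd.DataFrame({"Modality": ["func", "anat", "func"], "EntitySetCount": [3, 5, 10]})
print(df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False]))
#   Modality  EntitySetCount
# 1     anat               5
# 2     func              10
# 0     func               3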