@@ -123,9 +123,7 @@ def __init__(
         self.data_dict = {}  # data dictionary for TSV outputs
         self.use_datalad = use_datalad  # True if flag set, False if flag unset
         self.schema = load_schema(schema_json)
-        self.is_longitudinal = (
-            self._infer_longitudinal()
-        )  # inferred from dataset structure
+        self.is_longitudinal = self._infer_longitudinal()  # inferred from dataset structure
 
         if self.use_datalad:
             self.init_datalad()
@@ -188,9 +186,7 @@ def reset_bids_layout(self, validate=False):
             re.compile(r"/\."),
         ]
 
-        indexer = bids.BIDSLayoutIndexer(
-            validate=validate, ignore=ignores, index_metadata=False
-        )
+        indexer = bids.BIDSLayoutIndexer(validate=validate, ignore=ignores, index_metadata=False)
 
         self._layout = bids.BIDSLayout(self.path, validate=validate, indexer=indexer)
 
@@ -297,9 +293,7 @@ def datalad_undo_last_commit(self):
         If there are untracked changes in the datalad dataset.
         """
         if not self.is_datalad_clean():
-            raise Exception(
-                "Untracked changes present. Run clear_untracked_changes first"
-            )
+            raise Exception("Untracked changes present. Run clear_untracked_changes first")
         reset_proc = subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
         reset_proc.check_returncode()
 
@@ -423,9 +417,7 @@ def add_file_collections(self):
                 continue
 
             # Add file collection metadata to the sidecar
-            files, collection_metadata = utils.collect_file_collections(
-                self.layout, path
-            )
+            files, collection_metadata = utils.collect_file_collections(self.layout, path)
             filepaths = [f.path for f in files]
             checked_files.extend(filepaths)
@@ -447,9 +439,7 @@ def add_file_collections(self):
 
         self.reset_bids_layout()
 
-    def apply_tsv_changes(
-        self, summary_tsv, files_tsv, new_prefix, raise_on_error=True
-    ):
+    def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=True):
         """Apply changes documented in the edited summary tsv and generate the new tsv files.
 
         This function looks at the RenameEntitySet and MergeInto
@@ -485,15 +475,11 @@ def apply_tsv_changes(
         files_df = pd.read_table(files_tsv)
 
         # Check that the MergeInto column only contains valid merges
-        ok_merges, deletions = check_merging_operations(
-            summary_tsv, raise_on_error=raise_on_error
-        )
+        ok_merges, deletions = check_merging_operations(summary_tsv, raise_on_error=raise_on_error)
 
         merge_commands = []
         for source_id, dest_id in ok_merges:
-            dest_files = files_df.loc[
-                (files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)
-            ]
+            dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
             source_files = files_df.loc[
                 (files_df[["ParamGroup", "EntitySet"]] == source_id).all(1)
             ]
@@ -504,16 +490,12 @@ def apply_tsv_changes(
             for dest_nii in dest_files.FilePath:
                 dest_json = utils.img_to_new_ext(self.path + dest_nii, ".json")
                 if Path(dest_json).exists() and Path(source_json).exists():
-                    merge_commands.append(
-                        f"cubids bids-sidecar-merge {source_json} {dest_json}"
-                    )
+                    merge_commands.append(f"cubids bids-sidecar-merge {source_json} {dest_json}")
 
         # Get the delete commands
         to_remove = []
         for rm_id in deletions:
-            files_to_rm = files_df.loc[
-                (files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)
-            ]
+            files_to_rm = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
 
             for rm_me in files_to_rm.FilePath:
                 if Path(self.path + rm_me).exists():
@@ -748,17 +730,13 @@ def change_filename(self, filepath, entities):
                         # remove old filename
                         data["IntendedFor"].remove(item)
                         # add new filename
-                        data["IntendedFor"].append(
-                            utils._get_participant_relative_path(new_path)
-                        )
+                        data["IntendedFor"].append(utils._get_participant_relative_path(new_path))
 
                     if item == utils._get_bidsuri(filepath, self.path):
                         # remove old filename
                         data["IntendedFor"].remove(item)
                         # add new filename
-                        data["IntendedFor"].append(
-                            utils._get_bidsuri(new_path, self.path)
-                        )
+                        data["IntendedFor"].append(utils._get_bidsuri(new_path, self.path))
 
                 # update the json with the new data dictionary
                 utils._update_json(filename_with_if, data)
@@ -935,9 +913,7 @@ def _purge_associations(self, scans):
 
             if "/func/" in str(path):
                 # add tsvs
-                tsv = utils.img_to_new_ext(str(path), ".tsv").replace(
-                    "_bold", "_events"
-                )
+                tsv = utils.img_to_new_ext(str(path), ".tsv").replace("_bold", "_events")
                 if Path(tsv).exists():
                     to_remove.append(tsv)
                 # add tsv json (if exists)
@@ -1292,23 +1268,17 @@ def get_param_groups_dataframes(self):
             long_name = big_df.loc[row, "FilePath"]
             big_df.loc[row, "FilePath"] = long_name.replace(self.path, "")
 
-        summary = utils._order_columns(
-            pd.concat(param_group_summaries, ignore_index=True)
-        )
+        summary = utils._order_columns(pd.concat(param_group_summaries, ignore_index=True))
 
         # create new col that strings key and param group together
-        summary["KeyParamGroup"] = (
-            summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
-        )
+        summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
 
         # move this column to the front of the dataframe
         key_param_col = summary.pop("KeyParamGroup")
         summary.insert(0, "KeyParamGroup", key_param_col)
 
         # do the same for the files df
-        big_df["KeyParamGroup"] = (
-            big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
-        )
+        big_df["KeyParamGroup"] = big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
 
         # move this column to the front of the dataframe
         key_param_col = big_df.pop("KeyParamGroup")
@@ -1383,12 +1353,8 @@ def get_tsvs(self, path_prefix):
 
         big_df, summary = self.get_param_groups_dataframes()
 
-        summary = summary.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False]
-        )
-        big_df = big_df.sort_values(
-            by=["Modality", "EntitySetCount"], ascending=[True, False]
-        )
+        summary = summary.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
+        big_df = big_df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
 
         # Create json dictionaries for summary and files tsvs
         self.create_data_dictionary()
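
A note on the pandas idiom these hunks repeatedly collapse onto one line: comparing a two-column slice of the files table against a `(ParamGroup, EntitySet)` pair yields an element-wise boolean frame, and `.all(1)` keeps only the rows where both columns match. A minimal sketch of that selection with made-up sample rows (only the column names and the `.loc[... .all(1)]` pattern come from the diff):

```python
import pandas as pd

# Toy stand-in for files_df; the rows are hypothetical, the column
# names mirror the ones used in apply_tsv_changes above.
files_df = pd.DataFrame(
    {
        "ParamGroup": [1, 2, 1],
        "EntitySet": ["task-rest", "task-rest", "task-nback"],
        "FilePath": ["/sub-01_a.nii.gz", "/sub-01_b.nii.gz", "/sub-02_c.nii.gz"],
    }
)

dest_id = (1, "task-rest")  # a (ParamGroup, EntitySet) pair, as in ok_merges

# The comparison broadcasts the pair across the two columns;
# .all(1) reduces across columns so only rows matching both survive.
dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
print(dest_files.FilePath.tolist())  # ['/sub-01_a.nii.gz']
```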