
Commit 55673d1

cursor committed
1 parent 0be1b9a · commit 55673d1


2 files changed: +137 -30 lines changed


cubids/cubids.py

Lines changed: 55 additions & 30 deletions
@@ -123,7 +123,9 @@ def __init__(
         self.data_dict = {}  # data dictionary for TSV outputs
         self.use_datalad = use_datalad  # True if flag set, False if flag unset
         self.schema = load_schema(schema_json)
-        self.is_longitudinal = self._infer_longitudinal()  # inferred from dataset structure
+        self.is_longitudinal = (
+            self._infer_longitudinal()
+        )  # inferred from dataset structure
 
         if self.use_datalad:
             self.init_datalad()
@@ -186,7 +188,9 @@ def reset_bids_layout(self, validate=False):
             re.compile(r"/\."),
         ]
 
-        indexer = bids.BIDSLayoutIndexer(validate=validate, ignore=ignores, index_metadata=False)
+        indexer = bids.BIDSLayoutIndexer(
+            validate=validate, ignore=ignores, index_metadata=False
+        )
 
         self._layout = bids.BIDSLayout(self.path, validate=validate, indexer=indexer)
 
@@ -293,7 +297,9 @@ def datalad_undo_last_commit(self):
             If there are untracked changes in the datalad dataset.
         """
         if not self.is_datalad_clean():
-            raise Exception("Untracked changes present. Run clear_untracked_changes first")
+            raise Exception(
+                "Untracked changes present. Run clear_untracked_changes first"
+            )
         reset_proc = subprocess.run(["git", "reset", "--hard", "HEAD~1"], cwd=self.path)
         reset_proc.check_returncode()
 
@@ -417,7 +423,9 @@ def add_file_collections(self):
                 continue
 
             # Add file collection metadata to the sidecar
-            files, collection_metadata = utils.collect_file_collections(self.layout, path)
+            files, collection_metadata = utils.collect_file_collections(
+                self.layout, path
+            )
             filepaths = [f.path for f in files]
             checked_files.extend(filepaths)
 
@@ -439,7 +447,9 @@ def add_file_collections(self):
 
         self.reset_bids_layout()
 
-    def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=True):
+    def apply_tsv_changes(
+        self, summary_tsv, files_tsv, new_prefix, raise_on_error=True
+    ):
         """Apply changes documented in the edited summary tsv and generate the new tsv files.
 
         This function looks at the RenameEntitySet and MergeInto
@@ -475,11 +485,15 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
475485
files_df = pd.read_table(files_tsv)
476486

477487
# Check that the MergeInto column only contains valid merges
478-
ok_merges, deletions = check_merging_operations(summary_tsv, raise_on_error=raise_on_error)
488+
ok_merges, deletions = check_merging_operations(
489+
summary_tsv, raise_on_error=raise_on_error
490+
)
479491

480492
merge_commands = []
481493
for source_id, dest_id in ok_merges:
482-
dest_files = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)]
494+
dest_files = files_df.loc[
495+
(files_df[["ParamGroup", "EntitySet"]] == dest_id).all(1)
496+
]
483497
source_files = files_df.loc[
484498
(files_df[["ParamGroup", "EntitySet"]] == source_id).all(1)
485499
]
@@ -490,12 +504,16 @@ def apply_tsv_changes(self, summary_tsv, files_tsv, new_prefix, raise_on_error=T
490504
for dest_nii in dest_files.FilePath:
491505
dest_json = utils.img_to_new_ext(self.path + dest_nii, ".json")
492506
if Path(dest_json).exists() and Path(source_json).exists():
493-
merge_commands.append(f"cubids bids-sidecar-merge {source_json} {dest_json}")
507+
merge_commands.append(
508+
f"cubids bids-sidecar-merge {source_json} {dest_json}"
509+
)
494510

495511
# Get the delete commands
496512
to_remove = []
497513
for rm_id in deletions:
498-
files_to_rm = files_df.loc[(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)]
514+
files_to_rm = files_df.loc[
515+
(files_df[["ParamGroup", "EntitySet"]] == rm_id).all(1)
516+
]
499517

500518
for rm_me in files_to_rm.FilePath:
501519
if Path(self.path + rm_me).exists():
@@ -692,19 +710,10 @@ def change_filename(self, filepath, entities):
                 new_context = new_path.replace(new_scan_end, "_aslcontext.tsv")
                 self.new_filenames.append(new_context)
 
-            old_m0scan = filepath.replace(scan_end, "_m0scan.nii.gz")
-            if Path(old_m0scan).exists():
-                self.old_filenames.append(old_m0scan)
-                new_scan_end = "_" + suffix + old_ext
-                new_m0scan = new_path.replace(new_scan_end, "_m0scan.nii.gz")
-                self.new_filenames.append(new_m0scan)
-
-            old_mjson = filepath.replace(scan_end, "_m0scan.json")
-            if Path(old_mjson).exists():
-                self.old_filenames.append(old_mjson)
-                new_scan_end = "_" + suffix + old_ext
-                new_mjson = new_path.replace(new_scan_end, "_m0scan.json")
-                self.new_filenames.append(new_mjson)
+            # Do NOT rename M0 scans or their JSON sidecars. M0 files should
+            # retain their original filenames to preserve independent variability.
+            # The IntendedFor field in M0 JSONs will be updated below to point
+            # to the newly renamed ASL files.
 
             old_labeling = filepath.replace(scan_end, "_asllabeling.jpg")
             if Path(old_labeling).exists():
@@ -739,13 +748,17 @@ def change_filename(self, filepath, entities):
                     # remove old filename
                     data["IntendedFor"].remove(item)
                     # add new filename
-                    data["IntendedFor"].append(utils._get_participant_relative_path(new_path))
+                    data["IntendedFor"].append(
+                        utils._get_participant_relative_path(new_path)
+                    )
 
                 if item == utils._get_bidsuri(filepath, self.path):
                     # remove old filename
                     data["IntendedFor"].remove(item)
                     # add new filename
-                    data["IntendedFor"].append(utils._get_bidsuri(new_path, self.path))
+                    data["IntendedFor"].append(
+                        utils._get_bidsuri(new_path, self.path)
+                    )
 
             # update the json with the new data dictionary
             utils._update_json(filename_with_if, data)
@@ -922,7 +935,9 @@ def _purge_associations(self, scans):
 
             if "/func/" in str(path):
                 # add tsvs
-                tsv = utils.img_to_new_ext(str(path), ".tsv").replace("_bold", "_events")
+                tsv = utils.img_to_new_ext(str(path), ".tsv").replace(
+                    "_bold", "_events"
+                )
                 if Path(tsv).exists():
                     to_remove.append(tsv)
                 # add tsv json (if exists)
@@ -1277,17 +1292,23 @@ def get_param_groups_dataframes(self):
             long_name = big_df.loc[row, "FilePath"]
             big_df.loc[row, "FilePath"] = long_name.replace(self.path, "")
 
-        summary = utils._order_columns(pd.concat(param_group_summaries, ignore_index=True))
+        summary = utils._order_columns(
+            pd.concat(param_group_summaries, ignore_index=True)
+        )
 
         # create new col that strings key and param group together
-        summary["KeyParamGroup"] = summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
+        summary["KeyParamGroup"] = (
+            summary["EntitySet"] + "__" + summary["ParamGroup"].map(str)
+        )
 
         # move this column to the front of the dataframe
         key_param_col = summary.pop("KeyParamGroup")
         summary.insert(0, "KeyParamGroup", key_param_col)
 
         # do the same for the files df
-        big_df["KeyParamGroup"] = big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
+        big_df["KeyParamGroup"] = (
+            big_df["EntitySet"] + "__" + big_df["ParamGroup"].map(str)
+        )
 
         # move this column to the front of the dataframe
         key_param_col = big_df.pop("KeyParamGroup")
@@ -1362,8 +1383,12 @@ def get_tsvs(self, path_prefix):
 
         big_df, summary = self.get_param_groups_dataframes()
 
-        summary = summary.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
-        big_df = big_df.sort_values(by=["Modality", "EntitySetCount"], ascending=[True, False])
+        summary = summary.sort_values(
+            by=["Modality", "EntitySetCount"], ascending=[True, False]
+        )
+        big_df = big_df.sort_values(
+            by=["Modality", "EntitySetCount"], ascending=[True, False]
+        )
 
         # Create json dictionaries for summary and files tsvs
         self.create_data_dictionary()
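
The behavioral change above in change_filename is easiest to see from the M0 sidecar's point of view: the M0 NIfTI and JSON keep their original names, and only the IntendedFor entry is rewritten to the renamed ASL run. Below is a minimal, self-contained sketch of that effect, using hypothetical sub-01 filenames and a plain list rewrite as a stand-in for the utils helpers; it illustrates the intent, not the CuBIDS implementation itself.

import json
from pathlib import Path

# Hypothetical M0 sidecar before the ASL run is renamed (participant-relative path).
m0_sidecar = {"IntendedFor": ["ses-01/perf/sub-01_ses-01_asl.nii.gz"]}

# After the ASL run gains a variant acquisition label, the M0 file keeps its name;
# only the IntendedFor entry is pointed at the new ASL filename.
old_rel = "ses-01/perf/sub-01_ses-01_asl.nii.gz"
new_rel = "ses-01/perf/sub-01_ses-01_acq-VARIANTTest_asl.nii.gz"
m0_sidecar["IntendedFor"] = [
    new_rel if entry == old_rel else entry for entry in m0_sidecar["IntendedFor"]
]

# Write the updated sidecar back out (stand-in for utils._update_json).
Path("sub-01_ses-01_m0scan.json").write_text(json.dumps(m0_sidecar, indent=4))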

cubids/tests/test_perf_m0.py

Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
+"""Tests for ASL/M0 renaming behavior.
+
+Ensures that when ASL scans are renamed with variant acquisition labels:
+- aslcontext files are renamed to match the ASL scan
+- M0 files (nii/json) are NOT renamed
+- M0 JSON IntendedFor entries are updated to point to the new ASL path
+"""
+
+import json
+from pathlib import Path
+
+from cubids.cubids import CuBIDS
+
+
+def _write(path: Path, content: str = ""):
+    path.parent.mkdir(parents=True, exist_ok=True)
+    path.write_text(content)
+
+
+def test_m0_not_renamed_but_aslcontext_is_and_intendedfor_updated(tmp_path):
+    bids_root = tmp_path / "bids"
+    sub = "sub-01"
+    ses = "ses-01"
+
+    # Create minimal perf files
+    perf_dir = bids_root / sub / ses / "perf"
+    perf_dir.mkdir(parents=True, exist_ok=True)
+
+    asl_base = perf_dir / f"{sub}_{ses}_asl.nii.gz"
+    asl_json = perf_dir / f"{sub}_{ses}_asl.json"
+    m0_base = perf_dir / f"{sub}_{ses}_m0scan.nii.gz"
+    m0_json = perf_dir / f"{sub}_{ses}_m0scan.json"
+    aslcontext = perf_dir / f"{sub}_{ses}_aslcontext.tsv"
+
+    # Touch NIfTIs (empty is fine for this test) and sidecars
+    asl_base.write_bytes(b"")
+    m0_base.write_bytes(b"")
+
+    _write(asl_json, json.dumps({}))
+
+    # M0 IntendedFor should reference the ASL time series (participant-relative path)
+    intended_for_rel = f"{ses}/perf/{sub}_{ses}_asl.nii.gz"
+    _write(m0_json, json.dumps({"IntendedFor": [intended_for_rel]}))
+
+    _write(aslcontext, "label\ncontrol\nlabel\ncontrol\n")
+
+    c = CuBIDS(str(bids_root))
+
+    # Rename the ASL scan by adding a variant acquisition
+    entities = {"suffix": "asl", "acquisition": "VARIANTTest"}
+    c.change_filename(str(asl_base), entities)
+
+    # Old/new filenames prepared for ASL and aslcontext, but NOT for M0
+    assert str(asl_base) in c.old_filenames
+    assert any(fn.endswith("_asl.json") for fn in c.old_filenames)
+    assert any(fn.endswith("_aslcontext.tsv") for fn in c.old_filenames)
+
+    assert not any(fn.endswith("_m0scan.nii.gz") for fn in c.old_filenames)
+    assert not any(fn.endswith("_m0scan.json") for fn in c.old_filenames)
+
+    # Compute expected new ASL path and aslcontext path
+    expected_new_asl = perf_dir / f"{sub}_{ses}_acq-VARIANTTest_asl.nii.gz"
+    expected_new_aslcontext = perf_dir / f"{sub}_{ses}_acq-VARIANTTest_aslcontext.tsv"
+
+    assert str(expected_new_asl) in c.new_filenames
+    assert str(expected_new_aslcontext) in c.new_filenames
+
+    # M0 files remain with original names
+    assert m0_base.exists()
+    assert m0_json.exists()
+
+    # But M0 IntendedFor should now point to the new ASL relative path
+    with open(m0_json, "r") as f:
+        m0_meta = json.load(f)
+
+    new_rel = f"{ses}/perf/{sub}_{ses}_acq-VARIANTTest_asl.nii.gz"
+    assert "IntendedFor" in m0_meta
+    assert new_rel in m0_meta["IntendedFor"]
+    # Ensure old reference removed
+    assert intended_for_rel not in m0_meta["IntendedFor"]
+
+
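
The new test drives CuBIDS.change_filename directly on a throwaway BIDS tree built under pytest's tmp_path, so it needs no real imaging data (the NIfTI files are empty placeholders, as the test comments note). A typical local invocation, assuming a development install of cubids with pytest and pybids available, would be: pytest cubids/tests/test_perf_m0.py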
