Commit 125c754

add na_rep="n/a" to all to_csv

1 parent 0fc645c

6 files changed: +18, -15 lines
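For context on the one-line change repeated below: pandas' to_csv writes missing values (NaN) as empty fields by default, whereas BIDS TSVs spell missing values as "n/a". A minimal sketch of the difference (the DataFrame contents here are made up for illustration):

```python
import pandas as pd

df = pd.DataFrame({"subject": ["sub-01", "sub-02"], "age": [34.0, None]})

# Default: the missing age is written as an empty field.
print(df.to_csv(sep="\t", index=False))

# With na_rep="n/a": the missing age is written as the BIDS-style "n/a".
print(df.to_csv(sep="\t", index=False, na_rep="n/a"))
```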

cubids/cubids.py (2 additions, 2 deletions)

@@ -1449,9 +1449,9 @@ def get_tsvs(self, path_prefix):
         with open(summary_json, "w") as outfile:
             json.dump(summary_dict, outfile, indent=4)
 
-        big_df.to_csv(files_tsv, sep="\t", index=False)
+        big_df.to_csv(files_tsv, sep="\t", index=False, na_rep="n/a")
 
-        summary.to_csv(summary_tsv, sep="\t", index=False)
+        summary.to_csv(summary_tsv, sep="\t", index=False, na_rep="n/a")
 
         # Calculate the acq groups
         group_by_acquisition_sets(files_tsv, path_prefix, self.acq_group_level)
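A quick round-trip note (a sketch, not from the repo): "n/a" is in pandas' default set of recognized NA strings, so TSVs written with na_rep="n/a" read back with the missing values restored:

```python
import io

import pandas as pd

df = pd.DataFrame({"ParamGroup": [1, 2], "MergeInto": [None, 1.0]})
tsv = df.to_csv(sep="\t", index=False, na_rep="n/a")

# read_table parses "n/a" back to NaN by default.
roundtrip = pd.read_table(io.StringIO(tsv))
assert roundtrip["MergeInto"].isna().iloc[0]
```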

cubids/metadata_merge.py (1 addition, 1 deletion)

@@ -381,7 +381,7 @@ def group_by_acquisition_sets(files_tsv, output_prefix, acq_group_level, is_long
 
     # Write the mapping of subject/session to
     acq_group_df = pd.DataFrame(grouped_sub_sess)
-    acq_group_df.to_csv(output_prefix + "_AcqGrouping.tsv", sep="\t", index=False)
+    acq_group_df.to_csv(output_prefix + "_AcqGrouping.tsv", sep="\t", index=False, na_rep="n/a")
 
     # Create data dictionary for acq group tsv
     acq_dict = get_acq_dictionary(is_longitudinal)

cubids/tests/test_apply.py (2 additions, 2 deletions)

@@ -323,12 +323,12 @@ def test_cubids_apply_intendedfor(
     # Create a CuBIDS summary tsv
     summary_tsv = tmpdir / "summary.tsv"
     df = pd.DataFrame(summary_data)
-    df.to_csv(summary_tsv, sep="\t", index=False)
+    df.to_csv(summary_tsv, sep="\t", index=False, na_rep="n/a")
 
     # Create a CuBIDS files tsv
     files_tsv = tmpdir / "files.tsv"
     df = pd.DataFrame(fdata)
-    df.to_csv(files_tsv, sep="\t", index=False)
+    df.to_csv(files_tsv, sep="\t", index=False, na_rep="n/a")
 
     # Run cubids apply
     if isinstance(expected, str):

cubids/tests/test_bond.py (4 additions, 4 deletions)

@@ -459,7 +459,7 @@ def test_tsv_merge_no_datalad(tmp_path):
     summary_df.loc[fa_nan_dwi_row, "MergeInto"] = summary_df.ParamGroup[complete_dwi_row]
 
     valid_tsv_file = tsv_prefix + "_valid_summary.tsv"
-    summary_df.to_csv(valid_tsv_file, sep="\t", index=False)
+    summary_df.to_csv(valid_tsv_file, sep="\t", index=False, na_rep="n/a")
 
     # about to apply merges!
 
@@ -472,7 +472,7 @@ def test_tsv_merge_no_datalad(tmp_path):
         complete_dwi_row
     ]
     invalid_tsv_file = tsv_prefix + "_invalid_summary.tsv"
-    summary_df.to_csv(invalid_tsv_file, sep="\t", index=False)
+    summary_df.to_csv(invalid_tsv_file, sep="\t", index=False, na_rep="n/a")
 
     with pytest.raises(Exception):
         bod.apply_tsv_changes(
@@ -572,7 +572,7 @@ def test_tsv_merge_changes(tmp_path):
     summary_df.loc[fa_nan_dwi_row, "MergeInto"] = summary_df.ParamGroup[complete_dwi_row]
 
     valid_tsv_file = tsv_prefix + "_valid_summary.tsv"
-    summary_df.to_csv(valid_tsv_file, sep="\t", index=False)
+    summary_df.to_csv(valid_tsv_file, sep="\t", index=False, na_rep="n/a")
 
     # about to merge
     bod.apply_tsv_changes(valid_tsv_file, original_files_tsv, str(tmp_path / "ok_modified"))
@@ -584,7 +584,7 @@ def test_tsv_merge_changes(tmp_path):
         complete_dwi_row
     ]
     invalid_tsv_file = tsv_prefix + "_invalid_summary.tsv"
-    summary_df.to_csv(invalid_tsv_file, sep="\t", index=False)
+    summary_df.to_csv(invalid_tsv_file, sep="\t", index=False, na_rep="n/a")
 
     with pytest.raises(Exception):
         bod.apply_tsv_changes(

cubids/tests/utils.py (1 addition, 1 deletion)

@@ -108,7 +108,7 @@ def _add_deletion(summary_tsv):
     """
     df = pd.read_table(summary_tsv)
     df.loc[3, "MergeInto"] = 0
-    df.to_csv(summary_tsv, sep="\t", index=False)
+    df.to_csv(summary_tsv, sep="\t", index=False, na_rep="n/a")
     return df.loc[3, "KeyParamGroup"]
 
 
cubids/workflows.py (8 additions, 5 deletions)

@@ -176,10 +176,13 @@ def _link_or_copy(src_path, dst_path):
             participants_tsv_path,
             sep="\t",
             index=False,
+            na_rep="n/a",
         )
-    except Exception as e:  # noqa: F841
-        # Non-fatal: continue validation even if filtering fails
-        pass
+    except Exception as e:
+        logger.warning(
+            f"Failed to filter participants.tsv for subject {subject}: {e}. "
+            "Continuing validation without filtering."
+        )
 
     # Run the validator
     call = build_validator_call(
@@ -281,7 +284,7 @@ def validate(
     else:
         val_tsv = str(bids_dir) + "/code/CuBIDS/" + str(output_prefix) + "_validation.tsv"
 
-    parsed.to_csv(val_tsv, sep="\t", index=False)
+    parsed.to_csv(val_tsv, sep="\t", index=False, na_rep="n/a")
 
     # build validation data dictionary json sidecar
     val_dict = get_val_dictionary()
@@ -373,7 +376,7 @@ def validate(
     else:
         val_tsv = str(bids_dir) + "/code/CuBIDS/" + str(output_prefix) + "_validation.tsv"
 
-    parsed.to_csv(val_tsv, sep="\t", index=False)
+    parsed.to_csv(val_tsv, sep="\t", index=False, na_rep="n/a")
 
     # build validation data dictionary json sidecar
     val_dict = get_val_dictionary()
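The first workflows.py hunk above also stops swallowing the filtering exception silently. A standalone sketch of that warn-and-continue pattern (the logger name and the failing function are hypothetical stand-ins):

```python
import logging

logger = logging.getLogger("cubids")


def filter_participants(participants_tsv_path, subject):
    """Hypothetical stand-in for the participants.tsv filtering step."""
    raise OSError("participants.tsv is unreadable")


subject = "sub-01"
try:
    filter_participants("participants.tsv", subject)
except Exception as e:
    # Non-fatal: log why filtering failed, then continue with validation.
    logger.warning(
        f"Failed to filter participants.tsv for subject {subject}: {e}. "
        "Continuing validation without filtering."
    )
```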
