diff --git a/CPAC/anat_preproc/anat_preproc.py b/CPAC/anat_preproc/anat_preproc.py index f3ef8c1e1..2cf19ced7 100644 --- a/CPAC/anat_preproc/anat_preproc.py +++ b/CPAC/anat_preproc/anat_preproc.py @@ -28,7 +28,6 @@ fsl_aff_to_rigid, fslmaths_command, mri_convert, - normalize_wmparc, pad, VolumeRemoveIslands, wb_command, @@ -696,13 +695,15 @@ def afni_brain_connector(wf, cfg, strat_pool, pipe_num, opt): wf.connect(anat_skullstrip, "out_file", anat_brain_mask, "in_file_a") + outputs = {} + if strat_pool.check_rpool("desc-preproc_T1w"): outputs = {"space-T1w_desc-brain_mask": (anat_brain_mask, "out_file")} elif strat_pool.check_rpool("desc-preproc_T2w"): outputs = {"space-T2w_desc-brain_mask": (anat_brain_mask, "out_file")} - return (wf, outputs) + return wf, outputs def fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt): @@ -1322,22 +1323,29 @@ def freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt): wf.connect(combine_mask, "out_file", binarize_combined_mask, "in_file") - if opt == "FreeSurfer-BET-Tight": - outputs = { - "space-T1w_desc-tight_brain_mask": ( - binarize_combined_mask, - "out_file", - ) - } - elif opt == "FreeSurfer-BET-Loose": - outputs = { - "space-T1w_desc-loose_brain_mask": ( - binarize_combined_mask, - "out_file", - ) - } + # CCS brain mask is in FS space, transfer it back to native T1 space + match_fov_ccs_brain_mask = pe.Node( + interface=fsl.FLIRT(), name=f"match_fov_CCS_brain_mask_{node_id}" + ) + match_fov_ccs_brain_mask.inputs.apply_xfm = True + match_fov_ccs_brain_mask.inputs.uses_qform = True + match_fov_ccs_brain_mask.inputs.interp = "nearestneighbour" - return (wf, outputs) + node, out = strat_pool.get_data("pipeline-fs_raw-average") + convert_fs_T1_to_nifti = pe.Node( + Function( + input_names=["in_file"], output_names=["out_file"], function=mri_convert + ), + name=f"convert_fs_T1_to_nifti_for_ccs_{node_id}", + ) + wf.connect(node, out, convert_fs_T1_to_nifti, "in_file") + wf.connect( + convert_fs_T1_to_nifti, 
"out_file", match_fov_ccs_brain_mask, "reference" + ) + + wf.connect(binarize_combined_mask, "out_file", match_fov_ccs_brain_mask, "in_file") + + return wf, {"space-T1w_desc-brain_mask": (match_fov_ccs_brain_mask, "out_file")} def mask_T2(wf_name="mask_T2"): @@ -1408,9 +1416,8 @@ def mask_T2(wf_name="mask_T2"): ) def anatomical_init(wf, cfg, strat_pool, pipe_num, opt=None): if opt not in anatomical_init.option_val: - raise ValueError( - f"\n[!] Error: Invalid option for deoblique: {opt}. \nExpected one of {anatomical_init.option_val}" - ) + msg = f"\n[!] Error: Invalid option for deoblique: {opt}. \nExpected one of {anatomical_init.option_val}" + raise ValueError(msg) if opt == "warp": anat_deoblique = pe.Node( @@ -1500,7 +1507,7 @@ def acpc_align_head(wf, cfg, strat_pool, pipe_num, opt=None): ( "desc-head_T1w", "desc-preproc_T1w", - ["space-T1w_desc-brain_mask", "space-T1w_desc-brain_mask"], + "space-T1w_desc-brain_mask", ), "T1w-ACPC-template", "T1w-brain-ACPC-template", @@ -1508,7 +1515,7 @@ def acpc_align_head(wf, cfg, strat_pool, pipe_num, opt=None): outputs=[ "desc-head_T1w", "desc-preproc_T1w", - ["space-T1w_desc-brain_mask", "space-T1w_desc-brain_mask"], + "space-T1w_desc-brain_mask", "from-T1w_to-ACPC_mode-image_desc-aff2rig_xfm", ], ) @@ -1939,211 +1946,102 @@ def brain_mask_acpc_unet(wf, cfg, strat_pool, pipe_num, opt=None): ["anatomical_preproc", "run"], ], option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-Brainmask", - inputs=[ - "pipeline-fs_raw-average", - "pipeline-fs_brainmask", - "freesurfer-subject-dir", + option_val=[ + "FreeSurfer-ABCD", + "FreeSurfer-BET-Loose", + "FreeSurfer-BET-Tight", + "FreeSurfer-Brainmask", ], - outputs=["space-T1w_desc-brain_mask"], -) -def brain_mask_freesurfer(wf, cfg, strat_pool, pipe_num, opt=None): - wf, outputs = freesurfer_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - return (wf, outputs) - - -@nodeblock( - name="brain_mask_acpc_freesurfer", - switch=[ - 
["anatomical_preproc", "brain_extraction", "run"], - ["anatomical_preproc", "run"], - ], - option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-Brainmask", - inputs=[ - "space-T1w_desc-brain_mask", - "pipeline-fs_raw-average", - "freesurfer-subject-dir", - ], - outputs=["space-T1w_desc-acpcbrain_mask"], -) -def brain_mask_acpc_freesurfer(wf, cfg, strat_pool, pipe_num, opt=None): - wf, wf_outputs = freesurfer_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - outputs = {"space-T1w_desc-acpcbrain_mask": wf_outputs["space-T1w_desc-brain_mask"]} - - return (wf, outputs) - - -@nodeblock( - name="brain_mask_freesurfer_abcd", - switch=[ - ["anatomical_preproc", "brain_extraction", "run"], - ["anatomical_preproc", "run"], - ], - option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-ABCD", inputs=[ - ["desc-restore_T1w", "desc-preproc_T1w"], - "pipeline-fs_wmparc", - "pipeline-fs_raw-average", - "freesurfer-subject-dir", - ], - outputs=["space-T1w_desc-brain_mask"], -) -def brain_mask_freesurfer_abcd(wf, cfg, strat_pool, pipe_num, opt=None): - wf, outputs = freesurfer_abcd_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - return (wf, outputs) - - -@nodeblock( - name="brain_mask_freesurfer_fsl_tight", - switch=[ - ["anatomical_preproc", "brain_extraction", "run"], - ["anatomical_preproc", "run"], - ], - option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-BET-Tight", - inputs=[ - "pipeline-fs_brainmask", - "pipeline-fs_T1", - "pipeline-fs_raw-average", - "freesurfer-subject-dir", + ( + ["desc-restore_T1w", "desc-preproc_T1w"], + "space-T1w_desc-brain_mask", + "pipeline-fs_T1", + "pipeline-fs_wmparc", + "pipeline-fs_raw-average", + "pipeline-fs_brainmask", + "freesurfer-subject-dir", + ), "T1w-brain-template-mask-ccs", "T1w-ACPC-template", ], - outputs={ - "space-T1w_desc-brain_mask": { - "Description": "Brain mask extracted using FreeSurfer-BET-Tight method", 
- "Method": "FreeSurfer-BET-Tight", - "Threshold": "tight", - } - }, + outputs={"space-T1w_desc-brain_mask": {}}, ) -def brain_mask_freesurfer_fsl_tight(wf, cfg, strat_pool, pipe_num, opt=None): - wf, outputs = freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - # Convert the tight brain mask to generic brain mask - outputs["space-T1w_desc-brain_mask"] = outputs.pop( - "space-T1w_desc-tight_brain_mask" - ) - return (wf, outputs) +def brain_mask_freesurfer(wf, cfg, strat_pool, pipe_num, opt=None): + assert isinstance(brain_mask_freesurfer.outputs, dict) + brain_mask_freesurfer.outputs["space-T1w_desc-brain_mask"] = { + "Description": f"Brain mask extracted using {opt} method", + "Method": opt, + } + match opt: + case "FreeSurfer-ABCD": + return freesurfer_abcd_brain_connector(wf, cfg, strat_pool, pipe_num, opt) + case "FreeSurfer-BET-Loose" | "FreeSurfer-BET-Tight": + brain_mask_freesurfer.outputs["space-T1w_desc-brain_mask"]["Threshold"] = ( + opt.rsplit("-")[-1].lower() + ) + return freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt) + case "FreeSurfer-Brainmask": + return freesurfer_brain_connector(wf, cfg, strat_pool, pipe_num, opt) + return wf, {} @nodeblock( - name="brain_mask_acpc_freesurfer_abcd", + name="brain_mask_acpc_freesurfer", switch=[ ["anatomical_preproc", "brain_extraction", "run"], ["anatomical_preproc", "run"], ], option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-ABCD", - inputs=[ - ["desc-restore_T1w", "desc-preproc_T1w"], - "pipeline-fs_wmparc", - "pipeline-fs_raw-average", - "freesurfer-subject-dir", - ], - outputs=["space-T1w_desc-acpcbrain_mask"], -) -def brain_mask_acpc_freesurfer_abcd(wf, cfg, strat_pool, pipe_num, opt=None): - wf, wf_outputs = freesurfer_abcd_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - outputs = {"space-T1w_desc-acpcbrain_mask": wf_outputs["space-T1w_desc-brain_mask"]} - - return (wf, outputs) - - -@nodeblock( - 
name="brain_mask_freesurfer_fsl_loose", - switch=[ - ["anatomical_preproc", "brain_extraction", "run"], - ["anatomical_preproc", "run"], + option_val=[ + "FreeSurfer-ABCD", + "FreeSurfer-Brainmask", + "FreeSurfer-BET-Loose", + "FreeSurfer-BET-Tight", ], - option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-BET-Loose", inputs=[ - "pipeline-fs_brainmask", - "pipeline-fs_T1", - "pipeline-fs_raw-average", - "freesurfer-subject-dir", - "T1w-brain-template-mask-ccs", - "T1w-ACPC-template", - ], - outputs={ - "space-T1w_desc-brain_mask": { - "Description": "Brain mask extracted using FreeSurfer-BET-Loose method", - "Method": "FreeSurfer-BET-Loose", - "Threshold": "loose", - } - }, -) -def brain_mask_freesurfer_fsl_loose(wf, cfg, strat_pool, pipe_num, opt=None): - wf, outputs = freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - # Convert the loose brain mask to generic brain mask - outputs["space-T1w_desc-brain_mask"] = outputs.pop( - "space-T1w_desc-loose_brain_mask" - ) - return (wf, outputs) - - -@nodeblock( - name="brain_mask_acpc_freesurfer_fsl_tight", - switch=[ - ["anatomical_preproc", "brain_extraction", "run"], - ["anatomical_preproc", "run"], - ], - option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-BET-Tight", - inputs=[ - "pipeline-fs_brainmask", - "pipeline-fs_T1", - "T1w-brain-template-mask-ccs", + ( + ["desc-restore_T1w", "desc-preproc_T1w"], + "space-T1w_desc-brain_mask", + "space-T1w_desc-acpcbrain_mask", + "pipeline-fs_brainmask", + "pipeline-fs_raw-average", + "pipeline-fs_T1", + "pipeline-fs_wmparc", + "freesurfer-subject-dir", + ), "T1w-ACPC-template", - ], - outputs=["space-T1w_desc-tight_acpcbrain_mask"], -) -def brain_mask_acpc_freesurfer_fsl_tight(wf, cfg, strat_pool, pipe_num, opt=None): - wf, wf_outputs = freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - outputs = { - "space-T1w_desc-tight_acpcbrain_mask": wf_outputs[ - 
"space-T1w_desc-tight_brain_mask" - ] - } - - return (wf, outputs) - - -@nodeblock( - name="brain_mask_acpc_freesurfer_fsl_loose", - switch=[ - ["anatomical_preproc", "brain_extraction", "run"], - ["anatomical_preproc", "run"], - ], - option_key=["anatomical_preproc", "brain_extraction", "using"], - option_val="FreeSurfer-BET-Loose", - inputs=[ - "pipeline-fs_brainmask", - "pipeline-fs_T1", "T1w-brain-template-mask-ccs", - "T1w-ACPC-template", ], - outputs=["space-T1w_desc-loose_acpcbrain_mask"], + outputs={"space-T1w_desc-acpcbrain_mask": {}}, ) -def brain_mask_acpc_freesurfer_fsl_loose(wf, cfg, strat_pool, pipe_num, opt=None): - wf, wf_outputs = freesurfer_fsl_brain_connector(wf, cfg, strat_pool, pipe_num, opt) - - outputs = { - "space-T1w_desc-loose_acpcbrain_mask": wf_outputs[ - "space-T1w_desc-loose_brain_mask" - ] +def brain_mask_acpc_freesurfer(wf, cfg, strat_pool, pipe_num, opt=None): + if opt != strat_pool.get_json("space-T1w_desc-brain_mask").get( + "CpacVariant", {} + ).get("space-T1w_mask", opt): + # https://tenor.com/baIhQ.gif + return wf, {} + assert isinstance(brain_mask_acpc_freesurfer.outputs, dict) + outputs = wf_outputs = {} + key = "space-T1w_desc-brain_mask" + functions = { + "FreeSurfer-ABCD": freesurfer_abcd_brain_connector, + "FreeSurfer-Brainmask": freesurfer_brain_connector, + "FreeSurfer-BET-Loose": freesurfer_fsl_brain_connector, + "FreeSurfer-BET-Tight": freesurfer_fsl_brain_connector, } + if opt in ["FreeSurfer-BET-Loose", "FreeSurfer-BET-Tight"]: + brain_mask_acpc_freesurfer.outputs["space-T1w_desc-acpcbrain_mask"] = { + "Description": f"Brain mask extracted using {opt} method", + "Method": opt, + "Threshold": opt.rsplit("-")[-1].lower(), + } + if opt in functions: + wf, wf_outputs = functions[opt](wf, cfg, strat_pool, pipe_num, opt) + if key in wf_outputs: + outputs = {"space-T1w_desc-acpcbrain_mask": wf_outputs[key]} - return (wf, outputs) + return wf, outputs @nodeblock( diff --git a/CPAC/anat_preproc/tests/test_anat_preproc.py 
b/CPAC/anat_preproc/tests/test_anat_preproc.py index 829a3acd7..7a65dd8a3 100755 --- a/CPAC/anat_preproc/tests/test_anat_preproc.py +++ b/CPAC/anat_preproc/tests/test_anat_preproc.py @@ -1,34 +1,45 @@ +# Copyright (C) 2012-2025 C-PAC Developers + +# This file is part of C-PAC. + +# C-PAC is free software: you can redistribute it and/or modify it under +# the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. + +# C-PAC is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +# License for more details. + +# You should have received a copy of the GNU Lesser General Public +# License along with C-PAC. If not, see . +"""Tests for anatomical preprocessing.""" + import os -from nose.tools import * import numpy as np +import pytest import nibabel as nib -from .. import anat_preproc -from unittest.mock import Mock, patch -from ..anat_preproc import ( - brain_mask_freesurfer_fsl_loose, - brain_mask_freesurfer_fsl_tight, -) +from CPAC.anat_preproc import anat_preproc +from CPAC.anat_preproc.anat_preproc import brain_mask_freesurfer +from CPAC.pipeline import nipype_pipeline_engine as pe +from CPAC.pipeline.engine import ResourcePool +from CPAC.utils.configuration import Preconfiguration +from CPAC.utils.test_init import create_dummy_node +CFG = Preconfiguration("ccs-options") -class TestAnatPreproc: - def __init__(self): + +@pytest.mark.skip(reason="This test needs refactoring.") +class TestAnatPreproc: # noqa + def setup_method(self) -> None: """ Initialize and run the the anat_preproc workflow. Populate the node-name : node_output dictionary using the workflow object. This dictionary serves the outputs of each of the nodes in the workflow to all the tests that need them. 
- - Parameters - ---------- - self - - Returns - ------- - None - - """ self.preproc = anat_preproc.create_anat_preproc() self.input_anat = os.path.abspath("$FSLDIR/data/standard/MNI152_T1_2mm.nii.gz") @@ -276,69 +287,51 @@ def test_anat_brain(self): assert correlation[0, 1] >= 0.97 -@patch("CPAC.anat_preproc.anat_preproc.freesurfer_fsl_brain_connector") -def test_brain_mask_freesurfer_fsl_loose(mock_connector): - """Test that brain_mask_freesurfer_fsl_loose correctly renames output key.""" - - mock_wf = Mock() - mock_cfg = Mock() - mock_strat_pool = Mock() - pipe_num = 1 - - mock_outputs = { - "space-T1w_desc-loose_brain_mask": "brain_mask_data", - "other_output": "other_data", - } - - mock_connector.return_value = (mock_wf, mock_outputs) - - result_wf, result_outputs = brain_mask_freesurfer_fsl_loose( - mock_wf, mock_cfg, mock_strat_pool, pipe_num - ) - - mock_connector.assert_called_once_with( - mock_wf, mock_cfg, mock_strat_pool, pipe_num, None - ) - - # Assert workflow returned unchanged - assert result_wf == mock_wf - - # Assert output key was renamed correctly - assert "space-T1w_desc-brain_mask" in result_outputs - assert "space-T1w_desc-loose_brain_mask" not in result_outputs - assert result_outputs["space-T1w_desc-brain_mask"] == "brain_mask_data" - assert result_outputs["other_output"] == "other_data" - - -@patch("CPAC.anat_preproc.anat_preproc.freesurfer_fsl_brain_connector") -def test_brain_mask_freesurfer_fsl_tight(mock_connector): - """Test that brain_mask_freesurfer_fsl_tight correctly renames output key.""" +@pytest.mark.parametrize("opt", ["FreeSurfer-BET-Loose", "FreeSurfer-BET-Tight"]) +@pytest.mark.parametrize("t1w", ["desc-restore_T1w", "desc-preproc_T1w"]) +def test_brain_mask_freesurfer_fsl(opt: str, t1w: str): + """Test that brain_mask_freesurfer_fsl correctly generates output key using real code.""" + # Create minimal mocks for required workflow/config/strat_pool, but do not patch freesurfer_fsl_brain_connector + + CFG["subject_id"] = opt 
+ + wf = pe.Workflow(name=opt) + pre_resources = [ + t1w, + "space-T1w_desc-brain_mask", + "pipeline-fs_T1", + "pipeline-fs_wmparc", + "pipeline-fs_raw-average", + "pipeline-fs_brainmask", + "freesurfer-subject-dir", + "T1w-brain-template-mask-ccs", + "T1w-ACPC-template", + ] + before_this_test = create_dummy_node("created_before_this_test", pre_resources) + rpool = ResourcePool(name=f"{opt}_{opt}", cfg=CFG) + for resource in pre_resources: + rpool.set_data( + resource, before_this_test, resource, {}, "", before_this_test.name + ) + rpool.gather_pipes(wf, CFG) + strat_pool = next(iter(rpool.get_strats(pre_resources).values())) - mock_wf = Mock() - mock_cfg = Mock() - mock_strat_pool = Mock() pipe_num = 1 - mock_outputs = { - "space-T1w_desc-tight_brain_mask": "brain_mask_data", - "other_output": "other_data", - } - - mock_connector.return_value = (mock_wf, mock_outputs) - - result_wf, result_outputs = brain_mask_freesurfer_fsl_tight( - mock_wf, mock_cfg, mock_strat_pool, pipe_num - ) - - mock_connector.assert_called_once_with( - mock_wf, mock_cfg, mock_strat_pool, pipe_num, None + result_wf, result_outputs = brain_mask_freesurfer( + wf, CFG, strat_pool, pipe_num, opt ) - # Assert workflow returned unchanged - assert result_wf == mock_wf - - # Assert output key was renamed correctly - assert "space-T1w_desc-brain_mask" in result_outputs - assert "space-T1w_desc-tight_brain_mask" not in result_outputs - assert result_outputs["space-T1w_desc-brain_mask"] == "brain_mask_data" - assert result_outputs["other_output"] == "other_data" + # The output key should always be present + assert any( + k.startswith("space-T1w_desc-brain_mask") for k in result_outputs + ), "Expected brain_mask key in outputs." + # Should not have loose/tight keys + assert not any( + "loose_brain_mask" in k for k in result_outputs + ), "Loose brain mask key should not be present." + assert not any( + "tight_brain_mask" in k for k in result_outputs + ), "Tight brain mask key should not be present." 
+ # Should return the workflow unchanged + assert result_wf == wf diff --git a/CPAC/func_preproc/func_preproc.py b/CPAC/func_preproc/func_preproc.py index 43658bbec..10a70392c 100644 --- a/CPAC/func_preproc/func_preproc.py +++ b/CPAC/func_preproc/func_preproc.py @@ -1719,10 +1719,9 @@ def bold_mask_anatomical_resampled(wf, cfg, strat_pool, pipe_num, opt=None): option_val="CCS_Anatomical_Refined", inputs=[ ["desc-motion_bold", "desc-preproc_bold", "bold"], - "desc-brain_T1w", - ["desc-preproc_T1w", "desc-reorient_T1w", "T1w"], + ("desc-brain_T1w", "sbref", ["desc-preproc_T1w", "desc-reorient_T1w", "T1w"]), ], - outputs=["space-bold_desc-brain_mask", "desc-ref_bold"], + outputs=["space-bold_desc-brain_mask", "sbref"], ) def bold_mask_ccs(wf, cfg, strat_pool, pipe_num, opt=None): """Generate the BOLD mask by basing it off of the anatomical brain. @@ -1837,7 +1836,7 @@ def bold_mask_ccs(wf, cfg, strat_pool, pipe_num, opt=None): outputs = { "space-bold_desc-brain_mask": (intersect_mask, "out_file"), - "desc-ref_bold": (example_func_brain, "out_file"), + "sbref": (example_func_brain, "out_file"), } return (wf, outputs) diff --git a/CPAC/pipeline/cpac_pipeline.py b/CPAC/pipeline/cpac_pipeline.py index a54fa929b..e274b39ac 100644 --- a/CPAC/pipeline/cpac_pipeline.py +++ b/CPAC/pipeline/cpac_pipeline.py @@ -52,18 +52,12 @@ brain_extraction_temp_T2, brain_mask_acpc_afni, brain_mask_acpc_freesurfer, - brain_mask_acpc_freesurfer_abcd, - brain_mask_acpc_freesurfer_fsl_loose, - brain_mask_acpc_freesurfer_fsl_tight, brain_mask_acpc_fsl, brain_mask_acpc_niworkflows_ants, brain_mask_acpc_T2, brain_mask_acpc_unet, brain_mask_afni, brain_mask_freesurfer, - brain_mask_freesurfer_abcd, - brain_mask_freesurfer_fsl_loose, - brain_mask_freesurfer_fsl_tight, brain_mask_fsl, brain_mask_niworkflows_ants, brain_mask_T2, @@ -933,10 +927,7 @@ def build_anat_preproc_stack(rpool, cfg, pipeline_blocks=None): brain_mask_acpc_fsl, brain_mask_acpc_niworkflows_ants, brain_mask_acpc_unet, - 
brain_mask_acpc_freesurfer_abcd, brain_mask_acpc_freesurfer, - brain_mask_acpc_freesurfer_fsl_tight, - brain_mask_acpc_freesurfer_fsl_loose, ], acpc_align_brain_with_mask, brain_extraction_temp, @@ -986,10 +977,7 @@ def build_anat_preproc_stack(rpool, cfg, pipeline_blocks=None): brain_mask_fsl, brain_mask_niworkflows_ants, brain_mask_unet, - brain_mask_freesurfer_abcd, brain_mask_freesurfer, - brain_mask_freesurfer_fsl_tight, - brain_mask_freesurfer_fsl_loose, ] ] pipeline_blocks += anat_brain_mask_blocks diff --git a/CPAC/pipeline/engine.py b/CPAC/pipeline/engine.py index a36b97ed4..415307c75 100644 --- a/CPAC/pipeline/engine.py +++ b/CPAC/pipeline/engine.py @@ -24,7 +24,7 @@ import json import os import re -from typing import Literal, Optional +from typing import Callable, Generator, Literal, Optional import warnings import pandas as pd @@ -42,8 +42,11 @@ from CPAC.pipeline.check_outputs import ExpectedOutputs from CPAC.pipeline.nodeblock import NodeBlockFunction from CPAC.pipeline.utils import ( + CrossedVariantsError, + find_variants, MOVEMENT_FILTER_KEYS, name_fork, + short_circuit_crossed_variants, source_set, validate_outputs, ) @@ -598,12 +601,10 @@ def flatten_prov(self, prov): return flat_prov return None - def get_strats(self, resources, debug=False): + def get_strats(self, resources, debug: bool | str = False): # TODO: NOTE: NOT COMPATIBLE WITH SUB-RPOOL/STRAT_POOLS # TODO: (and it doesn't have to be) - import itertools - linked_resources = [] resource_list = [] if debug: @@ -684,7 +685,7 @@ def get_strats(self, resources, debug=False): # TODO: and the actual resource is encoded in the tag: of the last item, every time! 
# keying the strategies to the resources, inverting it if len_inputs > 1: - strats = itertools.product(*total_pool) + strats = self.linked_product(total_pool, linked_resources, self.get_json) # we now currently have "strats", the combined permutations of all the strategies, as a list of tuples, each tuple combining one version of input each, being one of the permutations. # OF ALL THE DIFFERENT INPUTS. and they are tagged by their fetched inputs with {name}:{strat}. @@ -704,6 +705,7 @@ def get_strats(self, resources, debug=False): if debug: verbose_logger = getLogger("CPAC.engine") verbose_logger.debug("len(strat_list_list): %s\n", len(strat_list_list)) + for strat_list in strat_list_list: json_dct = {} for strat in strat_list: @@ -850,7 +852,27 @@ def get_strats(self, resources, debug=False): new_strats[pipe_idx].rpool["json"]["subjson"][data_type].update( copy.deepcopy(resource_strat_dct["json"]) ) - return new_strats + return_strats: dict[str, ResourcePool] = {} + for pipe_idx, strat_pool in new_strats.items(): + try: + short_circuit_crossed_variants(strat_pool, resources) + return_strats[pipe_idx] = strat_pool + except CrossedVariantsError: + if debug: + verbose_logger = getLogger("CPAC.engine") + verbose_logger.debug( + "Dropped crossed variants strat: %s", + find_variants(strat_pool, resources), + ) + continue + if debug: + verbose_logger = getLogger("CPAC.engine") + _k = list(return_strats.keys()) + if isinstance(debug, str): + verbose_logger.debug("return_strats: (%s, %s) %s\n", debug, len(_k), _k) + else: + verbose_logger.debug("return_strats: (%s) %s\n", len(_k), _k) + return return_strats def derivative_xfm(self, wf, label, connection, json_info, pipe_idx, pipe_x): if label in self.xfm: @@ -1489,6 +1511,128 @@ def node_data(self, resource, **kwargs): """ return NodeData(self, resource, **kwargs) + @staticmethod + def _normalize_variant_dict(json_obj: dict) -> dict[str, Optional[str]]: + """ + Return {variant_key: primary_value or None}. 
+ + - list items are concatenated + - "NO-..." entries normalize to None + """ + out = {} + for k, v in json_obj.get("CpacVariant", {}).items(): + assert isinstance(v, (list, str)) + primary = "-".join(v) if isinstance(v, list) else v + out[k] = ( + None + if (isinstance(primary, str) and primary.startswith("NO-")) + else primary + ) + return out + + def _is_consistent( + self, + strat_list: list, + linked_resources: list | tuple, + json_lookup: Callable[[str, str | list[str]], dict], + debug: bool = False, + ) -> bool: + """ + Ensure consistency for linked_resources in strat_list. + + Rules: + - Sub-keys only compared if they exist in multiple resources in the same linked group. + - Missing sub-keys or NO-... values are compatible with anything. + - Lists are compared ignoring order. + - Debug flag is accepted but currently unused. + """ + if not linked_resources: + return True + + # Build JSON for each prov + prov_json = {} + for prov in strat_list: + resource, strat_idx = self.generate_prov_string(prov) + prov_json[resource] = self._normalize_variant_dict( + json_lookup(resource, strat_idx) + ) + + for linked_group in linked_resources: + # Keep only resources present in strat_list + variants_map = {r: prov_json[r] for r in linked_group if r in prov_json} + if len(variants_map) < 2: + continue # nothing to compare yet + + # Determine which sub-keys are shared across multiple resources + subkey_counts = {} + for subdict in variants_map.values(): + for k in subdict.keys(): + subkey_counts[k] = subkey_counts.get(k, 0) + 1 + shared_subkeys = {k for k, count in subkey_counts.items() if count > 1} + + # Pairwise comparison only for shared sub-keys + resources = list(variants_map.keys()) + for i in range(len(resources)): + res_a = resources[i] + subdict_a = variants_map[res_a] + for j in range(i + 1, len(resources)): + res_b = resources[j] + subdict_b = variants_map[res_b] + + for subkey in shared_subkeys: + val_a = subdict_a.get(subkey) + val_b =
subdict_b.get(subkey) + + # Skip if missing or NO-... + skip_a = ( + val_a is None + or ( + isinstance(val_a, list) + and all(str(v).startswith("NO-") for v in val_a) + ) + or (isinstance(val_a, str) and val_a.startswith("NO-")) + ) + skip_b = ( + val_b is None + or ( + isinstance(val_b, list) + and all(str(v).startswith("NO-") for v in val_b) + ) + or (isinstance(val_b, str) and val_b.startswith("NO-")) + ) + if skip_a or skip_b: + continue + + # Normalize lists + val_a_norm = sorted(val_a) if isinstance(val_a, list) else val_a + val_b_norm = sorted(val_b) if isinstance(val_b, list) else val_b + + if val_a_norm != val_b_norm: + return False + return True + + def linked_product( + self, + resource_pools: "list[ResourcePool]", + linked_resources: list[str], + json_lookup: Callable[[str, str | list[str]], dict], + ) -> Generator: + """ + Generate only consistent combinations of cpac_prov values across pools. + """ + + def backtrack(idx, current): + if idx == len(resource_pools): + yield list(current) + return + for prov in resource_pools[idx]: + current.append(prov) + if self._is_consistent(current, linked_resources, json_lookup): + yield from backtrack(idx + 1, current) + current.pop() + + yield from backtrack(0, []) + class NodeBlock: def __init__(self, node_block_functions, debug=False): @@ -1589,7 +1733,10 @@ def grab_tiered_dct(self, cfg, key_list): raise KeyError(msg) from ke return cfg_dct - def connect_block(self, wf, cfg, rpool): + def connect_block( + self, wf: pe.Workflow, cfg: Configuration, rpool: ResourcePool + ) -> pe.Workflow: + """Connect NodeBlock to a Workflow given a Configuration and ResourcePool.""" debug = cfg.pipeline_setup["Debugging"]["verbose"] all_opts = [] for name, block_dct in self.node_blocks.items(): @@ -1645,6 +1792,11 @@ def connect_block(self, wf, cfg, rpool): opts.append(option_val) else: # AND, if there are multiple option-val's (in a list) in the docstring, it gets iterated below in 'for opt in option' etc. 
AND THAT'S WHEN YOU HAVE TO DELINEATE WITHIN THE NODE BLOCK CODE!!! opts = [None] + if debug: + verbose_logger = getLogger("CPAC.engine") + verbose_logger.debug( + "[connect_block] opts resolved for %s: %s", name, opts + ) all_opts += opts sidecar_additions = { @@ -1739,7 +1891,7 @@ def connect_block(self, wf, cfg, rpool): for ( pipe_idx, strat_pool, # strat_pool is a ResourcePool like {'desc-preproc_T1w': { 'json': info, 'data': (node, out) }, 'desc-brain_mask': etc.} - ) in rpool.get_strats(inputs, debug).items(): + ) in rpool.get_strats(inputs, name if debug else False).items(): # keep in mind rpool.get_strats(inputs) = {pipe_idx1: {'desc-preproc_T1w': etc.}, pipe_idx2: {..} } fork = False in switch for opt in opts: # it's a dictionary of ResourcePools called strat_pools, except those sub-ResourcePools only have one level! no pipe_idx strat keys. @@ -1761,6 +1913,13 @@ def connect_block(self, wf, cfg, rpool): strat_pool.copy_resource(input_name, interface[0]) replaced_inputs.append(interface[0]) try: + if debug: + verbose_logger = getLogger("CPAC.engine") + verbose_logger.debug( + "Before block '%s', strat_pool contains: %s", + block_function.__name__, + list(strat_pool.rpool.keys()), + ) wf, outs = block_function(wf, cfg, strat_pool, pipe_x, opt) except IOError as e: # duplicate node WFLOGGER.warning(e) @@ -2474,7 +2633,7 @@ def func_outdir_ingress( def set_iterables(scan, mask_paths=None, ts_paths=None): - # match scan with filepath to get filepath + """Match scan with filepath to get filepath.""" mask_path = [path for path in mask_paths if scan in path] ts_path = [path for path in ts_paths if scan in path] diff --git a/CPAC/pipeline/nipype_pipeline_engine/engine.py b/CPAC/pipeline/nipype_pipeline_engine/engine.py index 508bc0337..94aacd123 100644 --- a/CPAC/pipeline/nipype_pipeline_engine/engine.py +++ b/CPAC/pipeline/nipype_pipeline_engine/engine.py @@ -54,7 +54,7 @@ from inspect import Parameter, Signature, signature import os import re -from typing import 
Any, ClassVar, Optional +from typing import Any, ClassVar, Optional, TYPE_CHECKING from numpy import prod from traits.api import List as TraitListObject @@ -76,6 +76,10 @@ from CPAC.utils.monitoring import getLogger, WFLOGGER +if TYPE_CHECKING: + from CPAC.pipeline.engine import ResourcePool + + # set global default mem_gb DEFAULT_MEM_GB = 2.0 UNDEFINED_SIZE = (42, 42, 42, 1200) @@ -762,6 +766,18 @@ def write_hierarchical_dotfile( else: WFLOGGER.info(dotstr) + def connect_optional( + self, + source_resource_pool: "ResourcePool", + source_resource: str | list[str], + dest: pe.Node, + dest_input: str, + ) -> None: + """Connect optional inputs to a workflow.""" + if source_resource_pool.check_rpool(source_resource): + node, out = source_resource_pool.get_data(source_resource) + self.connect(node, out, dest, dest_input) + def get_data_size(filepath, mode="xyzt"): """Return the size of a functional image (x * y * z * t). diff --git a/CPAC/pipeline/utils.py b/CPAC/pipeline/utils.py index b5205861f..834d0b4e1 100644 --- a/CPAC/pipeline/utils.py +++ b/CPAC/pipeline/utils.py @@ -28,11 +28,16 @@ from CPAC.utils.monitoring import IFLOGGER if TYPE_CHECKING: + from CPAC.pipeline.engine import ResourcePool from CPAC.pipeline.nodeblock import POOL_RESOURCE_MAPPING MOVEMENT_FILTER_KEYS = motion_estimate_filter.outputs +class CrossedVariantsError(Exception): + """Exception raised when crossed variants are found in the inputs.""" + + def get_shell() -> str: """Return the path to default shell.""" shell: Optional[str] = subprocess.getoutput( @@ -368,3 +373,54 @@ def _update_resource_idx(resource_idx, out_dct, key, value): resource_idx = insert_entity(resource_idx, key, value) out_dct["filename"] = insert_entity(out_dct["filename"], key, value) return resource_idx, out_dct + + +def find_variants( + pool: "ResourcePool", keys: list | str | tuple +) -> dict[str, dict[str, set[str]]]: + """Find variants in the ResourcePool for the given keys.""" + outputs = {} + if isinstance(keys, str): 
+ try: + return { + keys: { + _k: {str(_v)} + for _k, _v in pool.get_json(keys)["CpacVariant"].items() + } + } + except LookupError: + return {} + for key in keys: + outputs = {**outputs, **find_variants(pool, key)} + return outputs + + +def short_circuit_crossed_variants( + pool: "ResourcePool", inputs: list | str | tuple +) -> None: + """Short-circuit the strategy if crossed variants are found. + + .. image:: https://media1.tenor.com/m/S93jWPGv52gAAAAd/dont-cross-the-streams-egon.gif + :width: 48 + :alt: Don't cross the streams + """ + _variants = find_variants(pool, inputs) + # collect all variant dicts + variant_dicts = list(_variants.values()) + if not variant_dicts: + return + + # only keep keys that exist in all variant dicts + common_keys = set.intersection(*(set(v.keys()) for v in variant_dicts)) + + crossed_variants = {} + for key in common_keys: + values = set() + for variant in variant_dicts: + values.update(variant.get(key, [])) + if len(values) > 1: + crossed_variants[key] = values + + if crossed_variants: + msg = f"Crossed variants found: {crossed_variants}" + raise CrossedVariantsError(msg) diff --git a/CPAC/qc/xcp.py b/CPAC/qc/xcp.py index 1a316717c..3ee7db741 100644 --- a/CPAC/qc/xcp.py +++ b/CPAC/qc/xcp.py @@ -77,11 +77,12 @@ from io import BufferedReader import os import re +from typing import Any, Optional from bids.layout import parse_file_entities import numpy as np import pandas as pd -import nibabel as nib +from nibabel import load as nib_load # type: ignore[reportPrivateImportUsage] from nipype.interfaces import afni, fsl from CPAC.generate_motion_statistics.generate_motion_statistics import ( @@ -89,6 +90,7 @@ ImageTo1D, ) from CPAC.pipeline import nipype_pipeline_engine as pe +from CPAC.pipeline.engine import ResourcePool from CPAC.pipeline.nodeblock import nodeblock from CPAC.qc.qcmetrics import regisQ from CPAC.utils.interfaces.function import Function @@ -101,40 +103,27 @@ ] -def _connect_motion(wf, nodes, strat_pool, qc_file, 
pipe_num): +def _connect_motion( + wf: pe.Workflow, strat_pool: ResourcePool, qc_file: pe.Node, pipe_num: int +) -> pe.Workflow: """ Connect the motion metrics to the workflow. Parameters ---------- - wf : nipype.pipeline.engine.Workflow + wf The workflow to connect the motion metrics to. - nodes : dict - Dictionary of nodes already collected from the strategy pool. - - strat_pool : CPAC.pipeline.engine.ResourcePool + strat_pool The current strategy pool. - qc_file : nipype.pipeline.engine.Node + qc_file A function node with the function ``generate_xcp_qc``. - - pipe_num : int - - Returns - ------- - wf : nipype.pipeline.engine.Workflow """ # pylint: disable=invalid-name, too-many-arguments - try: - nodes = {**nodes, "censor-indices": strat_pool.node_data("censor-indices")} - wf.connect( - nodes["censor-indices"].node, - nodes["censor-indices"].out, - qc_file, - "censor_indices", - ) - except LookupError: + if strat_pool.check_rpool("censor-indices"): + wf.connect_optional(strat_pool, "censor-indices", qc_file, "censor_indices") + else: qc_file.inputs.censor_indices = [] cal_DVARS = pe.Node( ImageTo1D(method="dvars"), @@ -153,36 +142,24 @@ def _connect_motion(wf, nodes, strat_pool, qc_file, pipe_num): name=f"cal_DVARS_strip_{pipe_num}", ) motion_name = "desc-movementParametersUnfiltered_motion" - if motion_name not in nodes: + if not strat_pool.check_rpool(motion_name): motion_name = "desc-movementParameters_motion" + wf.connect_optional(strat_pool, "desc-preproc_bold", cal_DVARS, "in_file") + wf.connect_optional(strat_pool, "space-bold_desc-brain_mask", cal_DVARS, "mask") + wf.connect_optional(strat_pool, motion_name, qc_file, "movement_parameters") + for resource in motion_params: + if not resource.endswith("_motion"): + wf.connect_optional( + strat_pool, resource, qc_file, resource.replace("-", "_") + ) wf.connect( [ - ( - nodes["desc-preproc_bold"].node, - cal_DVARS, - [(nodes["desc-preproc_bold"].out, "in_file")], - ), - ( - 
nodes["space-bold_desc-brain_mask"].node, - cal_DVARS, - [(nodes["space-bold_desc-brain_mask"].out, "mask")], - ), (cal_DVARS, cal_DVARS_strip, [("out_file", "file_1D")]), ( cal_DVARS_strip, qc_file, [("out_file", "dvars_after_path"), ("out_matrix", "dvars_after")], ), - ( - nodes[motion_name].node, - qc_file, - [(nodes[motion_name].out, "movement_parameters")], - ), - *[ - (nodes[node].node, qc_file, [(nodes[node].out, node.replace("-", "_"))]) - for node in motion_params - if not node.endswith("_motion") and node in nodes - ], ] ) return wf @@ -209,20 +186,20 @@ def generate_xcp_qc( # noqa: PLR0913 task: str, run: str | int, desc: str, - regressors: str, - bold2t1w_mask: str, - t1w_mask: str, - bold2template_mask: str, - template_mask: str, - original_func: str, - final_func: str, - movement_parameters: str, - dvars: str, - censor_indices: list[int], - framewise_displacement_jenkinson: str, - dvars_after: np.ndarray, - dvars_after_path: str, - template: str, + regressors: Optional[str], + bold2t1w_mask: Optional[str], + t1w_mask: Optional[str], + bold2template_mask: Optional[str], + template_mask: Optional[str], + original_func: Optional[str], + final_func: Optional[str], + movement_parameters: Optional[str], + dvars: Optional[str], + censor_indices: Optional[list[int]], + framewise_displacement_jenkinson: Optional[str], + dvars_after: Optional[np.ndarray], + dvars_after_path: Optional[str], + template: Optional[str], ) -> str: """ Generate an RBC-style QC CSV. 
@@ -292,90 +269,131 @@ def generate_xcp_qc( # noqa: PLR0913 str path to space-template_desc-xcp_quality TSV """ - columns = ( - "sub,ses,task,run,desc,regressors,space,meanFD,relMeansRMSMotion," - "relMaxRMSMotion,meanDVInit,meanDVFinal,nVolCensored,nVolsRemoved," - "motionDVCorrInit,motionDVCorrFinal,coregDice,coregJaccard," - "coregCrossCorr,coregCoverage,normDice,normJaccard,normCrossCorr," - "normCoverage".split(",") - ) - images = { - "original_func": nib.load(original_func), - "final_func": nib.load(final_func), + key: nib_load(image) + for key, image in [("original_func", original_func), ("final_func", final_func)] + if image } - # `sub` through `space` - from_bids = { - "sub": sub, - "ses": ses, - "task": task, - "run": run, - "desc": desc, - "regressors": regressors, - "space": os.path.basename(template).split(".", 1)[0].split("_", 1)[0], - } - if from_bids["space"].startswith("tpl-"): - from_bids["space"] = from_bids["space"][4:] - - # `nVolCensored` & `nVolsRemoved` - n_vols_censored = len(censor_indices) if censor_indices is not None else "unknown" - shape_params = { - "nVolCensored": n_vols_censored, - "nVolsRemoved": images["original_func"].shape[3] - - images["final_func"].shape[3], - } + qc_dict: dict[str, Any] = {} + """Quality control dictionary to be converted to a DataFrame.""" - if isinstance(final_func, BufferedReader): - final_func = final_func.name - qc_filepath = os.path.join(os.getcwd(), "xcpqc.tsv") + columns: list[str] = [] + """Header for the quality control DataFrame.""" - desc_span = re.search(r"_desc-.*_", final_func) - if desc_span: - desc_span = desc_span.span() - final_func = "_".join([final_func[: desc_span[0]], final_func[desc_span[1] :]]) - del desc_span - - # `meanFD (Jenkinson)` - power_params = {"meanFD": np.mean(np.loadtxt(framewise_displacement_jenkinson))} - - # `relMeansRMSMotion` & `relMaxRMSMotion` - mot = np.genfromtxt(movement_parameters).T - # Relative RMS of translation - rms = np.sqrt(mot[3] ** 2 + mot[4] ** 2 + 
mot[5] ** 2) - rms_params = {"relMeansRMSMotion": [np.mean(rms)], "relMaxRMSMotion": [np.max(rms)]} - - # `meanDVInit` & `meanDVFinal` - meanDV = {"meanDVInit": np.mean(np.loadtxt(dvars))} - try: - meanDV["motionDVCorrInit"] = dvcorr(dvars, framewise_displacement_jenkinson) - except ValueError as value_error: - meanDV["motionDVCorrInit"] = f"ValueError({value_error!s})" - meanDV["meanDVFinal"] = np.mean(dvars_after) - try: - meanDV["motionDVCorrFinal"] = dvcorr( - dvars_after_path, framewise_displacement_jenkinson + # `sub` through `space` + from_bids: dict[str, Any] = { + _k: _v + for _k, _v in { + "sub": sub, + "ses": ses, + "task": task, + "run": run, + "desc": desc, + "regressors": regressors, + "space": os.path.basename(template).split(".", 1)[0].split("_", 1)[0] + if template + else None, + }.items() + if _v is not None + } + columns.extend(["sub", "ses", "task", "run", "desc", "regressors"]) + if from_bids["space"] is not None: + if from_bids["space"].startswith("tpl-"): + from_bids["space"] = from_bids["space"][4:] + columns.append("space") + qc_dict = {**qc_dict, **from_bids} + + if framewise_displacement_jenkinson is not None: + # `meanFD (Jenkinson)` + power_params = {"meanFD": np.mean(np.loadtxt(framewise_displacement_jenkinson))} + qc_dict = {**qc_dict, **power_params} + columns.append("meanFD") + + if movement_parameters is not None: + # `relMeansRMSMotion` & `relMaxRMSMotion` + mot = np.genfromtxt(movement_parameters).T + # Relative RMS of translation + rms = np.sqrt(mot[3] ** 2 + mot[4] ** 2 + mot[5] ** 2) + rms_params = { + "relMeansRMSMotion": [np.mean(rms)], + "relMaxRMSMotion": [np.max(rms)], + } + qc_dict = {**qc_dict, **rms_params} + columns.extend(list(rms_params.keys())) + + if dvars is not None: + # `meanDVInit` & `meanDVFinal` + meanDV = {"meanDVInit": np.mean(np.loadtxt(dvars))} + try: + meanDV["motionDVCorrInit"] = dvcorr(dvars, framewise_displacement_jenkinson) + except ValueError as value_error: + meanDV["motionDVCorrInit"] = 
f"ValueError({value_error!s})" + meanDV["meanDVFinal"] = np.mean(dvars_after) + try: + meanDV["motionDVCorrFinal"] = dvcorr( + dvars_after_path, framewise_displacement_jenkinson + ) + except ValueError as value_error: + meanDV["motionDVCorrFinal"] = f"ValueError({value_error!s})" + qc_dict = {**qc_dict, **meanDV} + columns.extend(list(meanDV.keys())) + + if censor_indices is not None: + # `nVolCensored` & `nVolsRemoved` + n_vols_censored = ( + len(censor_indices) if censor_indices is not None else "unknown" ) - except ValueError as value_error: - meanDV["motionDVCorrFinal"] = f"ValueError({value_error!s})" - - # Overlap - overlap_params = regisQ( - bold2t1w_mask=bold2t1w_mask, - t1w_mask=t1w_mask, - bold2template_mask=bold2template_mask, - template_mask=template_mask, - ) + shape_params = { + "nVolCensored": n_vols_censored, + "nVolsRemoved": images["original_func"].shape[3] + - images["final_func"].shape[3], + } + qc_dict = {**qc_dict, **shape_params} + if "motionDVCorrFinal" in columns: + columns.insert(-2, "meanDVInit") + columns.insert(-2, "meanDVFinal") + columns.extend(["motionDVCorrInit", "motionDVCorrFinal"]) + else: + columns.extend(list(shape_params.keys())) + + if final_func is not None: + if isinstance(final_func, BufferedReader): + final_func = final_func.name + + desc_span = re.search(r"_desc-.*_", final_func) if final_func else None + if desc_span: + desc_span = desc_span.span() + assert final_func is not None + final_func = "_".join( + [final_func[: desc_span[0]], final_func[desc_span[1] :]] + ) + del desc_span + + if all( + _var is not None + for _var in [bold2t1w_mask, t1w_mask, bold2template_mask, template_mask] + ): + # `coregDice`, `coregJaccard`, `coregCrossCorr`, `coregCoverage` + coreg_params = regisQ( + bold2t1w_mask=bold2t1w_mask, + t1w_mask=t1w_mask, + bold2template_mask=bold2template_mask, + template_mask=template_mask, + ) + # Overlap + overlap_params = regisQ( + bold2t1w_mask=bold2t1w_mask, + t1w_mask=t1w_mask, + 
bold2template_mask=bold2template_mask, + template_mask=template_mask, + ) + qc_dict = {**qc_dict, **coreg_params, **overlap_params} + columns.extend([*list(coreg_params.keys()), *list(overlap_params.keys())]) - qc_dict = { - **from_bids, - **power_params, - **rms_params, - **shape_params, - **overlap_params, - **meanDV, - } df = pd.DataFrame(qc_dict, columns=columns) + + qc_filepath = os.path.join(os.getcwd(), "xcpqc.tsv") df.to_csv(qc_filepath, sep="\t", index=False) return qc_filepath @@ -543,31 +561,6 @@ def qc_xcp(wf, cfg, strat_pool, pipe_num, opt=None): name=f"binarize_bold_to_T1w_mask_{pipe_num}", op_string="-bin ", ) - nodes = { - key: strat_pool.node_data(key) - for key in [ - "bold", - "desc-preproc_bold", - "max-displacement", - "scan", - "space-bold_desc-brain_mask", - "space-T1w_desc-brain_mask", - "space-T1w_sbref", - "space-template_desc-preproc_bold", - "subject", - *motion_params, - ] - if strat_pool.check_rpool(key) - } - nodes["bold2template_mask"] = strat_pool.node_data( - ["space-template_desc-bold_mask", "space-EPItemplate_desc-bold_mask"] - ) - nodes["template_mask"] = strat_pool.node_data( - ["T1w-brain-template-mask", "EPI-template-mask"] - ) - nodes["template"] = strat_pool.node_data( - ["T1w-brain-template-funcreg", "EPI-brain-template-funcreg"] - ) resample_bold_mask_to_template = pe.Node( afni.Resample(), name=f"resample_bold_mask_to_anat_res_{pipe_num}", @@ -575,44 +568,45 @@ def qc_xcp(wf, cfg, strat_pool, pipe_num, opt=None): mem_x=(0.0115, "in_file", "t"), ) resample_bold_mask_to_template.inputs.outputtype = "NIFTI_GZ" - wf = _connect_motion(wf, nodes, strat_pool, qc_file, pipe_num=pipe_num) + wf: pe.Workflow = _connect_motion(wf, strat_pool, qc_file, pipe_num=pipe_num) + if not hasattr(wf, "connect_optional"): + setattr(wf, "connect_optional", pe.Workflow.connect_optional) + + for key in ["subject", "scan"]: + wf.connect_optional(strat_pool, key, bids_info, key) + wf.connect_optional(strat_pool, "space-T1w_sbref", bold_to_T1w_mask, 
"in_file") + wf.connect_optional(strat_pool, "space-T1w_desc-brain_mask", qc_file, "t1w_mask") + wf.connect_optional( + strat_pool, + ["T1w-brain-template-mask", "EPI-template-mask"], + qc_file, + "template_mask", + ) + wf.connect_optional( + strat_pool, + ["T1w-brain-template-mask", "EPI-template-mask"], + resample_bold_mask_to_template, + "master", + ) + wf.connect_optional(strat_pool, "bold", qc_file, "original_func") + wf.connect_optional( + strat_pool, "space-template_desc-preproc_bold", qc_file, "final_func" + ) + wf.connect_optional( + strat_pool, + ["T1w-brain-template-funcreg", "EPI-brain-template-funcreg"], + qc_file, + "template", + ) + wf.connect_optional( + strat_pool, + ["space-template_desc-bold_mask", "space-EPItemplate_desc-bold_mask"], + resample_bold_mask_to_template, + "in_file", + ) wf.connect( [ - (nodes["subject"].node, bids_info, [(nodes["subject"].out, "subject")]), - (nodes["scan"].node, bids_info, [(nodes["scan"].out, "scan")]), - ( - nodes["space-T1w_sbref"].node, - bold_to_T1w_mask, - [(nodes["space-T1w_sbref"].out, "in_file")], - ), - ( - nodes["space-T1w_desc-brain_mask"].node, - qc_file, - [(nodes["space-T1w_desc-brain_mask"].out, "t1w_mask")], - ), (bold_to_T1w_mask, qc_file, [("out_file", "bold2t1w_mask")]), - ( - nodes["template_mask"].node, - qc_file, - [(nodes["template_mask"].out, "template_mask")], - ), - (nodes["bold"].node, qc_file, [(nodes["bold"].out, "original_func")]), - ( - nodes["space-template_desc-preproc_bold"].node, - qc_file, - [(nodes["space-template_desc-preproc_bold"].out, "final_func")], - ), - (nodes["template"].node, qc_file, [(nodes["template"].out, "template")]), - ( - nodes["template_mask"].node, - resample_bold_mask_to_template, - [(nodes["template_mask"].out, "master")], - ), - ( - nodes["bold2template_mask"].node, - resample_bold_mask_to_template, - [(nodes["bold2template_mask"].out, "in_file")], - ), ( resample_bold_mask_to_template, qc_file, diff --git a/CPAC/registration/registration.py 
b/CPAC/registration/registration.py index 6bf649b44..7b4f50be7 100644 --- a/CPAC/registration/registration.py +++ b/CPAC/registration/registration.py @@ -3366,7 +3366,7 @@ def coregistration_prep_fmriprep(wf, cfg, strat_pool, pipe_num, opt=None): ( "desc-preproc_T1w", "space-T1w_desc-brain_mask", - ["desc-restore-brain_T1w", "desc-preproc_T1w"], + "desc-restore-brain_T1w", ["desc-restore_T1w", "desc-head_T1w"], "desc-preproc_T2w", "desc-preproc_T2w", @@ -4017,7 +4017,6 @@ def warp_wholeheadT1_to_template(wf, cfg, strat_pool, pipe_num, opt=None): ) def warp_T1mask_to_template(wf, cfg, strat_pool, pipe_num, opt=None): """Warp T1 mask to template.""" - if ( cfg.registration_workflows["anatomical_registration"]["overwrite_transform"] and cfg.registration_workflows["anatomical_registration"][