diff --git a/fmriprep/data/fmap_spec.json b/fmriprep/data/fmap_spec.json
new file mode 100644
index 000000000..3091f33b3
--- /dev/null
+++ b/fmriprep/data/fmap_spec.json
@@ -0,0 +1,33 @@
+{
+ "queries": {
+ "fieldmaps": {
+ "fieldmap": {
+ "datatype": "fmap",
+ "desc": "preproc",
+ "suffix": "fieldmap",
+ "extension": [
+ ".nii.gz",
+ ".nii"
+ ]
+ },
+ "coeffs": {
+ "datatype": "fmap",
+ "desc": ["coeff", "coeff0", "coeff1"],
+ "suffix": "fieldmap",
+ "extension": [
+ ".nii.gz",
+ ".nii"
+ ]
+ },
+ "magnitude": {
+ "datatype": "fmap",
+ "desc": "magnitude",
+ "suffix": "fieldmap",
+ "extension": [
+ ".nii.gz",
+ ".nii"
+ ]
+ }
+ }
+ }
+}
diff --git a/fmriprep/data/io_spec.json b/fmriprep/data/io_spec.json
index 364cff7e3..d4c54c666 100644
--- a/fmriprep/data/io_spec.json
+++ b/fmriprep/data/io_spec.json
@@ -34,7 +34,7 @@
"boldref2anat": {
"datatype": "func",
"from": "boldref",
- "to": "anat",
+ "to": ["anat", "T1w", "T2w"],
"mode": "image",
"suffix": "xfm",
"extension": ".txt"
diff --git a/fmriprep/interfaces/reports.py b/fmriprep/interfaces/reports.py
index 6d199ccee..49ac59054 100644
--- a/fmriprep/interfaces/reports.py
+++ b/fmriprep/interfaces/reports.py
@@ -204,7 +204,11 @@ class FunctionalSummaryInputSpec(TraitedSpec):
desc='Phase-encoding direction detected',
)
registration = traits.Enum(
- 'FSL', 'FreeSurfer', mandatory=True, desc='Functional/anatomical registration method'
+ 'FSL',
+ 'FreeSurfer',
+ 'Precomputed',
+ mandatory=True,
+ desc='Functional/anatomical registration method',
)
fallback = traits.Bool(desc='Boundary-based registration rejected')
registration_dof = traits.Enum(
@@ -239,18 +243,21 @@ def _generate_segment(self):
else:
stc = 'n/a'
# TODO: Add a note about registration_init below?
- reg = {
- 'FSL': [
- 'FSL flirt with boundary-based registration'
- f' (BBR) metric - {dof} dof',
- 'FSL flirt rigid registration - 6 dof',
- ],
- 'FreeSurfer': [
- 'FreeSurfer bbregister '
- f'(boundary-based registration, BBR) - {dof} dof',
- f'FreeSurfer mri_coreg - {dof} dof',
- ],
- }[self.inputs.registration][self.inputs.fallback]
+ if self.inputs.registration == 'Precomputed':
+ reg = 'Precomputed affine transformation'
+ else:
+ reg = {
+ 'FSL': [
+ 'FSL flirt with boundary-based registration'
+ f' (BBR) metric - {dof} dof',
+ 'FSL flirt rigid registration - 6 dof',
+ ],
+ 'FreeSurfer': [
+ 'FreeSurfer bbregister '
+ f'(boundary-based registration, BBR) - {dof} dof',
+ f'FreeSurfer mri_coreg - {dof} dof',
+ ],
+ }[self.inputs.registration][self.inputs.fallback]
pedir = get_world_pedir(self.inputs.orientation, self.inputs.pe_direction)
diff --git a/fmriprep/interfaces/resampling.py b/fmriprep/interfaces/resampling.py
index de5c7fcf7..603b901fc 100644
--- a/fmriprep/interfaces/resampling.py
+++ b/fmriprep/interfaces/resampling.py
@@ -671,6 +671,15 @@ def reconstruct_fieldmap(
target.__class__(target.dataobj, projected_affine, target.header),
)
else:
+ # Hack. Sometimes the reference array is rotated relative to the fieldmap
+ # and coefficient grids. As far as I know, coefficients are always RAS,
+ # but good to check before doing this.
+ if (
+ nb.aff2axcodes(coefficients[-1].affine)
+ == ('R', 'A', 'S')
+ != nb.aff2axcodes(fmap_reference.affine)
+ ):
+ fmap_reference = nb.as_closest_canonical(fmap_reference)
if not aligned(fmap_reference.affine, coefficients[-1].affine):
raise ValueError('Reference passed is not aligned with spline grids')
reference, _ = ensure_positive_cosines(fmap_reference)
diff --git a/fmriprep/utils/bids.py b/fmriprep/utils/bids.py
index db18d8c01..3f26c71ce 100644
--- a/fmriprep/utils/bids.py
+++ b/fmriprep/utils/bids.py
@@ -28,6 +28,7 @@
import os
import sys
from collections import defaultdict
+from functools import cache
from pathlib import Path
from bids.layout import BIDSLayout
@@ -38,6 +39,15 @@
from ..data import load as load_data
+@cache
+def _get_layout(derivatives_dir: Path) -> BIDSLayout:
+ import niworkflows.data
+
+ return BIDSLayout(
+ derivatives_dir, config=[niworkflows.data.load('nipreps.json')], validate=False
+ )
+
+
def collect_derivatives(
derivatives_dir: Path,
entities: dict,
@@ -57,8 +67,7 @@ def collect_derivatives(
patterns = _patterns
derivs_cache = defaultdict(list, {})
- layout = BIDSLayout(derivatives_dir, config=['bids', 'derivatives'], validate=False)
- derivatives_dir = Path(derivatives_dir)
+ layout = _get_layout(derivatives_dir)
# search for both boldrefs
for k, q in spec['baseline'].items():
@@ -86,6 +95,31 @@ def collect_derivatives(
return derivs_cache
+def collect_fieldmaps(
+ derivatives_dir: Path,
+ entities: dict,
+ spec: dict | None = None,
+):
+    """Gather precomputed fieldmap derivatives, composing a cache keyed by fieldmap ID."""
+ if spec is None:
+ spec = json.loads(load_data.readable('fmap_spec.json').read_text())['queries']
+
+ fmap_cache = defaultdict(dict, {})
+ layout = _get_layout(derivatives_dir)
+
+ fmapids = layout.get_fmapids(**entities)
+
+ for fmapid in fmapids:
+ for k, q in spec['fieldmaps'].items():
+ query = {**entities, **q}
+ item = layout.get(return_type='filename', fmapid=fmapid, **query)
+ if not item:
+ continue
+ fmap_cache[fmapid][k] = item[0] if len(item) == 1 else item
+
+ return fmap_cache
+
+
def write_bidsignore(deriv_dir):
bids_ignore = (
'*.html',
diff --git a/fmriprep/workflows/base.py b/fmriprep/workflows/base.py
index c8a2fb8a7..5cf07de64 100644
--- a/fmriprep/workflows/base.py
+++ b/fmriprep/workflows/base.py
@@ -30,6 +30,7 @@
"""
import os
+import re
import sys
import warnings
from copy import deepcopy
@@ -337,6 +338,9 @@ def init_single_subject_wf(subject_id: str):
# allow to run with anat-fast-track on fMRI-only dataset
if 't1w_preproc' in anatomical_cache and not subject_data['t1w']:
+ config.loggers.workflow.debug(
+ 'No T1w image found; using precomputed T1w image: %s', anatomical_cache['t1w_preproc']
+ )
workflow.connect([
(bidssrc, bids_info, [(('bold', fix_multi_T1w_source_name), 'in_file')]),
(anat_fit_wf, summary, [('outputnode.t1w_preproc', 't1w')]),
@@ -533,7 +537,21 @@ def init_single_subject_wf(subject_id: str):
if config.workflow.anat_only:
return clean_datasinks(workflow)
- fmap_estimators, estimator_map = map_fieldmap_estimation(
+ fmap_cache = {}
+ if config.execution.derivatives:
+ from fmriprep.utils.bids import collect_fieldmaps
+
+ for deriv_dir in config.execution.derivatives.values():
+ fmaps = collect_fieldmaps(
+ derivatives_dir=deriv_dir,
+ entities={'subject': subject_id},
+ )
+ config.loggers.workflow.debug(
+ 'Detected precomputed fieldmaps in %s for fieldmap IDs: %s', deriv_dir, list(fmaps)
+ )
+ fmap_cache.update(fmaps)
+
+ all_estimators, estimator_map = map_fieldmap_estimation(
layout=config.execution.layout,
subject_id=subject_id,
bold_data=bold_runs,
@@ -543,6 +561,48 @@ def init_single_subject_wf(subject_id: str):
filters=config.execution.get().get('bids_filters', {}).get('fmap'),
)
+ fmap_buffers = {
+ field: pe.Node(niu.Merge(2), name=f'{field}_merge', run_without_submitting=True)
+ for field in ['fmap', 'fmap_ref', 'fmap_coeff', 'fmap_mask', 'fmap_id', 'sdc_method']
+ }
+
+ fmap_estimators = []
+ if all_estimators:
+ # Find precomputed fieldmaps that apply to this workflow
+ pared_cache = {}
+ for est in all_estimators:
+ if found := fmap_cache.get(re.sub(r'[^a-zA-Z0-9]', '', est.bids_id)):
+ pared_cache[est.bids_id] = found
+ else:
+ fmap_estimators.append(est)
+
+ if pared_cache:
+ config.loggers.workflow.info(
+ 'Precomputed B0 field inhomogeneity maps found for the following '
+ f'{len(pared_cache)} estimator(s): {list(pared_cache)}.'
+ )
+
+ fieldmaps = [fmap['fieldmap'] for fmap in pared_cache.values()]
+ refs = [fmap['magnitude'] for fmap in pared_cache.values()]
+ coeffs = [fmap['coeffs'] for fmap in pared_cache.values()]
+ config.loggers.workflow.debug('Reusing fieldmaps: %s', fieldmaps)
+ config.loggers.workflow.debug('Reusing references: %s', refs)
+ config.loggers.workflow.debug('Reusing coefficients: %s', coeffs)
+
+ fmap_buffers['fmap'].inputs.in1 = fieldmaps
+ fmap_buffers['fmap_ref'].inputs.in1 = refs
+ fmap_buffers['fmap_coeff'].inputs.in1 = coeffs
+
+ # Note that masks are not emitted. The BOLD-fmap transforms cannot be
+ # computed with precomputed fieldmaps until we either start emitting masks
+ # or start skull-stripping references on the fly.
+ fmap_buffers['fmap_mask'].inputs.in1 = [
+ pared_cache[fmapid].get('mask', 'MISSING') for fmapid in pared_cache
+ ]
+ fmap_buffers['fmap_id'].inputs.in1 = list(pared_cache)
+ fmap_buffers['sdc_method'].inputs.in1 = ['precomputed'] * len(pared_cache)
+
+ # Estimators without precomputed fieldmaps
if fmap_estimators:
config.loggers.workflow.info(
'B0 field inhomogeneity map will be estimated with the following '
@@ -573,24 +633,31 @@ def init_single_subject_wf(subject_id: str):
if node.split('.')[-1].startswith('ds_'):
fmap_wf.get_node(node).interface.out_path_base = ''
+ workflow.connect([
+ (fmap_wf, fmap_buffers[field], [
+ # We get "sdc_method" as "method" from estimator workflows
+ # All else stays the same, and no other sdc_ prefixes are used
+ (f'outputnode.{field.removeprefix("sdc_")}', 'in2'),
+ ])
+ for field in fmap_buffers
+ ]) # fmt:skip
+
fmap_select_std = pe.Node(
KeySelect(fields=['std2anat_xfm'], key='MNI152NLin2009cAsym'),
name='fmap_select_std',
run_without_submitting=True,
)
if any(estimator.method == fm.EstimatorType.ANAT for estimator in fmap_estimators):
- # fmt:off
workflow.connect([
(anat_fit_wf, fmap_select_std, [
('outputnode.std2anat_xfm', 'std2anat_xfm'),
('outputnode.template', 'keys')]),
- ])
- # fmt:on
+ ]) # fmt:skip
for estimator in fmap_estimators:
config.loggers.workflow.info(
f"""\
-Setting-up fieldmap "{estimator.bids_id}" ({estimator.method}) with \
+Setting up fieldmap "{estimator.bids_id}" ({estimator.method}) with \
<{', '.join(s.path.name for s in estimator.sources)}>"""
)
@@ -633,7 +700,6 @@ def init_single_subject_wf(subject_id: str):
syn_preprocessing_wf.inputs.inputnode.in_epis = sources
syn_preprocessing_wf.inputs.inputnode.in_meta = source_meta
- # fmt:off
workflow.connect([
(anat_fit_wf, syn_preprocessing_wf, [
('outputnode.t1w_preproc', 'inputnode.in_anat'),
@@ -649,8 +715,7 @@ def init_single_subject_wf(subject_id: str):
('outputnode.anat_mask', f'in_{estimator.bids_id}.anat_mask'),
('outputnode.sd_prior', f'in_{estimator.bids_id}.sd_prior'),
]),
- ])
- # fmt:on
+ ]) # fmt:skip
# Append the functional section to the existing anatomical excerpt
# That way we do not need to stream down the number of bold datasets
@@ -718,17 +783,11 @@ def init_single_subject_wf(subject_id: str):
),
]),
]) # fmt:skip
- if fieldmap_id:
- workflow.connect([
- (fmap_wf, bold_wf, [
- ('outputnode.fmap', 'inputnode.fmap'),
- ('outputnode.fmap_ref', 'inputnode.fmap_ref'),
- ('outputnode.fmap_coeff', 'inputnode.fmap_coeff'),
- ('outputnode.fmap_mask', 'inputnode.fmap_mask'),
- ('outputnode.fmap_id', 'inputnode.fmap_id'),
- ('outputnode.method', 'inputnode.sdc_method'),
- ]),
- ]) # fmt:skip
+
+ workflow.connect([
+ (buffer, bold_wf, [('out', f'inputnode.{field}')])
+ for field, buffer in fmap_buffers.items()
+ ]) # fmt:skip
if config.workflow.level == 'full':
if template_iterator_wf is not None:
diff --git a/fmriprep/workflows/bold/fit.py b/fmriprep/workflows/bold/fit.py
index a585d5646..0445e4de3 100644
--- a/fmriprep/workflows/bold/fit.py
+++ b/fmriprep/workflows/bold/fit.py
@@ -236,8 +236,8 @@ def init_bold_fit_wf(
# Boolean used to update workflow self-descriptions
multiecho = len(bold_series) > 1
- have_hmcref = 'hmc_boldref' in precomputed
- have_coregref = 'coreg_boldref' in precomputed
+ hmc_boldref = precomputed.get('hmc_boldref')
+ coreg_boldref = precomputed.get('coreg_boldref')
# Can contain
# 1) boldref2fmap
# 2) boldref2anat
@@ -304,10 +304,30 @@ def init_bold_fit_wf(
niu.IdentityInterface(fields=['boldref', 'boldmask']), name='regref_buffer'
)
+ if hmc_boldref:
+ hmcref_buffer.inputs.boldref = hmc_boldref
+ config.loggers.workflow.debug('Reusing motion correction reference: %s', hmc_boldref)
+ if hmc_xforms:
+ hmc_buffer.inputs.hmc_xforms = hmc_xforms
+ config.loggers.workflow.debug('Reusing motion correction transforms: %s', hmc_xforms)
+ if boldref2fmap_xform:
+ fmapreg_buffer.inputs.boldref2fmap_xfm = boldref2fmap_xform
+ config.loggers.workflow.debug('Reusing BOLD-to-fieldmap transform: %s', boldref2fmap_xform)
+ if coreg_boldref:
+ regref_buffer.inputs.boldref = coreg_boldref
+ config.loggers.workflow.debug('Reusing coregistration reference: %s', coreg_boldref)
+ fmapref_buffer.inputs.sbref_files = sbref_files
+
summary = pe.Node(
FunctionalSummary(
distortion_correction='None', # Can override with connection
- registration=('FSL', 'FreeSurfer')[config.workflow.run_reconall],
+ registration=(
+ 'Precomputed'
+ if boldref2anat_xform
+ else 'FreeSurfer'
+ if config.workflow.run_reconall
+ else 'FSL'
+ ),
registration_dof=config.workflow.bold2anat_dof,
registration_init=config.workflow.bold2anat_init,
pe_direction=metadata.get('PhaseEncodingDirection'),
@@ -331,12 +351,11 @@ def init_bold_fit_wf(
func_fit_reports_wf = init_func_fit_reports_wf(
# TODO: Enable sdc report even if we find coregref
- sdc_correction=not (have_coregref or fieldmap_id is None),
+ sdc_correction=not (coreg_boldref or fieldmap_id is None),
freesurfer=config.workflow.run_reconall,
output_dir=config.execution.fmriprep_dir,
)
- # fmt:off
workflow.connect([
(hmcref_buffer, outputnode, [
('boldref', 'hmc_boldref'),
@@ -365,15 +384,14 @@ def init_bold_fit_wf(
('boldref2anat_xfm', 'inputnode.boldref2anat_xfm'),
]),
(summary, func_fit_reports_wf, [('out_report', 'inputnode.summary_report')]),
- ])
- # fmt:on
+ ]) # fmt:skip
# Stage 1: Generate motion correction boldref
hmc_boldref_source_buffer = pe.Node(
niu.IdentityInterface(fields=['in_file']),
name='hmc_boldref_source_buffer',
)
- if not have_hmcref:
+ if not hmc_boldref:
config.loggers.workflow.info('Stage 1: Adding HMC boldref workflow')
hmc_boldref_wf = init_raw_boldref_wf(
name='hmc_boldref_wf',
@@ -407,7 +425,6 @@ def init_bold_fit_wf(
]) # fmt:skip
else:
config.loggers.workflow.info('Found HMC boldref - skipping Stage 1')
- hmcref_buffer.inputs.boldref = precomputed['hmc_boldref']
validation_and_dummies_wf = init_validation_and_dummies_wf(bold_file=bold_file)
@@ -445,15 +462,13 @@ def init_bold_fit_wf(
]) # fmt:skip
else:
config.loggers.workflow.info('Found motion correction transforms - skipping Stage 2')
- hmc_buffer.inputs.hmc_xforms = hmc_xforms
# Stage 3: Create coregistration reference
# Fieldmap correction only happens during fit if this stage is needed
- if not have_coregref:
+ if not coreg_boldref:
config.loggers.workflow.info('Stage 3: Adding coregistration boldref workflow')
# Select initial boldref, enhance contrast, and generate mask
- fmapref_buffer.inputs.sbref_files = sbref_files
if sbref_files and nb.load(sbref_files[0]).ndim > 3:
raw_sbref_wf = init_raw_boldref_wf(
name='raw_sbref_wf',
@@ -517,7 +532,6 @@ def init_bold_fit_wf(
)
ds_fmapreg_wf.inputs.inputnode.source_files = [bold_file]
- # fmt:off
workflow.connect([
(enhance_boldref_wf, fmapreg_wf, [
('outputnode.bias_corrected_file', 'inputnode.target_ref'),
@@ -530,10 +544,7 @@ def init_bold_fit_wf(
(fmapreg_wf, itk_mat2txt, [('outputnode.target2fmap_xfm', 'in_xfms')]),
(itk_mat2txt, ds_fmapreg_wf, [('out_xfm', 'inputnode.xform')]),
(ds_fmapreg_wf, fmapreg_buffer, [('outputnode.xform', 'boldref2fmap_xfm')]),
- ])
- # fmt:on
- else:
- fmapreg_buffer.inputs.boldref2fmap_xfm = boldref2fmap_xform
+ ]) # fmt:skip
unwarp_wf = init_unwarp_wf(
free_mem=config.environment.free_mem,
@@ -592,12 +603,11 @@ def init_bold_fit_wf(
]) # fmt:skip
else:
config.loggers.workflow.info('Found coregistration reference - skipping Stage 3')
- regref_buffer.inputs.boldref = precomputed['coreg_boldref']
# TODO: Allow precomputed bold masks to be passed
# Also needs consideration for how it interacts above
skullstrip_precomp_ref_wf = init_skullstrip_bold_wf(name='skullstrip_precomp_ref_wf')
- skullstrip_precomp_ref_wf.inputs.inputnode.in_file = precomputed['coreg_boldref']
+ skullstrip_precomp_ref_wf.inputs.inputnode.in_file = coreg_boldref
workflow.connect([
(skullstrip_precomp_ref_wf, regref_buffer, [('outputnode.mask_file', 'boldmask')])
]) # fmt:skip