Skip to content

Commit b95b66f

Browse files
authored
Merge pull request #51 from ME-ICA/switch-data
Switch data to preprocessed PAFIN dataset
2 parents 4d0a94d + 98ea340 commit b95b66f

25 files changed

+344
-456
lines changed

.github/workflows/build.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,10 @@ jobs:
1414
- uses: actions/checkout@v5
1515

1616
# Install dependencies
17-
- name: Set up Python 3.10
17+
- name: Set up Python 3.12
1818
uses: actions/setup-python@v6
1919
with:
20-
python-version: "3.10"
20+
python-version: "3.12"
2121

2222
- name: Cache
2323
uses: actions/cache@v4

_config.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,8 @@ html:
2828
use_issues_button : true # Whether to add an "open an issue" button
2929
extra_navbar : Powered by <a href="https://jupyterbook.org">Jupyter Book</a> # Will be displayed underneath the left navbar.
3030
extra_footer : "" # Will be displayed underneath the footer.
31-
google_analytics_id : "" # A GA id that can be used to track book views.
31+
analytics:
32+
google_analytics_id : "" # A GA id that can be used to track book views.
3233
home_page_in_navbar : true # Whether to include your home page in the left Navigation Bar
3334
baseurl : "" # The base URL where your book will be hosted. Used for creating image previews and social links. e.g.: https://mypage.com/mybook/
3435
comments:

binder/data_requirement.json

Lines changed: 0 additions & 6 deletions
This file was deleted.

content/00_Download_Data.md

Lines changed: 34 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@ jupytext:
33
text_representation:
44
extension: .md
55
format_name: myst
6-
format_version: 0.13
7-
jupytext_version: 1.10.3
6+
jupytext_version: 1.18.1
87
kernelspec:
98
display_name: Python 3
109
language: python
@@ -19,39 +18,49 @@ For more information about these datasets, see {ref}`content:open-datasets`.
1918

2019
```python
2120
import os
22-
from pprint import pprint
2321

24-
from tedana import datasets
22+
from datalad import api as dapi
2523

26-
DATA_DIR = os.path.abspath("../data")
24+
DATA_DIR = os.path.abspath('../data')
2725

28-
euskalibur_dataset = datasets.fetch_euskalibur(
29-
n_subjects=5,
30-
low_resolution=False,
31-
data_dir=DATA_DIR,
26+
# Download PAFIN fMRIPrep data
27+
dset_dir = os.path.join(DATA_DIR, 'ds006185')
28+
os.makedirs(dset_dir, exist_ok=True)
29+
dapi.install(
30+
path=dset_dir,
31+
source='https://github.com/OpenNeuroDatasets/ds006185.git',
3232
)
33-
pprint(euskalibur_dataset)
34-
35-
cambridge_dataset = datasets.fetch_cambridge(
36-
n_subjects=5,
37-
low_resolution=False,
38-
data_dir=DATA_DIR,
39-
)
40-
pprint(cambridge_dataset)
33+
dapi.get(os.path.join(dset_dir, 'sub-24053', 'ses-1', 'func', 'sub-24053_ses-1_task-rat_rec-nordic_*'), recursive=True)
34+
dapi.get(os.path.join(dset_dir, 'sub-24053', 'ses-1', 'anat', 'sub-24053_ses-1_*'), recursive=True)
4135
```
4236

43-
For now, we will use repo2data to download some data we're storing on Google Drive.
37+
For now, we will use the Datalad API to download some data we're storing on OpenNeuro.
4438

4539
```{code-cell} ipython3
40+
:tags: [hide-output]
41+
4642
import os
43+
from pathlib import Path
4744
48-
from repo2data.repo2data import Repo2Data
45+
from datalad import api as dapi
4946
50-
# Install the data if running locally, or point to cached data if running on neurolibre
51-
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
47+
DATA_DIR = os.path.abspath('../data')
48+
49+
# Download PAFIN fMRIPrep data
50+
dset_dir = os.path.join(DATA_DIR, 'ds006185')
51+
os.makedirs(dset_dir, exist_ok=True)
52+
dapi.install(
53+
path=dset_dir,
54+
source='https://github.com/OpenNeuroDatasets/ds006185.git',
55+
)
56+
subj_dir = os.path.join(dset_dir, 'sub-24053', 'ses-1')
57+
func_dir = Path(os.path.join(subj_dir, 'func'))
58+
func_files = list(func_dir.glob('sub-24053_ses-1_task-rat_rec-nordic_*'))
59+
for f in func_files:
60+
dapi.get(f)
5261
53-
# Download data
54-
repo2data = Repo2Data(DATA_REQ_FILE)
55-
data_path = repo2data.install()
56-
data_path = os.path.abspath(data_path[0])
62+
anat_dir = Path(os.path.join(subj_dir, 'anat'))
63+
anat_files = list(anat_dir.glob('*'))
64+
for f in anat_files:
65+
dapi.get(f)
5766
```

content/01_Optimal_Combination_with_t2smap.md

Lines changed: 30 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@ jupytext:
33
text_representation:
44
extension: .md
55
format_name: myst
6-
format_version: 0.13
7-
jupytext_version: 1.10.3
6+
jupytext_version: 1.18.1
87
kernelspec:
98
display_name: Python 3
109
language: python
@@ -16,51 +15,43 @@ kernelspec:
1615
Use `t2smap` {cite:p}`DuPre2021` to combine data.
1716

1817
```{code-cell} ipython3
18+
import json
1919
import os
2020
from glob import glob
2121
2222
import matplotlib.pyplot as plt
23+
import nibabel as nb
2324
import numpy as np
2425
from myst_nb import glue
2526
from nilearn import image, plotting
26-
from repo2data.repo2data import Repo2Data
2727
from tedana import workflows
2828
29-
# Install the data if running locally, or point to cached data if running on neurolibre
30-
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
31-
32-
# Download data
33-
repo2data = Repo2Data(DATA_REQ_FILE)
34-
data_path = repo2data.install()
35-
data_path = os.path.abspath(data_path[0])
29+
data_path = os.path.abspath('../DATA')
3630
```
3731

3832
```{code-cell} ipython3
39-
func_dir = os.path.join(data_path, "func/")
40-
data_files = [
41-
os.path.join(
42-
func_dir,
43-
"sub-04570_task-rest_echo-1_space-scanner_desc-partialPreproc_bold.nii.gz",
44-
),
45-
os.path.join(
46-
func_dir,
47-
"sub-04570_task-rest_echo-2_space-scanner_desc-partialPreproc_bold.nii.gz",
48-
),
49-
os.path.join(
50-
func_dir,
51-
"sub-04570_task-rest_echo-3_space-scanner_desc-partialPreproc_bold.nii.gz",
52-
),
53-
os.path.join(
54-
func_dir,
55-
"sub-04570_task-rest_echo-4_space-scanner_desc-partialPreproc_bold.nii.gz",
33+
func_dir = os.path.join(data_path, "ds006185/sub-24053/ses-1/func/")
34+
data_files = sorted(
35+
glob(
36+
os.path.join(
37+
func_dir,
38+
"sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_echo-*_part-mag_desc-preproc_bold.nii.gz",
39+
),
5640
),
57-
]
58-
echo_times = [12.0, 28.0, 44.0, 60.0]
41+
)
42+
echo_times = []
43+
for f in data_files:
44+
json_file = f.replace('.nii.gz', '.json')
45+
with open(json_file, 'r') as fo:
46+
metadata = json.load(fo)
47+
echo_times.append(metadata['EchoTime'] * 1000)
5948
mask_file = os.path.join(
60-
func_dir, "sub-04570_task-rest_space-scanner_desc-brain_mask.nii.gz"
49+
func_dir,
50+
"sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_part-mag_desc-brain_mask.nii.gz"
6151
)
6252
confounds_file = os.path.join(
63-
func_dir, "sub-04570_task-rest_desc-confounds_timeseries.tsv"
53+
func_dir,
54+
"sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_part-mag_desc-confounds_timeseries.tsv",
6455
)
6556
6657
out_dir = os.path.join(data_path, "t2smap")
@@ -72,8 +63,9 @@ workflows.t2smap_workflow(
7263
echo_times,
7364
out_dir=out_dir,
7465
mask=mask_file,
75-
prefix="sub-04570_task-rest_space-scanner",
76-
fittype="curvefit",
66+
prefix="sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01",
67+
fittype="loglin",
68+
overwrite=True,
7769
)
7870
```
7971

@@ -86,7 +78,7 @@ print("\n".join(out_files))
8678
```{code-cell} ipython3
8779
fig, ax = plt.subplots(figsize=(16, 8))
8880
plotting.plot_stat_map(
89-
os.path.join(out_dir, "sub-04570_task-rest_space-scanner_T2starmap.nii.gz"),
81+
os.path.join(out_dir, "sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_T2starmap.nii.gz"),
9082
vmax=0.6,
9183
draw_cross=False,
9284
bg_img=None,
@@ -106,7 +98,7 @@ T2* map estimated from multi-echo data using tedana's {py:func}`~tedana.workflow
10698
```{code-cell} ipython3
10799
fig, ax = plt.subplots(figsize=(16, 8))
108100
plotting.plot_stat_map(
109-
os.path.join(out_dir, "sub-04570_task-rest_space-scanner_S0map.nii.gz"),
101+
os.path.join(out_dir, "sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_S0map.nii.gz"),
110102
vmax=8000,
111103
draw_cross=False,
112104
bg_img=None,
@@ -164,7 +156,7 @@ plotting.plot_epi(
164156
plotting.plot_epi(
165157
image.mean_img(
166158
os.path.join(
167-
out_dir, "sub-04570_task-rest_space-scanner_desc-optcom_bold.nii.gz"
159+
out_dir, "sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_desc-optcom_bold.nii.gz"
168160
)
169161
),
170162
draw_cross=False,
@@ -193,7 +185,7 @@ te30_tsnr = image.math_img(
193185
oc_tsnr = image.math_img(
194186
"(np.nanmean(img, axis=3) / np.nanstd(img, axis=3)) * mask",
195187
img=os.path.join(
196-
out_dir, "sub-04570_task-rest_space-scanner_desc-optcom_bold.nii.gz"
188+
out_dir, "sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_desc-optcom_bold.nii.gz"
197189
),
198190
mask=mask_file,
199191
)
@@ -254,7 +246,7 @@ Carpet plot of the second echo's data.
254246
```{code-cell} ipython3
255247
fig, ax = plt.subplots(figsize=(16, 8))
256248
plotting.plot_carpet(
257-
os.path.join(out_dir, "sub-04570_task-rest_space-scanner_desc-optcom_bold.nii.gz"),
249+
os.path.join(out_dir, "sub-24053_ses-1_task-rat_rec-nordic_dir-PA_run-01_desc-optcom_bold.nii.gz"),
258250
axes=ax,
259251
)
260252
glue("figure_optcom_carpet", fig, display=False)

0 commit comments

Comments (0)