|
20 | 20 |
|
21 | 21 | 5. Repeat the process for the ``fsaverage`` template coordinate frame. |
22 | 22 |
|
| 23 | +6. Repeat the process for one of the other standard template coordinate frames |
| 24 | + allowed by BIDS. |
| 25 | +
|
23 | 26 | The iEEG data will be written by :func:`write_raw_bids` with |
24 | 27 | the addition of extra metadata elements in the following files: |
25 | 28 |
|
|
50 | 53 | # %% |
51 | 54 |
|
52 | 55 | import os.path as op |
| 56 | +import numpy as np |
53 | 57 | import shutil |
54 | 58 |
|
55 | 59 | from nilearn.plotting import plot_anat |
|
331 | 335 | # ``montage.add_estimated_fiducials(template, os.environ['FREESURFER_HOME'])``
332 | 336 | # where ``template`` may be ``cvs_avg35_inMNI152``, for instance
333 | 337 | raw.set_montage(montage) |
| 338 | + |
| 339 | +# %% |
| 340 | +# Step 6: Store coordinates in another template space accepted by BIDS |
| 341 | +# -------------------------------------------------------------------- |
| 342 | +# As of this writing, BIDS accepts channel coordinates in reference to
| 343 | +# the following template spaces: ``ICBM452AirSpace``, ``ICBM452Warp5Space``,
| 344 | +# ``IXI549Space``, ``fsaverage``, ``fsaverageSym``, ``fsLR``, ``MNIColin27``, |
| 345 | +# ``MNI152Lin``, ``MNI152NLin2009[a-c][Sym|Asym]``, ``MNI152NLin6Sym``, |
| 346 | +# ``MNI152NLin6ASym``, ``MNI305``, ``NIHPD``, ``OASIS30ANTs``,
| 347 | +# ``OASIS30Atropos``, ``Talairach`` and ``UNCInfant``. As discussed above, |
| 348 | +# it is recommended to share the coordinates in the individual subject's |
| 349 | +# anatomical reference frame so that researchers who use the data can |
| 350 | +# transform the coordinates to any of these templates that they choose. If |
| 351 | +# BIDS-formatted data is shared in one of these template coordinate frames, |
| 352 | +# the data can still be analyzed in MNE-Python. However, MNE-Python
| 353 | +# explicitly recognizes only a few coordinate frames (this avoids giving
| 354 | +# the misleading impression that frames rarely used by the MNE community
| 355 | +# are fully maintained when they are not), so you'll need a bit more
| 356 | +# know-how; we will go over the necessary steps below.
| 357 | + |
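|  | +# As a minimal sketch of that limitation (assuming a recent MNE version;
|  | +# the frame names accepted by ``mne.channels.make_dig_montage`` may differ
|  | +# in older releases), montages can only be labeled with a handful of
|  | +# frames, and any other template space has to travel as ``'unknown'``:
|  | +
|  | +pos = dict(LENT1=np.array([0.01, 0.02, 0.03]))  # hypothetical channel
|  | +for frame in ('head', 'mri', 'mni_tal', 'unknown'):
|  | +    sketch_montage = mne.channels.make_dig_montage(
|  | +        ch_pos=pos, coord_frame=frame)
|  | +    print(frame, '->', sketch_montage.get_positions()['coord_frame'])
|  | +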
| 358 | +# ensure the output path doesn't contain any leftover files from previous |
| 359 | +# tests and example runs |
| 360 | +if op.exists(bids_root):
| 361 | +    shutil.rmtree(bids_root)
| 362 | + |
| 363 | + |
| 364 | +# first we'll write our data as if it were in MNI152NLin2009bAsym coordinates
| 365 | +# (you would need to transform your coordinates to this template first)
| 366 | +bids_path.update(datatype='ieeg', space='MNI152NLin2009bAsym') |
| 367 | + |
| 368 | +# load our raw data again |
| 369 | +raw = mne.io.read_raw_fif(op.join(
| 370 | +    misc_path, 'seeg', 'sample_seeg_ieeg.fif'))
| 371 | +raw.info['line_freq'] = 60 # specify power line frequency as required by BIDS |
| 372 | + |
| 373 | +# get the montage as stored in raw |
| 374 | +# MNE stores positions in raw objects in the 'head' frame for consistency
| 375 | +montage = raw.get_montage() |
| 376 | + |
| 377 | +# define a transform from head to MNI152NLin2009bAsym (made up here for
| 378 | +# demonstration); MNE-Python doesn't recognize MNI152NLin2009bAsym, so we
| 379 | +# have to use the 'unknown' coordinate frame
| 380 | +head_template_t = np.array([[1.0, 0.1, 0.2, 10.1],
| 381 | +                            [-0.1, 1.0, 0.1, -20.3],
| 382 | +                            [0.2, -0.1, 1.0, -30.7],
| 383 | +                            [0.0, 0.0, 0.0, 1.0]])
| 384 | +head_template_trans = mne.transforms.Transform(
| 385 | +    fro='head', to='unknown', trans=head_template_t)
| 386 | +montage.apply_trans(head_template_trans) |
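|  | +
|  | +# quick sanity check: the montage should now report the 'unknown'
|  | +# coordinate frame
|  | +print(montage.get_positions()['coord_frame'])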
| 387 | + |
| 388 | +# write to BIDS, with the montage in MNI152NLin2009bAsym coordinates
| 389 | +write_raw_bids(raw, bids_path, anonymize=dict(daysback=40000),
| 390 | +               montage=montage, overwrite=True)
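|  | +
|  | +# take a quick look at what was written (this assumes ``print_dir_tree``
|  | +# was imported from ``mne_bids`` earlier in the tutorial)
|  | +print_dir_tree(bids_root)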
| 391 | + |
| 392 | +# %% |
| 393 | +# Now, let's see how we would work with the data in MNE. As shown below, the |
| 394 | +# montage has the same coordinates as when it was written, but the coordinate |
| 395 | +# frame is unknown. |
| 396 | + |
| 397 | +# read back in the raw data |
| 398 | +raw = read_raw_bids(bids_path=bids_path) |
| 399 | + |
| 400 | +# get the montage |
| 401 | +montage2 = raw.get_montage() |
| 402 | +print('Montage set to: ' + montage2.get_positions()['coord_frame']) |
| 403 | + |
| 404 | +# check that we can recover the coordinates |
| 405 | +print('Recovered coordinate: {recovered}\n'
| 406 | +      'Saved coordinate: {saved}'.format(
| 407 | +          recovered=montage2.dig[0]['r'],
| 408 | +          saved=montage.dig[0]['r']))
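|  | +
|  | +# the same check programmatically (a sketch: the tolerance is an
|  | +# assumption, since coordinates lose a little precision when written
|  | +# to the electrodes TSV file)
|  | +print(np.allclose(montage2.dig[0]['r'], montage.dig[0]['r'], atol=1e-4))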
| 409 | + |
| 410 | +# %% |
| 411 | +# To work with this data in the template coordinate frame, all the same steps |
| 412 | +# can be followed in :ref:`tut-working-with-seeg` and |
| 413 | +# :ref:`tut-working-with-ecog` after the montage is transformed to surface |
| 414 | +# RAS coordinates for the template MRI (if it isn't there already). |
| 415 | +# |
| 416 | +# First we'll need the ``subjects_dir`` where the recon-all for the template |
| 417 | +# brain is stored. |
| 418 | +# |
| 419 | +# .. code-block:: python |
| 420 | +# |
| 421 | +#     subjects_dir = op.join(misc_path, 'subjects')  # for example
| 422 | +#     # add some plotting keyword arguments
| 423 | +#     brain_kwargs = dict(cortex='low_contrast', alpha=0.2, background='white')
| 424 | +# |
| 425 | +# If the montage is already in surface RAS for the template MRI, we can use: |
| 426 | +# |
| 427 | +# .. code-block:: python |
| 428 | +# |
| 429 | +#     # identity transform since 'unknown' is already 'mri' == surface RAS
| 430 | +#     # for the template brain MRI
| 431 | +#     trans = mne.transforms.Transform(
| 432 | +#         fro='unknown',
| 433 | +#         to='mri',
| 434 | +#         trans=np.eye(4)
| 435 | +#     )
| 436 | +#     brain = mne.viz.Brain('sample_seeg', subjects_dir=subjects_dir,
| 437 | +#                           **brain_kwargs)
| 438 | +#     brain.add_sensors(raw.info, trans=trans)
| 439 | +# |
| 440 | +# If the montage was in voxel coordinates, we'll first have to transform to
| 441 | +# surface RAS (mind units: ``nibabel`` affines are in mm, MNE uses meters):
| 442 | +# |
| 443 | +# .. code-block:: python |
| 444 | +# |
| 445 | +#     import nibabel as nib
| 446 | +#     template_T1 = nib.load(op.join(subjects_dir, 'MNI152NLin2009bAsym',
| 447 | +#                                    'mri', 'T1.mgz'))
| 448 | +#     trans = mne.transforms.Transform(  # use vox to surface RAS transform
| 449 | +#         fro='unknown',
| 450 | +#         to='mri',
| 451 | +#         trans=template_T1.header.get_vox2ras_tkr()
| 452 | +#     )
| 453 | +#     brain = mne.viz.Brain(
| 454 | +#         'sample_seeg', subjects_dir=subjects_dir, **brain_kwargs)
| 455 | +#     brain.add_sensors(raw.info, trans=trans)
| 456 | +# |
| 457 | +# |
| 458 | +# Finally, if the montage was in scanner RAS coordinates, we'll have to |
| 459 | +# transform it back to voxels first before going to surface RAS: |
| 460 | +# |
| 461 | +# .. code-block:: python |
| 462 | +# |
| 463 | +#     import nibabel as nib
| 464 | +#     template_T1 = nib.load(op.join(subjects_dir, 'MNI152NLin2009bAsym',
| 465 | +#                                    'mri', 'T1.mgz'))
| 466 | +#     ras_vox_t = template_T1.header.get_ras2vox()
| 467 | +#     vox_mri_t = template_T1.header.get_vox2ras_tkr()
| 468 | +#     ras_mri_t = np.dot(vox_mri_t, ras_vox_t)  # apply ras->vox, then vox->mri
| 469 | +#     trans = mne.transforms.Transform(
| 470 | +#         fro='unknown',
| 471 | +#         to='mri',
| 472 | +#         trans=ras_mri_t
| 473 | +#     )
| 474 | +#     brain = mne.viz.Brain(
| 475 | +#         'sample_seeg', subjects_dir=subjects_dir, **brain_kwargs)
| 476 | +#     brain.add_sensors(raw.info, trans=trans)