Little tour of MNE features with the ipyvtk backend
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "import mne\n",
    "mne.viz.set_3d_backend('notebook')  # set the 3d backend"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Visualize STC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from mne.datasets import sample\n",
    "data_path = sample.data_path()\n",
    "sample_dir = os.path.join(data_path, 'MEG', 'sample')\n",
    "subjects_dir = os.path.join(data_path, 'subjects')\n",
    "fname_stc = os.path.join(sample_dir, 'sample_audvis-meg')\n",
    "stc = mne.read_source_estimate(fname_stc, subject='sample')\n",
    "initial_time = 0.13\n",
    "brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,\n",
    "                 clim=dict(kind='value', lims=[3, 6, 9]),\n",
    "                 size=600, background=\"white\", hemi='lh')"
   ]
  },
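  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The call above should return an `mne.viz.Brain` instance. As a minimal sketch (assuming the notebook backend supports the `Brain.show_view` and `Brain.save_image` methods), the camera can be repositioned and a screenshot written to disk; the output filename below is only an example."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# reposition the camera and save a screenshot of the current scene\n",
    "brain.show_view('lateral')\n",
    "brain.save_image('stc_lh_screenshot.png')  # example output filename"
   ]
  },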
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Plotting the full vector-valued MNE solution"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os.path as op\n",
    "import mne\n",
    "from mne.minimum_norm import read_inverse_operator, apply_inverse\n",
    "\n",
    "mne.viz.set_3d_backend(\"notebook\")\n",
    "\n",
    "# Set dir\n",
    "data_path = mne.datasets.sample.data_path()\n",
    "subject = 'sample'\n",
    "data_dir = op.join(data_path, 'MEG', subject)\n",
    "subjects_dir = op.join(data_path, 'subjects')\n",
    "fname_evoked = data_dir + '/sample_audvis-ave.fif'\n",
    "condition = 'Left Auditory'\n",
    "evoked = mne.read_evokeds(fname_evoked, condition=condition,\n",
    "                          baseline=(None, 0))\n",
    "inv = read_inverse_operator(\n",
    "    data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif')\n",
    "stc_vec = apply_inverse(evoked, inv, pick_ori='vector')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "brain = stc_vec.plot(hemi='both', views=['lat', 'med'], size=600,\n",
    "                     background='white', initial_time=0.1,\n",
    "                     subjects_dir=subjects_dir)"
   ]
  },
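  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "If only the strength of the vector solution is of interest, the sketch below (assuming `VectorSourceEstimate.magnitude()` is available, as in recent MNE releases) collapses the three orientations into a scalar estimate and plots it with the same backend; the variable names are arbitrary."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# collapse the vector solution to a per-source magnitude and plot it\n",
    "stc_mag = stc_vec.magnitude()\n",
    "brain_mag = stc_mag.plot(hemi='both', size=600, background='white',\n",
    "                         initial_time=0.1, subjects_dir=subjects_dir)"
   ]
  },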
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Source alignment and coordinate frames"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os.path as op\n",
    "\n",
    "import numpy as np\n",
    "import nibabel as nib\n",
    "from scipy import linalg\n",
    "\n",
    "import mne\n",
    "from mne.io.constants import FIFF\n",
    "\n",
    "data_path = mne.datasets.sample.data_path()\n",
    "subjects_dir = op.join(data_path, 'subjects')\n",
    "raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')\n",
    "trans_fname = op.join(data_path, 'MEG', 'sample',\n",
    "                      'sample_audvis_raw-trans.fif')\n",
    "raw = mne.io.read_raw_fif(raw_fname)\n",
    "trans = mne.read_trans(trans_fname)\n",
    "src = mne.read_source_spaces(op.join(subjects_dir, 'sample', 'bem',\n",
    "                                     'sample-oct-6-src.fif'))\n",
    "\n",
    "# load the T1 file and change the header information to the correct units\n",
    "t1w = nib.load(op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz'))\n",
    "t1w = nib.Nifti1Image(t1w.dataobj, t1w.affine)\n",
    "t1w.header['xyzt_units'] = np.array(10, dtype='uint8')\n",
    "t1_mgh = nib.MGHImage(t1w.dataobj, t1w.affine)\n",
    "\n",
    "fig = mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',\n",
    "                             subjects_dir=subjects_dir, surfaces='head-dense',\n",
    "                             show_axes=True, dig=True, eeg=[], meg='sensors',\n",
    "                             coord_frame='meg')\n",
    "mne.viz.set_3d_view(fig, 45, 90, distance=0.6, focalpoint=(0., 0., 0.))\n",
    "print('Distance from head origin to MEG origin: %0.1f mm'\n",
    "      % (1000 * np.linalg.norm(raw.info['dev_head_t']['trans'][:3, 3])))\n",
    "print('Distance from head origin to MRI origin: %0.1f mm'\n",
    "      % (1000 * np.linalg.norm(trans['trans'][:3, 3])))\n",
    "dists = mne.dig_mri_distances(raw.info, trans, 'sample',\n",
    "                              subjects_dir=subjects_dir)\n",
    "print('Distance from %s digitized points to head surface: %0.1f mm'\n",
    "      % (len(dists), 1000 * np.mean(dists)))"
   ]
  },
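  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "For comparison of the axis triads, the same scene can be rendered in the head coordinate frame. The sketch below simply repeats the call above with `coord_frame='head'` and reuses the view parameters unchanged; the variable name `fig_head` is arbitrary."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# render the same alignment in the head coordinate frame for comparison\n",
    "fig_head = mne.viz.plot_alignment(raw.info, trans=trans, subject='sample',\n",
    "                                  subjects_dir=subjects_dir,\n",
    "                                  surfaces='head-dense', show_axes=True,\n",
    "                                  dig=True, eeg=[], meg='sensors',\n",
    "                                  coord_frame='head')\n",
    "mne.viz.set_3d_view(fig_head, 45, 90, distance=0.6,\n",
    "                    focalpoint=(0., 0., 0.))"
   ]
  },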
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cross-hemisphere comparison"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "data_dir = mne.datasets.sample.data_path()\n",
    "subjects_dir = data_dir + '/subjects'\n",
    "stc_path = data_dir + '/MEG/sample/sample_audvis-meg-eeg'\n",
    "stc = mne.read_source_estimate(stc_path, 'sample')\n",
    "\n",
    "# First, morph the data to fsaverage_sym, for which we have left-right\n",
    "# registrations:\n",
    "stc = mne.compute_source_morph(stc, 'sample', 'fsaverage_sym', smooth=5,\n",
    "                               warn=False,\n",
    "                               subjects_dir=subjects_dir).apply(stc)\n",
    "\n",
    "# Compute a morph matrix mapping the right to the left hemisphere,\n",
    "# and vice versa.\n",
    "morph = mne.compute_source_morph(stc, 'fsaverage_sym', 'fsaverage_sym',\n",
    "                                 spacing=stc.vertices, warn=False,\n",
    "                                 subjects_dir=subjects_dir, xhemi=True,\n",
    "                                 verbose='error')  # creating morph map\n",
    "stc_xhemi = morph.apply(stc)\n",
    "\n",
    "# Now we can subtract them and plot the result:\n",
    "diff = stc - stc_xhemi\n",
    "\n",
    "diff.plot(hemi='lh', subjects_dir=subjects_dir, initial_time=0.07,\n",
    "          size=(800, 600))"
   ]
  },
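  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To quantify where the left/right asymmetry is largest, the short sketch below (assuming `SourceEstimate.get_peak` with these arguments, as in current MNE) reports the vertex and latency of the peak absolute difference on the left hemisphere."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# vertex and latency of the largest absolute left/right difference\n",
    "peak_vertex, peak_time = diff.get_peak(hemi='lh')\n",
    "print('Peak difference at vertex %s, t = %0.3f s' % (peak_vertex, peak_time))"
   ]
  },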
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## View location of sensors over brain surface"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from itertools import compress\n",
    "\n",
    "fnirs_data_folder = mne.datasets.fnirs_motor.data_path()\n",
    "fnirs_cw_amplitude_dir = os.path.join(fnirs_data_folder, 'Participant-1')\n",
    "raw_intensity = mne.io.read_raw_nirx(fnirs_cw_amplitude_dir, verbose=True)\n",
    "raw_intensity.load_data()\n",
    "subjects_dir = mne.datasets.sample.data_path() + '/subjects'\n",
    "\n",
    "fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white')\n",
    "fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True,\n",
    "                             subject='fsaverage', coord_frame='mri',\n",
    "                             trans='fsaverage', surfaces=['brain'],\n",
    "                             fnirs=['channels', 'pairs',\n",
    "                                    'sources', 'detectors'],\n",
    "                             subjects_dir=subjects_dir, fig=fig)\n",
    "mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=60, distance=0.4,\n",
    "                    focalpoint=(0., -0.01, 0.02))\n"
   ]
  },
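  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before relating optode locations to responses, the raw intensities are usually converted to optical density. The sketch below assumes `mne.preprocessing.nirs.optical_density` (present in recent MNE releases) and only prints the resulting channel count."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# convert the continuous-wave intensity recordings to optical density\n",
    "raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)\n",
    "print('Converted %s fNIRS channels to optical density' % len(raw_od.ch_names))"
   ]
  },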
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Working with sEEG data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os.path as op\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from mne.datasets import fetch_fsaverage\n",
    "\n",
    "# paths to mne datasets - sample sEEG and FreeSurfer's fsaverage subject,\n",
    "# which is in MNI space\n",
    "misc_path = mne.datasets.misc.data_path()\n",
    "sample_path = mne.datasets.sample.data_path()\n",
    "subject = 'fsaverage'\n",
    "subjects_dir = sample_path + '/subjects'\n",
    "\n",
    "# use mne-python's fsaverage data\n",
    "fetch_fsaverage(subjects_dir=subjects_dir, verbose=True)  # downloads if needed\n",
    "\n",
    "# in mne-python, the electrode coordinates are required to be in meters\n",
    "elec_df = pd.read_csv(misc_path + '/seeg/sample_seeg_electrodes.tsv',\n",
    "                      sep='\\t', header=0, index_col=None)\n",
    "ch_names = elec_df['name'].tolist()\n",
    "ch_coords = elec_df[['x', 'y', 'z']].to_numpy(dtype=float)\n",
    "\n",
    "# the test channel coordinates were in mm, so we convert them to meters\n",
    "ch_coords = ch_coords / 1000.\n",
    "\n",
    "# create dictionary of channels and their xyz coordinates (now in MNI space)\n",
    "ch_pos = dict(zip(ch_names, ch_coords))\n",
    "\n",
    "# Ideally the nasion/LPA/RPA will also be present from the digitization; here\n",
    "# we use fiducials estimated from the subject's FreeSurfer MNI transformation:\n",
    "lpa, nasion, rpa = mne.coreg.get_mni_fiducials(\n",
    "    subject, subjects_dir=subjects_dir)\n",
    "lpa, nasion, rpa = lpa['r'], nasion['r'], rpa['r']\n",
    "\n",
    "montage = mne.channels.make_dig_montage(\n",
    "    ch_pos, coord_frame='mri', nasion=nasion, lpa=lpa, rpa=rpa)\n",
    "print('Created %s channel positions' % len(ch_names))\n",
    "\n",
    "trans = mne.channels.compute_native_head_t(montage)\n",
    "print(trans)\n",
    "\n",
    "# first we'll load in the sample dataset\n",
    "raw = mne.io.read_raw_edf(misc_path + '/seeg/sample_seeg.edf')\n",
    "\n",
    "# drop bad channels\n",
    "raw.info['bads'].extend([ch for ch in raw.ch_names if ch not in ch_names])\n",
    "raw.load_data()\n",
    "raw.drop_channels(raw.info['bads'])\n",
    "raw.crop(0, 2)  # just process 2 sec of data for speed\n",
    "\n",
    "# attach montage\n",
    "raw.set_montage(montage)\n",
    "\n",
    "# set channels that have actual positions to sEEG (instead of EEG)\n",
    "raw.set_channel_types(\n",
    "    {ch_name: 'seeg' if np.isfinite(ch_pos[ch_name]).all() else 'misc'\n",
    "     for ch_name in raw.ch_names})\n",
    "\n",
    "fig = mne.viz.plot_alignment(raw.info, trans, 'fsaverage',\n",
    "                             subjects_dir=subjects_dir, show_axes=True,\n",
    "                             surfaces=[\"pial\", \"head\"])"
   ]
  },
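  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The 3D scene returned above can be reoriented with the same helper used earlier; the sketch below is a minimal example whose azimuth, elevation and distance values are chosen arbitrarily to frame the sEEG electrodes."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# reorient the sEEG alignment scene (view angles chosen arbitrarily)\n",
    "mne.viz.set_3d_view(fig, azimuth=180, elevation=90, distance=0.5,\n",
    "                    focalpoint=(0., 0., 0.))"
   ]
  },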
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Coregistration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "mne.viz.set_3d_backend('notebook')  # set the 3d backend\n",
    "\n",
    "import os.path as op\n",
    "from mne.gui._coreg import CoregistrationUI\n",
    "from mne.io import read_info\n",
    "\n",
    "data_path = mne.datasets.sample.data_path()\n",
    "subjects_dir = op.join(data_path, 'subjects')\n",
    "subject = 'sample'\n",
    "fname_raw = op.join(data_path, 'MEG', subject, subject + '_audvis_raw.fif')\n",
    "fname_trans = op.join(data_path, 'MEG', subject,\n",
    "                      subject + '_audvis_raw_auto-trans.fif')\n",
    "src = mne.read_source_spaces(op.join(subjects_dir, subject, 'bem',\n",
    "                                     'sample-oct-6-src.fif'))\n",
    "coreg = CoregistrationUI(fname_raw, subject=subject, subjects_dir=subjects_dir)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (mne-py38)",
   "language": "python",
   "name": "mne-py38"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
} |