Last active
December 18, 2018 17:50
-
-
Save larsoner/43e57b693223583d190f579a3e03d6cb to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# need to be on larsoner:envcorr
import os.path as op

import mne
import numpy as np

from utils import compute_envelope_correllation

# ---------------------------------------------------------------------------
# Dataset selection.
#
# Both branches define the same set of names (data_path, subject,
# subjects_dir, trans, src, bem, erm_fname, raw_fname, decim, rename,
# compensate, reader) so the pipeline below is dataset-agnostic.
# The original gist toggled between them with a fragile triple-quoted-string
# comment trick; an explicit flag is clearer and linter-friendly.
# ---------------------------------------------------------------------------
dataset = 'opm'  # one of: 'opm', 'bst_resting'

if dataset == 'bst_resting':
    data_path = mne.datasets.brainstorm.bst_resting.data_path()
    subjects_dir = op.join(data_path, 'subjects')
    subject = 'bst_resting'
    trans = op.join(data_path, 'MEG', 'bst_resting',
                    'bst_resting-trans.fif')
    src = op.join(subjects_dir, subject, 'bem', subject + '-oct-6-src.fif')
    bem = op.join(subjects_dir, subject, 'bem',
                  subject + '-5120-bem-sol.fif')
    erm_fname = op.join(data_path, 'MEG', 'bst_resting',
                        'subj002_noise_20111104_02.ds')
    raw_fname = op.join(data_path, 'MEG', 'bst_resting',
                        'subj002_spontaneous_20111102_01_AUX.ds')
    decim = 20
    rename = True  # ERM channel names must be renamed to match the raw file
    compensate = False
    reader = mne.io.read_raw_ctf
else:  # OPM sample dataset (SQUID recordings)
    data_path = mne.datasets.opm.data_path()
    subject = 'OPM_sample'
    subjects_dir = op.join(data_path, 'subjects')
    bem_dir = op.join(subjects_dir, subject, 'bem')
    bem = op.join(bem_dir, subject + '-5120-5120-5120-bem-sol.fif')
    src = op.join(bem_dir, '%s-oct6-src.fif' % subject)
    raw_fname = op.join(data_path, 'MEG', 'SQUID', 'SQUID_resting_state.fif')
    erm_fname = op.join(data_path, 'MEG', 'SQUID', 'SQUID_empty_room.fif')
    trans = op.join(data_path, 'MEG', 'SQUID', 'SQUID-trans.fif')
    decim = 5
    rename = compensate = False
    reader = mne.io.read_raw_fif
# --- Noise covariance from empty-room (ERM) data ---------------------------
print('Computing covariance from broadband empty room data')
erm = reader(erm_fname, verbose='error')
if compensate:
    # CTF data: apply 3rd-order gradient compensation before filtering
    erm.apply_gradient_compensation(3)
# Keep only the first 60 s and low-pass at 40 Hz.
# NOTE(review): n_jobs='cuda' requires CUDA-enabled filtering in MNE — confirm
# the environment supports it, otherwise use an integer job count.
erm.crop(0, 60).load_data().filter(None, 40, n_jobs='cuda')
if rename:
    erm.rename_channels(lambda x: x.replace('4408', '4407'))  # to match raw
cov = mne.compute_raw_covariance(erm)
del erm  # free memory; only the covariance is needed downstream
# --- Load resting-state run, band-pass, Hilbert, epoch ---------------------
# Just use the first run for speed
print('Loading, filtering, epoching')
raw = reader(raw_fname, verbose='error')
if compensate:
    # CTF data: apply 3rd-order gradient compensation (same as for the ERM)
    raw.apply_gradient_compensation(3)
# First 60 s only, MEG channels only
raw.crop(0, 60).load_data().pick_types(meg=True, eeg=False)
# Beta band (14-30 Hz), then the analytic signal via the Hilbert transform;
# envelope=False keeps the complex-valued signal for later orthogonalization
raw.filter(14, 30, n_jobs='cuda')
raw.apply_hilbert(envelope=False)
# Fixed-length, non-overlapping 5 s epochs
duration, overlap = 5., 0.
events = mne.make_fixed_length_events(raw, duration=duration, overlap=overlap)
epochs = mne.Epochs(raw, events=events, tmin=0, tmax=duration,
                    baseline=None, reject=None, decim=decim, preload=True)
# --- Forward model, inverse operator, and cortical parcellation ------------
print('Computing forward and inverse')
fwd = mne.make_forward_solution(raw.info, trans, src, bem)
inv = mne.minimum_norm.make_inverse_operator(epochs.info, fwd, cov)
del src, bem  # path strings no longer needed
print('Loading labels')
# Morph the fsaverage 'aparc_sub' parcellation onto this subject.
# NOTE(review): no subjects_dir is passed here, so this presumably relies on
# the SUBJECTS_DIR environment variable — confirm it points at the right place.
labels = mne.morph_labels(
    mne.read_labels_from_annot('fsaverage', 'aparc_sub'), subject)
# --- Source estimates, envelope correlation, degree map, visualization -----
print('Compute activity in labels and envelope correlation')
lambda2 = 1. / 9.  # standard regularization, corresponds to SNR = 3
stcs = mne.minimum_norm.apply_inverse_epochs(
    epochs, inv, lambda2, pick_ori='normal')
# Generator of per-epoch (n_labels, n_times) label time courses
label_ts = mne.extract_label_time_course(
    stcs, labels, inv['src'], mode='pca_flip', return_generator=True)
n_labels = len(labels)
# BUG FIX: the original used dtype=np.float, an alias deprecated in
# NumPy 1.20 and removed in 1.24; the builtin float is the equivalent
# (float64) and works on all NumPy versions.
orth_corrs = np.empty((len(epochs), n_labels, n_labels), dtype=float)
for ii, label_t in enumerate(label_ts):
    corr = compute_envelope_correllation(label_t)
    orth_corrs[ii] = corr
corr_ave = np.median(orth_corrs, axis=0)  # across epochs
degrees = np.sum(corr_ave > 0.15, axis=0)  # crude approx of degree
print('Transforming from labels back to STC to plot')
stc = mne.labels_to_stc(labels, degrees)
brain = stc.plot(
    clim=dict(kind='percent', lims=[0, 50, 100]),
    subjects_dir=subjects_dir, views=['lat', 'med'], hemi='split',
    smoothing_steps=5)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment