# -*- coding: utf-8 -*-
"""
PySurfer-like plotting using VisPy scene.
"""
import numpy as np
from vispy import scene, app
import mne
import nibabel

app.use_app('pyqt5')
data_path = mne.datasets.sample.data_path()
subjects_dir = data_path + '/subjects'
subj_dir = subjects_dir + '/sample/surf/'
stc = mne.read_source_estimate(data_path + '/MEG/sample/sample_audvis-meg-eeg')
stc.crop(0.09, 0.09)
morph = mne.compute_source_morph(stc, 'sample', 'sample', spacing=None,
                                 smooth=10, subjects_dir=subjects_dir)
stc = morph.apply(stc)
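# The self-morph above ('sample' onto 'sample' with spacing=None, smooth=10)
# spreads the estimate onto every vertex of the original surface, so each
# vertex of the full-resolution mesh loaded below has a value to color.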
azimuths = dict(lh=dict(lat=270, med=90), rh=dict(lat=90, med=270))
hemis = ['lh', 'rh']
views = ['lat']
hemi_data = dict(lh=stc.data[:len(stc.vertices[0]), 0],
                 rh=stc.data[len(stc.vertices[0]):, 0])
canvas = scene.SceneCanvas(keys='interactive', bgcolor='w',
                           config=dict(samples=16), size=(400 * len(hemis),
                                                          400 * len(views)))
grid = canvas.central_widget.add_grid(spacing=0, margin=0)
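# The grid gets one column per hemisphere and one row per view (400 x 400
# pixels per panel); samples=16 requests multisample anti-aliasing so the
# mesh edges render more smoothly.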

def update_light_dir(view):
    # Re-express a fixed camera-space direction in world coordinates so the
    # light stays fixed relative to the viewer as the camera rotates.
    for mesh in view.children[0].children:
        if isinstance(mesh, scene.visuals.Mesh):
            # From upper right
            mesh.light_dir = view.camera.transform.map((0, 1, 0))[:3]
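
# For each hemisphere, load the FreeSurfer inflated surface and curvature,
# build an RGBA overlay for the auditory label, and add one view per
# requested camera angle (one grid column per hemisphere, one row per view).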
for hi, hemi in enumerate(hemis):
    data = hemi_data[hemi]
    surf = subj_dir + '%s.inflated' % hemi
    curv = subj_dir + '%s.curv' % hemi
    rr, tris = nibabel.freesurfer.read_geometry(surf)
    tris = tris.astype(np.uint32)
    assert len(data) == len(rr)
    curv = nibabel.freesurfer.read_morph_data(curv)
    # Binarize the curvature into two gray levels (~0.33 and ~0.67) to use
    # as the background surface coloring.
    curv = (curv > 0).astype(float)
    curv = (curv - 0.5) / 3 + 0.5
    curv = curv[:, np.newaxis] * [1, 1, 1]
    label = mne.read_label(
        data_path + '/MEG/sample/labels/Aud-%s.label' % hemi)
    label = np.where(np.in1d(np.arange(len(rr)), label.vertices), 1., 0.)
    label = np.tile(label[:, np.newaxis], (1, 4))
    label[..., [0, 2]] = 0.  # green
    label[..., -1] *= 0.25  # alpha
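    # Three meshes are layered in each view: an opaque curvature underlay,
    # the activation data mapped through a colormap, and a translucent green
    # label overlay. Increasingly negative polygon offsets pull each
    # successive layer slightly toward the camera so the coincident surfaces
    # do not z-fight.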
    for vi, view in enumerate(views):
        v = grid.add_view(row=vi, col=hi)
        for vc in (curv, data, label):
            if vc is curv:
                mesh = scene.visuals.Mesh(vertices=rr, faces=tris,
                                          vertex_colors=vc, shading='smooth')
                mesh.set_gl_state(
                    depth_test=True, cull_face='back', blend=False,
                    polygon_offset=(0., 0.), polygon_offset_fill=True)
            elif vc is data:
                mesh = scene.visuals.Mesh(vertices=rr, faces=tris,
                                          vertex_values=vc, shading='smooth')
                mesh.cmap = 'RdYeBuCy'
                mesh.clim = [-20, 20]
                mesh.set_gl_state(
                    depth_test=True, cull_face='back', blend=True,
                    blend_func=('src_alpha', 'one_minus_src_alpha'),
                    polygon_offset=(-1., -1.), polygon_offset_fill=True)
            else:
                assert vc is label
                mesh = scene.visuals.Mesh(vertices=rr, faces=tris,
                                          vertex_colors=vc, shading='smooth')
                mesh.set_gl_state(
                    depth_test=True, cull_face='back', blend=True,
                    blend_func=('src_alpha', 'one_minus_src_alpha'),
                    polygon_offset=(-2., -2.), polygon_offset_fill=True)
            mesh.shininess = 0.
            mesh.ambient_light_color = (0.5, 0.5, 0.5)
            v.add(mesh)
        # Orthographic camera (fov=0) so both hemispheres render at the same
        # scale; scale_factor sets the zoom level.
        v.camera = scene.TurntableCamera(
            up='+z', elevation=0, azimuth=azimuths[hemi][view], fov=0)
        v.camera.scale_factor = 300
        update_light_dir(v)

# XXX Eventually this should be something that we can set in the visual,
# i.e. to use camera coords rather than world coords
@canvas.events.mouse_move.connect
def on_mouse_move(event):
    if not event.is_dragging:
        return
    for view in grid.children:
        update_light_dir(view)


canvas.show()
canvas.app.run()
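
# To grab a static image instead of running the interactive event loop, one
# option (an untested sketch, assuming imageio is installed) would be to
# replace the two lines above with something like:
#     img = canvas.render()
#     import imageio
#     imageio.imwrite('brain.png', img)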