Diffusion processing with bedpostx_gpu, tracula and dipy
Below are two files: a Nipype workflow script that drives trac-all, bedpostx_gpu, and dipy reconstruction on the TEXASTBI data, and the tracula_config template (a FreeSurfer dmrirc file) that the script fills in for each subject.
import os
from glob import glob

from nipype import config
config.enable_provenance()

from nipype import Node, Function, Workflow, IdentityInterface
from nipype.interfaces.io import SelectFiles, DataSink

data_dir = '/om/user/satra/projects/TEXASTBI/data/'
tracula_dir = '/om/user/satra/projects/TEXASTBI/tracula'

sids = [val.split('/')[-3] for val in glob(data_dir + 'S*/dmri/*AP*.nii.gz')]
if not os.path.exists(tracula_dir):
    os.mkdir(tracula_dir)
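
# Stage 1: write a subject-specific TRACULA config from the tracula_config
# template below, then run preprocessing (trac-all -prep).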
def run_prep(sid, template, data_dir):
    from glob import glob
    import os
    nifti = os.path.abspath(glob(os.path.join(data_dir, '%s/dmri/*AP*.nii.gz' % sid))[0])
    bvec = os.path.abspath(glob(os.path.join(data_dir, '%s/dmri/*.bvecs' % sid))[0])
    bval = os.path.abspath(glob(os.path.join(data_dir, '%s/dmri/*.bvals' % sid))[0])
    from string import Template
    with open(template, 'rt') as fp:
        tpl = Template(fp.read())
    out = tpl.substitute(subjects=sid, bvec=bvec, bval=bval, niftis=nifti)
    config_file = os.path.join(os.getcwd(), 'config_%s' % sid)
    with open(config_file, 'wt') as fp:
        fp.write(out)
    from nipype import config
    config.enable_provenance()
    from nipype.interfaces.base import CommandLine
    from nipype.pipeline.engine import Node
    node = Node(CommandLine('trac-all -prep -c %s' % config_file,
                            terminal_output='allatonce'),
                name='trac-prep-%s' % sid)
    node.base_dir = os.getcwd()
    node.run()
    return sid, config_file
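
# Stage 2: fit FSL's ball-and-stick model on the GPU. bedpostx_gpu is run from
# the subject's TRACULA directory, on its dmri/ subdirectory.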
def run_bedpost(sid, tracula_dir):
    import os
    from nipype import config
    config.enable_provenance()
    from nipype.interfaces.base import CommandLine
    pwd = os.getcwd()
    os.chdir(os.path.join(tracula_dir, sid))
    bedpost = CommandLine('bedpostx_gpu dmri', terminal_output='allatonce')
    bedpost.run()
    os.chdir(pwd)
    return sid
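
# Stage 3: reconstruct the atlas-defined pathways (trac-all -path) from the
# bedpostx output.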
def run_path(sid, config_file):
    import os
    from nipype import config
    config.enable_provenance()
    from nipype.interfaces.base import CommandLine
    from nipype.pipeline.engine import Node
    node = Node(CommandLine('trac-all -path -c %s' % config_file,
                            terminal_output='allatonce'),
                name='trac-path-%s' % sid)
    node.base_dir = os.getcwd()
    node.run()
    return sid
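
# Stage 4: dipy reconstruction on TRACULA's eddy-corrected DWI: peak extraction
# with CSD or CSA, a single-tensor fit for FA, and deterministic EuDX tracking.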
def dmri_recon(sid, tracula_dir, recon='csd', num_threads=1):
    import os
    oldval = None
    if 'MKL_NUM_THREADS' in os.environ:
        oldval = os.environ['MKL_NUM_THREADS']
    os.environ['MKL_NUM_THREADS'] = '%d' % num_threads
    ompoldval = None
    if 'OMP_NUM_THREADS' in os.environ:
        ompoldval = os.environ['OMP_NUM_THREADS']
    os.environ['OMP_NUM_THREADS'] = '%d' % num_threads
    import nibabel as nib
    import numpy as np
    from glob import glob

    fimg = os.path.abspath(glob(os.path.join(tracula_dir, '%s/dmri/dwi.nii.gz' % sid))[0])
    fbvec = os.path.abspath(glob(os.path.join(tracula_dir, '%s/dmri/bvecs' % sid))[0])
    fbval = os.path.abspath(glob(os.path.join(tracula_dir, '%s/dmri/bvals' % sid))[0])
    img = nib.load(fimg)
    data = img.get_data()
    prefix = sid

    from dipy.io import read_bvals_bvecs
    from dipy.core.gradients import vector_norm
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
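
    # Despite its name, b0idx collects the indices of the diffusion-weighted
    # (non-b0) volumes; only those gradient vectors are rescaled to unit norm.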
    b0idx = []
    for idx, val in enumerate(bvals):
        if val < 1:
            pass
            #bvecs[idx] = [1, 0, 0]
        else:
            b0idx.append(idx)
    bvecs[b0idx, :] = bvecs[b0idx, :] / vector_norm(bvecs[b0idx])[:, None]
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)

    from dipy.reconst.csdeconv import auto_response
    response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)

    #from dipy.segment.mask import median_otsu
    #b0_mask, mask = median_otsu(data[:, :, :, b0idx].mean(axis=3).squeeze(), 4, 4)
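
    # Reconstruction mask: TRACULA's bbregister-aligned aparc+aseg brain mask
    # with the ventricles excluded.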
    fmask1 = os.path.abspath(glob(os.path.join(tracula_dir,
                                               '%s/dlabel/diff/aparc+aseg_mask.bbr.nii.gz' % sid))[0])
    fmask2 = os.path.abspath(glob(os.path.join(tracula_dir,
                                               '%s/dlabel/diff/notventricles.bbr.nii.gz' % sid))[0])
    mask = (nib.load(fmask1).get_data() > 0.5) * nib.load(fmask2).get_data()
    useFA = True
    if recon == 'csd':
        from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
        model = ConstrainedSphericalDeconvModel(gtab, response)
        useFA = True
    elif recon == 'csa':
        from dipy.reconst.shm import CsaOdfModel, normalize_data
        model = CsaOdfModel(gtab, 4)
        useFA = False
    else:
        raise ValueError('only csd, csa supported currently')
        # Unreachable after the raise; kept only as a sketch of a DSI variant.
        from dipy.reconst.dsi import (DiffusionSpectrumDeconvModel,
                                      DiffusionSpectrumModel)
        model = DiffusionSpectrumDeconvModel(gtab)
    #fit = model.fit(data)
    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    #odfs = fit.odf(sphere)

    from dipy.reconst.peaks import peaks_from_model
    peaks = peaks_from_model(model=model,
                             data=data,
                             sphere=sphere,
                             mask=mask,
                             return_sh=True,
                             return_odf=False,
                             normalize_peaks=True,
                             npeaks=5,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             parallel=num_threads > 1,
                             nbr_processes=num_threads)
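
    # Single-tensor fit over the same mask; its FA map is saved and, for CSD,
    # also serves as the stopping scalar for tracking below.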
    from dipy.reconst.dti import TensorModel
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    from dipy.reconst.dti import fractional_anisotropy
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    tensor_fa_file = os.path.abspath('%s_tensor_fa.nii.gz' % prefix)
    nib.save(fa_img, tensor_fa_file)

    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    tensor_evec_file = os.path.abspath('%s_tensor_evec.nii.gz' % prefix)
    nib.save(evec_img, tensor_evec_file)

    #from dipy.reconst.dti import quantize_evecs
    #peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)
    #eu = EuDX(FA, peak_indices, odf_vertices=sphere.vertices, a_low=0.2, seeds=10**6, ang_thr=35)

    fa_img = nib.Nifti1Image(peaks.gfa, img.get_affine())
    model_gfa_file = os.path.abspath('%s_%s_gfa.nii.gz' % (prefix, recon))
    nib.save(fa_img, model_gfa_file)
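
    # Deterministic EuDX tracking from 10**6 seeds; streamlines stop where the
    # scalar volume (FA for CSD, GFA for CSA) falls below a_low.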
    from dipy.tracking.eudx import EuDX
    if useFA:
        eu = EuDX(FA, peaks.peak_indices[..., 0], odf_vertices=sphere.vertices,
                  a_low=0.1, seeds=10**6, ang_thr=35)
    else:
        eu = EuDX(peaks.gfa, peaks.peak_indices[..., 0], odf_vertices=sphere.vertices,
                  a_low=0.1, seeds=10**6, ang_thr=35)

    #import dipy.tracking.metrics as dmetrics
    streamlines = ((sl, None, None) for sl in eu)  # if dmetrics.length(sl) > 15

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = fa_img.get_header().get_zooms()[:3]
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = FA.shape[:3]
    sl_fname = os.path.abspath('%s_%s_streamline.trk' % (prefix, recon))
    nib.trackvis.write(sl_fname, streamlines, hdr, points_space='voxel')
    # Restore the thread-count environment variables overridden above.
    if oldval is not None:
        os.environ['MKL_NUM_THREADS'] = oldval
    else:
        del os.environ['MKL_NUM_THREADS']
    if ompoldval is not None:
        os.environ['OMP_NUM_THREADS'] = ompoldval
    else:
        del os.environ['OMP_NUM_THREADS']
    return tensor_fa_file, tensor_evec_file, model_gfa_file, sl_fname
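
# Wire the stages into a Nipype workflow: iterate over subjects, run
# prep -> bedpostx -> path, run the dipy reconstruction off the prep outputs,
# and sink the results under each subject's directory in tracula_dir.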
infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', sids)

node1 = Node(Function(input_names=['sid', 'template', 'data_dir'],
                      output_names=['sid', 'config_file'], function=run_prep),
             name='trac-prep')
node1.inputs.template = os.path.abspath('tracula_config')
node1.inputs.data_dir = data_dir

node2 = Node(Function(input_names=['sid', 'tracula_dir'],
                      output_names=['sid'], function=run_bedpost),
             name='trac-bedp')
node2.inputs.tracula_dir = tracula_dir
node2.plugin_args = {'sbatch_args': '--gres=gpu:1'}

node3 = Node(Function(input_names=['sid', 'config_file'],
                      output_names=['sid'], function=run_path),
             name='trac-path')

tracker = Node(Function(input_names=['sid', 'tracula_dir', 'recon', 'num_threads'],
                        output_names=['tensor_fa_file', 'tensor_evec_file',
                                      'model_gfa_file', 'model_track_file'],
                        function=dmri_recon), name='tracker')
tracker.inputs.recon = 'csd'
tracker.inputs.tracula_dir = tracula_dir
num_threads = 20
tracker.inputs.num_threads = num_threads
tracker.plugin_args = {'sbatch_args': '-p om_interactive -t 01:00:00 --mem=%dG -N 1 -c %d' % (3 * num_threads,
                                                                                              num_threads),
                       'overwrite': True}

ds = Node(DataSink(parameterization=False), name='sinker')
ds.inputs.base_directory = tracula_dir
ds.plugin_args = {'sbatch_args': '-p om_interactive -t 00:10:00 -N 1 -c 1',
                  'overwrite': True}

wf = Workflow(name='texas-tracula')
wf.connect(infosource, 'subject_id', node1, 'sid')
wf.connect(node1, 'sid', node2, 'sid')
wf.connect(node1, 'sid', tracker, 'sid')
wf.connect(node2, 'sid', node3, 'sid')
wf.connect(node1, 'config_file', node3, 'config_file')
wf.connect(node1, 'sid', ds, 'container')
wf.connect(tracker, 'tensor_fa_file', ds, 'recon.@fa')
wf.connect(tracker, 'tensor_evec_file', ds, 'recon.@evec')
wf.connect(tracker, 'model_gfa_file', ds, 'recon.@gfa')
wf.connect(tracker, 'model_track_file', ds, 'recon.@track')
wf.base_dir = '/om/scratch/Fri/satra/'
wf.run(plugin='SLURM', plugin_args={'sbatch_args': '-p om_all_nodes -t 2:00:00 --mem=16384 -N1'})
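
# A minimal sketch for running without a SLURM cluster (assumption: a local
# machine with enough cores, plus a CUDA-capable GPU for the bedpostx stage);
# Nipype's MultiProc plugin would replace the SLURM submission above:
#
# wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})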

tracula_config: the dmrirc template that run_prep fills in for each subject via string.Template.

#
# dmrirc.example
#
# This file contains commands that will be run by trac-all before an analysis.
# It is used to set all parameters needed for the analysis.
#
# Remove a parameter from your dmrirc file if you want to use the default value.
# Parameters that don't have default values must be specified.
#
# Any other commands that you might want to run before an analysis can be added
# to this file.
#
# Terms and conditions for use, reproduction, distribution and contribution
# are found in the 'FreeSurfer Software License Agreement' contained
# in the file 'LICENSE' found in the FreeSurfer distribution, and here:
#
# https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurferSoftwareLicense
#
# Reporting: [email protected]
#

# FreeSurfer SUBJECTS_DIR
# T1 images and FreeSurfer segmentations are expected to be found here
#
setenv SUBJECTS_DIR /om/user/satra/projects/TEXASTBI/fsdata

# Output directory where trac-all results will be saved
# Default: Same as SUBJECTS_DIR
#
set dtroot = /om/user/satra/projects/TEXASTBI/tracula

# Subject IDs
#
set subjlist = ($subjects)
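
# Note: $subjects, $niftis, $bvec, and $bval are string.Template placeholders
# filled in by run_prep above; literal dollar signs in this file are escaped
# as $$ so that substitute() leaves them alone.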

# In case you want to analyze only Huey and Louie
# Default: Run analysis on all subjects
#
# set runlist = (1 3)

# Input diffusion DICOMs (file names relative to dcmroot)
# If original DICOMs don't exist, these can be in other image format
# but then the gradient table and b-value table must be specified (see below)
#
#set dcmroot = /path/to/dicoms/of/ducks
set dcmlist = ($niftis)

# Diffusion gradient tables (if there is a different one for each scan)
# Must be specified if inputs are not MGH DICOMs
# The tables must have either three columns, where each row is a gradient vector
# or three rows, where each column is a gradient vector
# There must be as many gradient vectors as volumes in the diffusion data set
# Default: Read from DICOM header
#
#set bveclist = (...)

# Diffusion gradient table (if using the same one for all scans)
# Must be specified if inputs are not MGH DICOMs
# The table must have either three columns, where each row is a gradient vector
# or three rows, where each column is a gradient vector
# There must be as many gradient vectors as volumes in the diffusion data set
# Default: Read from DICOM header
#
set bvecfile = $bvec

# Diffusion b-value table
# Must be specified if inputs are not MGH DICOMs
# There must be as many b-values as volumes in the diffusion data set
# Default: Read from DICOM header
#
set bvalfile = $bval

# Perform registration-based B0-inhomogeneity compensation?
# Default: 0 (no)
#
set dob0 = 0

# Input B0 field map magnitude DICOMs (file names relative to dcmroot)
# Only used if dob0 = 1
# Default: None
#
# set b0mlist = (huey/fmag/XXX-1.dcm dewey/fmag/XXX-1.dcm louie/fmag/XXX-1.dcm)

# Input B0 field map phase DICOMs (file names relative to dcmroot)
# Only used if dob0 = 1
# Default: None
#
# set b0plist = (huey/fphas/XXX-1.dcm dewey/fphas/XXX-1.dcm louie/fphas/XXX-1.dcm)

# Echo spacing for field mapping sequence (from sequence printout)
# Only used if dob0 = 1
# Default: None
#
# set echospacing = 0.7

# Perform registration-based eddy-current compensation?
# Default: 1 (yes)
#
set doeddy = 1

# Rotate diffusion gradient vectors to match eddy-current compensation?
# Only used if doeddy = 1
# Default: 1 (yes)
#
set dorotbvecs = 1

# Fractional intensity threshold for BET mask extraction from low-b images
# This mask is used only if usemaskanat = 0
# Default: 0.3
#
set thrbet = 0.5

# Perform diffusion-to-T1 registration by flirt?
# Default: 0 (no)
#
set doregflt = 0

# Perform diffusion-to-T1 registration by bbregister?
# Default: 1 (yes)
#
set doregbbr = 1

# Perform registration of T1 to MNI template?
# Default: 1 (yes)
#
set doregmni = 1

# MNI template
# Only used if doregmni = 1
# Default: $$FSLDIR/data/standard/MNI152_T1_1mm_brain.nii.gz
#
# set mnitemp = /path/to/mni_template.nii.gz

# Perform registration of T1 to CVS template?
# Default: 0 (no)
#
set doregcvs = 0

# CVS template subject ID
# Only used if doregcvs = 1
# Default: cvs_avg35
#
set cvstemp = donald

# Parent directory of the CVS template subject
# Only used if doregcvs = 1
# Default: $$FREESURFER_HOME/subjects
#
set cvstempdir = /path/to/cvs/atlases/of/ducks

# Use brain mask extracted from T1 image instead of low-b diffusion image?
# Has no effect if there is no T1 data
# Default: 1 (yes)
#
set usemaskanat = 1

# Paths to reconstruct
# Default: All paths in the atlas
#
set pathlist = ( lh.cst_AS rh.cst_AS \
                 lh.unc_AS rh.unc_AS \
                 lh.ilf_AS rh.ilf_AS \
                 fmajor_PP fminor_PP \
                 lh.atr_PP rh.atr_PP \
                 lh.ccg_PP rh.ccg_PP \
                 lh.cab_PP rh.cab_PP \
                 lh.slfp_PP rh.slfp_PP \
                 lh.slft_PP rh.slft_PP )

# Number of path control points
# It can be a single number for all paths or a different number for each of the
# paths specified in pathlist
# Default: 7 for the forceps major, 6 for the corticospinal tract,
# 4 for the angular bundle, and 5 for all other paths
#
set ncpts = (6 6 5 5 5 5 7 5 5 5 5 5 4 4 5 5 5 5)
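# (One value per pathway in pathlist above: 18 paths, 18 control-point counts.)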

# List of training subjects
# This text file lists the locations of training subject directories
# Default: $$FREESURFER_HOME/trctrain/trainlist.txt
#
set trainfile = $$FREESURFER_HOME/trctrain/trainlist.txt

# Number of "sticks" (anisotropic diffusion compartments) in the bedpostx
# ball-and-stick model
# Default: 2
#
set nstick = 2

# Number of MCMC burn-in iterations
# (Path samples drawn initially by MCMC algorithm and discarded)
# Default: 200
#
set nburnin = 200

# Number of MCMC iterations
# (Path samples drawn by MCMC algorithm and used to estimate path distribution)
# Default: 7500
#
set nsample = 7500

# Frequency with which MCMC path samples are retained for path distribution
# Default: 5 (keep every 5th sample)
#
set nkeep = 5

# Reinitialize path reconstruction?
# This is an option of last resort, to be used only if one of the reconstructed
# pathway distributions looks like a single curve. This is a sign that the
# initial guess for the pathway was problematic, perhaps due to poor alignment
# between the individual and the atlas. Setting the reinit parameter to 1 and
# rerunning "trac-all -prior" and "trac-all -path", only for the specific
# subjects and pathways that had this problem, will attempt to reconstruct them
# with a different initial guess.
# Default: 0 (do not reinitialize)
#
set reinit = 0