Created
February 22, 2011 03:14
-
-
Save satra/838152 to your computer and use it in GitHub Desktop.
nipype interfaces used to preprocess the Haxby dataset
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""
Preprocess the Haxby data with SPM+FSL: normalize to MNI.

Pipeline (per subject, see the loop below): download and unpack the
Haxby 2001 dataset (subjects 1-4), normalize the anatomy to the MNI
template with SPM, skull-strip the normalized anatomy with FSL BET,
then run a second normalization on the skull-stripped brain.
"""
import os
import urllib
import tarfile
import gzip
import glob
import shutil
import numpy as np
from nipype.interfaces import spm
from nipype.interfaces import fsl
from nipype.interfaces import matlab # how to run matlab
import nibabel
################################################################################
# Set the way matlab should be called
#matlab.MatlabCommand.set_default_matlab_cmd("/volatile/varoquau/usr/bin/matlab -nodesktop -nosplash")
# NOTE(review): hard-coded, site-specific SPM8 path -- adjust for the local install.
matlab.MatlabCommand.set_default_paths('/software/spm8/')
# Tell fsl to generate all output in uncompressed nifti format, so that
# matlab can read them
fsl.FSLCommand.set_default_output_type('NIFTI')
#os.environ['FSLDIR'] = '/volatile/varoquau/Programs/fsl/'
#os.environ['PATH'] = '%s:%s/bin' % (os.environ['PATH'],
#                                    os.environ['FSLDIR'])
################################################################################
def unzip_nii_gz(dirname):
    """Decompress every ``*.nii.gz`` file in `dirname` and delete the archive.

    SPM cannot read gzip-compressed NIfTI images, so each ``foo.nii.gz``
    is expanded to ``foo.nii`` in place (non-recursive, top level of
    `dirname` only) and the ``.gz`` file is then removed.

    Parameters
    ----------
    dirname : str
        Directory whose top-level ``*.nii.gz`` files are decompressed.
    """
    # Because SPM does not understand .nii.gz
    for filename in glob.glob('%s/*.nii.gz' % dirname):
        # 'with' guarantees both handles are closed even if the
        # decompression fails part-way through.
        with gzip.open(filename, 'rb') as f_in:
            with open(filename[:-3], 'wb') as f_out:  # strip the '.gz'
                shutil.copyfileobj(f_in, f_out)
        os.remove(filename)
# Site-specific scratch directory where the dataset and outputs live.
data_dir = '/software/temp/nipype-tutorial/temp/haxby'
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
# Unzip the templates so that SPM can use them
# (FSL ships them as .nii.gz; SPM needs uncompressed .nii copies).
templates = ['MNI152_T1_1mm.nii', 'MNI152_T1_1mm_brain.nii']
for template in templates:
    f_in = gzip.open(fsl.base.Info.standard_image(template + '.gz'), 'rb')
    f_out = open(os.path.join(data_dir,template), 'wb')
    f_out.writelines(f_in)
    f_out.close()
    f_in.close()
# Rebind to the absolute paths of the uncompressed copies:
# templates[0] = full head, templates[1] = brain-only template.
templates = [os.path.join(data_dir, t) for t in templates]
# Subject 5 and 6 are not usable, hence only 1-4.
for subj in [1, 2, 3, 4]:
    pwd = os.getcwd()
    os.chdir(os.path.join(data_dir))
    if not os.path.exists('sub%i.tar.gz' % subj):
        # Download the data (whole archive is read into memory).
        print "Downloading data, Please Wait 5mn"
        opener = urllib.urlopen(
            'http://data.pymvpa.org/datasets/haxby2001/subj%i-2010.01.14.tar.gz'
            % subj)
        open('%s/sub%i.tar.gz' % (data_dir, subj), 'wb').write(opener.read())
    if not os.path.exists('%s/subj%i' % (data_dir, subj)):
        print "Untaring"
        # Extract the data into data_dir (current directory after chdir).
        tar_file = tarfile.open('%s/sub%i.tar.gz' % (data_dir, subj))
        tar_file.extractall('.')
        tar_file.close()
        # Decompress .nii.gz files so SPM can read them.
        unzip_nii_gz('subj%i' % subj)
    # Remove stale outputs of previous runs: SPM writes normalized
    # images with a 'w' prefix and transform parameters as .mat files.
    for filename in glob.glob('%s/subj%i/w*.nii' % (data_dir, subj)):
        os.remove(filename)
    for filename in glob.glob('%s/subj%i/*.mat' % (data_dir, subj)):
        os.remove(filename)
    # First pass: estimate anat -> MNI warp on the full head and apply
    # it to the BOLD run.
    normalize = spm.Normalize()
    normalize.inputs.source = '%s/subj%i/anat.nii' % (data_dir, subj)
    normalize.inputs.template = templates[0]
    normalize.inputs.apply_to_files = '%s/subj%i/bold.nii' %(data_dir, subj)
    normalize.inputs.affine_regularization_type = 'none'
    normalize.inputs.nonlinear_iterations = 48
    normalize.inputs.nonlinear_regularization = 10
    print 'Starting normalize first run'
    results = normalize.run()
    # Keep SPM's postscript reports, tagged with the pass they came from.
    for filename in glob.glob('spm_*.ps'):
        shutil.move(filename, '%s/subj%i/first_run_%s' %
                    (data_dir, subj, filename))
    print results.runtime.stdout
    # Skull strip the normalized anatomy ('wanat.nii' from pass one).
    stripper = fsl.BET()
    stripper.inputs.mask = True
    stripper.inputs.in_file = '%s/subj%i/wanat.nii' % (data_dir, subj)
    stripper.inputs.out_file = '%s/subj%i/wanat_brain.nii' % (data_dir, subj)
    print 'Starting skull stripping'
    results = stripper.run()
    # BET may emit .nii.gz even with NIFTI default; re-decompress for SPM.
    unzip_nii_gz('%s/subj%i' % (data_dir, subj))
    # We run the normalize a second time on the skull-stripped data,
    # against the brain-only template.
    normalize.inputs.source = '%s/subj%i/wanat_brain.nii' % (data_dir, subj)
    normalize.inputs.template = templates[1]
    normalize.inputs.apply_to_files = '%s/subj%i/wbold.nii' % (data_dir, subj)
    normalize.inputs.affine_regularization_type = 'none'
    normalize.inputs.nonlinear_iterations = 48
    normalize.inputs.nonlinear_regularization = 10
    print 'Starting normalize second run'
    results = normalize.run()
    for filename in glob.glob('spm_*.ps'):
        shutil.move(filename, '%s/subj%i/second_run_%s' %
                    (data_dir, subj, filename))
    print results.runtime.stdout
    # Restore the working directory for the next iteration.
    os.chdir(pwd)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""
Preprocess the Haxby data with SPM: normalize to MNI.

Variant of the SPM+FSL script: uses SPM unified segmentation (instead
of Normalize estimation plus BET) to compute the anat -> MNI transform,
then applies it to the BOLD run.
"""
import os
import urllib
import tarfile
import gzip
import glob
import shutil
import numpy as np
from nipype.interfaces import spm
from nipype.interfaces import matlab # how to run matlab
################################################################################
# Set the way matlab should be called
#matlab.MatlabCommand.set_default_matlab_cmd("/volatile/varoquau/usr/bin/matlab -nodesktop -nosplash")
# NOTE(review): hard-coded, site-specific SPM8 path -- adjust for the local install.
matlab.MatlabCommand.set_default_paths('/software/spm8/')
################################################################################
def unzip_nii_gz(dirname):
    """Decompress every ``*.nii.gz`` file in `dirname` and delete the archive.

    SPM cannot read gzip-compressed NIfTI images, so each ``foo.nii.gz``
    is expanded to ``foo.nii`` in place (non-recursive, top level of
    `dirname` only) and the ``.gz`` file is then removed.

    Parameters
    ----------
    dirname : str
        Directory whose top-level ``*.nii.gz`` files are decompressed.
    """
    # Because SPM does not understand .nii.gz
    for filename in glob.glob('%s/*.nii.gz' % dirname):
        # 'with' guarantees both handles are closed even if the
        # decompression fails part-way through.
        with gzip.open(filename, 'rb') as f_in:
            with open(filename[:-3], 'wb') as f_out:  # strip the '.gz'
                shutil.copyfileobj(f_in, f_out)
        os.remove(filename)
# Site-specific scratch directory where the dataset and outputs live.
data_dir = '/software/temp/nipype-tutorial/temp/haxby'
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
# Subject 5 and 6 are not usable, hence only 1-4.
for subj in [1, 2, 3, 4]:
    pwd = os.getcwd()
    os.chdir(os.path.join(data_dir))
    if not os.path.exists('sub%i.tar.gz' % subj):
        # Download the data (whole archive is read into memory).
        print "Downloading data, Please Wait 5mn"
        opener = urllib.urlopen(
            'http://data.pymvpa.org/datasets/haxby2001/subj%i-2010.01.14.tar.gz'
            % subj)
        open('%s/sub%i.tar.gz' % (data_dir, subj), 'wb').write(opener.read())
    if not os.path.exists('%s/subj%i' % (data_dir, subj)):
        print "Untaring"
        # Extract the data into data_dir (current directory after chdir).
        tar_file = tarfile.open('%s/sub%i.tar.gz' % (data_dir, subj))
        tar_file.extractall('.')
        tar_file.close()
        # Decompress .nii.gz files so SPM can read them.
        unzip_nii_gz('subj%i' % subj)
    # Run SPM from inside the subject directory so its byproducts land there.
    os.chdir('%s/subj%i' % (data_dir, subj))
    # Remove stale outputs of previous runs ('w'-prefixed images, .mat
    # transform files).
    for filename in glob.glob('%s/subj%i/w*.nii' % (data_dir, subj)):
        os.remove(filename)
    for filename in glob.glob('%s/subj%i/*.mat' % (data_dir, subj)):
        os.remove(filename)
    # we use unified segmentation to normalize to MNI space
    segment = spm.Segment()
    segment.inputs.data = '%s/subj%i/anat.nii' % (data_dir, subj)
    # NOTE(review): flags select which grey-matter maps SPM writes --
    # confirm the ordering against the nipype Segment interface docs.
    segment.inputs.gm_output_type = [True,True,False]
    print 'Starting segment'
    results = segment.run()
    print results.runtime.stdout
    # We run normalize to apply the transformation to the functional runs
    # (jobtype='write': apply the precomputed transform, no estimation).
    normalize = spm.Normalize()
    normalize.inputs.parameter_file = results.outputs.transformation_mat
    normalize.inputs.apply_to_files = '%s/subj%i/bold.nii' % (data_dir, subj)
    normalize.inputs.jobtype = 'write'
    print 'Starting normalize run'
    results = normalize.run()
    print results.runtime.stdout
    # Restore the working directory for the next iteration.
    os.chdir(pwd)
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
"""
Preprocess the Haxby data with SPM: coregistration and realign.

Workflow variant of the segmentation script: the Segment and Normalize
steps are wrapped in a nipype pipeline (pe.Workflow) with a DataSink
collecting the outputs, giving cached, re-runnable execution.
"""
import os
import urllib
import tarfile
import gzip
import glob
import shutil
import numpy as np
from nipype.interfaces import io
from nipype.interfaces import spm
from nipype.interfaces import matlab # how to run matlab
from nipype.pipeline import engine as pe
################################################################################
# Set the way matlab should be called
#matlab.MatlabCommand.set_default_matlab_cmd("/volatile/varoquau/usr/bin/matlab -nodesktop -nosplash")
# NOTE(review): hard-coded, site-specific SPM8 path -- adjust for the local install.
matlab.MatlabCommand.set_default_paths('/software/spm8/')
################################################################################
def unzip_nii_gz(dirname):
    """Decompress every ``*.nii.gz`` file in `dirname` and delete the archive.

    SPM cannot read gzip-compressed NIfTI images, so each ``foo.nii.gz``
    is expanded to ``foo.nii`` in place (non-recursive, top level of
    `dirname` only) and the ``.gz`` file is then removed.

    Parameters
    ----------
    dirname : str
        Directory whose top-level ``*.nii.gz`` files are decompressed.
    """
    # Because SPM does not understand .nii.gz
    for filename in glob.glob('%s/*.nii.gz' % dirname):
        # 'with' guarantees both handles are closed even if the
        # decompression fails part-way through.
        with gzip.open(filename, 'rb') as f_in:
            with open(filename[:-3], 'wb') as f_out:  # strip the '.gz'
                shutil.copyfileobj(f_in, f_out)
        os.remove(filename)
# Site-specific scratch directory where the dataset and outputs live.
data_dir = '/software/temp/nipype-tutorial/temp/haxby'
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
# Subject 5 and 6 are not usable, hence only 1-4.
for subj in [1, 2, 3, 4]:
    pwd = os.getcwd()
    os.chdir(os.path.join(data_dir))
    if not os.path.exists('sub%i.tar.gz' % subj):
        # Download the data (whole archive is read into memory).
        print "Downloading data, Please Wait 5mn"
        opener = urllib.urlopen(
            'http://data.pymvpa.org/datasets/haxby2001/subj%i-2010.01.14.tar.gz'
            % subj)
        open('%s/sub%i.tar.gz' % (data_dir, subj), 'wb').write(opener.read())
    if not os.path.exists('%s/subj%i' % (data_dir, subj)):
        print "Untaring"
        # Extract the data into data_dir (current directory after chdir).
        tar_file = tarfile.open('%s/sub%i.tar.gz' % (data_dir, subj))
        tar_file.extractall('.')
        tar_file.close()
        # Decompress .nii.gz files so SPM can read them.
        unzip_nii_gz('subj%i' % subj)
    # Run from inside the subject directory so byproducts land there.
    os.chdir('%s/subj%i' % (data_dir, subj))
    # Remove stale outputs of previous runs ('w'-prefixed images, .mat
    # transform files).
    for filename in glob.glob('%s/subj%i/w*.nii' % (data_dir, subj)):
        os.remove(filename)
    for filename in glob.glob('%s/subj%i/*.mat' % (data_dir, subj)):
        os.remove(filename)
    # One workflow per subject; nipype caches node results under base_dir.
    workflow = pe.Workflow(name='subj%d_cache'%subj)
    workflow.base_dir = '%s/workdir' % (data_dir)
    # we use unified segmentation to normalize to MNI space
    segment = pe.Node(spm.Segment(), name='segment')
    segment.inputs.data = '%s/subj%i/anat.nii' % (data_dir, subj)
    # NOTE(review): flags select which grey-matter maps SPM writes --
    # confirm the ordering against the nipype Segment interface docs.
    segment.inputs.gm_output_type = [True,True,False]
    # We run normalize to apply the transformation to the functional runs
    # (jobtype='write': apply the transform estimated by segment).
    normalize = pe.Node(spm.Normalize(), name='normalize')
    workflow.connect(segment, 'transformation_mat', normalize, 'parameter_file')
    normalize.inputs.apply_to_files = '%s/subj%i/bold.nii' % (data_dir, subj)
    normalize.inputs.jobtype = 'write'
    # DataSink copies the named outputs back into the subject directory.
    datasink = pe.Node(io.DataSink(parameterization=False), name='sinker')
    datasink.inputs.base_dir = '%s/subj%i' % (data_dir, subj)
    workflow.connect(segment, 'transformation_mat', datasink, '@transform')
    workflow.connect(segment, 'inverse_transformation_mat', datasink, '@invtransform')
    workflow.connect(segment, 'normalized_gm_image', datasink, '@norm_gm')
    workflow.connect(normalize, 'normalized_files', datasink, '@norm_bold')
    print 'Starting workflow'
    workflow.run()
    # Restore the working directory for the next iteration.
    os.chdir(pwd)
Author
Because we don't have a URL-grabber + untarring utility — I didn't want to write one right now!
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Why not use iterables on the workflow version?