Optimal echo combination accounting for motion correction
""" Echo combination. | |
1. Motion correction | |
2. Compute weights on motion corrected data | |
3. Apply inverse transform on weights | |
4. Apply weights on original data to minimize interpolation. | |
Methods: | |
For the echo combination, the first echo was motion corrected using ANTs for | |
rigid transformation. The transforms were subsequently applied to the later | |
echoes. The mean images of the motion corrected echoes were subsequently used | |
for estimating a T2star map. Weights for each echo were computed from the T2star | |
map assuming a simple exponential decay model. The weights were then applied to | |
each volume of the original datasets after applying an inverse transformation | |
corresponding to the motion correction. Finally, the weighted echoes were | |
averaged to form an optimally combined image. | |
""" | |
import operator
from functools import reduce
from typing import List, Tuple

import ants
import numpy as np
from tedana.decay import fit_decay
from tedana.utils import make_adaptive_mask


def multiecho_motion_correction(
    echoes: List[ants.ANTsImage], mask: ants.ANTsImage
) -> Tuple[List[ants.ANTsImage], List[List[str]]]:
    """Given a sorted list of ANTs images, each a 4D image acquired at a
    different echo time, compute motion parameters from the first echo and
    apply them to all echoes. Return both a list of motion-corrected ANTs
    images and a list of the transform files corresponding to the motion
    correction. The motion correction uses a rigid transformation.
    """
    echo_0: ants.ANTsImage = echoes[0]
    mcf_0: dict = ants.motion_correction(
        echo_0, mask=mask, type_of_transform="BOLDRigid"
    )
    mcf_others = [apply_to_other_echo(x, mcf_0) for x in echoes[1:]]
    return [mcf_0["motion_corrected"], *mcf_others], mcf_0["motion_parameters"]


def apply_to_other_echo(other_echo: ants.ANTsImage, mcf: dict) -> ants.ANTsImage:
    """Given an ANTs image (other_echo) and an ANTs motion-correction output
    (a dict with 'motion_corrected' and 'motion_parameters' keys), return
    other_echo motion corrected with the transforms estimated from the first echo.
    """
    fixed_list = ants.ndimage_to_list(ants.image_clone(mcf["motion_corrected"]))
    moving_list = ants.ndimage_to_list(other_echo)
    transforms = mcf["motion_parameters"]
    # Apply the per-volume transforms from the first echo to this echo.
    out: List[ants.ANTsImage] = [
        ants.apply_transforms(fixed, moving, transform)
        for fixed, moving, transform in zip(fixed_list, moving_list, transforms)
    ]
    image_target: ants.ANTsImage = ants.image_clone(mcf["motion_corrected"])
    return ants.list_to_ndimage(image_target, out)


def get_weights(list_mcf: List[ants.ANTsImage], tes: np.ndarray) -> List[ants.ANTsImage]:
    """Use tedana to compute a T2star map and weights for optimal combination.
    Return a list of 3D weight images, one per echo.
    """
    # Stack the temporal mean of each motion-corrected echo into a (voxels, echoes) array.
    catd: np.ndarray = np.stack(
        [ants.get_average_of_timeseries(x).flatten() for x in list_mcf], axis=-1
    )
    # Use tedana to generate a T2star map.
    mask, masksum = make_adaptive_mask(catd[:, :, np.newaxis], getsum=True)
    _, _, _, _, t2sfull, _ = fit_decay(catd[:, :, np.newaxis], tes, mask, masksum)
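    # Optimal-combination weights: each echo is weighted proportionally to
    # TE * exp(-TE / T2star), assuming mono-exponential decay.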
    weights: List[np.ndarray] = [te * np.exp(-te / t2sfull) for te in tes]
    # Normalise the weights so that they sum to one across echoes.
    normfactor: np.ndarray = reduce(operator.add, weights)
    weights = [np.nan_to_num(weight / normfactor) for weight in weights]
    # Reshape the weights and bring them into ants.ANTsImage objects.
    refs: List[ants.ANTsImage] = [
        ants.image_clone(ants.slice_image(x, 3, 0)) for x in list_mcf
    ]
    for ref, weight in zip(refs, weights):
        ref[:, :, :] = np.reshape(weight, ref.shape)
    return refs


def apply_weights(
    echoes: List[ants.ANTsImage],
    weights: List[ants.ANTsImage],
    motion_parameters: List[List[str]],
) -> List[ants.ANTsImage]:
    """Take a list of 4D echoes and a list of 3D weights, and apply the weights
    to the echoes. The motion_parameters are used to register the weights to each
    individual volume of the 4D echoes.
    Return a list of weighted echoes of the same size as the input echoes.
    """
    weighted_imgs: List = []
    for echo, weight in zip(echoes, weights):
        # Break the 4D image into 3D volumes so the transforms can be applied
        # to each volume individually.
        echo_lst_3d: List[ants.ANTsImage] = ants.ndimage_to_list(echo)
        # Apply the transforms, but inverted, since we want to "undo" the
        # motion correction and map the weights onto the original volumes.
        weighted_echo_lst_3d = [
            ants.apply_transforms(x, weight, t, whichtoinvert=[True]) * x
            for x, t in zip(echo_lst_3d, motion_parameters)
        ]
        # Reassemble the weighted volumes into a 4D image.
        image_target: ants.ANTsImage = ants.image_clone(echo)
        weighted_imgs.append(ants.list_to_ndimage(image_target, weighted_echo_lst_3d))
    return weighted_imgs


def optimal_combination(echoes: List[str], tes: List[float], save_weights: bool = False):
    """Take a list of echo filenames and a list of TEs, and return an optimally
    combined image (using a T2star weighting approach).
    """
    loaded_data = [ants.image_read(x) for x in echoes]
    mask = ants.get_mask(ants.get_average_of_timeseries(loaded_data[0]))
    list_mcf, motion_pars = multiecho_motion_correction(loaded_data, mask)
    weights = get_weights(list_mcf, np.array(tes))
    if save_weights:
        raise NotImplementedError("Saving weights is not yet available.")
    weighted_data = apply_weights(loaded_data, weights, motion_pars)
    # The weights have already been normalised, so summing the weighted echoes
    # yields the optimally combined (weighted average) image.
    combined: ants.ANTsImage = reduce(operator.add, weighted_data)
    return combined
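

# A minimal usage sketch. The filenames and echo times below are hypothetical
# placeholders; adjust them to your data (TEs in the units expected by tedana,
# typically milliseconds).
if __name__ == "__main__":
    echo_files = [
        "sub-01_echo-1_bold.nii.gz",
        "sub-01_echo-2_bold.nii.gz",
        "sub-01_echo-3_bold.nii.gz",
    ]
    echo_times = [12.0, 28.0, 44.0]
    combined = optimal_combination(echo_files, echo_times)
    ants.image_write(combined, "sub-01_desc-optcom_bold.nii.gz")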
Hi David, please check out this repo: https://github.com/Donders-Institute/multiecho