Last active
January 27, 2017 08:55
-
-
Save rabernat/4e6ba6e199b5c7647c411da2ad7e62db to your computer and use it in GitHub Desktop.
aggregation routines for coarse graining
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# implement my own aggregation downsampling function | |
from functools import reduce
from itertools import product

import numpy as np
import xarray as xr
def aggregate(data, factor=2, mean=True): | |
ndim = data.ndim | |
shape = data.shape | |
# promote single value to list | |
if isinstance(factor, int): | |
factors = ndim * [factor,] | |
# check we have the right number of dimensions | |
assert len(factors) == ndim | |
# make sure shapes are compatible | |
for s, fac in zip(shape, factors): | |
assert s % factor == 0 | |
out = 0 | |
# it is lazy to use a set...don't have to figure out the necessary logic | |
slices = [] | |
for start_indices in product(*[range(f) for f in factors]): | |
slices.append( | |
[slice(sidx, s, factor) for sidx, s in zip(start_indices, shape)] | |
) | |
# how would we generalize to other reduce functions? | |
result = reduce(np.add, [data[sl] for sl in slices]) | |
if mean: | |
result /= len(slices) | |
return result | |
# wrap in xarray | |
def xr_aggregate(data, **kwargs):
    """Coarse-grain an ``xarray.DataArray``, aggregating its coordinates too.

    Wraps :func:`aggregate`: the array values and each dimension coordinate
    are downsampled with the same keyword arguments (``factor``, ``mean``),
    and the result is rewrapped with the original dims and attrs.

    Parameters
    ----------
    data : xarray.DataArray
        Input array to coarse-grain.
    **kwargs
        Forwarded verbatim to :func:`aggregate`.

    Returns
    -------
    xarray.DataArray
        Coarse-grained array with aggregated dimension coordinates.
    """
    assert isinstance(data, xr.DataArray)
    newdata = aggregate(data.values, **kwargs)
    # only aggregate dims that actually carry a coordinate; the original
    # comprehension raised KeyError on coordinate-less dimensions
    coords = {
        k: aggregate(data[k].values, **kwargs)
        for k in data.dims
        if k in data.coords
    }
    return xr.DataArray(newdata, dims=data.dims, coords=coords, attrs=data.attrs)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment