Idea to quickly test that QPLAD adjustment factors (AF) and quantiles are matched correctly.
| """ | |
| Idea to quickly test that QPLAD adjustment factors (AF) and quantiles are matched correctly. | |
| This uses ``dodola`` https://github.com/ClimateImpactLab/dodola/tree/f331623dfaffd3d341d27a3bc36f156646c3620f | |
| """ | |
| import numpy as np | |
| import xarray as xr | |
| from dodola.core import ( | |
| train_quantiledeltamapping, | |
| adjust_quantiledeltamapping, | |
| train_analogdownscaling, | |
| adjust_analogdownscaling, | |
| ) | |
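# Pipeline under test: QDM bias-corrects the simulation against a coarse
# reference, then QPLAD downscales the result to the fine grid by applying
# adjustment factors matched by latitude, dayofyear, and quantile.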
def test_qplad_integration_af_quantiles():
    """
    Test QPLAD correctly matches adjustment factors and quantiles for lat and dayofyear.

    The strategy is to bias-correct a Dataset of ones and then downscale it
    to two gridpoints with QPLAD. In one case we take the adjustment factors
    for a single dayofyear and manually change them to 0.0, then check for
    the corresponding change in the output dataset. In the other case we
    take the adjustment factors for one of the two latitudes we're
    downscaling to and manually change them to 0.0. We then check for the
    corresponding change in the output dataset for that latitude.
    """
    kind = "*"  # multiplicative adjustment
    lat = [1.0, 1.5]
    time = xr.cftime_range(start="1994-12-17", end="2015-01-15", calendar="noleap")
    variable = "scen"

    data_ref = xr.DataArray(
        np.ones((len(time), len(lat)), dtype="float64"),
        # If you want something more sophisticated:
        # np.arange(len(time) * len(lat), dtype="float64").reshape(len(time), len(lat)),
        coords={"time": time, "lat": lat},
        attrs={"units": "K"},
        name=variable,
    ).chunk({"time": -1, "lat": -1})
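    # The reference field is constant 1.0 everywhere, so every downscaled
    # value should come out exactly 1.0 unless an adjustment factor is
    # deliberately spoiled below.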
    data_train = data_ref + 2
    data_train.attrs["units"] = "K"

    ref_fine = data_ref.to_dataset()
    ds_train = data_train.to_dataset()

    # Take the mean across space to represent coarse reference data for AFs.
    ds_ref_coarse = ref_fine.mean(["lat"], keep_attrs=True)
    ds_train = ds_train.mean(["lat"], keep_attrs=True)
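    # At this point ref_fine[variable] still has dims (time, lat), while
    # ds_ref_coarse and ds_train are collapsed to dims (time,), standing in
    # for a single coarse grid cell covering both latitudes.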
    # Tile the fine resolution grid with the coarse resolution ref data.
    ref_coarse = ds_ref_coarse.broadcast_like(ref_fine)

    ds_bc = ds_train
    ds_bc[variable].attrs["units"] = "K"
    # This is an integration test between QDM and QPLAD, so use QDM services
    # for bias correction.
    target_year = 2005
    qdm_model = train_quantiledeltamapping(
        reference=ds_ref_coarse, historical=ds_train, variable=variable, kind=kind
    )
    biascorrected_coarse = adjust_quantiledeltamapping(
        simulation=ds_bc,
        variable=variable,
        qdm=qdm_model.ds,
        years=[target_year],
        include_quantiles=True,
    )
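    # ``include_quantiles=True`` attaches the simulated quantiles ("sim_q")
    # to the output; QPLAD later uses these to look up the matching
    # adjustment factors.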
    # Make bias corrected data on the fine resolution grid.
    biascorrected_fine = biascorrected_coarse.broadcast_like(
        ref_fine.sel(
            time=slice("{}-01-01".format(target_year), "{}-12-31".format(target_year))
        )
    )

    qplad_model = train_analogdownscaling(
        coarse_reference=ref_coarse,
        fine_reference=ref_fine,
        variable=variable,
        kind=kind,
    )
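    # Note: the positional spoiling below assumes qplad_model.ds["af"] is
    # indexed as (lat, dayofyear, quantile); both spoil steps rely on that
    # axis order.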
    # TODO: These prob should be two separate tests with setup fixtures...
    spoiled_time = qplad_model.ds.copy(deep=True)

    # Spoil one dayofyear value in the adjustment factors (force it to 0.0)
    # and test that the spoiled value correctly propagates through to the output.
    time_idx_to_spoil = 25
    spoiled_time["af"][:, time_idx_to_spoil, :] = 0.0
    qplad_model.ds = spoiled_time

    downscaled = adjust_analogdownscaling(
        simulation=biascorrected_fine.set_coords(
            ["sim_q"]
        ),  # func assumes sim_q is coordinate...
        qplad=qplad_model,
        variable=variable,
    )
    # One target year on the noleap calendar at two latitudes gives
    # 365 * 2 = 730 output values. All but two of them should be 1.0...
    assert (downscaled[variable].values == 1.0).sum() == 728
    # We should have 2 `0.0` entries, one in each lat...
    assert (downscaled[variable].values == 0.0).sum() == 2
    # All our 0.0s should be in this dayofyear/time slice of the output dataset.
    np.testing.assert_array_equal(
        downscaled[variable].values[time_idx_to_spoil, :], np.array([0.0, 0.0])
    )
    # Similar to above, spoil one lat value in the adjustment factors
    # (force it to 0.0) and test that the spoiled value correctly
    # propagates through to the output. Note this copies the model with the
    # dayofyear spoiling above still in place.
    spoiled_latitude = qplad_model.ds.copy(deep=True)
    latitude_idx_to_spoil = 0
    spoiled_latitude["af"][latitude_idx_to_spoil, ...] = 0.0
    qplad_model.ds = spoiled_latitude

    downscaled = adjust_analogdownscaling(
        simulation=biascorrected_fine.set_coords(
            ["sim_q"]
        ),  # func assumes sim_q is coordinate...
        qplad=qplad_model,
        variable=variable,
    )
    # The spoiled latitude zeroes a full year (365 values) and the spoiled
    # dayofyear above still zeroes one value at the other latitude, leaving
    # 364 ones...
    assert (downscaled[variable].values == 1.0).sum() == 364
    # ...and 366 zeros due to the spoiled data.
    assert (downscaled[variable].values == 0.0).sum() == 366
    # The spoiled latitude should be 0.0 across the entire year of output.
    assert all(downscaled[variable].values[:, latitude_idx_to_spoil] == 0.0)
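
# A minimal sketch for running this as a standalone script rather than
# through pytest (assumes dodola and its dependencies are importable):
if __name__ == "__main__":
    test_qplad_integration_af_quantiles()
    print("QPLAD AF/quantile matching test passed")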