@Kelvinrr
Last active February 10, 2021 21:09
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# CD Test (Points)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Template for running CD algorithms through parameterizations. For these runs, focus on parameterizing CD algorithms, see at what points affine offset and sun azimuth directions begin to make TPR drop. \n",
"\n",
"\n",
"Steps: \n",
"1. generate base DEM\n",
"2. create after DEM by filling depressions (use defaults for now)\n",
"3. add craters to after image (use defaults for now)\n",
"4. hillshade both images (use azi param to control light direction)\n",
"5. apply noise to both images\n",
"6. apply affine to both images and use after affine on truth data (stick with offset differences for now, as it tends to be the most common registration error)\n",
"7. run before and after through a CD algorithm \n",
"8. compute TPR and FPR \n",
" * TPR = TP/TP+FP\n",
" * FPR = FP/FP+TP\n",
" * IF function returns points, TP = point equals to or is contained by set of \"truth\" points/polys 1 pixel buffer. \n",
" * ELSE function returns polygons, TP = polygon has >50% intersection with \"truth\" poly. "
]
},
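{
"cell_type": "markdown",
"metadata": {},
"source": [
"The scoring rules above are sketched in the next cell. This is an illustrative helper, not part of autocnet: it assumes detections arrive as shapely `Point`s or `Polygon`s and that the truth geometry has already been buffered by 1 pixel."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hedged sketch of the TP/FP scoring rules listed above.\n",
"# Assumptions (not from autocnet): detections are shapely Points or Polygons,\n",
"# and `truth` is the union of the \"truth\" geometries buffered by 1 pixel.\n",
"from shapely.geometry import Point\n",
"\n",
"def score_detections(detections, truth):\n",
"    \"\"\"Count TP/FP for point or polygon detections against a truth geometry.\"\"\"\n",
"    tp = 0\n",
"    for det in detections:\n",
"        if isinstance(det, Point):\n",
"            # Point rule: TP if the point falls within the buffered truth geometry.\n",
"            hit = det.intersects(truth)\n",
"        else:\n",
"            # Polygon rule: TP if >50% of the detection's area overlaps the truth.\n",
"            hit = det.intersection(truth).area > 0.5 * det.area\n",
"        tp += int(hit)\n",
"    fp = len(detections) - tp\n",
"    return tp, fp"
]
},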
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"os.environ['ISISROOT'] = '/usgs/cpkgs/anaconda3_linux/envs/isis3.9.0'\n",
"\n",
"from numpy import random\n",
"from scipy.ndimage.interpolation import zoom\n",
"import scipy\n",
"\n",
"import os\n",
"import math\n",
"\n",
"from PIL import Image\n",
"import numpy as np\n",
"from scipy.misc import imresize\n",
"\n",
"import shapely\n",
"from shapely.geometry import Point, MultiPolygon\n",
"\n",
"import matplotlib\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.image as mpimg\n",
"\n",
"from autocnet.utils.utils import bytescale, cartesian\n",
"from autocnet.examples import get_path\n",
"from autocnet.cg import change_detection as cd\n",
"from autocnet.cg import cg\n",
"\n",
"def cartesian_product(*arrays):\n",
" la = len(arrays)\n",
" dtype = \"object\"\n",
" arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)\n",
" for i, a in enumerate(np.ix_(*arrays)):\n",
" arr[...,i] = a\n",
" return arr.reshape(-1, la)"
]
},
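{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick, illustrative check of the `cartesian_product` helper defined above (not in the original notebook): it returns every combination of its input sequences, which is how the parameter matrix is built further down."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check: cartesian_product should return every combination of its inputs.\n",
"# Expected: the 4 pairs (1, 'a'), (1, 'b'), (2, 'a'), (2, 'b').\n",
"cartesian_product([1, 2], ['a', 'b'])"
]
},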
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate before and after images "
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Warning! No geotransform defined. Choosing a standard one! (Top left cell's top let corner at <0,0>; cells are 1x1.)\n",
"Warning! No geotransform defined. Choosing a standard one! (Top left cell's top let corner at <0,0>; cells are 1x1.)\n"
]
}
],
"source": [
"# Generate base DEM \n",
"\n",
"before_dem = cd.generate_dem(alpha=1)"
]
},
{
"cell_type": "code",
"execution_count": 103,
"metadata": {},
"outputs": [],
"source": [
"# Generate Boulders\n",
"\n",
"before_dem, before_polys, after_dem, after_polys = cd.generate_boulder_field(before_dem, 5)"
]
},
{
"cell_type": "code",
"execution_count": 119,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Warning! No geotransform defined. Choosing a standard one! (Top left cell's top let corner at <0,0>; cells are 1x1.)\n",
"Warning! No geotransform defined. Choosing a standard one! (Top left cell's top let corner at <0,0>; cells are 1x1.)\n",
"Warning! No geotransform defined. Choosing a standard one! (Top left cell's top let corner at <0,0>; cells are 1x1.)\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/krodriguez/repos/autocnet/autocnet/cg/change_detection.py:548: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n",
" dmask[[*cluster.T]] = True\n"
]
}
],
"source": [
"# compute Depressions\n",
"\n",
"after_dem, change_poly = cd.compute_depression(after_dem, alpha=1.4, return_polygon=True)\n",
"change_poly = shapely.ops.cascaded_union([change_poly, MultiPolygon(after_polys)])"
]
},
{
"cell_type": "code",
"execution_count": 139,
"metadata": {},
"outputs": [],
"source": [
"# Hillshade \n",
"\n",
"azi_before = 190 \n",
"azi_after = 210 \n",
"\n",
"before_image = cd.hillshade(before_dem, azi=azi_before).mean(axis=2)\n",
"after_image = cd.hillshade(after_dem, azi=azi_after).mean(axis=2)"
]
},
{
"cell_type": "code",
"execution_count": 152,
"metadata": {},
"outputs": [],
"source": [
"# Add noise\n",
"noise_stddev_before = np.std(before_image)/10\n",
"noise_stddev_after = np.std(after_image)/10\n",
"\n",
"before_image += np.random.normal(0, noise_stddev_before, before_image.shape)\n",
"after_image += np.random.normal(0, noise_stddev_after, after_image.shape)"
]
},
{
"cell_type": "code",
"execution_count": 153,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# TODO: Add affine distortion "
]
},
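{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 6 of the plan (apply an affine transform to both images and carry the \"after\" affine onto the truth data) is still a TODO above. The next cell is a minimal sketch of one possible approach, assuming a pure translation offset; `scipy.ndimage.shift` and `shapely.affinity.translate` are standard functions, but the offset values, variable names, and the choice to shift only the \"after\" image are assumptions, not part of the original notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Minimal sketch (assumption, not original notebook code): apply a pure translation\n",
"# offset to the \"after\" image and the same offset to the truth polygons so the\n",
"# truth data stays registered to the shifted detections.\n",
"from scipy.ndimage import shift as nd_shift\n",
"from shapely.affinity import translate\n",
"\n",
"row_offset, col_offset = 2.0, -1.5  # assumed sub-pixel registration error\n",
"\n",
"shifted_after = nd_shift(after_image, (row_offset, col_offset), order=1, mode='nearest')\n",
"# Note: the row/column ordering here may need to match the x/y swap used in the\n",
"# plotting and scoring cells below.\n",
"shifted_truth = translate(change_poly, xoff=row_offset, yoff=col_offset)\n",
"\n",
"# Downstream cells would then use shifted_after / shifted_truth in place of\n",
"# after_image / change_poly."
]
},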
{
"cell_type": "code",
"execution_count": 178,
"metadata": {},
"outputs": [],
"source": [
"diff = (after_image-before_image).astype(float)\n",
"# diff[diff==0] = np.nan\n",
"\n",
"fig, axs = plt.subplots(2,2, figsize=(10,10))\n",
"\n",
"axs[0][0].imshow(before_image, cmap=\"bone\")\n",
"axs[0][0].set_title('\"Before\" Hillshade')\n",
"axs[0][1].imshow(after_image, cmap=\"bone\")\n",
"axs[0][1].set_title('\"After\" Hillshade')\n",
"\n",
"axs[1][0].imshow(after_image, cmap='bone')\n",
"\n",
"if isinstance(change_poly, MultiPolygon):\n",
" for geom in change_poly.geoms: \n",
" xs, ys = geom.exterior.xy \n",
" axs[1][0].fill(ys, xs, alpha=1, fc='black', ec='none')\n",
"else: \n",
" xs, ys = change_poly.exterior.xy \n",
" axs[1][0].fill(ys, xs, alpha=1, fc='black', ec='none')\n",
"axs[1][0].set_title(\"Change Polygons\")\n",
" \n",
"axs[1][1].imshow(diff, cmap=\"magma\")\n",
"axs[1][1].set_title('Diff')\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate CD parameters"
]
},
{
"cell_type": "code",
"execution_count": 169,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"9X3 Matrix\n"
]
}
],
"source": [
"# Change these based on algorithm \n",
"\n",
"nfeatures = [500, 1000, 2000]\n",
"scalefactor = [1.5]\n",
"nlevels = [3]\n",
"extractor_params = [dict(zip([\"nfeatures\", \"scaleFactor\", \"nlevels\"], ps)) for ps in cartesian_product(nfeatures, scalefactor, nlevels)] \n",
"\n",
"min_samples = [5, 15, 20]\n",
"max_eps = [1, 20, 30]\n",
"xi = [.5]\n",
"eps = [.5]\n",
"cluster_params = [dict(zip([\"min_samples\", \"max_eps\", \"eps\", \"xi\"], ps)) for ps in cartesian_product(min_samples, max_eps, eps, xi)] \n",
"\n",
"print(f\"{len(cluster_params)}X{len(extractor_params)} Matrix\")\n",
"parameters = cartesian_product(extractor_params, cluster_params)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Run OKB with Params"
]
},
{
"cell_type": "code",
"execution_count": 170,
"metadata": {},
"outputs": [],
"source": [
"def image_diff_abs(arr1, arr2):\n",
" return np.abs(cd.image_diff(arr1, arr2))\n",
"\n",
"results = [cd.okubogar_detector(before_image, \n",
" after_image, \n",
" nbins=50, \n",
" extractor_kwargs=params[0]) for params in parameters]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Plot results "
]
},
{
"cell_type": "code",
"execution_count": 181,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"nx = len(extractor_params)\n",
"ny = len(cluster_params)\n",
"\n",
"fig, axs = plt.subplots(ny, nx, sharex='col', sharey='row')\n",
"fig.set_figheight(40)\n",
"fig.set_figwidth(20)\n",
"\n",
"for ax in axs.flat:\n",
" ax.set(xlabel='x-label', ylabel='y-label')\n",
" \n",
"xs, ys = np.mgrid[:ny, :nx]\n",
"truth_poly = change_poly.buffer(1)\n",
"\n",
"for i, coord in enumerate(zip(xs.flatten(),ys.flatten())):\n",
" x,y = coord\n",
" df, hmap, diff = results[i]\n",
" \n",
" # get true positives and false negatives\n",
" true_positives = 0\n",
"\n",
" # needs to be within one pixel of truth\n",
" for p in df.geometry:\n",
" p = Point(p.y, p.x)\n",
" if p.intersects(truth_poly):\n",
" true_positives+=1\n",
" \n",
" false_positives = len(df.geometry) - true_positives \n",
" ratio = true_positives/len(df.geometry)\n",
" \n",
" axs[x,y].set_title(f\"{i} | tp: {true_positives}, fp: {false_positives}, score: {ratio}\")\n",
" axs[x,y].set(xlabel=f\"ex {parameters[i][1]}\", ylabel=f\"cl {parameters[i][0]}\")\n",
" axs[x,y].imshow(after_image, cmap=\"bone\", alpha=0.2)\n",
" \n",
" if isinstance(truth_poly, MultiPolygon):\n",
" for geom in truth_poly.geoms: \n",
" xs, ys = geom.exterior.xy \n",
" axs[x][y].fill(ys, xs, alpha=0.5, fc='black', ec='none')\n",
" else: \n",
" xs, ys = truth_poly.exterior.xy \n",
" axs[x][y].fill(ys, xs, alpha=0.5, fc='black', ec='none')\n",
" \n",
" axs[x,y].scatter(df.x, df.y, c=\"r\", alpha=1, s=1)\n",
"\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "AutocnetDev",
"language": "python",
"name": "autocnetdev"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}