import scipy.misc
from scipy import ndimage
import copy
import matplotlib
matplotlib.use('Agg')  # Must be set before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
from spaceknow.horik import tools

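# Note: `spaceknow.horik.tools` is not a public module. The two helpers used
# below are assumed (an assumption, not the actual implementation) to behave
# roughly as follows:
#   tools.normalize(img)         -- rescale a 2D float array to the 0-255 range
#   tools.add_opacity_band(img)  -- append an alpha (opacity) band so an
#                                   (nrow, ncol, 3) RGB array has 4 channels
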
def _prep_image(location, threshold=175, filter_level=1.5):
    # Returns a normalized, smoothed single-band image for object
    # identification and analysis. Note that the default parameters are
    # hardcoded for DigitalGlobe imagery.
    # TODO: Make this general, a function of image resolution.
    img = scipy.misc.imread(location, flatten=1)
    img = ndimage.gaussian_filter(img, filter_level)
    normed = tools.normalize(img)
    if threshold:
        normed[normed < threshold] = 0
    return normed

def _cluster(img):
    # Accepts a single-channel image and returns connected clusters in a 2D
    # array, where the pixel value is the numeric label for each segmented
    # cluster.
    blobs, n_blobs = ndimage.label(img)
    return blobs

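# For reference (an illustrative note, not part of the original gist):
# `ndimage.label` uses 4-connectivity by default, so for
#     [[1, 0, 1],
#      [1, 0, 0],
#      [0, 0, 1]]
# it returns three labels: the two-pixel blob in the left column is label 1,
# and each isolated pixel on the right gets its own label (2 and 3).
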
def _perimeter(b):
    # Calculates the perimeter of the 1s in the supplied binary image `b` by
    # counting the horizontal and vertical transitions between neighboring
    # pixels. The area-to-perimeter ratio derived from this value (see
    # _object_stats) indicates how condensed or stocky the object is, rather
    # than long and skinny (which yields a relatively low ratio).
    perimeter = np.sum(b[:, 1:] != b[:, :-1]) + np.sum(b[1:, :] != b[:-1, :])
    return perimeter

def _object_stats(clustered_img, label):
    # Accepts an image that has already been split into cluster labels and
    # returns statistics for the object with the supplied label. Note that
    # the 'perimeter' entry actually holds the area-to-perimeter ratio, which
    # is what _screener compares against `peri_min` and `peri_max`.
    img = clustered_img == label
    perimeter = _perimeter(img)
    area = np.sum(img)
    return dict(area=area, perimeter=area / float(perimeter))

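# A quick worked example (illustrative, not from the original gist): for a
# label whose mask is a 3x3 block of ones inside a field of zeros, _perimeter
# counts 2 transitions per row and per column of the block, giving 6 + 6 = 12,
# so _object_stats reports area 9 and a ratio of 9 / 12.0 = 0.75. A 1x9 line
# with the same area has perimeter 20 and a lower ratio of 0.45, which is how
# skinny objects get screened out.
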
def _screener(obj_dict, param_dict):
    # Accepts a dictionary that describes an object along with a dictionary
    # that describes the parameters of the target object, and returns True if
    # the object falls within the target bounds.
    area = obj_dict['area']
    peri = obj_dict['perimeter']
    a = param_dict['area_min'] <= area <= param_dict['area_max']
    p = param_dict['peri_min'] <= peri <= param_dict['peri_max']
    return a and p

def _identifier(location, params):
    # Identifies objects in the image at the supplied location, given the
    # supplied parameters. Returns the object count and a boolean mask over
    # the pixels that belong to the retained objects.
    img = _prep_image(location, threshold=params['threshold'],
                      filter_level=params['filter_level'])
    clustered = _cluster(img)
    stats = {c: _object_stats(clustered, c) for c in set(clustered.flatten())}
    filtered = {k: v for k, v in stats.items() if _screener(v, params)}
    idx = np.in1d(clustered.ravel(), list(filtered.keys()))
    return dict(n=len(filtered), img=idx.reshape(clustered.shape))

def gen_intel(location, params):
    # Given the location of a high-resolution image, outline the bright
    # objects of the target dimensions and save the overlay to /tmp.
    raw_img = scipy.misc.imread(location)
    analysis = _identifier(location, params)
    idx = analysis['img']
    mask = ~idx
    nrow, ncol, nband = raw_img.shape
    if nband < 4:
        overlay = tools.add_opacity_band(raw_img)
    else:
        overlay = copy.copy(raw_img)
    overlay[mask, :] = 0
    overlay[idx] = [255, 0, 0, 150]
    plt.imshow(raw_img)
    plt.imshow(overlay)
    plt.axis('off')
    name = location.split('/')[-1].split('.')[0]
    img_path = '%s/%s_overlay.png' % ('/tmp', name)
    plt.savefig(img_path, bbox_inches='tight')
    return dict(n=analysis['n'], img_location=img_path)
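
# A minimal usage sketch. The file path and every parameter value below are
# illustrative assumptions, not values from the original gist; the parameter
# keys are the ones the functions above read from `params`.
if __name__ == '__main__':
    params = dict(threshold=175,     # brightness cutoff passed to _prep_image
                  filter_level=1.5,  # gaussian smoothing level
                  area_min=50,       # object area bounds, in pixels
                  area_max=500,
                  peri_min=1.0,      # bounds on the area-to-perimeter ratio
                  peri_max=6.0)
    result = gen_intel('/tmp/scene.png', params)
    print('found %d objects, overlay saved to %s'
          % (result['n'], result['img_location']))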