[CarND_classifiers] #CarND #classifiers
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bbox-example-image.jpg')
# Define a function that takes an image, a list of bounding boxes,
# and an optional color tuple and line thickness as inputs,
# then draws boxes in that color on the output
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    # Make a copy of the image so the original is not modified
    draw_img = np.copy(img)
    # Draw each bounding box on the image copy using cv2.rectangle()
    for box in bboxes:
        (x1, y1), (x2, y2) = box
        cv2.rectangle(draw_img, (x1, y1), (x2, y2), color, thick)
    # Return the image copy with boxes drawn
    return draw_img
# Add bounding boxes in this format; these are just example coordinates.
bboxes = [((300, 500), (380, 560)), ((300, 300), (400, 400))]
result = draw_boxes(image, bboxes)
plt.imshow(result)
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('cutout1.jpg')

# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
    # Compute the histogram of the RGB channels separately
    rhist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
    ghist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
    bhist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
    # Generate the bin centers from the bin edges
    bin_edges = rhist[1]
    bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2
    # Concatenate the histograms into a single feature vector
    hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))
    # Return the individual histograms, bin centers and feature vector
    return rhist, ghist, bhist, bin_centers, hist_features
rh, gh, bh, bincen, feature_vec = color_hist(image, nbins=32, bins_range=(0, 256))
# Plot a figure with all three bar charts
if rh is not None:
    fig = plt.figure(figsize=(12, 3))
    plt.subplot(131)
    plt.bar(bincen, rh[0])
    plt.xlim(0, 256)
    plt.title('R Histogram')
    plt.subplot(132)
    plt.bar(bincen, gh[0])
    plt.xlim(0, 256)
    plt.title('G Histogram')
    plt.subplot(133)
    plt.bar(bincen, bh[0])
    plt.xlim(0, 256)
    plt.title('B Histogram')
    fig.tight_layout()
else:
    print('Your function is returning None for at least one variable...')
from sklearn.svm import SVC
# gamma is the RBF kernel coefficient: larger values make the decision
# boundary hug individual training points more tightly
# C controls the penalty for misclassified training examples
clf = SVC(C=1.0, gamma=1000)
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(...)
pred = clf.predict(...)
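# A minimal end-to-end sketch (assumed, not part of the original gist) of how
# either classifier above could be trained on color histogram features. The
# random placeholder data below stands in for features extracted from real
# car / not-car images with color_hist().
from sklearn.model_selection import train_test_split
car_features = [np.random.rand(96) for _ in range(100)]     # placeholder feature vectors
notcar_features = [np.random.rand(96) for _ in range(100)]  # placeholder feature vectors
X = np.vstack((car_features, notcar_features)).astype(np.float64)
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
clf = GaussianNB()
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
print('Test accuracy:', clf.score(X_test, y_test))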
# Define a function to compute binned spatial color features
# Pass the color_space flag as a 3-letter all-caps string
# like 'HSV' or 'LUV' etc.
def bin_spatial(img, color_space='RGB', size=(32, 32)):
    # Convert the image to the requested color space; fall back to a copy
    # for 'RGB' or any unrecognized flag so feature_image is always defined
    if color_space == 'HSV':
        feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    elif color_space == 'LUV':
        feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
    elif color_space == 'HLS':
        feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    elif color_space == 'YUV':
        feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
    elif color_space == 'YCrCb':
        feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    else:
        feature_image = np.copy(img)
    # Use cv2.resize().ravel() to create the feature vector
    features = cv2.resize(feature_image, size).ravel()
    # Return the feature vector
    return features
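# A brief usage sketch (assumed, not from the original gist): extract spatially
# binned features from the cutout image loaded above. In the CarND pipeline
# these are typically concatenated with the color_hist() features to form a
# combined feature vector for the classifier.
spatial_features = bin_spatial(image, color_space='HSV', size=(32, 32))
print('Spatial feature vector length:', len(spatial_features))  # 32 * 32 * 3 = 3072
combined_features = np.concatenate((spatial_features, feature_vec))
print('Combined feature vector length:', len(combined_features))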
# Define a function to search for template matches
# and return a list of bounding boxes
def find_matches(img, template_list):
    # Define an empty list to take bbox coords
    bbox_list = []
    # Define the matching method
    # Other options include: cv2.TM_CCORR_NORMED, cv2.TM_CCOEFF, cv2.TM_CCORR,
    # cv2.TM_SQDIFF and cv2.TM_SQDIFF_NORMED
    method = cv2.TM_CCOEFF_NORMED
    # Iterate through the template list
    for temp in template_list:
        # Read in templates one by one
        tmp = mpimg.imread(temp)
        # Use cv2.matchTemplate() to search the image
        result = cv2.matchTemplate(img, tmp, method)
        # Use cv2.minMaxLoc() to extract the location of the best match
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
        # Determine a bounding box for the match
        w, h = (tmp.shape[1], tmp.shape[0])
        # For the SQDIFF methods the best match is the minimum; otherwise the maximum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        # Append the bbox position to the list
        bbox_list.append((top_left, bottom_right))
    # Return the list of bounding boxes
    return bbox_list
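# A brief usage sketch (assumed, not from the original gist): the template
# filenames are hypothetical cropped car images; matches are drawn back onto
# the search image with draw_boxes() defined above.
image = mpimg.imread('bbox-example-image.jpg')
templist = ['cutout1.jpg', 'cutout2.jpg']  # hypothetical template files
bboxes = find_matches(image, templist)
plt.imshow(draw_boxes(image, bboxes))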