Created
July 17, 2019 20:25
-
-
Save lee2sman/2293d3023e7e99d2dd5015c5e98eb3f7 to your computer and use it in GitHub Desktop.
Grab the camera feed frame by frame, process each frame with Canny edge detection and Hough line detection, then draw best-fit lane lines on it. No driving logic.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import matplotlib.pyplot as plt | |
import matplotlib.image as mpimg | |
import numpy as np | |
import cv2 | |
import math | |
# ARE YOU CURIOUS HOW THIS ANSWER DIFFERS FROM THE CODE PROVIDED? | |
# SCROLL DOWN.... | |
def region_of_interest(img, vertices):
    """Black out everything outside the polygon given by *vertices*.

    img: single-channel image (the Canny edge map in this pipeline).
    vertices: int32 polygon corner array in the shape cv2.fillPoly expects.
    Returns a copy of *img* with all pixels outside the polygon set to 0.
    """
    roi_mask = np.zeros_like(img)
    keep_value = 255  # white fill marks the region we keep
    cv2.fillPoly(roi_mask, vertices, keep_value)
    return cv2.bitwise_and(img, roi_mask)
def draw_lines(img, lines, color=(255, 0, 0), thickness=3):
    """Overlay line segments on a copy of *img* and return the result.

    img: camera frame (3-channel).
    lines: iterable of [[x1, y1, x2, y2]] segments (cv2.HoughLinesP format),
        or None when nothing was detected.
    color, thickness: passed straight to cv2.line.

    Returns a new image; *img* itself is never modified. Bug fix: when
    *lines* is None this now returns the unmodified frame instead of None,
    which previously crashed downstream consumers (e.g. cv2.imshow).
    """
    img = np.copy(img)  # never draw on the caller's array
    if lines is None:
        return img  # no detections: hand back the frame unchanged, not None
    # Blank canvas the same size as the frame. Note shape[0] is the image
    # HEIGHT and shape[1] the WIDTH (the original comments had them swapped).
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(line_img, (x1, y1), (x2, y2), color, thickness)
    # Blend: frame at 0.8 opacity plus the drawn segments at full strength.
    return cv2.addWeighted(img, 0.8, line_img, 1.0, 0.0)
def pipeline(image): | |
""" | |
An image processing pipeline which will output | |
an image with the lane lines annotated. | |
""" | |
height = image.shape[0] | |
width = image.shape[1] | |
region_of_interest_vertices = [ | |
(0, height), | |
(width / 2, height / 2), | |
(width, height), | |
] | |
gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY) | |
cannyed_image = cv2.Canny(gray_image, 100, 200) | |
cropped_image = region_of_interest( | |
cannyed_image, | |
np.array( | |
[region_of_interest_vertices], | |
np.int32 | |
), | |
) | |
lines = cv2.HoughLinesP( | |
cropped_image, | |
rho=6, | |
theta=np.pi / 60, | |
threshold=160, | |
lines=np.array([]), | |
minLineLength=40, | |
maxLineGap=25 | |
) | |
left_line_x = [] | |
left_line_y = [] | |
right_line_x = [] | |
right_line_y = [] | |
if not np.any(lines): | |
return image | |
for line in lines: | |
for x1, y1, x2, y2 in line: | |
slope = float(y2 - y1) / (x2 - x1) #TODO: instead of /0 error, if x2 - x1 == 0, set slope to a high number | |
if math.fabs(slope) < 0.5: | |
continue #if not steep enough (horiz line), then ignore the line | |
if slope <= 0: #if less than 0 extend left because our y coordinate system at top is 0, bottom is full height | |
left_line_x.extend([x1, x2]) | |
left_line_y.extend([y1, y2]) | |
else: | |
right_line_x.extend([x1, x2]) | |
right_line_y.extend([y1, y2]) | |
if len(left_line_x)==0 or len(right_line_x)==0: | |
return image #if we don't have coordinates, just return an image without lines on it | |
#this next section draws the lines on top of image | |
min_y = int(image.shape[0] * (3 / 5)) #min y for the lines (we count down from top) is 3/5 of the page | |
max_y = int(image.shape[0]) #max y is at the bottom of the screen | |
#create an algebraic function to represent the left line we are creating (y = mx + b), actually (x = my + b) | |
poly_left = np.poly1d(np.polyfit( | |
left_line_y, | |
left_line_x, | |
deg=1 | |
)) | |
left_x_start = int(poly_left(max_y)) #this is the function x = my + b. input max_y, get out a left_x_start | |
left_x_end = int(poly_left(min_y)) | |
poly_right = np.poly1d(np.polyfit( | |
right_line_y, | |
right_line_x, | |
deg=1 | |
)) | |
right_x_start = int(poly_right(max_y)) | |
right_x_end = int(poly_right(min_y)) | |
line_image = draw_lines( | |
image, | |
[[ | |
[left_x_start, max_y, left_x_end, min_y], | |
[right_x_start, max_y, right_x_end, min_y], | |
]], | |
thickness=5, | |
) | |
return line_image | |
# THE CHANGE FROM THE PROVIDED CODE, STARTS HERE... | |
# 1. Copy in the code from "laptopwebcam.py" to "Problem3Solution.py" | |
# 2. We don't need to "import cv2" again, so we can delete it. | |
# 3. Call "pipeline" with the video frame | |
# --- Webcam capture loop: ESC quits, SPACE saves the current frame ---
cam = cv2.VideoCapture(0)
cv2.namedWindow("test")
img_counter = 0
while True:
    ret, frame = cam.read()
    if not ret:
        # Bug fix: bail out BEFORE using the frame. On a failed read
        # `frame` is None, and the original called pipeline()/imshow()
        # on it first, crashing instead of exiting cleanly.
        break
    frame = pipeline(frame)  # annotate the frame with lane lines
    cv2.imshow("test", frame)
    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed: save the annotated frame to disk
        img_name = "opencv_frame_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1
cam.release()
cv2.destroyAllWindows()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment