Created
July 17, 2019 22:18
-
-
Save lee2sman/262d08518d03e69a1d7f94a108f48775 to your computer and use it in GitHub Desktop.
Current pipeline file for a self-driving car. The starting values near line 149 are still to be tested and altered.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math

# Create the OpenCV display window up front so frames can be shown later.
cv2.namedWindow("test")
def region_of_interest(img, vertices):
    """Black out everything in img outside the polygon(s) given by vertices.

    vertices is the polygon array format expected by cv2.fillPoly.
    Returns the masked image; img itself is not modified.
    """
    # All-black mask with the same shape/dtype as the input image.
    roi_mask = np.zeros_like(img)
    # Paint the region(s) of interest white so they survive the AND below.
    cv2.fillPoly(roi_mask, vertices, 255)
    # Keep only pixels where the mask is white; everything else becomes 0.
    return cv2.bitwise_and(img, roi_mask)
def draw_lines(img, lines, color=(255, 0, 0), thickness=3):
    """Overlay Hough line segments onto a copy of img and return the blend.

    img       -- camera frame as a numpy array (rows, cols, channels)
    lines     -- iterable of segments as returned by cv2.HoughLinesP, i.e.
                 each element is [[x1, y1, x2, y2]]; may be None
    color     -- line color tuple (default red in RGB ordering)
    thickness -- line thickness in pixels

    Returns the blended image, or None when lines is None (nothing detected).
    """
    if lines is None:  # nothing detected: caller gets None, as before
        return
    # Blank canvas the same size as the frame.
    # NOTE: shape[0] is the row count (height) and shape[1] the column
    # count (width) -- the original comments had these swapped.
    line_img = np.zeros(
        (
            img.shape[0],  # height (rows)
            img.shape[1],  # width (columns)
            3,             # three color channels
        ),
        dtype=np.uint8,
    )
    img = np.copy(img)  # don't draw on the caller's array
    for line in lines:
        for x1, y1, x2, y2 in line:
            # Draw each segment onto the blank overlay.
            cv2.line(line_img, (x1, y1), (x2, y2), color, thickness)
    # Blend: frame at 0.8 opacity plus the line overlay at full strength.
    img = cv2.addWeighted(img, 0.8, line_img, 1.0, 0.0)
    return img
# The pipeline function takes in a numpy array of dimensions: | |
# "height, width, color-space" | |
# and MUST return an image of the SAME dimensions | |
# | |
# The pipeline function also takes a motorq. To make the motors move | |
# add messages to the queue of the form: | |
# motorq.put( [ left-motor-speed , right-motor-speed ] ) | |
# i.e. motorq.put([32768,32768]) # make the motors go full-speed forward | |
def pipeline(image, motorq):
    """Detect lane lines in a camera frame and steer the motors toward them.

    image  -- numpy array of shape (height, width, channels); the return
              value MUST have the same dimensions
    motorq -- queue of motor commands; entries are
              [left-motor-speed, right-motor-speed]
              (e.g. motorq.put([32768, 32768]) is full-speed forward)

    Returns the input frame, with the detected left/right lane lines drawn
    on it when both were found.
    """
    height = image.shape[0]
    width = image.shape[1]
    # Triangular region of interest: the two bottom corners of the frame
    # up to its center (the road ahead).
    region_of_interest_vertices = [
        (0, height),
        (width / 2, height / 2),
        (width, height),
    ]
    gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    cannyed_image = cv2.Canny(gray_image, 100, 200)
    cropped_image = region_of_interest(
        cannyed_image,
        np.array([region_of_interest_vertices], np.int32),
    )
    lines = cv2.HoughLinesP(
        cropped_image,
        rho=6,
        theta=np.pi / 60,
        threshold=160,
        lines=np.array([]),
        minLineLength=40,
        maxLineGap=25,
    )
    # HoughLinesP returns None when no segments are found; check that
    # explicitly before np.any so we never iterate over None.
    if lines is None or not np.any(lines):
        motorq.put([0, 0])  # stop the motors if no lines detected
        return image        # exit early, nothing to draw
    left_line_x = []
    left_line_y = []
    right_line_x = []
    right_line_y = []
    slope_L = 0
    slope_R = 0
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                # Vertical segment: slope is undefined. Previously this
                # divided by zero (see the old TODO); skip it instead.
                continue
            slope = float(y2 - y1) / (x2 - x1)
            if math.fabs(slope) < 0.5:
                continue  # too shallow (near-horizontal): ignore
            if slope <= 0:
                # Negative slope extends left because y grows downward
                # (0 at the top of the frame, `height` at the bottom).
                left_line_x.extend([x1, x2])
                left_line_y.extend([y1, y2])
                slope_L = slope
            else:
                right_line_x.extend([x1, x2])
                right_line_y.extend([y1, y2])
                slope_R = slope
    if len(left_line_x) == 0 or len(right_line_x) == 0:
        # Need both a left and a right line to steer; return the frame
        # unannotated (motor command unchanged, as before).
        return image
    # Draw the fitted lane lines on top of the frame.
    min_y = int(image.shape[0] * (3 / 5))  # lines start 3/5 down the frame
    max_y = int(image.shape[0])            # ...and end at the bottom edge
    # Fit x = m*y + b for each side (x as a function of y, since the lane
    # lines are near-vertical in image space).
    poly_left = np.poly1d(np.polyfit(
        left_line_y,
        left_line_x,
        deg=1,
    ))
    left_x_start = int(poly_left(max_y))  # evaluate x = m*y + b at max_y
    left_x_end = int(poly_left(min_y))
    poly_right = np.poly1d(np.polyfit(
        right_line_y,
        right_line_x,
        deg=1,
    ))
    right_x_start = int(poly_right(max_y))
    right_x_end = int(poly_right(min_y))
    line_image = draw_lines(
        image,
        [[
            [left_x_start, max_y, left_x_end, min_y],
            [right_x_start, max_y, right_x_end, min_y],
        ]],
        thickness=5,
    )
    # Steering: the two slopes cancel when the car is centered; any
    # residual biases one motor over the other. Starting values here
    # (5000 base speed, 1000 gain) are still to be tuned.
    direction = slope_L + slope_R
    print(direction)
    motorq.put([5000 + 1000 * direction, 5000 - 1000 * direction])
    return line_image
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment