#!/usr/bin/env python3
import sys, serial, struct
import cv2
import numpy as np
import time
import os
from time import sleep

move = True
port = '/dev/ttyACM0'
sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
                   xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
sp.setDTR(True)  # dsrdtr is ignored on Windows.
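# The camera board on /dev/ttyACM0 replies to a "snap" command with a 4-byte
# little-endian JPEG length followed by that many bytes of JPEG data (see capture()).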
def capture(f):
    """Request one frame over serial, write it to f, and return the decoded image."""
    sp.write(b"snap")
    sp.flush()
    # First 4 bytes give the JPEG size, little-endian
    size = struct.unpack('<L', sp.read(4))[0]
    img = sp.read(size)
    f.write(img)
    f.flush()
    image = cv2.imread("img.jpg")
    # The file is sometimes not decodable yet; rewrite and retry until it is
    while image is None:
        f.write(img)
        f.flush()
        image = cv2.imread("img.jpg")
    return image
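# Warm-up: capture a batch of frames and keep the last one as the static background.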
background = 0
print("Learning background")
with open("img.jpg", "wb") as f:
    for i in range(30):
        background = capture(f)
background = np.flip(background, axis=1)
print("Done. Place cube") | |
#keep track of object centers. Cube will have the highest count for its center | |
#this is to account for false objects in the camera that happen occasionally. | |
obj_centers = {} | |
count = 0 | |
true_center = (104, 106.5) | |
center_coords = (0,0) | |
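# Main loop: grab a frame, mask out the red regions, find the cube's outline in the
# mask, and once its center has been seen often enough, drive the arm over it and grab it.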
while True:
    with open("img.jpg", "wb") as f:
        image = capture(f)
    image = np.flip(image, axis=1)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Range for lower red hues
    lower_red = np.array([0, 120, 70])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)
    # Range for upper red hues
    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)
    mask1 = mask1 + mask2
    # Inverted mask that selects everything that is not red
    mask2 = cv2.bitwise_not(mask1)
    # Keep the non-red part of the current frame
    res1 = cv2.bitwise_and(image, image, mask=mask2)
    # Take the static background pixels for the masked (red) region
    res2 = cv2.bitwise_and(background, background, mask=mask1)
    # Generate the combined output frame
    final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
    # The inverted mask (mask2) is what we analyse for the cube
    img = cv2.pyrDown(mask2)
    ret, threshed_img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    # Find the contours (OpenCV 3.x returns (image, contours, hier) instead)
    contours, hier = cv2.findContours(threshed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # For each contour, draw its bounding rect in green (small ones only)
    # and its min-area rect in red
    for c in contours:
        # Get the bounding rect
        x, y, w, h = cv2.boundingRect(c)
        # Draw a green rectangle to visualize small bounding rects
        if w * h < 1000:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Get the min-area rect
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        # Convert all floating point coordinates to int
        box = np.int0(box)
        # Draw a red rotated rectangle
        cv2.drawContours(img, [box], 0, (0, 0, 255))
    # Center of the last bounding rect found in this frame
    x_cent = x + w / 2
    y_cent = y + h / 2
    if count < 50:
        print(count)
        # Tally how often this center has been seen
        temp_pair = (x_cent, y_cent)
        if temp_pair in obj_centers:
            obj_centers[temp_pair] += 1
        else:
            obj_centers[temp_pair] = 1
        count += 1
    else:
        print("found box center")
        # The most frequently seen center is taken to be the cube's center
        center_coords = max(obj_centers, key=lambda key: obj_centers[key])
        print("center: (" + str(center_coords[0]) + "," + str(center_coords[1]) + ")")
        # Move the robot over the cube: convert the pixel offset from the
        # reference center into an arm move (scaled by 1.25), then send the
        # G-code to the arm's serial port through the shell.
        dx = (center_coords[0] - true_center[0]) * 1.25
        dy = (center_coords[1] - true_center[1]) * 1.25
        moves = (-dy, dx)
        movxy = "echo \"G2204 X" + str(moves[0]) + " Y" + str(moves[1]) + " Z0 F1000\\n\" > /dev/ttyACM1"
        movdwn = "echo \"G2204 X0 Y0 Z-200 F1000\\n\" > /dev/ttyACM1"
        claw_close = "echo \"M2232 V1\" > /dev/ttyACM1"
        movup = "echo \"G2204 X0 Y0 Z200 F1000\\n\" > /dev/ttyACM1"
        print(movxy)
        if move:
            os.system(movxy)       # move over the cube
            os.system(movdwn)      # lower the arm
            os.system(claw_close)  # close the claw
            sleep(3)
            os.system(movup)       # lift back up
        break
#print("cv2 read image") | |
cv2.imshow('image',img) | |
#print("cv2 showed image") | |
k = cv2.waitKey(1) | |
#print("waited for key") | |
if k == 27: | |
sp.close() | |
break; | |