An image processing script for measurements
#!/usr/bin/env python
#title          :measure.py
#description    :An image processing script for measurements
#author         :Oles Andrienko, Adrian Rosebrock
#date           :20161110
#version        :0.1
#usage          :python measure.py -l <reference_length>
#python_version :2.7.12
#==============================================================================
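#example        :python measure.py -l 50.0   (illustrative: top-most object is 50 mm long)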
# OpenCV image processing library
import cv2
# utility libraries for abstraction
from scipy.spatial import distance
import numpy as np
from imutils import perspective
from imutils import contours
import imutils
# Raspberry Pi camera interface module
# with NumPy array sub-module
from picamera import PiCamera
from picamera.array import PiRGBArray
# other general imports
import argparse
import time
# command line interface for script
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--length", type=float, required=True,
    help="length of the top-most object in the image in millimeters")
args = vars(ap.parse_args())
# initialize video capture with camera, and loop through frames
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
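# PiRGBArray is a reusable capture buffer; it must be truncated after each
# frame (see rawCapture.truncate(0) below) or capture_continuous will fail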
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # read frame from camera object as numpy array
    image = frame.array
    # crop the frame to the region of interest to filter out background noise
    x,y = 235,140
    w,h = 235,275
    #mask = np.zeros(image.shape, np.uint8)
    #mask[y:y+h, x:x+w] = image[y:y+h, x:x+w]
    image = image[y:y+h, x:x+w]
    # convert the image to grayscale and blur it to remove Gaussian noise
    # a 3x3 kernel is used for blurring with default standard deviations
    # adjust the kernel size to improve edge detection (a larger kernel results in more blur)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (3, 3), 0)
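    # note: passing 0 as sigmaX lets OpenCV derive the standard deviation from the kernel size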
    # perform edge detection using the multi-stage Canny algorithm
    # then dilate and erode the edge boundaries to close gaps between edges
    # the Canny edge detection algorithm requires two threshold parameters
    # thresholding must be applied before contour detection to remove noise
    # larger thresholds keep only the stronger edges
    #ret,thresh = cv2.threshold(gray,127,255,0)
    edged = cv2.Canny(gray, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)
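    # passing None as the structuring element makes dilate/erode use a default 3x3 rectangular kernel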
    # identify all contour points by feeding in the required edged image
    # prevent redundant contour points with the 'CHAIN_APPROX_SIMPLE' flag
    # drop any nested object contours with the 'RETR_EXTERNAL' flag
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
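    # findContours returns (contours, hierarchy) in OpenCV 2 but
    # (image, contours, hierarchy) in OpenCV 3, so pick the right element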
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    # sort the contours from top-to-bottom
    # initialize pixels-per-metric calibration variable
    (cnts, _) = contours.sort_contours(cnts, 'top-to-bottom')
    pixelsPerMetric = None
    orig = image.copy()
    # loop through each contour and perform measurements
    for c in cnts[1:]:
        # check the area of the region the contour encloses
        # apply a threshold and ignore the contour if it is insignificant
        # FIX: this should be adjusted, could be larger
        if cv2.contourArea(c) < 100:
            continue
        # compute the rotated bounding box coordinates of the contour
        # we form a numpy array from the points to pass to the imutils library
        box = cv2.minAreaRect(c)
        box = cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        # order the points in the contour such that they appear
        # in top-left, top-right, bottom-right, and bottom-left
        # order, then draw the rotated bounding box
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
        # unpack the bounding box corner points then draw them on the original image
        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
        # unpack the bounding box coordinates
        # then compute the Euclidean distance between the top-right and bottom-right points
        (tL, tR, bR, bL) = box
        length = distance.euclidean(tR, bR)
        # initialize pixels-per-metric as a ratio of the supplied reference length
        if pixelsPerMetric is None:
            pixelsPerMetric = length / args["length"]
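            # e.g. if the reference object spans 100 px and was given as 50 mm,
            # pixelsPerMetric = 2 px/mm, so a 150 px object measures 75 mm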
        # compute the size of the object in millimeters
        length = length / pixelsPerMetric
        # draw the object size on the image next to the top-right corner
        (trX, trY) = tR
        cv2.putText(orig, "{:.1f}mm".format(length),
            (int(trX + 15), int(trY - 15)), cv2.FONT_HERSHEY_SIMPLEX,
            0.65, (255, 255, 255), 2)
    # show the output image
    cv2.imshow("Orig Image", orig)
    #cv2.imshow("Gray Image", gray)
    #cv2.imshow("Thresh Image", thresh)
    #cv2.imshow("Edged Image", edged)
    # clear the image stream in preparation for the next frame
    rawCapture.truncate(0)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# cleanup the camera and close any open windows
camera.close()
cv2.destroyAllWindows()
Hello Sir, very nice. Can I ask if this script can be used to measure objects in real time?