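# Document scanner sketch: OpenCV.js (cv) plus the Utils helper, presumably the
# one from the OpenCV.js tutorials (source of loadImageToCanvas/loadOpenCv).
# Pipeline: Canny edge detection -> contour detection -> corner ordering ->
# four-point perspective warp -> adaptive threshold. The host page is assumed
# to provide canvases with ids canvasInput, cannyOutput, contourOutput and
# warpedOutput.
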
nj = require 'numjs'

u = new Utils()
u.loadImageToCanvas("./img1.jpg","canvasInput")
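
# Step 1: grayscale the input canvas and run Canny (thresholds 50/100) to
# produce an edge map on the cannyOutput canvas.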
cannyEdgeDetection = () =>
  src = cv.imread('canvasInput')
  dst = new cv.Mat()
  cv.cvtColor(src, src, cv.COLOR_RGB2GRAY, 0)
  cv.Canny(src, dst, 50, 100, 3, false)
  cv.imshow('cannyOutput', dst)
  src.delete(); dst.delete()
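
# Step 2: read the edge map back, threshold it, and find contours. Any contour
# whose polygonal approximation covers more than 1000 px^2 is drawn to
# contourOutput; the last such approximation is returned as the candidate
# document outline (screenCnt), or null if none is found.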
contourDetection = () =>
  src = cv.imread('cannyOutput')
  dst = cv.Mat.zeros(src.rows, src.cols, cv.CV_8UC3) # Note had to switch rows/cols order here
  cv.cvtColor(src, src, cv.COLOR_RGBA2GRAY, 0)
  cv.threshold(src, src, 120, 200, cv.THRESH_BINARY)
  contours = new cv.MatVector()
  hierarchy = new cv.Mat()
  cv.findContours(src, contours, hierarchy, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
  # Todo sort? (one possible approach is sketched in findLargestQuad below)
  screenCnt = null
  for i in [0..contours.size()-1]
    c = contours.get(i)
    perimeter = cv.arcLength(c, true)
    approx = new cv.Mat()
    cv.approxPolyDP(c, approx, 0.02 * perimeter, true)
    if (cv.contourArea(approx) > 1000)
      screenCnt = approx
      matVec = new cv.MatVector()
      matVec.push_back(approx)
      cv.drawContours(dst, matVec, -1, new cv.Scalar(0,255,0), 2)
  cv.imshow('contourOutput', dst)
  src.delete(); dst.delete(); contours.delete(); hierarchy.delete()
  return screenCnt
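
# Addressing the "Todo sort?" note above: the classic scanner recipe keeps the
# largest 4-point approximation rather than the last big one. A minimal sketch,
# assuming the same OpenCV.js calls used above; findLargestQuad is not part of
# the original gist and is not wired into the pipeline below.
findLargestQuad = (contours) ->
  best = null
  bestArea = 0
  for i in [0...contours.size()]
    c = contours.get(i)
    approx = new cv.Mat()
    cv.approxPolyDP(c, approx, 0.02 * cv.arcLength(c, true), true)
    area = cv.contourArea(approx)
    if approx.rows is 4 and area > bestArea
      best.delete() if best?
      best = approx
      bestArea = area
    else
      approx.delete()
  best

# Step 3: take the 4-point contour (CV_32SC2 data in pts.data32S) and label its
# corners tl/tr/br/bl using the coordinate sums and differences, as in the
# classic four-point-transform recipe.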
orderPoints = (pts) =>
  # initialize a list of coordinates that will be ordered
  # such that the first entry in the list is the top-left,
  # the second entry is the top-right, the third is the
  # bottom-right, and the fourth is the bottom-left
  pts = pts.data32S
  pts = [
    [pts[0], pts[1]],
    [pts[2], pts[3]],
    [pts[4], pts[5]],
    [pts[6], pts[7]]
  ]
  rect = {}
  # the top-left point will have the smallest sum, whereas
  # the bottom-right point will have the largest sum
  # (sort copies so the two orderings don't clobber each other)
  sum = pts.slice().sort (a, b) ->
    (a[0] + a[1]) - (b[0] + b[1])
  rect.tl = sum[0]
  rect.br = sum[3]
  # now, compute the difference (y - x) between the points; the
  # top-right point will have the smallest difference,
  # whereas the bottom-left will have the largest difference
  diff = pts.slice().sort (a, b) ->
    (a[1] - a[0]) - (b[1] - b[0])
  rect.tr = diff[0]
  rect.bl = diff[3]
  # return the ordered coordinates
  return rect
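
# Step 4: warp the original image so the ordered corners map onto an upright
# maxWidth x maxHeight rectangle, then adaptively threshold the result to give
# a scanned-document look on the warpedOutput canvas.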
fourPointTransform = (rect) =>
  src = cv.imread('canvasInput')
  # obtain a consistent order of the points and unpack them
  # individually
  tl = rect.tl
  tr = rect.tr
  br = rect.br
  bl = rect.bl
  # compute the width of the new image, which will be the
  # maximum distance between bottom-right and bottom-left
  # x-coordinates or the top-right and top-left x-coordinates
  widthA = Math.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
  widthB = Math.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
  maxWidth = Math.max(Math.floor(widthA), Math.floor(widthB))
  # compute the height of the new image, which will be the
  # maximum distance between the top-right and bottom-right
  # y-coordinates or the top-left and bottom-left y-coordinates
  heightA = Math.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
  heightB = Math.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
  maxHeight = Math.max(Math.floor(heightA), Math.floor(heightB))
  # now that we have the dimensions of the new image, construct
  # the set of destination points to obtain a "birds eye view",
  # (i.e. top-down view) of the image, again specifying points
  # in the top-left, top-right, bottom-right, and bottom-left
  # order
  dst = cv.matFromArray(4, 1, cv.CV_32FC2, [
    0, 0,
    maxWidth - 1, 0,
    maxWidth - 1, maxHeight - 1,
    0, maxHeight - 1,
  ])
  # compute the perspective transform matrix and then apply it
  # https://docs.opencv.org/3.3.1/dd/d52/tutorial_js_geometric_transformations.html
  rect = cv.matFromArray(4, 1, cv.CV_32FC2, [
    rect.tl[0], rect.tl[1],
    rect.tr[0], rect.tr[1],
    rect.br[0], rect.br[1],
    rect.bl[0], rect.bl[1],
  ])
  M = cv.getPerspectiveTransform(rect, dst)
  dsize = new cv.Size(maxWidth, maxHeight)
  warped = new cv.Mat()
  cv.warpPerspective(src, warped, M, dsize, cv.INTER_LINEAR, cv.BORDER_CONSTANT, new cv.Scalar())
  cv.cvtColor(warped, warped, cv.COLOR_RGBA2GRAY, 0) # canvas pixels are RGBA, not BGR
  output = new cv.Mat()
  # https://docs.opencv.org/3.3.1/d7/dd0/tutorial_js_thresholding.html
  cv.adaptiveThreshold(warped, output, 200, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 3, 2)
  # warped = (warped > T).astype("uint8") * 255
  # for i in [0..warped.data32S.length-1]
  #   greater = (warped.data32S[i] > T.data32S[i])
  #   warped.data32S[i] = (0 + (greater ? 255 * 255 * 255 * 255 : 0))
  # show the warped, thresholded image
  cv.imshow('warpedOutput', output)
  # clean up the Mats allocated in this function
  src.delete(); warped.delete(); output.delete(); M.delete(); dst.delete(); rect.delete()
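
# Entry point: once OpenCV.js has loaded, run the pipeline end to end.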
u.loadOpenCv =>
  cannyEdgeDetection()
  screenCnt = contourDetection()
  return unless screenCnt? # no document-sized contour was found
  rect = orderPoints(screenCnt)
  fourPointTransform(rect)
  screenCnt.delete()