# coding=UTF-8
import cv2
import numpy as np
from matplotlib import pyplot as plt

img1 = cv2.imread('hp-label.PNG', 0)  # queryImage (the label we look for)
img2 = cv2.imread('input.PNG', 0)     # trainImage (the scene we search in)

orb = cv2.ORB_create(edgeThreshold=4, patchSize=16)  # ORB detector
# http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_fast/py_fast.html
# Instead of feature detectors one could also try template matching: http://robocraft.ru/blog/computervision/3046.html

# detect keypoints
kp1 = orb.detect(img1, None)
kp2 = orb.detect(img2, None)

# compute compact descriptors for the detected keypoints
kp1, des1 = orb.compute(img1, kp1)
kp2, des2 = orb.compute(img2, kp2)

# draw the keypoints and save them for inspection
img3 = cv2.drawKeypoints(img1, kp1, None, color=(255, 0, 0))
img4 = cv2.drawKeypoints(img2, kp2, None, color=(255, 0, 0))
cv2.imwrite('tmp_hp-label.png', img3)
cv2.imwrite('tmp_input.png', img4)

# brute-force matcher with Hamming distance (ORB descriptors are binary) and cross-checking
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1, des2)
# Sort them in the order of their distance.
matches = sorted(matches, key=lambda x: x.distance)

# draw the matches and save them
img5 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)
cv2.imwrite('tmp_input_vs_label.png', img5)

# locate the object in the scene
# (the original loop appended to the same list it iterated over and compared integer
# Hamming distances against 0.7, so effectively every match was kept; cross-checking
# already prunes weak matches, so we simply keep them all)
good = list(matches)
# Crude selection - but we will not need it anyway, blob detection will be used instead:
# http://www.learnopencv.com/blob-detection-using-opencv-python-c/ (see the sketch right below)
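# A minimal sketch of the blob-detection alternative mentioned above, assuming the
# OpenCV 3.x cv2.SimpleBlobDetector_create API with default parameters; the output
# file name 'tmp_blobs.png' is made up for illustration.
blob_detector = cv2.SimpleBlobDetector_create()
blobs = blob_detector.detect(img2)
img_blobs = cv2.drawKeypoints(img2, blobs, None, (0, 0, 255),
                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('tmp_blobs.png', img_blobs)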
MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # estimate the homography between the label and the scene with RANSAC
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    # project the label corners into the scene and outline the detected object
    h, w = img1.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
                       singlePointColor=None,
                       matchesMask=matchesMask,  # draw only inliers
                       flags=2)
    img6 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
    cv2.imwrite('tmp_selection_result.png', img6)
    plt.imshow(cv2.cvtColor(img6, cv2.COLOR_BGR2RGB))
    plt.show()
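# The original gist has no fallback when too few matches are found; this `else`
# branch is an assumed addition mirroring the OpenCV feature-homography tutorial.
else:
    print('Not enough matches found: %d/%d' % (len(good), MIN_MATCH_COUNT))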