OpenCV Python: SURF + FLANN image matching against a pickled feature index
import cv2
import pathlib
import datetime
import pickle

import numpy as np
# SURF lives in the opencv-contrib xfeatures2d module in OpenCV 3.x/4.x
# (it is patented, so the build must enable the non-free algorithms).
detector = cv2.xfeatures2d.SURF_create(hessianThreshold=200)
norm = cv2.NORM_L2
FLANN_INDEX_KDTREE = 1
MIN_MATCH_PERCENTAGE = 80
flann_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
matcher = cv2.FlannBasedMatcher(flann_params, {})

keypoints = []
files = []
descriptors = []
def train_image(folder):
    """Detect SURF keypoints and descriptors for every .jpg in `folder`."""
    folder = pathlib.Path(folder)
    for file in folder.iterdir():
        if file.suffix == '.jpg':
            img = cv2.imread(str(file), cv2.IMREAD_GRAYSCALE)
            files.append(str(file))
            kp, desc = detector.detectAndCompute(img, None)
            descriptors.append(desc)
            keypoints.append(kp)
def query_image(file):
    """Match a query image against the trained descriptor index."""
    img = cv2.imread(str(file), cv2.IMREAD_GRAYSCALE)
    kp, desc = detector.detectAndCompute(img, None)
    matches = matcher.knnMatch(desc, k=2)

    # Lowe-style ratio test, bucketing good matches per training image.
    best_img = {}    # imgIdx -> number of good matches
    best_match = {}  # imgIdx -> list of good DMatch objects
    for m, n in matches:
        if m.distance < 0.9 * n.distance:
            if m.imgIdx not in best_match:
                best_match[m.imgIdx] = []
                best_img[m.imgIdx] = 0
            best_match[m.imgIdx].append(m)
            best_img[m.imgIdx] += 1

    # Pick the training image with the most good matches (not the highest index).
    best_match_image_index = max(best_img, key=best_img.get)

    MIN_MATCH_COUNT = (len(keypoints[best_match_image_index]) / 100.0) * MIN_MATCH_PERCENTAGE
    print("Min Match: %s" % MIN_MATCH_COUNT)

    if len(best_match[best_match_image_index]) > MIN_MATCH_COUNT:
        # trainIdx indexes the stored keypoints of the matched training image;
        # queryIdx indexes the keypoints of the query image (kp).
        src_pts = np.float32([keypoints[best_match_image_index][m.trainIdx].pt
                              for m in best_match[best_match_image_index]]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp[m.queryIdx].pt
                              for m in best_match[best_match_image_index]]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        # Share of good matches kept as RANSAC inliers.
        ransac_percentage = (int(mask.sum()) / len(dst_pts)) * 100
        if ransac_percentage > 65:
            print("Ransac Percentage: [%s]" % ransac_percentage)
            print("Best image match with ID: [%s] and name: [%s]" % (best_match_image_index, files[best_match_image_index]))
        else:
            print("Image not matched")
            print("Best image match with ID: [%s] and name: [%s]" % (best_match_image_index, files[best_match_image_index]))
    else:
        print("Not enough good matches: [%s] (needed %s)" % (len(best_match[best_match_image_index]), MIN_MATCH_COUNT))
def save_keypoints(keypts):
    # cv2.KeyPoint objects are not picklable, so store their fields as plain tuples.
    store_keypoints = []
    for keypoint in keypts:
        temp = []
        for point in keypoint:
            temp.append((point.pt, point.size, point.angle, point.response, point.octave, point.class_id))
        store_keypoints.append(temp)
    with open('keypoints', 'wb') as afile:
        pickle.dump(store_keypoints, afile)
def load_keypoints():
    keypoints = []
    with open('keypoints', 'rb') as afile:
        keypts = pickle.load(afile)
    for keypoint in keypts:
        temp = []
        for point in keypoint:
            # Rebuild cv2.KeyPoint from the stored tuple; positional arguments
            # are (x, y, size, angle, response, octave, class_id).
            temp.append(cv2.KeyPoint(point[0][0], point[0][1], point[1],
                                     point[2], point[3], point[4], point[5]))
        keypoints.append(temp)
    return keypoints
def save_desc(desc):
    with open('descriptors', 'wb') as afile:
        pickle.dump(desc, afile)

def load_desc():
    with open('descriptors', 'rb') as afile:
        return pickle.load(afile)

def save_filenames(files):
    with open('filenames', 'wb') as afile:
        pickle.dump(files, afile)

def load_filenames():
    with open('filenames', 'rb') as afile:
        return pickle.load(afile)
def train():
    """Build the keypoint/descriptor index from disk and pickle it."""
    train_image('captchaStore')
    save_keypoints(keypoints)
    save_desc(descriptors)
    save_filenames(files)

def load():
    """Restore the previously pickled keypoints, descriptors and filenames."""
    global keypoints
    global descriptors
    global files
    keypoints = load_keypoints()
    descriptors = load_desc()
    files = load_filenames()
start = datetime.datetime.now()
load()
# total_seconds() gives the full elapsed time; .microseconds alone would only
# report the sub-second component.
print("Load time: [%s] ms" % ((datetime.datetime.now() - start).total_seconds() * 1000))

training = datetime.datetime.now()
matcher.add(descriptors)
matcher.train()
trained_desc = matcher.getTrainDescriptors()
print("Train time: [%s] ms" % ((datetime.datetime.now() - training).total_seconds() * 1000))

query = datetime.datetime.now()
query_image('6.jpg')
print("Query time: [%s] ms" % ((datetime.datetime.now() - query).total_seconds() * 1000))
print("Total time: [%s] ms" % ((datetime.datetime.now() - start).total_seconds() * 1000))