Detect and estimate the pose of some objects with SURF keypoints
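The program crops a fixed region of interest from the train image, detects SURF keypoints in both the ROI and the test image, matches their descriptors with FLANN, keeps only matches close to the minimum descriptor distance, then estimates a homography with RANSAC to draw the detected object's outline in the scene and report its in-plane orientation.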
CMakeLists.txt
cmake_minimum_required(VERSION 2.8)
project( detection_pose_estimation_surf )
find_package( OpenCV REQUIRED )
add_executable( detection_pose_estimation_surf detection_pose_estimation_surf.cpp )
target_link_libraries( detection_pose_estimation_surf ${OpenCV_LIBS} )
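Note: SURF lives in the xfeatures2d module, so this needs an OpenCV build that includes the opencv_contrib modules; with a stock build the opencv2/xfeatures2d.hpp include below will fail to compile.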
detection_pose_estimation_surf.cpp
#include <stdio.h>
#include <iostream>
#include <cmath>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
int main( int argc, char** argv )
{
  if( argc != 3 )
  {
    std::cout << " Usage: ./detection_pose_estimation_surf <imgtrain> <imgtest>" << std::endl;
    return -1;
  }
  Mat img_object = imread( argv[1], IMREAD_GRAYSCALE );
  Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE );
  if( img_object.empty() || img_scene.empty() )
  { std::cout << " --(!) Error reading images " << std::endl; return -1; }
  //-- Extract only the center of train image as ROI
  Mat roi_object(img_object, Rect (192, 112, 255, 255)); //Rect (192, 112, 448, 368));
  //-- Detect SURF keypoints on ROI and test image
  int minHessian = 800;
  Ptr<SURF> detector = SURF::create( minHessian );
  detector->setUpright(false);
  detector->setExtended(true);
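  // A Hessian threshold of 800 keeps only strong blob-like keypoints;
  // setExtended(true) switches to 128-element descriptors and
  // setUpright(false) computes a dominant orientation per keypoint,
  // making the matching rotation-invariant.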
  std::vector<KeyPoint> keypoints_object, keypoints_scene;
  Mat descriptors_object, descriptors_scene;
  detector->detectAndCompute( roi_object, Mat(), keypoints_object, descriptors_object );
  detector->detectAndCompute( img_scene, Mat(), keypoints_scene, descriptors_scene );
  //-- FLANN matcher to identify the pair of descriptors
  FlannBasedMatcher matcher;
  std::vector< DMatch > matches;
  matcher.match( descriptors_object, descriptors_scene, matches );
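  // FLANN performs approximate nearest-neighbour search, which suits
  // SURF's floating-point descriptors; match() returns the single best
  // scene candidate for each object descriptor.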
  double max_dist = 0; double min_dist = 100;
  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_object.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }
  //-- Keep only the "good" matches, i.e. those whose distance is less than 3*min_dist
  std::vector< DMatch > good_matches;
  for( int i = 0; i < descriptors_object.rows; i++ )
  { if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i] ); }
  }
  Mat img_matches;
  drawMatches( roi_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
  //-- Localize the object
  std::vector<Point2f> obj;
  std::vector<Point2f> scene;
  for( size_t i = 0; i < good_matches.size(); i++ )
  {
    //-- store good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
  }
  Mat mask;
  Mat H = findHomography( obj, scene, RANSAC, 3.0, mask );
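  // RANSAC estimates the 3x3 planar homography while rejecting point pairs
  // whose reprojection error exceeds 3.0 pixels; mask flags the inliers
  // (it is computed here but not used further).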
  //-- Corners of the object image ROI
  std::vector<Point2f> obj_corners(4);
  obj_corners[0] = Point2f( 0, 0 );
  obj_corners[1] = Point2f( roi_object.cols, 0 );
  obj_corners[2] = Point2f( roi_object.cols, roi_object.rows );
  obj_corners[3] = Point2f( 0, roi_object.rows );
  //-- Get the center and top of image for computing a direction vector
  std::vector<Point2f> obj_orien(2);
  obj_orien[0] = Point2f( 128, 128 );
  obj_orien[1] = Point2f( 128, 0 );
  std::vector<Point2f> scene_corners(4);
  std::vector<Point2f> scene_orien(2);
  perspectiveTransform( obj_corners, scene_corners, H );
  perspectiveTransform( obj_orien, scene_orien, H );
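  // drawMatches places the scene image to the right of the object image,
  // so the scene coordinates are shifted by roi_object.cols before drawing.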
  //-- Contour lines of the detected object
  line( img_matches, scene_corners[0] + Point2f( roi_object.cols, 0), scene_corners[1] + Point2f( roi_object.cols, 0), Scalar( 0, 0, 255), 4 );
  line( img_matches, scene_corners[1] + Point2f( roi_object.cols, 0), scene_corners[2] + Point2f( roi_object.cols, 0), Scalar( 0, 0, 255), 4 );
  line( img_matches, scene_corners[2] + Point2f( roi_object.cols, 0), scene_corners[3] + Point2f( roi_object.cols, 0), Scalar( 0, 0, 255), 4 );
  line( img_matches, scene_corners[3] + Point2f( roi_object.cols, 0), scene_corners[0] + Point2f( roi_object.cols, 0), Scalar( 0, 0, 255), 4 );
  //-- Orientation vector
  line( img_matches, scene_orien[0] + Point2f( roi_object.cols, 0), scene_orien[1] + Point2f( roi_object.cols, 0), Scalar(255, 0, 0), 4 );
  Vec2f u(obj_orien[1].x-obj_orien[0].x, obj_orien[1].y-obj_orien[0].y), v(scene_orien[1].x-scene_orien[0].x, scene_orien[1].y-scene_orien[0].y);
  float a = acos( u.dot(v)/(norm(u)*norm(v)) )*180./CV_PI;
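  // Note: acos() only yields the unsigned angle between u and v, in
  // [0, 180] degrees; a signed rotation could be recovered instead with e.g.
  //   float a_signed = (atan2(v[1], v[0]) - atan2(u[1], u[0]))*180./CV_PI;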
  printf( "orientation: %g - recognition: %g (%d/%d points)\n", a,
          float(good_matches.size())/matches.size(), int(good_matches.size()),
          int(matches.size()) );
  //-- Show detected matches
  imshow( "Good Matches & Object detection", img_matches );
  waitKey(0);
  // imwrite("detection_pose_estimation_surf.jpg", img_matches );
  return 0;
}
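To build and run, assuming the two files are saved under the names above: run cmake . and make, then ./detection_pose_estimation_surf train.png test.png (the image names here are placeholders). Note that the train image must be at least 447x367 pixels so that the hard-coded ROI Rect(192, 112, 255, 255) fits inside it.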