The code below is a slight modification of code from the OpenCV 2.3 tutorial.
You can use it to find image A in image B. It uses SURF (Speeded Up Robust Features,
http://www.vision.ee.ethz.ch/~surf/eccv06.pdf) for feature detection and description,
and FLANN (Fast Library for Approximate Nearest Neighbors,
http://opencv.willowgarage.com/documentation/cpp/flann_fast_approximate_nearest_neighbor_search.html)
for matching.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
int main( int argc, char** argv )
{
if( argc != 3 ) return -1;
Mat img_A = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_B = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_A.data || !img_B.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<keypoint> keypoints_A, keypoints_B;
detector.detect( img_A, keypoints_A );
detector.detect( img_B, keypoints_B );
SurfDescriptorExtractor extractor;
Mat descriptors_A, descriptors_B;
extractor.compute( img_A, keypoints_A, descriptors_A );
extractor.compute( img_B, keypoints_B, descriptors_B );
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_A, descriptors_B, matches );
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_A.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_A.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_A, keypoints_A, img_B, keypoints_B,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
std::vector<point2f> A;
std::vector<point2f> B;
for( int i = 0; i < good_matches.size(); i++ )
{
A.push_back( keypoints_A[ good_matches[i].queryIdx ].pt );
B.push_back( keypoints_B[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( A, B, CV_RANSAC );
std::vector<point2f> A_corners(4);
A_corners[0] = cvPoint(0,0);
A_corners[1] = cvPoint( img_A.cols, 0 );
A_corners[2] = cvPoint( img_A.cols, img_A.rows );
A_corners[3] = cvPoint( 0, img_A.rows );
std::vector<point2f> B_corners(4);
perspectiveTransform( A_corners, B_corners, H);
line( img_matches, B_corners[0] + Point2f( img_A.cols, 0), B_corners[1] + Point2f( img_A.cols, 0),Scalar(0,0,255));
line( img_matches, B_corners[1] + Point2f( img_A.cols, 0), B_corners[2] + Point2f( img_A.cols, 0),Scalar(0,0,255));
line( img_matches, B_corners[2] + Point2f( img_A.cols, 0), B_corners[3] + Point2f( img_A.cols, 0),Scalar(0,0,255));
line( img_matches, B_corners[3] + Point2f( img_A.cols, 0), B_corners[0] + Point2f( img_A.cols, 0),Scalar(0,0,255));
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}
Hope this helps.