Analysis of the OpenCV SURF Demo (find_obj.cpp)

 

OpenCV 2.1 ships a simple example of the SURF algorithm (every version since 1.1 includes the algorithm), located under C:\Program Files\OpenCV2.1\samples\c and named find_obj.cpp; running it shows the matching result directly. To make the example easier to present, it has been slightly modified here (only some code was removed; nothing that affects how the SURF algorithm is used).

The modified code, with comments, follows (the walkthrough focuses mainly on the main function):

#include "stdafx.h"
#include <cv.h>
#include <highgui.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>

using namespace std;

IplImage *image = 0;

double compareSURFDescriptors( const float* d1, const float* d2, double best, int length )
{
    double total_cost = 0;
    assert( length % 4 == 0 );
    for( int i = 0; i < length; i += 4 )
    {
        double t0 = d1[i]   - d2[i];
        double t1 = d1[i+1] - d2[i+1];
        double t2 = d1[i+2] - d2[i+2];
        double t3 = d1[i+3] - d2[i+3];
        total_cost += t0*t0 + t1*t1 + t2*t2 + t3*t3;
        if( total_cost > best )
            break;
    }
    return total_cost;
}
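
compareSURFDescriptors simply computes the squared Euclidean distance between two descriptors, total_cost = sum_i (d1[i] - d2[i])^2, unrolled four components at a time (hence the assert that length is a multiple of 4). The best argument enables an early exit: once the partial sum already exceeds the best distance seen so far, this candidate cannot win, so the loop stops. With the parameters used in main below (extended = 1), each descriptor has length 128.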

int naiveNearestNeighbor( const float* vec, int laplacian,
                          const CvSeq* model_keypoints,
                          const CvSeq* model_descriptors )
{
    int length = (int)(model_descriptors->elem_size/sizeof(float));
    int i, neighbor = -1;
    double d, dist1 = 1e6, dist2 = 1e6;
    CvSeqReader reader, kreader;
    cvStartReadSeq( model_keypoints, &kreader, 0 );
    cvStartReadSeq( model_descriptors, &reader, 0 );

    for( i = 0; i < model_descriptors->total; i++ )
    {
        const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
        const float* mvec = (const float*)reader.ptr;
        CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
        if( laplacian != kp->laplacian )
            continue;
        d = compareSURFDescriptors( vec, mvec, dist2, length );
        if( d < dist1 )
        {
            dist2 = dist1;
            dist1 = d;
            neighbor = i;
        }
        else if ( d < dist2 )
            dist2 = d;
    }
    if ( dist1 < 0.6*dist2 )
        return neighbor;
    return -1;
}
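
The final check, dist1 < 0.6*dist2, is the standard nearest-neighbor ratio test: a match is accepted only when the best distance is clearly below the second-best one. For example, dist1 = 0.10 against dist2 = 0.30 gives a ratio of 0.33 and the match is kept, while dist1 = 0.25 against dist2 = 0.30 is considered ambiguous and rejected. Comparing the Laplacian sign first (bright blob on dark background vs. the opposite) is a cheap filter that skips descriptors that could not match anyway.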

// Finds the matching point pairs between the two images and stores them in the
// ptpairs vector. The keypoints of the object image and their descriptors
// (local features) are passed in objectKeypoints and objectDescriptors; the
// keypoints and descriptors of the scene image in imageKeypoints and
// imageDescriptors.
void findPairs( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
                const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
                vector<int>& ptpairs )
{
    int i;
    CvSeqReader reader, kreader;
    cvStartReadSeq( objectKeypoints, &kreader );
    cvStartReadSeq( objectDescriptors, &reader );
    ptpairs.clear();

    for( i = 0; i < objectDescriptors->total; i++ )
    {
        const CvSURFPoint* kp = (const CvSURFPoint*)kreader.ptr;
        const float* descriptor = (const float*)reader.ptr;
        CV_NEXT_SEQ_ELEM( kreader.seq->elem_size, kreader );
        CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader );
        int nearest_neighbor = naiveNearestNeighbor( descriptor, kp->laplacian,
                                                     imageKeypoints, imageDescriptors );
        if( nearest_neighbor >= 0 )
        {
            ptpairs.push_back(i);
            ptpairs.push_back(nearest_neighbor);
        }
    }
}

// Locates the object in the scene image. The resulting position is stored in
// dst_corners; src_corners is determined by the width and height of the object
// image. The remaining parameters are the same as in findPairs above.
int locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
                        const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
                        const CvPoint src_corners[4], CvPoint dst_corners[4] )
{
    double h[9];
    CvMat _h = cvMat(3, 3, CV_64F, h);
    vector<int> ptpairs;
    vector<CvPoint2D32f> pt1, pt2;
    CvMat _pt1, _pt2;
    int i, n;

    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
    n = ptpairs.size()/2;
    if( n < 4 )
        return 0;

    pt1.resize(n);
    pt2.resize(n);
    for( i = 0; i < n; i++ )
    {
        pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;
        pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
    }

    _pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );
    _pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );
    if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 ))
        return 0;

    for( i = 0; i < 4; i++ )
    {
        double x = src_corners[i].x, y = src_corners[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
    }
    return 1;
}
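
cvFindHomography estimates the 3x3 homography H with RANSAC (reprojection threshold 5 pixels); at least 4 point pairs are required, hence the n < 4 check. The corner loop then applies the usual projective mapping (X', Y', W) = H * (x, y, 1) followed by the homogeneous divide X = X'/W, Y = Y'/W. The same mapping could also be done with cvPerspectiveTransform; a minimal sketch (variable names as in the function above):

    CvPoint2D32f src_f[4], dst_f[4];
    for( int k = 0; k < 4; k++ )
        src_f[k] = cvPointTo32f(src_corners[k]);
    CvMat _src = cvMat(1, 4, CV_32FC2, src_f);
    CvMat _dst = cvMat(1, 4, CV_32FC2, dst_f);
    cvPerspectiveTransform( &_src, &_dst, &_h );  // dst_f now holds the mapped corners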

int main(int argc, char** argv)
{
    // image sources for the object and the scene
    const char* object_filename = argc == 3 ? argv[1] : "box.png";
    const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";

    // memory storage
    CvMemStorage* storage = cvCreateMemStorage(0);

    cvNamedWindow("Object", 1);
    cvNamedWindow("Object Correspond", 1);

    // colors used for drawing
    static CvScalar colors[] =
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}},
        {{255,255,255}}
    };

    IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
    IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !object || !image )
    {
        fprintf( stderr, "Can not load %s and/or %s\n"
            "Usage: find_obj [<object_filename> <scene_filename>]\n",
            object_filename, scene_filename );
        exit(-1);
    }
    IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
    cvCvtColor( object, object_color, CV_GRAY2BGR );

    // keypoints and descriptors of the object image and of the scene image
    CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
    CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
    int i;

    // SURF parameters: hessianThreshold = 500, extended = 1
    // (extended = 1 selects the 128-element descriptor variant)
    CvSURFParams params = cvSURFParams(500, 1);

    double tt = (double)cvGetTickCount();
    // extract the keypoints and descriptors of the object and scene images
    cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
    printf("Object Descriptors: %d\n", objectDescriptors->total);
    cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );
    printf("Image Descriptors: %d\n", imageDescriptors->total);
    // report the time spent on extraction
    tt = (double)cvGetTickCount() - tt;
    printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));

    CvPoint src_corners[4] = {{0,0}, {object->width,0},
                              {object->width, object->height}, {0, object->height}};

    // corners of the located object in the scene
    CvPoint dst_corners[4];
    IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
    // set ROIs to stack the object image above the scene image in one canvas
    cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );
    cvCopy( object, correspond );
    cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );
    cvCopy( image, correspond );
    cvResetImageROI( correspond );
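
Note that the second ROI rectangle nominally extends past the bottom of correspond; cvSetImageROI clips the rectangle to the image bounds (at least in the OpenCV 2.1 C implementation), so the effective ROI height equals image->height and cvCopy sees matching source and destination sizes.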

    // locate the object in the scene and, if found, draw the outline of its position
    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,
        imageDescriptors, src_corners, dst_corners ))
    {
        for( i = 0; i < 4; i++ )
        {
            CvPoint r1 = dst_corners[i%4];
            CvPoint r2 = dst_corners[(i+1)%4];
            cvLine( correspond, cvPoint(r1.x, r1.y+object->height ),
                    cvPoint(r2.x, r2.y+object->height ), colors[8] );
        }
    }

    // find the matching point pairs between the object and the scene and store
    // them in the vector ptpairs, which can then be processed further
    vector<int> ptpairs;
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
    // draw the matching result
    for( i = 0; i < (int)ptpairs.size(); i += 2 )
    {
        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );
        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );
        cvLine( correspond, cvPointFrom32f(r1->pt),
                cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );
    }
    cvShowImage( "Object Correspond", correspond );

    // draw all keypoints of the object image
    for( i = 0; i < objectKeypoints->total; i++ )
    {
        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
        CvPoint center;
        int radius;
        center.x = cvRound(r->pt.x);
        center.y = cvRound(r->pt.y);
        radius = cvRound(r->size*1.2/9.*2);
        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );
    }
    cvShowImage( "Object", object_color );

    cvWaitKey(0);
    // destroy the windows and release their resources
    cvDestroyWindow("Object");
    cvDestroyWindow("Object Correspond");
    return 0;
}
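
The demo leaves the images and the memory storage to be reclaimed at process exit. In a longer-lived program they would be released explicitly; a minimal sketch using the matching C-API release calls:

    cvReleaseImage( &object );
    cvReleaseImage( &image );
    cvReleaseImage( &object_color );
    cvReleaseImage( &correspond );
    cvReleaseMemStorage( &storage );  // also frees the keypoint/descriptor sequences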

Stepping through under a debugger, the contents of dst_corners and ptpairs can be inspected (the dst_corners screenshot is not reproduced here). The ptpairs vector ends up holding 78 integers, i.e. the following 39 (object index, scene index) pairs:

(29,484)  (77,134)  (82,274)  (206,797) (228,210)
(243,203) (244,203) (249,404) (295,105) (347,451)
(360,142) (417,190) (427,191) (436,198) (445,204)
(452,211) (466,218) (473,105) (486,684) (502,133)
(521,169) (522,178) (527,190) (530,190) (532,450)
(533,198) (535,197) (539,205) (542,202) (544,208)
(547,483) (558,412) (559,412) (583,623) (586,624)
(587,624) (594,748) (595,654) (597,657)

There are 39 matched pairs in total. The first column is the index of the matched keypoint in the object image and the second column the index of the matched keypoint in the scene image; the first pair, for instance, says that the object-image keypoint with index 29 matches the scene-image keypoint with index 484.

Since ptpairs stores indices, the coordinates of the corresponding keypoints can be recovered through it, as follows:

    // get the i-th keypoint of the object image
    CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
    // get the keypoint whose index is ptpairs[i]; this is a matched keypoint
    CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );
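
Putting this together, a small sketch that walks ptpairs two entries at a time and prints the coordinates of every matched pair (names as in the listing above):

    for( int k = 0; k < (int)ptpairs.size(); k += 2 )
    {
        CvSURFPoint* po = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[k] );  // object keypoint
        CvSURFPoint* ps = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[k+1] ); // its match in the scene
        printf( "(%.1f, %.1f) -> (%.1f, %.1f)\n", po->pt.x, po->pt.y, ps->pt.x, ps->pt.y );
    }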

(Screenshot of the running result omitted: the located object is outlined in the scene, with matched keypoints joined by lines.)

Regarding this fragment of the unmodified find_obj.cpp:

    #ifdef USE_FLANN
        flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
    #else
        findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
    #endif

it simply selects whether an approximate nearest-neighbor search (FLANN) is used to find the matching pairs instead of the brute-force search in findPairs.
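
For reference, the core of the sample's flannFindPairs looks roughly like the sketch below (assuming the object and scene descriptors have already been copied row by row into cv::Mat m_object and cv::Mat m_image, one 128-float row per keypoint; the 0.6 ratio test is the same as in naiveNearestNeighbor):

    cv::Mat m_indices( m_object.rows, 2, CV_32S );
    cv::Mat m_dists( m_object.rows, 2, CV_32F );
    // 4 randomized kd-trees; up to 64 leaves checked per query
    cv::flann::Index flann_index( m_image, cv::flann::KDTreeIndexParams(4) );
    flann_index.knnSearch( m_object, m_indices, m_dists, 2, cv::flann::SearchParams(64) );

    int* indices_ptr = m_indices.ptr<int>(0);
    float* dists_ptr = m_dists.ptr<float>(0);
    for( int k = 0; k < m_indices.rows; ++k )
    {
        if( dists_ptr[2*k] < 0.6*dists_ptr[2*k+1] )  // ratio test
        {
            ptpairs.push_back( k );                  // object index
            ptpairs.push_back( indices_ptr[2*k] );   // scene index
        }
    }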
