These examples are all about corner detection algorithms. Here we cover Harris corner detection, Shi-Tomasi corner detection, FAST corner detection, the scale-invariant SURF detector, the scale-invariant SIFT detector, and feature point description. Because these are algorithmic topics they are fairly involved, and they are all classic algorithms. If you purely want to use them to implement some functionality, calling a few simple OpenCV functions is enough; but if you want to learn the theory behind them, which will help your own research later and help you understand what the function parameters actually change, then you still have to grit your teeth and work through the principles. I have spent quite a while on the theory myself and still have not understood it completely, so here I only list the blog posts I found most useful, along with some code I compiled and the results, as a record of the process that I can build on later.
Harris: OpenCV Learning Notes (5) — Harris Corner Detection
Shi-Tomasi: [OpenCV] Corner Detection: Harris and Shi-Tomasi Corner Detection
FAST: OpenCV Learning Notes (46) — FAST Feature Point Detection (features2D)
SIFT: [OpenCV] SIFT Principles and Source Code Analysis
SURF: Feature Point Detection Study 2 (the SURF Algorithm)
// Compute the Harris corner response and prepare for non-maximum suppression
void detect(const Mat &image) {
    // OpenCV's built-in Harris corner response function
    cornerHarris(image, cornerStrength, neighbourhood, aperture, k);
    double minStrength;
    // find the minimum and maximum response values
    minMaxLoc(cornerStrength, &minStrength, &maxStrength);
    Mat dilated;
    // dilate with the default 3x3 kernel: local maxima keep their original value,
    // every other pixel is replaced by the maximum of its 3x3 neighbourhood
    dilate(cornerStrength, dilated, cv::Mat());
    // compare with the original: only pixels that kept their value remain, and
    // those are exactly the local maxima; they are stored in localMax
    compare(cornerStrength, dilated, localMax, cv::CMP_EQ);
}
void cornerHarris(InputArray src, OutputArray dst, int blockSize, int apertureSize, double k, int borderType=BORDER_DEFAULT )
void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop)
// Build the corner map
Mat getCornerMap(double qualityLevel) {
    Mat cornerMap;
    // compute the threshold from the maximum corner response
    thresholdvalue = qualityLevel * maxStrength;
    threshold(cornerStrength, cornerTh, thresholdvalue, 255, cv::THRESH_BINARY);
    // convert to an 8-bit image
    cornerTh.convertTo(cornerMap, CV_8U);
    // AND with the local-maximum map: only local maxima that pass the threshold
    // survive, which completes the non-maximum suppression
    bitwise_and(cornerMap, localMax, cornerMap);
    return cornerMap;
}
void bitwise_and(InputArray src1, InputArray src2, OutputArray dst, InputArray mask=noArray())
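The detect() and getCornerMap() snippets above use member variables (cornerStrength, localMax, maxStrength, cornerTh and so on) that are declared elsewhere in a harris wrapper class. As a rough sketch only, not the original class, the surrounding declarations could look roughly like this; the drawOnImage() helper used further down is also just my assumption:
// Minimal sketch of a harris wrapper class (assumed, not the original):
// it only declares the members that the two methods above rely on.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
using namespace cv;

class harris {
private:
    Mat cornerStrength;    // 32-bit float Harris response image
    Mat cornerTh;          // thresholded response
    Mat localMax;          // binary map of the local maxima
    int neighbourhood;     // blockSize passed to cornerHarris
    int aperture;          // Sobel aperture size
    double k;              // Harris parameter k
    double maxStrength;    // maximum response value
    double thresholdvalue; // qualityLevel * maxStrength

public:
    harris() : neighbourhood(3), aperture(3), k(0.01),
               maxStrength(0.0), thresholdvalue(0.01) {}

    // detect() and getCornerMap() from the listings above would go here.

    // Assumed helper used by goodFeaturesDetect() below:
    // draw a small circle at every corner position.
    void drawOnImage(Mat &image, const std::vector<Point> &points) {
        for (size_t i = 0; i < points.size(); ++i)
            circle(image, points[i], 3, Scalar(255, 255, 255), 1);
    }
};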
void goodFeaturesDetect()
{
    // goodFeaturesToTrack implements the Shi-Tomasi (improved Harris) corner detector
    vector<Point> corners;
    goodFeaturesToTrack(image, corners,
        200,    // maximum number of corners
        0.01,   // quality level: the threshold is 0.01 * max(min(e1, e2)),
                // where e1, e2 are the eigenvalues of the Harris matrix
        10);    // minimum allowed distance between two corners
    harris().drawOnImage(image, corners);   // mark the corners
    imshow(winname, image);
}
void goodFeaturesToTrack(InputArray image, OutputArray corners, int maxCorners, double qualityLevel, double minDistance, InputArray mask=noArray(), int blockSize=3, bool useHarrisDetector=false, double k=0.04 )
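Because image, winname and harris::drawOnImage() are defined elsewhere in the original program, here is a self-contained sketch of the same Shi-Tomasi call that should work on an OpenCV 2.x build; the file name and window title are only placeholders:
// Stand-alone sketch of the same Shi-Tomasi detection (placeholder file name).
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;

int main()
{
    Mat image = imread("input.jpg", 0);   // load as grayscale
    if (image.empty()) return -1;

    std::vector<Point2f> corners;
    goodFeaturesToTrack(image, corners,
                        200,     // maxCorners
                        0.01,    // qualityLevel
                        10);     // minDistance in pixels

    // mark each corner with a small circle (instead of harris::drawOnImage)
    for (size_t i = 0; i < corners.size(); ++i)
        circle(image, corners[i], 3, Scalar(255), 1);

    imshow("Shi-Tomasi corners", image);
    waitKey(0);
    return 0;
}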
void fastDetect()
{
    // FAST corner detection
    vector<KeyPoint> keypoints;
    FastFeatureDetector fast(40, true);
    fast.detect(image, keypoints);
    drawKeypoints(image, keypoints, image, Scalar::all(255), DrawMatchesFlags::DRAW_OVER_OUTIMG);
    imshow(winname, image);
}
FastFeatureDetector( int threshold=1, bool nonmaxSuppression=true );
void FeatureDetector::detect(const Mat& image, vector<KeyPoint>& keypoints, const Mat& mask=Mat() ) const
void FeatureDetector::detect(const vector<Mat>& images, vector<vector<KeyPoint>>& keypoints, const vector<Mat>& masks=vector<Mat>() ) const
void drawKeypoints(const Mat& image, const vector<KeyPoint>& keypoints, Mat& outImg, const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT )
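The generic FeatureDetector interface listed above can also create a detector by name through its factory, which is handy when switching between detectors. A small sketch, assuming image is a valid grayscale Mat:
// Sketch using the FeatureDetector factory of OpenCV 2.x features2d.
#include <opencv2/features2d/features2d.hpp>
#include <vector>
using namespace cv;

void fastDetectByName(const Mat &image, std::vector<KeyPoint> &keypoints)
{
    Ptr<FeatureDetector> detector = FeatureDetector::create("FAST");
    detector->detect(image, keypoints);   // same detect() interface as listed above
}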
struct DrawMatchesFlags
{
    enum
    {
        DEFAULT = 0,                // Create an output image: the two source images,
                                    // the matches and the keypoints are drawn into it.
                                    // For each keypoint only the center is drawn,
                                    // not its size or orientation.
        DRAW_OVER_OUTIMG = 1,       // Do not create an output image; the matches are
                                    // drawn over the existing output image.
        NOT_DRAW_SINGLE_POINTS = 2, // Unmatched single keypoints are not drawn.
        DRAW_RICH_KEYPOINTS = 4     // For each keypoint, the circle of its size and
                                    // its orientation are drawn.
    };
};
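The flag values are bit masks, so several of them can be combined with a bitwise OR in a single call. A small sketch, with all inputs passed in as assumptions rather than taken from the examples above:
// Draw rich keypoints (size and orientation) and skip unmatched points.
#include <opencv2/features2d/features2d.hpp>
#include <vector>
using namespace cv;

Mat drawRichMatches(const Mat &img1, const std::vector<KeyPoint> &keypoints1,
                    const Mat &img2, const std::vector<KeyPoint> &keypoints2,
                    const std::vector<DMatch> &matches)
{
    Mat out;
    drawMatches(img1, keypoints1, img2, keypoints2, matches, out,
                Scalar::all(-1), Scalar::all(-1), std::vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS |
                DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    return out;
}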
void siftDetect()
{
    vector<KeyPoint> keypoints;
    SiftFeatureDetector sift(0.03, 10);
    sift.detect(image, keypoints);
    drawKeypoints(image, keypoints, image, Scalar(255, 255, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    imshow(winname, image);
}
SiftFeatureDetector( double threshold, double edgeThreshold,
int nOctaves=SIFT::CommonParams::DEFAULT_NOCTAVES,
int nOctaveLayers=SIFT::CommonParams::DEFAULT_NOCTAVE_LAYERS,
int firstOctave=SIFT::CommonParams::DEFAULT_FIRST_OCTAVE,
int angleMode=SIFT::CommonParams::FIRST_ANGLE );
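Note that the SiftFeatureDetector constructor above is the OpenCV 2.3.x interface; on a 2.4.x build SIFT and SURF were moved into the nonfree module, so an extra header (and, for name-based creation, initModule_nonfree()) is needed. A rough, version-dependent sketch:
// Assumes an OpenCV 2.4.x build where SIFT/SURF live in the nonfree module;
// on 2.3.x the nonfree header and initModule_nonfree() are not needed.
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>   // SiftFeatureDetector, SurfFeatureDetector
#include <vector>
using namespace cv;

void siftDetectCompat(const Mat &image, std::vector<KeyPoint> &keypoints)
{
    initModule_nonfree();        // only required for name-based creation through the
                                 // Algorithm factory; harmless before direct construction
    SiftFeatureDetector sift;    // default parameters
    sift.detect(image, keypoints);
}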
void surfDetect()
{
    vector<KeyPoint> keypoints_1, keypoints_2;
    Mat descriptors_1, descriptors_2;
    //-- Step 1: Detect the keypoints using SURF Detector
    SurfFeatureDetector surf(2500);
    surf.detect(image, keypoints_1);
    surf.detect(image2, keypoints_2);
    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    extractor.compute(image, keypoints_1, descriptors_1);
    extractor.compute(image2, keypoints_2, descriptors_2);
    //-- Step 3: Matching descriptor vectors with a brute force matcher
    BruteForceMatcher< L2<float> > matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);
    // keep only the 25 closest matches: DMatch orders itself by descriptor distance
    nth_element(matches.begin(), matches.begin() + 24, matches.end());
    matches.erase(matches.begin() + 25, matches.end());
    //-- Draw matches
    Mat img_matches;
    drawMatches(image, keypoints_1, image2, keypoints_2, matches, img_matches, Scalar(255, 255, 255));
    drawKeypoints(image, keypoints_1, image, Scalar(255, 255, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    //-- Show detected matches
    imshow("Matches", img_matches);
    imshow(winname, image);
}
SurfFeatureDetector( double hessianThreshold = 400., int octaves = 3,
int octaveLayers = 4 );
void DescriptorExtractor::compute(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors) const
void DescriptorExtractor::compute(const vector<Mat>& images, vector<vector<KeyPoint>>& keypoints, vector<Mat>& descriptors) const
void DescriptorMatcher::match(const Mat& queryDescriptors, const Mat& trainDescriptors, vector<DMatch>& matches, const Mat& mask=Mat() ) const
void DescriptorMatcher::match(const Mat& queryDescriptors, vector<DMatch>& matches, const vector<Mat>& masks=vector<Mat>() )
void drawMatches(const Mat& img1, const vector<KeyPoint>& keypoints1, const Mat& img2, const vector<KeyPoint>& keypoints2, const vector<DMatch>& matches1to2, Mat& outImg, const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1), const vector<char>& matchesMask=vector<char>(), int flags=DrawMatchesFlags::DEFAULT )
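Finally, the functions above rely on globals such as image, image2 and winname that are set up elsewhere in the original program. A hypothetical driver could look like this; the file names and window title are placeholders, and the detect functions are assumed to be defined in the same file:
// Hypothetical driver; the detect functions above are assumed to be in the
// same translation unit and the image file names are placeholders.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <string>
using namespace cv;

Mat image, image2;
const std::string winname = "corners";

void goodFeaturesDetect();
void fastDetect();
void siftDetect();
void surfDetect();

int main()
{
    image  = imread("img1.jpg", 0);   // grayscale
    image2 = imread("img2.jpg", 0);
    if (image.empty() || image2.empty()) return -1;

    namedWindow(winname);
    surfDetect();   // or goodFeaturesDetect(), fastDetect(), siftDetect()
    waitKey(0);
    return 0;
}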
Original post: http://blog.csdn.net/chenjiazhou12/article/details/22683049