OpenCV2 特征检测与匹配
在OpenCV 3中,SIFT、SURF等多种特征检测算法的稳定实现被移入了opencv_contrib仓库的xfeatures2d模块中,需要单独编译才能使用,因此本文改回(降级)到OpenCV 2.4.9版本来演示。
特征提取模块能够从两幅图像中提取稳定的特征点,并通过几何建模构建三维模型,并将各幅图像融合到同一空间中以实现精确对齐;此外,该模块还可以执行图像拼接任务以整合多幅图像信息
SIFT特征检测器基于尺度空间设计,提取的特征点稳定,兼具尺度和旋转不变性,精度表现优异,但计算效率较低。为此提出了SURF算法,其计算速度约为SIFT的3倍;另有ORB算法,在速度方面表现更为出色(约为SIFT的10倍),ORB具备旋转不变性,但尺度不变性较弱(仅靠图像金字塔近似)。
SURF特征检测算子:
代码如下:
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat image01 = imread("4.jpg", 1); //右图
Mat image02 = imread("3.jpg", 1); //左图
namedWindow("p2", 0);
namedWindow("p1", 0);
imshow("p2", image01);
imshow("p1", image02);
//灰度图转换
Mat image1, image2;
cvtColor(image01, image1, CV_RGB2GRAY);
cvtColor(image02, image2, CV_RGB2GRAY);
//提取特征点
SurfFeatureDetector surfDetector(800); // 海塞矩阵阈值,在这里调整精度,值越大点越少,越精准
vector<KeyPoint> keyPoint1, keyPoint2;
surfDetector.detect(image1, keyPoint1);//检测出特征点
surfDetector.detect(image2, keyPoint2);//检测出特征点
//特征点描述,为下边的特征点匹配做准备
SurfDescriptorExtractor SurfDescriptor;
Mat imageDesc1, imageDesc2;
SurfDescriptor.compute(image1, keyPoint1, imageDesc1);//计算特征点描述
SurfDescriptor.compute(image2, keyPoint2, imageDesc2);//计算特征点描述
//获得匹配特征点,并提取最优配对
FlannBasedMatcher matcher;
vector<DMatch> matchePoints;
matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
cout << "total match points: " << matchePoints.size() << endl;
Mat img_match;
drawMatches(image01, keyPoint1, image02, keyPoint2, matchePoints, img_match);
namedWindow("match", 0);
imshow("match", img_match);
imwrite("match.jpg", img_match);
waitKey();
return 0;
}
AI写代码
效果图如下:


连线斜率明显异常的匹配通常是错误匹配。为了剔除因图像遮挡和背景混乱而产生的无法可靠匹配的特征点,David Lowe提出了比较最近邻距离与次近邻距离的SIFT匹配方式:取一幅图像中的一个SIFT关键点,在另一幅图像中找出与其欧式距离最近的前两个关键点,计算最近距离与次近距离的比值(ratio),若该比值小于某个阈值,则接受这一对匹配点。由于错误匹配具有随机性,其最近邻与次近邻的距离比值往往较大,因此降低该比例阈值能有效减少错误匹配的数量,但同时也会损失一部分正确匹配;提高阈值则效果相反,需要根据具体场景权衡选取。
Lowe在论文中推荐的ratio阈值为0.8,但对大量存在尺度、旋转和亮度变化的图片对进行匹配的经验表明,ratio取值在0.4~0.6之间效果最佳。具体而言:
- ratio = 0.4:适用于对匹配准确度要求较高的场合;
- ratio = 0.6:适用于需要较多匹配点对的场合;
- ratio = 0.5:一般情况下的默认选择。
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat image01 = imread("4.jpg", 1);
Mat image02 = imread("3.jpg", 1);
imshow("p2", image01);
imshow("p1", image02);
//灰度图转换
Mat image1, image2;
cvtColor(image01, image1, CV_RGB2GRAY);
cvtColor(image02, image2, CV_RGB2GRAY);
//提取特征点
SurfFeatureDetector surfDetector(2000); // 海塞矩阵阈值,在这里调整精度,值越大点越少,越精准
vector<KeyPoint> keyPoint1, keyPoint2;
surfDetector.detect(image1, keyPoint1);
surfDetector.detect(image2, keyPoint2);
//特征点描述,为下边的特征点匹配做准备
SurfDescriptorExtractor SurfDescriptor;
Mat imageDesc1, imageDesc2;
SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
SurfDescriptor.compute(image2, keyPoint2, imageDesc2);
FlannBasedMatcher matcher;
vector<vector<DMatch> > matchePoints;
vector<DMatch> GoodMatchePoints;
vector<Mat> train_desc(1, imageDesc1);
matcher.add(train_desc);
matcher.train();
matcher.knnMatch(imageDesc2, matchePoints, 2);
cout << "total match points: " << matchePoints.size() << endl;
// Lowe's algorithm,获取优秀匹配点
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
{
GoodMatchePoints.push_back(matchePoints[i][0]);
}
}
Mat SURF_match;
drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, SURF_match);
imshow("SURF_match ", SURF_match);
waitKey();
return 0;
}
AI写代码

#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat image01 = imread("4.jpg", 1);
Mat image02 = imread("3.jpg", 1);
imshow("p2", image01);
imshow("p1", image02);
//灰度图转换
Mat image1, image2;
cvtColor(image01, image1, CV_RGB2GRAY);
cvtColor(image02, image2, CV_RGB2GRAY);
//提取特征点
SiftFeatureDetector siftDetector(800); // 海塞矩阵阈值,在这里调整精度,值越大点越少,越精准
vector<KeyPoint> keyPoint1, keyPoint2;
siftDetector.detect(image1, keyPoint1);
siftDetector.detect(image2, keyPoint2);
//特征点描述,为下边的特征点匹配做准备
SiftDescriptorExtractor SiftDescriptor;
Mat imageDesc1, imageDesc2;
SiftDescriptor.compute(image1, keyPoint1, imageDesc1);
SiftDescriptor.compute(image2, keyPoint2, imageDesc2);
FlannBasedMatcher matcher;
vector<vector<DMatch> > matchePoints;
vector<DMatch> GoodMatchePoints;
vector<Mat> train_desc(1, imageDesc1);
matcher.add(train_desc);
matcher.train();
matcher.knnMatch(imageDesc2, matchePoints, 2);
cout << "total match points: " << matchePoints.size() << endl;
// Lowe's algorithm,获取优秀匹配点
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
{
GoodMatchePoints.push_back(matchePoints[i][0]);
}
}
Mat SIFT_match;
drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, SIFT_match);
imshow("SIFT_match ", SIFT_match);
waitKey();
return 0;
}
AI写代码
效果如下:

ORB:
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat image01 = imread("4.jpg", 1);
Mat image02 = imread("3.jpg", 1);
imshow("p2", image01);
imshow("p1", image02);
//灰度图转换
Mat image1, image2;
cvtColor(image01, image1, CV_RGB2GRAY);
cvtColor(image02, image2, CV_RGB2GRAY);
//提取特征点
OrbFeatureDetector OrbDetector(1000); // 在这里调整精度,值越小点越少,越精准
vector<KeyPoint> keyPoint1, keyPoint2;
OrbDetector.detect(image1, keyPoint1);
OrbDetector.detect(image2, keyPoint2);
//特征点描述,为下边的征点匹配做准备
OrbDescriptorExtractor OrbDescriptor;
Mat imageDesc1, imageDesc2;
OrbDescriptor.compute(image1, keyPoint1, imageDesc1);
OrbDescriptor.compute(image2, keyPoint2, imageDesc2);
flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
vector<DMatch> GoodMatchePoints;
Mat macthIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
flannIndex.knnSearch(imageDesc2, macthIndex, matchDistance, 2, flann::SearchParams());
// Lowe's algoithm,获取优秀匹配点
for (int i = 0; i < matchDistance.rows; i++)
{
if (matchDistance.at<float>(i, 0) < 0.6 * matchDistance.at<float>(i, 1))
{
DMatch dmatches(i, macthIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
GoodMatchePoints.push_back(dmatches);
}
}
Mat ORB_match;
drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, ORB_match);
imshow("ORB_match ", ORB_match);
waitKey();
return 0;
}
AI写代码

FAST(Features from Accelerated Segment Test,加速分割测试特征)算法专用于快速检测关键点位置:只需比较候选点与其周围圆周上像素的灰度差异,即可判断该点是否为关键点。
跟Harris检测器的情况一样, FAST算法源于对构成角点的定义。FAST对角点的定义基于候选特征点周围的图像强度值。 以某个点为中心作一个圆, 根据圆上的像素值判断该点是否为关键点。 如果存在这样一段圆弧, 它的连续长度超过周长的3/4, 并且它上面所有像素的强度值都与圆心的强度值明显不同(全部更黑或更亮) , 那么就认定这是一个关键点。
用这个算法检测兴趣点的速度非常快, 因此十分适合需要优先考虑速度的应用。 这些应用包括实时视觉跟踪、 目标识别等, 它们需要在实时视频流中跟踪或匹配多个点。
我们使用FastFeatureDetector 进行特征点提取,因为opencv没有提供fast专用的描述子提取器,所以我们借用SiftDescriptorExtractor 来实现描述子的提取。
要完成特征点的匹配,第一个步骤就是找出每幅图像的特征点,这叫做特征检测,比如我们使用的FastFeatureDetector、SiftFeatureDetector都是特征检测模块。得到这些图像的特征点后,再对这些特征点进行进一步的分析,用一些数学上的特征对其进行描述,如梯度直方图、局部随机二值特征等。所以在这一步我们可以选择其他描述子提取器对这些点进行特征描述,进而完成特征点的精确匹配。在OpenCV中SURF、ORB、SIFT既包含FeatureDetector,又包含DescriptorExtractor,所以我们使用上述三种算法做特征匹配时,都用其自带的方法配套使用。除此之外,如果我们想用FAST角点检测并作特征点匹配该怎么办?此时可以使用FastFeatureDetector + BriefDescriptorExtractor的方式,这种组合方式大致就是著名的ORB算法的雏形(ORB在此基础上为FAST角点增加了方向信息,并使用带旋转的BRIEF描述子)。所以特征点检测和特征点匹配是两个不同的步骤,我们只需根据项目需求对这两个步骤的方法自由组合即可。
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat image01 = imread("4.jpg", 1);
Mat image02 = imread("3.jpg", 1);
imshow("p2", image01);
imshow("p1", image02);
//灰度图转换
Mat image1, image2;
cvtColor(image01, image1, CV_RGB2GRAY);
cvtColor(image02, image2, CV_RGB2GRAY);
//提取特征点
FastFeatureDetector Detector(50); //阈值
vector<KeyPoint> keyPoint1, keyPoint2;
Detector.detect(image1, keyPoint1);
Detector.detect(image2, keyPoint2);
//特征点描述,为下边的特征点匹配做准备
SiftDescriptorExtractor Descriptor;
Mat imageDesc1, imageDesc2;
Descriptor.compute(image1, keyPoint1, imageDesc1);
Descriptor.compute(image2, keyPoint2, imageDesc2);
BruteForceMatcher< L2<float> > matcher;
vector<vector<DMatch> > matchePoints;
vector<DMatch> GoodMatchePoints;
vector<Mat> train_desc(1, imageDesc1);
matcher.add(train_desc);
matcher.train();
matcher.knnMatch(imageDesc2, matchePoints, 2);
cout << "total match points: " << matchePoints.size() << endl;
// Lowe's algorithm,获取优秀匹配点
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
{
GoodMatchePoints.push_back(matchePoints[i][0]);
}
}
Mat FAST_match;
drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, FAST_match);
imshow("FAST_match", FAST_match);
imwrite("FAST_match.jpg", FAST_match);
waitKey();
return 0;
}
AI写代码

FAST的效果:匹配点较少,但没有错误的匹配。
Harris角点:
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat image01 = imread("4.jpg", 1);
Mat image02 = imread("3.jpg", 1);
imshow("p2", image01);
imshow("p1", image02);
//灰度图转换
Mat image1, image2;
cvtColor(image01, image1, CV_RGB2GRAY);
cvtColor(image02, image2, CV_RGB2GRAY);
//提取特征点
GoodFeaturesToTrackDetector Detector(500); //最大点数,值越大,点越多
vector<KeyPoint> keyPoint1, keyPoint2;
Detector.detect(image1, keyPoint1);
Detector.detect(image2, keyPoint2);
//特征点描述,为下边的特征点匹配做准备
SiftDescriptorExtractor Descriptor;
Mat imageDesc1, imageDesc2;
Descriptor.compute(image1, keyPoint1, imageDesc1);
Descriptor.compute(image2, keyPoint2, imageDesc2);
BruteForceMatcher< L2<float> > matcher;
vector<vector<DMatch> > matchePoints;
vector<DMatch> GoodMatchePoints;
vector<Mat> train_desc(1, imageDesc1);
matcher.add(train_desc);
matcher.train();
matcher.knnMatch(imageDesc2, matchePoints, 2);
cout << "total match points: " << matchePoints.size() << endl;
// Lowe's algorithm,获取优秀匹配点
for (int i = 0; i < matchePoints.size(); i++)
{
if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
{
GoodMatchePoints.push_back(matchePoints[i][0]);
}
}
Mat Harris_match;
drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, Harris_match);
imshow("Harris_match ", Harris_match);
imwrite("Harris_match.jpg", Harris_match);
waitKey();
return 0;
}
AI写代码

