Advertisement

opencv图像特征提取和匹配

阅读量:

opencv图像特征提取和匹配

最近刚开始做全景图像拼接与融合,使用的是 opencv2.4.9,因为 3.x 以后 nonfree 模块被移出了主库……我又不会用 cmake 编译 contrib 模块,所以用了 2.4.9。
目前在弄两张图的拼接,下一步是多张图像拼起来,但是我还不太会(其实是完全不会)怎么拼起来,试了好几次都没有弄出来,所以先分享一下特征提取和匹配。
希望大佬们能教一下我怎么快速实现多图拼接。

实验数据链接: http://www.csdata.org/p/387/
在这里插入图片描述在这里插入图片描述
在这里插入图片描述在这里插入图片描述

复制代码
    #include <stdio.h>
    #include <iostream>
    #include "opencv2/core/core.hpp"
    #include "opencv2/features2d/features2d.hpp"
    #include "opencv2/highgui/highgui.hpp"
    #include "opencv2/nonfree/features2d.hpp"
    #include "opencv2/legacy/legacy.hpp"
    #include "opencv2/calib3d/calib3d.hpp"
    
    using namespace cv;
    using namespace std;
    
    int main(int argc, char** argv)
    {
    	//读取图像
    	Mat img_1 = imread("C:\ Users\ 12757\ Desktop\ 毕设数据\ dataset (1)\ data15\ 1.bmp");
    	Mat img_2 = imread("C:\ Users\ 12757\ Desktop\ 毕设数据\ dataset (1)\ data15\ 2.bmp");
    	if (!img_1.data || !img_2.data)
    	{
    		std::cout << "--(!)Error reading images " << std::endl;
    		return -1;
    	}
    
    	//特征检测
    	//int minHessian = 400;
    	SiftFeatureDetector detector;
    	std::vector<KeyPoint> keypoints_1, keypoints_2;//构造2个专门由点组成的点向量用来存储特征点
    	detector.detect(img_1, keypoints_1);//将img_1图像中检测到的特征点存储起来放在keypoints_1中
    	detector.detect(img_2, keypoints_2);
    
    	//绘制特征点
    	Mat img_keypoints_1; Mat img_keypoints_2;
    	drawKeypoints(img_1, keypoints_1, img_keypoints_1);
    	Scalar::all(-1), DrawMatchesFlags::DEFAULT;
    	drawKeypoints(img_2, keypoints_2, img_keypoints_2);
    	Scalar::all(-1), DrawMatchesFlags::DEFAULT;
    
    	//特征匹配
    	SiftDescriptorExtractor extractor;//定义描述子对象
    	Mat descriptors_1, descriptors_2;//存放特征向量的矩阵
    	extractor.compute(img_1, keypoints_1, descriptors_1);//计算特征向量
    	extractor.compute(img_2, keypoints_2, descriptors_2);
    	BFMatcher matcher(NORM_L1);//BFmatch暴力匹配
    	vector<DMatch>matches;//定义匹配结果变量
    	matcher.match(descriptors_1, descriptors_2, matches);//实现描述符之间的匹配
    
    // 移除不好的匹配点
    	double max_dist = 0, min_dist = 100;
    	for (int i = 0; i < descriptors_1.rows; i++)//求向量距离最大最小值
    	{
    		double dst = matches[i].distance;
    		if (dst < min_dist)min_dist = dst;
    		if (dst > max_dist)max_dist = dst;
    	}
    	std::vector<DMatch>good_matches;//定义筛选后的匹配结果变量
    	for (int i = 0; i < descriptors_1.rows; i++)
    	{
    		if (matches[i].distance < 3 * min_dist)
    		{
    			good_matches.push_back(matches[i]);
    		}
    	}
    	std::vector<Point2f>obj;
    	std::vector<Point2f>scene;
    	for (int i = 0; i < good_matches.size(); i++)
    	{
    		obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
    		scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
    	}
    	Mat img_matches;
    	drawMatches(img_1, keypoints_1, img_2, keypoints_2,  good_matches,img_matches);//将匹配出来的结果放入内存img_matches中
    	imshow("Matches", img_matches);//显示匹配线段
    
    	//求取变换矩阵
    	Mat homo = findHomography(keypoints_1, keypoints_2, CV_RANSAC);
    	Mat imageTransform1;
    	warpPerspective(img_1, imageTransform1, homo,Size(img_2.cols + img_1.cols, img_2.rows));//变换
    	img_1.copyTo(Mat(imageTransform1, Rect(img_1.cols, 0, img_2.cols, img_2.rows)));
    	//imwrite("tiled.bmp", imageTransform1);
    	imshow("tiledimage", imageTransform1);
    	
    
    	
    		waitKey(0);
    	return (0);
    }

全部评论 (0)

还没有任何评论哟~