ORB、SIFT、SURF、手动配准VS程序以及配准后图像融合方法

忘是亡心i 2022-01-17 03:57 349阅读 0赞
  1. #include<opencv2\highgui\highgui.hpp>
  2. #include "opencv2/nonfree/nonfree.hpp"
  3. #include "opencv2/legacy/legacy.hpp"
  4. #include <iostream>
  5. using namespace cv;
  6. using namespace std;
  7. #if SURF
  8. //SURF
  9. int main()
  10. {
  11. Mat image01 = imread("0007.jpg", 1);
  12. Mat image02 = imread("A07.jpg", 1);
  13. imshow("p2", image01);
  14. imshow("p1", image02);
  15. //灰度图转换
  16. Mat image1, image2;
  17. cvtColor(image01, image1, CV_RGB2GRAY);
  18. cvtColor(image02, image2, CV_RGB2GRAY);
  19. //提取特征点
  20. SurfFeatureDetector surfDetector(2000); // 海塞矩阵阈值,在这里调整精度,值越大点越少,越精准
  21. vector<KeyPoint> keyPoint1, keyPoint2;
  22. surfDetector.detect(image1, keyPoint1);
  23. surfDetector.detect(image2, keyPoint2);
  24. //特征点描述,为下边的特征点匹配做准备
  25. SurfDescriptorExtractor SurfDescriptor;
  26. Mat imageDesc1, imageDesc2;
  27. SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
  28. SurfDescriptor.compute(image2, keyPoint2, imageDesc2);
  29. FlannBasedMatcher matcher;
  30. vector<vector<DMatch> > matchePoints;
  31. vector<DMatch> GoodMatchePoints;
  32. vector<Mat> train_desc(1, imageDesc1);
  33. matcher.add(train_desc);
  34. matcher.train();
  35. matcher.knnMatch(imageDesc2, matchePoints, 2);
  36. cout << "total match points: " << matchePoints.size() << endl;
  37. // Lowe's algorithm,获取优秀匹配点
  38. for (int i = 0; i < matchePoints.size(); i++)
  39. {
  40. if (matchePoints[i][0].distance < 0.9 * matchePoints[i][1].distance)
  41. {
  42. GoodMatchePoints.push_back(matchePoints[i][0]);
  43. }
  44. }
  45. Mat first_match;
  46. drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
  47. imshow("first_match ", first_match);
  48. waitKey();
  49. return 0;
  50. }
  51. #elif 0
  52. //sift
  53. int main()
  54. {
  55. Mat image01 = imread("0366.jpg", 1);
  56. Mat image02 = imread("img441.png", 1);
  57. imshow("p2", image01);
  58. imshow("p1", image02);
  59. //灰度图转换
  60. Mat image1, image2;
  61. cvtColor(image01, image1, CV_RGB2GRAY);
  62. cvtColor(image02, image2, CV_RGB2GRAY);
  63. //提取特征点
  64. SiftFeatureDetector siftDetector(800); // 海塞矩阵阈值,在这里调整精度,值越大点越少,越精准
  65. vector<KeyPoint> keyPoint1, keyPoint2;
  66. siftDetector.detect(image1, keyPoint1);
  67. siftDetector.detect(image2, keyPoint2);
  68. //特征点描述,为下边的特征点匹配做准备
  69. SiftDescriptorExtractor SiftDescriptor;
  70. Mat imageDesc1, imageDesc2;
  71. SiftDescriptor.compute(image1, keyPoint1, imageDesc1);
  72. SiftDescriptor.compute(image2, keyPoint2, imageDesc2);
  73. FlannBasedMatcher matcher;
  74. vector<vector<DMatch> > matchePoints;
  75. vector<DMatch> GoodMatchePoints;
  76. vector<Mat> train_desc(1, imageDesc1);
  77. matcher.add(train_desc);
  78. matcher.train();
  79. matcher.knnMatch(imageDesc2, matchePoints, 2);
  80. cout << "total match points: " << matchePoints.size() << endl;
  81. // Lowe's algorithm,获取优秀匹配点
  82. for (int i = 0; i < matchePoints.size(); i++)
  83. {
  84. if (matchePoints[i][0].distance < 0.6 * matchePoints[i][1].distance)
  85. {
  86. GoodMatchePoints.push_back(matchePoints[i][0]);
  87. }
  88. }
  89. Mat first_match;
  90. drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
  91. imshow("first_match ", first_match);
  92. imwrite("first_match.jpg", first_match);
  93. waitKey();
  94. return 0;
  95. }
  96. #elif 0
  97. //ORB
  98. int main()
  99. {
  100. Mat image01 = imread("0366.jpg", 1);
  101. Mat image02 = imread("img441.png", 1);
  102. imshow("p2", image01);
  103. imshow("p1", image02);
  104. //灰度图转换
  105. Mat image1, image2;
  106. cvtColor(image01, image1, CV_RGB2GRAY);
  107. cvtColor(image02, image2, CV_RGB2GRAY);
  108. //提取特征点
  109. OrbFeatureDetector OrbDetector(1000); // 在这里调整精度,值越小点越少,越精准
  110. vector<KeyPoint> keyPoint1, keyPoint2;
  111. OrbDetector.detect(image1, keyPoint1);
  112. OrbDetector.detect(image2, keyPoint2);
  113. //特征点描述,为下边的特征点匹配做准备
  114. OrbDescriptorExtractor OrbDescriptor;
  115. Mat imageDesc1, imageDesc2;
  116. OrbDescriptor.compute(image1, keyPoint1, imageDesc1);
  117. OrbDescriptor.compute(image2, keyPoint2, imageDesc2);
  118. flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
  119. vector<DMatch> GoodMatchePoints;
  120. Mat macthIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
  121. flannIndex.knnSearch(imageDesc2, macthIndex, matchDistance, 2, flann::SearchParams());
  122. // Lowe's algorithm,获取优秀匹配点
  123. for (int i = 0; i < matchDistance.rows; i++)
  124. {
  125. if (matchDistance.at<float>(i,0) < 0.6 * matchDistance.at<float>(i, 1))
  126. {
  127. DMatch dmatches(i, macthIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
  128. GoodMatchePoints.push_back(dmatches);
  129. }
  130. }
  131. Mat first_match;
  132. drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
  133. imshow("first_match ", first_match);
  134. imwrite("first_match.jpg", first_match);
  135. waitKey();
  136. return 0;
  137. }
  138. #elif 1
  139. //手工标注配准
  140. vector<Point2f> imagePoints1, imagePoints2;
  141. Mat ref_win, src_win;
  142. int pcount = 0;
  143. //Mat二进制文件写
  144. bool matWrite(string filename, Mat &M){
  145. Mat M_copy;
  146. M.copyTo(M_copy);
  147. FILE* file = fopen(filename.c_str(), "wb");
  148. if (file == NULL || M.empty())
  149. return false;
  150. fwrite("CmMat", sizeof(char), 5, file);
  151. int headData[3] = { M_copy.cols, M_copy.rows, M_copy.type() };
  152. fwrite(headData, sizeof(int), 3, file);
  153. fwrite(M.data, sizeof(char), M.step * M.rows, file);
  154. fclose(file);
  155. return true;
  156. }
  157. //Mat二进制文件读
  158. bool matRead(const string& filename, Mat& M){
  159. //Mat M_copy;
  160. FILE* f = fopen(filename.c_str(), "rb");
  161. if (f == NULL)
  162. return false;
  163. char buf[8];
  164. int pre = fread(buf, sizeof(char), 5, f);
  165. if (strncmp(buf, "CmMat", 5) != 0) {
  166. printf("Invalidate CvMat data file %s\n", filename.c_str());
  167. return false;
  168. }
  169. int headData[3]; // Width, height, type
  170. fread(headData, sizeof(int), 3, f);
  171. Mat M_copy(headData[1], headData[0], headData[2]);
  172. fread(M.data, sizeof(char), M.step * M.rows, f);
  173. fclose(f);
  174. M_copy.copyTo(M);
  175. return true;
  176. }
  177. void on_mouse1(int event, int x, int y, int flags, void *ustc)
  178. {
  179. if (event == CV_EVENT_LBUTTONDOWN)
  180. {
  181. Point p = Point(x, y);
  182. circle(ref_win, p, 1, Scalar(0, 0, 255), -1);
  183. imshow("底图", ref_win);
  184. imagePoints1.push_back(p);
  185. cout << "底图: " << p << endl;
  186. pcount++;
  187. cout << "ponit num:" << pcount << endl;
  188. }
  189. }
  190. void on_mouse2(int event, int x, int y, int flags, void *ustc)
  191. {
  192. if (event == CV_EVENT_LBUTTONDOWN)
  193. {
  194. Point p = Point(x, y);
  195. circle(src_win, p, 1, Scalar(0, 0, 255), -1);
  196. imshow("待配准图", src_win);
  197. imagePoints2.push_back(p);
  198. cout << "待配准图: " << p << endl;
  199. }
  200. }
  201. int main()
  202. {
  203. Mat ref = imread("A0001.jpg");
  204. Mat src = imread("Bimg1.png");
  205. ref_win = ref.clone();
  206. src_win = src.clone();
  207. namedWindow("待配准图");
  208. namedWindow("底图");
  209. imshow("待配准图", src_win);
  210. imshow("底图", ref_win);
  211. setMouseCallback("待配准图", on_mouse2);
  212. setMouseCallback("底图", on_mouse1);
  213. waitKey();
  214. string str;
  215. printf("next?\n");
  216. cin >> str;
  217. //compute the mertix
  218. Mat homo = findHomography(imagePoints2, imagePoints1, CV_RANSAC);
  219. //matWrite("1JuZhen", homo);
  220. //Mat homo1;
  221. //matRead("1JuZhen", homo1);
  222. Mat imageTransform1;
  223. warpPerspective(src, imageTransform1, homo, Size(ref.cols, ref.rows)); //变换
  224. imshow("transform", imageTransform1);
  225. //imshow("基准图打点", ref_win);
  226. //imshow("待配准图打点", src_win);
  227. imshow("transform result", imageTransform1);
  228. imwrite("result.jpg", imageTransform1);
  229. //imwrite("src_p.jpg", src_win);
  230. //imwrite("ref_p.jpg", ref_win);
  231. waitKey();
  232. return 0;
  233. }
  234. #else
  235. void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);
  236. typedef struct
  237. {
  238. Point2f left_top;
  239. Point2f left_bottom;
  240. Point2f right_top;
  241. Point2f right_bottom;
  242. }four_corners_t;
  243. four_corners_t corners;
  244. void CalcCorners(const Mat& H, const Mat& src)
  245. {
  246. double v2[] = { 0, 0, 1 };//左上角
  247. double v1[3];//变换后的坐标值
  248. Mat V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  249. Mat V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  250. V1 = H * V2;
  251. //左上角(0,0,1)
  252. cout << "V2: " << V2 << endl;
  253. cout << "V1: " << V1 << endl;
  254. corners.left_top.x = v1[0] / v1[2];
  255. corners.left_top.y = v1[1] / v1[2];
  256. //左下角(0,src.rows,1)
  257. v2[0] = 0;
  258. v2[1] = src.rows;
  259. v2[2] = 1;
  260. V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  261. V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  262. V1 = H * V2;
  263. corners.left_bottom.x = v1[0] / v1[2];
  264. corners.left_bottom.y = v1[1] / v1[2];
  265. //右上角(src.cols,0,1)
  266. v2[0] = src.cols;
  267. v2[1] = 0;
  268. v2[2] = 1;
  269. V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  270. V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  271. V1 = H * V2;
  272. corners.right_top.x = v1[0] / v1[2];
  273. corners.right_top.y = v1[1] / v1[2];
  274. //右下角(src.cols,src.rows,1)
  275. v2[0] = src.cols;
  276. v2[1] = src.rows;
  277. v2[2] = 1;
  278. V2 = Mat(3, 1, CV_64FC1, v2); //列向量
  279. V1 = Mat(3, 1, CV_64FC1, v1); //列向量
  280. V1 = H * V2;
  281. corners.right_bottom.x = v1[0] / v1[2];
  282. corners.right_bottom.y = v1[1] / v1[2];
  283. }
  284. int main(int argc, char *argv[])
  285. {
  286. Mat image01 = imread("img1.png", 1); //右图
  287. Mat image02 = imread("img2.png", 1); //左图
  288. imshow("p2", image01);
  289. imshow("p1", image02);
  290. //灰度图转换
  291. Mat image1, image2;
  292. cvtColor(image01, image1, CV_RGB2GRAY);
  293. cvtColor(image02, image2, CV_RGB2GRAY);
  294. //提取特征点
  295. OrbFeatureDetector surfDetector(3000);
  296. vector<KeyPoint> keyPoint1, keyPoint2;
  297. surfDetector.detect(image1, keyPoint1);
  298. surfDetector.detect(image2, keyPoint2);
  299. //特征点描述,为下边的特征点匹配做准备
  300. OrbDescriptorExtractor SurfDescriptor;
  301. Mat imageDesc1, imageDesc2;
  302. SurfDescriptor.compute(image1, keyPoint1, imageDesc1);
  303. SurfDescriptor.compute(image2, keyPoint2, imageDesc2);
  304. flann::Index flannIndex(imageDesc1, flann::LshIndexParams(12, 20, 2), cvflann::FLANN_DIST_HAMMING);
  305. vector<DMatch> GoodMatchePoints;
  306. Mat macthIndex(imageDesc2.rows, 2, CV_32SC1), matchDistance(imageDesc2.rows, 2, CV_32FC1);
  307. flannIndex.knnSearch(imageDesc2, macthIndex, matchDistance, 2, flann::SearchParams());
  308. // Lowe's algorithm,获取优秀匹配点
  309. for (int i = 0; i < matchDistance.rows; i++)
  310. {
  311. if (matchDistance.at<float>(i, 0) < 0.4 * matchDistance.at<float>(i, 1))
  312. {
  313. DMatch dmatches(i, macthIndex.at<int>(i, 0), matchDistance.at<float>(i, 0));
  314. GoodMatchePoints.push_back(dmatches);
  315. }
  316. }
  317. Mat first_match;
  318. drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);
  319. imshow("first_match ", first_match);
  320. vector<Point2f> imagePoints1, imagePoints2;
  321. for (int i = 0; i<GoodMatchePoints.size(); i++)
  322. {
  323. imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
  324. imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
  325. }
  326. //获取图像1到图像2的投影映射矩阵 尺寸为3*3
  327. Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
  328. 也可以使用getPerspectiveTransform方法获得透视变换矩阵,不过要求只能有4个点,效果稍差
  329. //Mat homo=getPerspectiveTransform(imagePoints1,imagePoints2);
  330. cout << "变换矩阵为:\n" << homo << endl << endl; //输出映射矩阵
  331. //计算配准图的四个顶点坐标
  332. CalcCorners(homo, image01);
  333. cout << "left_top:" << corners.left_top << endl;
  334. cout << "left_bottom:" << corners.left_bottom << endl;
  335. cout << "right_top:" << corners.right_top << endl;
  336. cout << "right_bottom:" << corners.right_bottom << endl;
  337. //图像配准
  338. Mat imageTransform1, imageTransform2;
  339. warpPerspective(image01, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), image02.rows));
  340. //warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));
  341. imshow("直接经过透视矩阵变换", imageTransform1);
  342. imwrite("trans1.jpg", imageTransform1);
  343. waitKey();
  344. return 0;
  345. }
  346. #endif

关于图像融合:可以使用MATLAB中的waveanalyzer工具箱进行小波分解融合,也可以使用如下程序进行融合,不过程序里面我采用的是高频和低频取平均融合的方法

  1. % Composite two images: blend girl.bmp into scene.png
  2. clear
  3. % Read the first image, girl.bmp, and normalize it to truecolor RGB1
  4. filename1='girl.bmp';
  5. info=imfinfo(filename1); % query the image file's metadata
  6. width1=info.Width;
  7. height1=info.Height;
  8. if strcmp(info.ColorType,'grayscale')==1
  9. [A,MAP]=gray2ind(imread(filename1));% grayscale -> indexed
  10. RGB1=ind2rgb(A,MAP); % indexed -> truecolor
  11. end
  12. if strcmp(info.ColorType,'indexed')==1
  13. [A,MAP]=imread(filename1);
  14. RGB1=ind2rgb(A,MAP); % indexed -> truecolor
  15. end
  16. if strcmp(info.ColorType,'truecolor')==1
  17. RGB1=imread(filename1);
  18. end
  19. figure,imshow(RGB1);
  20. % Read the second image, scene.png, and normalize it to truecolor RGB2
  21. filename2='scene.png';
  22. info=imfinfo(filename2); % query the image file's metadata
  23. width2=info.Width;
  24. height2=info.Height;
  25. if strcmp(info.ColorType,'grayscale')==1
  26. [A,MAP]=gray2ind(imread(filename2));% grayscale -> indexed
  27. RGB2=ind2rgb(A,MAP); % indexed -> truecolor
  28. end
  29. if strcmp(info.ColorType,'indexed')==1
  30. [A,MAP]=imread(filename2);
  31. RGB2=ind2rgb(A,MAP); % indexed -> truecolor
  32. end
  33. if strcmp(info.ColorType,'truecolor')==1
  34. RGB2=imread(filename2);
  35. end
  36. figure,imshow(RGB2);
  37. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  38. % Seed fill: build the silhouette reference mask B
  39. BW=~im2bw(RGB1); % binarize, then invert
  40. figure;imshow(BW);
  41. % Flood-fill with 4-connectivity from the top-left and top-right corners so
  42. % everything outside the figure's outline becomes 1; comparing with BW then isolates the silhouette.
  43. B=imfill(imfill(BW,[1,1],4),[1,width1],4);
  44. figure;imshow(B);
  45. B=~xor(BW,B); % inverted XOR leaves B as the silhouette reference mask
  46. figure;imshow(B);
  47. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  48. % An experiment: boundary tracing followed by imfill hole filling.
  49. % It did not succeed because the bottom of the outline is not closed.
  50. BW2=im2bw(RGB1);
  51. figure,imshow(BW2);
  52. [m n]=size(BW2);
  53. imgn=zeros(m,n); % boundary-marker image
  54. ed=[-1 -1;0 -1;1 -1;1 0;1 1;0 1;-1 1;-1 0]; % neighbour offsets, counter-clockwise from the top-left
  55. for i=2:m-1
  56. for j=2:n-1
  57. if BW2(i,j)==1 && imgn(i,j)==0 % an unmarked white pixel
  58. if sum(sum(BW2(i-1:i+1,j-1:j+1)))~=9 % interior white pixels are not marked
  59. ii=i; % coordinates used while tracing around the blob
  60. jj=j;
  61. imgn(i,j)=1; % NOTE(review): first pixel marked 1, but the while-loop below exits as soon as it sees 1, so the trace never runs — the original likely marked the first pixel with 2; verify
  62. while imgn(ii,jj)~=1 % stop once the trace returns to the start
  63. for k=1:8 % search the 8-neighbourhood counter-clockwise
  64. tmpi=ii+ed(k,1); % candidate neighbour coordinates
  65. tmpj=jj+ed(k,2);
  66. % a new boundary pixel, and the trace has not closed yet
  67. if BW2(tmpi,tmpj)==1 && imgn(tmpi,tmpj)~=1
  68. ii=tmpi; % move to it and keep tracing
  69. jj=tmpj;
  70. imgn(ii,jj)=1;% mark as ordinary boundary (value 1)
  71. break;
  72. end
  73. end
  74. end
  75. end
  76. end
  77. end
  78. end
  79. figure;imshow(imgn)
  80. %imgn=imgn>=1;
  81. imfill(imgn,'hole') % NOTE(review): result is not assigned back to imgn, and MATLAB spells this option 'holes' — verify
  82. figure,imshow(imgn);
  83. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  84. % Pre-process the first image: blue background + boundary colour repair
  85. A=im2double(RGB1); % portrait image as double
  86. D=zeros(height1,width1); % flags pixels on the foreground/background boundary
  87. K=3; % neighbourhood width
  88. for i=1:1:height1
  89. for j=1:1:width1
  90. if B(i,j)==0 % mask says background: paint it blue
  91. A(i,j,1)=0;
  92. A(i,j,2)=0;
  93. A(i,j,3)=1.0;
  94. else % handle the foreground/background boundary
  95. for k=1:1:K
  96. N(k)=B(i,j+k-round(K/2));
  97. end
  98. if ~all(N) % some horizontal neighbour is background
  99. if ~isempty(find(N==1))
  100. k1=1;k2=-1;
  101. while 1 % replace this pixel's colour with the nearest interior (inside-silhouette) pixel's
  102. r=A(i,j+k1,1);g=A(i,j+k1,2);b=A(i,j+k1,3);
  103. if B(i,j+k1)==1
  104. break;
  105. end
  106. r=A(i,j+k2,1);g=A(i,j+k2,2);b=A(i,j+k2,3);
  107. if B(i,j+k2)==1
  108. break;
  109. end
  110. k1=k1+1;k2=k2-1;
  111. end
  112. A(i,j,1)=r;
  113. A(i,j,2)=g;
  114. A(i,j,3)=b;
  115. D(i,j)=1; % flag as a boundary pixel
  116. end
  117. end
  118. end
  119. end
  120. end
  121. figure;imshow(A);
  122. x=0;y=0; % offset of image 1 within image 2
  123. RGB=im2double(RGB2);
  124. % Blend the images
  125. for i=1:1:height1
  126. for j=1:1:width1
  127. if B(i,j)==1
  128. if D(i,j)==1 % boundary pixels: 50/50 cross-fade
  129. RGB(y+i,x+j,1)=0.5*A(i,j,1)+0.5*RGB(y+i,x+j,1);
  130. RGB(y+i,x+j,2)=0.5*A(i,j,2)+0.5*RGB(y+i,x+j,2);
  131. RGB(y+i,x+j,3)=0.5*A(i,j,3)+0.5*RGB(y+i,x+j,3);
  132. else % foreground pixels: copy from the portrait directly
  133. RGB(y+i,x+j,1)=A(i,j,1);
  134. RGB(y+i,x+j,2)=A(i,j,2);
  135. RGB(y+i,x+j,3)=A(i,j,3);
  136. end
  137. end
  138. end
  139. end
  140. figure,imshow(RGB);
  141. clc;

发表评论

表情:
评论列表 (有 0 条评论,349人围观)

还没有评论,来说两句吧...

相关阅读

    相关 ArcMap地理

    1.打开菜单栏中的Customize中的 ->ToolBars ->Georeference 2.将两幅图像进行地理配准,现在需要配准的图像上选取GCP然后在相对正确的图像上