OpenMP Parallel Programming Application: Accelerating an OpenCV Image Stitching Algorithm


OpenMP is a parallel programming solution for multiprocessor systems. It provides a high-level abstraction for parallel programming: by adding a few concise directives to a program, you can write efficient parallel code without having to care about the details of the underlying parallel implementation, which lowers the difficulty and complexity of parallel programming. Precisely because of this simplicity and ease of use, however, OpenMP is not well suited to situations that require complex inter-thread synchronization and mutual exclusion.

Image stitching in OpenCV with SIFT or SURF features has to extract and describe features separately for two or more images, and only then match the feature points, transform the images, and so on. Feature extraction and description of the different images is the most time-consuming part of the whole process, and it is also work that runs independently for each image, so it can be accelerated with OpenMP.
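As a minimal illustration of what "adding a simple directive" means (this toy loop is an illustrative aside, not part of the stitching program), a loop whose iterations are independent can be distributed across threads with a single pragma:

#include <omp.h>
#include <cstdio>

int main()
{
    double data[1000];
    // Every iteration writes a different element and depends on nothing else,
    // so one directive is enough to split the loop across the available threads.
    #pragma omp parallel for
    for (int i = 0; i < 1000; i++)
    {
        data[i] = i * 0.5;
    }
    std::printf("last value: %f\n", data[999]);
    return 0;
}

The stitching example below applies the same idea at a coarser granularity: the per-image feature work is independent, so it can be handed to OpenMP instead of running serially.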

Here is the original SIFT image stitching program without OpenMP acceleration:

#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "omp.h"

using namespace cv;

// Compute where a point in the original image lands in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri);

int main(int argc, char *argv[])
{
    float startTime = omp_get_wtime();

    Mat image01 = imread("Test01.jpg");
    Mat image02 = imread("Test02.jpg");
    imshow("拼接图像 1", image01);
    imshow("拼接图像 2", image02);

    // Convert to grayscale
    Mat image1, image2;
    cvtColor(image01, image1, CV_RGB2GRAY);
    cvtColor(image02, image2, CV_RGB2GRAY);

    // Extract feature points
    SiftFeatureDetector siftDetector(800);  // Hessian matrix threshold
    vector<KeyPoint> keyPoint1, keyPoint2;
    siftDetector.detect(image1, keyPoint1);
    siftDetector.detect(image2, keyPoint2);

    // Describe the feature points in preparation for the matching below
    SiftDescriptorExtractor siftDescriptor;
    Mat imageDesc1, imageDesc2;
    siftDescriptor.compute(image1, keyPoint1, imageDesc1);
    siftDescriptor.compute(image2, keyPoint2, imageDesc2);

    float endTime = omp_get_wtime();
    std::cout << "不使用 OpenMP 加速消耗时间: " << endTime - startTime << std::endl;

    // Match the feature points and extract the best pairs
    FlannBasedMatcher matcher;
    vector<DMatch> matchePoints;
    matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
    sort(matchePoints.begin(), matchePoints.end());  // Sort the matches

    // Take the top N best matches
    vector<Point2f> imagePoints1, imagePoints2;
    for (int i = 0; i < 10; i++)
    {
        imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
        imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
    }

    // Get the 3x3 projective mapping (homography) from image 1 to image 2
    Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
    Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
    Mat adjustHomo = adjustMat * homo;

    // Position of the strongest match in the original image and in the transformed image,
    // used to locate the stitching point
    Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
    originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
    targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
    basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

    // Image registration
    Mat imageTransform1;
    warpPerspective(image01, imageTransform1, adjustMat * homo,
                    Size(image02.cols + image01.cols + 110, image02.rows));

    // Blend the overlap region to the left of the strongest match so the junction
    // transitions smoothly, without abrupt changes
    Mat image1Overlap, image2Overlap;  // Overlapping parts of image 1 and image 2
    image1Overlap = imageTransform1(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0),
                                         Point(targetLinkPoint.x, image02.rows)));
    image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
    Mat image1ROICopy = image1Overlap.clone();  // Copy of image 1's overlap region
    for (int i = 0; i < image1Overlap.rows; i++)
    {
        for (int j = 0; j < image1Overlap.cols; j++)
        {
            double weight;
            weight = (double)j / image1Overlap.cols;  // Blending coefficient that varies with distance
            image1Overlap.at<Vec3b>(i, j)[0] = (1 - weight) * image1ROICopy.at<Vec3b>(i, j)[0] + weight * image2Overlap.at<Vec3b>(i, j)[0];
            image1Overlap.at<Vec3b>(i, j)[1] = (1 - weight) * image1ROICopy.at<Vec3b>(i, j)[1] + weight * image2Overlap.at<Vec3b>(i, j)[1];
            image1Overlap.at<Vec3b>(i, j)[2] = (1 - weight) * image1ROICopy.at<Vec3b>(i, j)[2] + weight * image2Overlap.at<Vec3b>(i, j)[2];
        }
    }
    Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));  // Non-overlapping part of image 2
    ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows)));  // Append the non-overlapping part directly

    namedWindow("拼接结果", 0);
    imshow("拼接结果", imageTransform1);
    imwrite("D:拼接结果.jpg", imageTransform1);
    waitKey();
    return 0;
}

// Compute where a point in the original image lands in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri)
{
    Mat originelP, targetP;
    originelP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);
    targetP = transformMaxtri * originelP;
    float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
    float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
    return Point2f(x, y);
}

After that, it is enough to include the OpenMP header "omp.h" in the program:

#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "omp.h"

using namespace cv;

// Compute where a point in the original image lands in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri);

int main(int argc, char *argv[])
{
    float startTime = omp_get_wtime();

    Mat image01, image02;
    Mat image1, image2;
    vector<KeyPoint> keyPoint1, keyPoint2;
    Mat imageDesc1, imageDesc2;
    SiftFeatureDetector siftDetector(800);  // Hessian matrix threshold
    SiftDescriptorExtractor siftDescriptor;
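The excerpt of the accelerated version breaks off at this point. Its essential change is the one motivated above: loading, grayscale conversion, feature detection, and feature description are independent for the two images, so they can run on separate threads. The fragment below is a sketch of how that is typically written with OpenMP sections, reusing the variable names declared in the listing above; it is an illustrative reconstruction, not the article's verbatim code.

    // Sketch: run the two independent per-image pipelines concurrently.
    #pragma omp parallel sections
    {
        #pragma omp section
        {
            image01 = imread("Test01.jpg");
            cvtColor(image01, image1, CV_RGB2GRAY);
            siftDetector.detect(image1, keyPoint1);
            siftDescriptor.compute(image1, keyPoint1, imageDesc1);
        }
        #pragma omp section
        {
            image02 = imread("Test02.jpg");
            cvtColor(image02, image2, CV_RGB2GRAY);
            siftDetector.detect(image2, keyPoint2);
            siftDescriptor.compute(image2, keyPoint2, imageDesc2);
        }
    }
    // The shared siftDetector/siftDescriptor objects are assumed to be safe to
    // call on different images from two threads; if not, declare a separate
    // detector and extractor inside each section.

    float endTime = omp_get_wtime();
    std::cout << "使用 OpenMP 加速消耗时间: " << endTime - startTime << std::endl;

The matching, homography estimation, and blending steps that follow remain serial, exactly as in the first listing. For the pragmas (and omp_get_wtime) to take effect, the program must be compiled with OpenMP support enabled, for example /openmp in Visual Studio or -fopenmp with GCC.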
