Feature-Based Template Matching and Perspective Transformation of Images with OpenCV

Author: leon0514 | Published 2024-01-15 16:06

Preface

Recently I ran into a problem: given a picture taken after the camera has been rotated by some angle or zoomed, how do I map the coordinates of objects detected on that picture back onto the picture taken before the shift or zoom?

Image feature extraction: the ORB algorithm

ORB (Oriented FAST and Rotated BRIEF) is a feature detection algorithm built on top of the well-known FAST keypoint detector and the BRIEF descriptor. It runs far faster than SIFT and SURF, which makes it suitable for real-time feature detection. I won't analyze the underlying theory here; I don't fully understand it myself.
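As a minimal sketch of ORB keypoint detection in Python (the file name 'sample.jpg' is a placeholder):

import cv2

# Detect up to 10000 ORB keypoints on a grayscale image.
img = cv2.imread('sample.jpg', cv2.IMREAD_GRAYSCALE)
orb = cv2.ORB_create(10000)
keypoints = orb.detect(img, None)
print('detected', len(keypoints), 'keypoints')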

Local image descriptors: the BEBLID algorithm

BEBLID stands for Boosted Efficient Binary Local Image Descriptor. Multiple experiments have shown that it improves image-matching accuracy while reducing execution time. Local image descriptors are used to match images under strong appearance changes, such as illumination changes or geometric transformations. They are a basic building block of many computer vision tasks, such as 3D reconstruction, SLAM, image retrieval, and pose estimation, and they are among the most popular image representations because local features are distinctive and viewpoint-invariant. Again, I won't analyze the underlying theory here; I don't fully understand it myself.
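A minimal sketch of computing BEBLID descriptors on ORB keypoints; BEBLID ships with the opencv-contrib xfeatures2d module, and the 0.75 scale factor matches the code later in this post ('sample.jpg' is again a placeholder):

import cv2

img = cv2.imread('sample.jpg', cv2.IMREAD_GRAYSCALE)
kpts = cv2.ORB_create(10000).detect(img, None)
# The argument is the sampling-window scale around each keypoint.
descriptor = cv2.xfeatures2d.BEBLID_create(0.75)
kpts, desc = descriptor.compute(img, kpts)
print(desc.shape, desc.dtype)  # binary descriptors stored as uint8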

Feature point matching: the FLANN algorithm

FLANN is short for Fast Library for Approximate Nearest Neighbors. It is a collection of algorithms, all heavily optimized, for nearest-neighbor search on large datasets and in high-dimensional feature spaces.
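A minimal, self-contained sketch of FLANN-based kNN matching plus Lowe's ratio test; the random descriptors are stand-ins for real BEBLID output:

import cv2
import numpy as np

# Stand-in float32 descriptors; in practice these come from a real extractor.
desc1 = np.random.rand(500, 64).astype(np.float32)
desc2 = np.random.rand(500, 64).astype(np.float32)

# FLANN's default KD-tree index works on floating-point descriptors.
matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(desc1, desc2, 2)  # two nearest neighbours per query
# Lowe's ratio test: keep a match only if it is clearly better than the runner-up.
good = [m for m, n in knn_matches if m.distance < 0.7 * n.distance]
print(len(good), 'matches survive the ratio test')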

The perspective transformation matrix

The findHomography function computes the optimal homography H (a 3×3 matrix) between multiple pairs of corresponding 2D points, using either a least-mean-squared-error criterion or the RANSAC method.
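Concretely, the homography H = (h_ij) maps a point (x, y) to (x', y') with a perspective divide; this is exactly the formula the coordinate-mapping helpers below implement:

x' = \frac{h_{11} x + h_{12} y + h_{13}}{h_{31} x + h_{32} y + h_{33}},
\qquad
y' = \frac{h_{21} x + h_{22} y + h_{23}}{h_{31} x + h_{32} y + h_{33}}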

Code

  1. Given the reference image, i.e., the image after the change
  2. Given the transform image, i.e., the image before the change
  3. Given the coordinates of the objects detected on the changed image
  • Main code

perspective.hpp, main code (header file)

#ifndef PERSPECTIVE_HPP
#define PERSPECTIVE_HPP
#include "cpm/infer.hpp"
#include "perspectiveTransform/perspectiveIO.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/xfeatures2d.hpp"

namespace pt{

class PerspectiveInferImpl : public infer::Infer<PerspectiveOutputArray, PerspectiveInput>
{
public:
    PerspectiveOutputArray forward(const PerspectiveInput& input);

private:
    // ORB detector configured for up to 10000 keypoints.
    cv::Ptr<cv::ORB> detector_ = cv::ORB::create(10000);
    // BEBLID descriptor from the opencv-contrib xfeatures2d module (scale factor 0.75).
    cv::Ptr<cv::xfeatures2d::BEBLID> descriptor_ = cv::xfeatures2d::BEBLID::create(0.75f);
    // FLANN-based matcher; the binary BEBLID descriptors are converted to CV_32F before matching.
    cv::Ptr<cv::DescriptorMatcher> matcher_ = cv::DescriptorMatcher::create(cv::DescriptorMatcher::FLANNBASED);
    float min_match_point_size_ = 20; // minimum number of matched points (unused in the snippet shown)
};

std::shared_ptr<infer::Infer<PerspectiveOutputArray, PerspectiveInput>> load();

} // namespace pt
#endif // PERSPECTIVE_HPP

perspective.cpp, main code

#include "perspectiveTransform/perspective.hpp"
#include <chrono>
#include <vector>

namespace pt
{

// Apply the 3x3 homography `matrix` to the point (x, y) and write the
// perspective-divided result to (*ox, *oy).
static void perspectivate_coordinate(double x, double y, double* ox, double* oy,  const cv::Mat& matrix)
{
    *ox = (matrix.at<double>(0, 0) * x + matrix.at<double>(0, 1) * y + matrix.at<double>(0, 2)) /
          (matrix.at<double>(2, 0) * x + matrix.at<double>(2, 1) * y + matrix.at<double>(2, 2));
    *oy = (matrix.at<double>(1, 0) * x + matrix.at<double>(1, 1) * y + matrix.at<double>(1, 2)) /
          (matrix.at<double>(2, 0) * x + matrix.at<double>(2, 1) * y + matrix.at<double>(2, 2));

}

// Map each detected area through the homography. Note that only the two
// corners (left, top) and (right, bottom) are transformed, so the output box
// is an axis-aligned approximation of the true warped quadrilateral.
static PerspectiveOutputArray decode(const PerspectiveInput& input, const cv::Mat& matrix)
{
    PerspectiveOutputArray result;
    for (const auto& data : input.areas)
    {
        common::ClassfierArea area;
        area.id = data.id;
        area.label = data.label;
        double left = data.left;
        double top = data.top;
        double right = data.right;
        double bottom = data.bottom;

        perspectivate_coordinate(left, top, &left, &top, matrix);
        perspectivate_coordinate(right, bottom, &right, &bottom, matrix);

        area.left = (int)(left);
        area.top = (int)(top);
        area.right = (int)(right);
        area.bottom = (int)(bottom);
        result.push_back(area);
    }
    return result;
}

PerspectiveOutputArray PerspectiveInferImpl::forward(const PerspectiveInput& input)
{
    // Convert both images to grayscale for detection and description.
    cv::Mat reference_image_gray;
    cv::cvtColor(input.reference_image, reference_image_gray, cv::COLOR_BGR2GRAY);
    cv::Mat transform_image_gray;
    cv::cvtColor(input.transform_image, transform_image_gray, cv::COLOR_BGR2GRAY);
    
    // Detect ORB keypoints on both images.
    std::vector<cv::KeyPoint> dest_kpts, src_kpts;
    detector_->detect(reference_image_gray, dest_kpts);
    detector_->detect(transform_image_gray, src_kpts);

    // Compute BEBLID descriptors for the detected keypoints.
    cv::Mat dest_des, src_des;
    descriptor_->compute(reference_image_gray, dest_kpts, dest_des);
    descriptor_->compute(transform_image_gray, src_kpts, src_des);
    // BEBLID descriptors are binary (CV_8U); the FLANN KD-tree matcher
    // expects floating-point data, so convert them to CV_32F.
    src_des.convertTo(src_des, CV_32F);
    dest_des.convertTo(dest_des, CV_32F);
   

    // kNN matching (k = 2), then Lowe's ratio test to keep distinctive matches.
    std::vector<std::vector<cv::DMatch>> knn_matches;
    matcher_->knnMatch(dest_des, src_des, knn_matches, 2);

    float ratio_thresh = 0.7f;
    std::vector<cv::DMatch> good_matches;

    for (size_t i = 0; i < knn_matches.size(); i++)
    {
        if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
        {
            good_matches.push_back(knn_matches[i][0]);
        }
    }
    
    std::vector<cv::Point2f> dest_points(good_matches.size());
    std::vector<cv::Point2f> src_points(good_matches.size());
    
    for (size_t i = 0; i < good_matches.size(); i++) 
    {
        dest_points[i] = dest_kpts[good_matches[i].queryIdx].pt;
        src_points[i] = src_kpts[good_matches[i].trainIdx].pt;
    }

    // Estimate the homography that maps the transform image onto the reference
    // image, then invert it so detections on the reference image can be mapped
    // back onto the transform image.
    cv::Mat pers_matrix = cv::findHomography(src_points, dest_points, cv::RANSAC);
    cv::Mat pers_matrix_inv;
    cv::invert(pers_matrix, pers_matrix_inv);
    PerspectiveOutputArray result = decode(input, pers_matrix_inv);
   

    /*
    // Optional: warp the transform image onto the reference image to visually
    // verify the estimated homography.
    int height = input.reference_image.rows;
    int width = input.reference_image.cols;
    cv::Mat imageReg;
    cv::warpPerspective(input.transform_image, imageReg, pers_matrix, cv::Size(width, height));
    cv::imwrite("warpPerspective.jpg", imageReg);
    */
    return result;
}

std::shared_ptr<infer::Infer<PerspectiveOutputArray, PerspectiveInput>> load()
{
    return std::shared_ptr<PerspectiveInferImpl>(new PerspectiveInferImpl());
}
}

  • Python code
import cv2
import numpy as np

def cvt_pos(pos, cvt_mat_t):
    # Apply a 3x3 homography to the point (u, v), including the perspective divide.
    u = pos[0]
    v = pos[1]
    x = (cvt_mat_t[0][0]*u + cvt_mat_t[0][1]*v + cvt_mat_t[0][2]) / (cvt_mat_t[2][0]*u + cvt_mat_t[2][1]*v + cvt_mat_t[2][2])
    y = (cvt_mat_t[1][0]*u + cvt_mat_t[1][1]*v + cvt_mat_t[1][2]) / (cvt_mat_t[2][0]*u + cvt_mat_t[2][1]*v + cvt_mat_t[2][2])
    return (x, y)

img1 = cv2.imread('img1.jpg', cv2.IMREAD_GRAYSCALE) 
img2 = cv2.imread('img2.jpg', cv2.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    print('Could not open or find the images!') 
    exit(0)
detector = cv2.ORB_create(10000)
kpts1 = detector.detect(img1, None)
kpts2 = detector.detect(img2, None)

descriptor = cv2.xfeatures2d.BEBLID_create(0.75)
keypoints1, descriptors1 = descriptor.compute(img1, kpts1) # descriptors for img1's keypoints
keypoints2, descriptors2 = descriptor.compute(img2, kpts2) # descriptors for img2's keypoints

# BEBLID descriptors are binary (uint8); FLANN's KD-tree index expects
# floating-point data, so convert them to float32 first.
descriptors1 = descriptors1.astype(np.float32)
descriptors2 = descriptors2.astype(np.float32)
#-- Step 2: Matching descriptor vectors with a FLANN based matcher
matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_FLANNBASED)

knn_matches = matcher.knnMatch(descriptors1, descriptors2, 2) # match img1's features against img2's
#-- Filter matches using Lowe's ratio test
ratio_thresh = 0.7
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m) # keep only the distinctive matches
#-- Draw matches
img_matches = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1]+img2.shape[1], 3), dtype=np.uint8)
cv2.drawMatches(img1, keypoints1, img2, keypoints2, good_matches, img_matches, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# Build an image visualizing the matched feature pairs
#-- Show detected matches
# cv2.namedWindow('Good Matches', cv2.WINDOW_NORMAL) # WINDOW_NORMAL makes the window resizable
# cv2.imshow('Good Matches', img_matches) # show the match visualization
# cv2.waitKey(0)

points1 = np.zeros((len(good_matches), 2), dtype=float)
points2 = np.zeros((len(good_matches), 2), dtype=float)


for i in range(len(good_matches)):
    points1[i, :] = keypoints1[good_matches[i].queryIdx].pt
    points2[i, :] = keypoints2[good_matches[i].trainIdx].pt # positions of the matched keypoints

h, mask = cv2.findHomography(points1, points2, cv2.RANSAC) # homography that aligns img1 to img2

height, width = img2.shape
img1Reg = cv2.warpPerspective(img1, h, (width, height)) # warp img1 with the estimated homography


# Example box corners detected on img2 (replace with real detection output);
# inv(h) maps img2 coordinates back onto img1.
start_point = (100, 100)
end_point = (300, 200)
left, top = cvt_pos(start_point, np.linalg.inv(h))
right, bottom = cvt_pos(end_point, np.linalg.inv(h))
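As a cross-check (and a shorter alternative to the hand-written cvt_pos), OpenCV's built-in cv2.perspectiveTransform applies the same mapping; start_point and end_point are the example coordinates defined above:

pts = np.array([[start_point], [end_point]], dtype=np.float32)  # shape (N, 1, 2)
mapped = cv2.perspectiveTransform(pts, np.linalg.inv(h))
print(mapped.reshape(-1, 2))  # should agree with (left, top) and (right, bottom)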

  • Result image

    (Screenshot 1705392378079.png omitted.)
