图像模板匹配
原理
用T表示模板图像,I表示待匹配图像,且模板图像的宽为w高为h,用R表示匹配结果,匹配过程如下图所示:
1、平方差匹配算法method=TM_SQDIFF
这类方法利用平方差来进行匹配,最好匹配为0,而若匹配越差,匹配值则越大
2、归一化平方差匹配法method=TM_SQDIFF_NORMED
3、相关匹配法method=TM_CCORR
这类方法采用模板和图像之间的乘法操作,所以较大的数表示匹配程度较高,0表示最坏的匹配效果;
4、归一化相关匹配法method=TM_CCORR_NORMED
5、系数匹配法method=TM_CCOEFF
这类方法将模板对其均值的相对值与图像对其均值的相关值进行匹配,1表示完美匹配,-1表示糟糕匹配,而0表示没有任何相关性;
6、归一化相关系数匹配法method=TM_CCOEFF_NORMED
代码实现
平方差匹配法
//平方差匹配法
void ColorMatch_TM_SQDIFF(const Mat &img, const Mat &templ) {
//判断图像是否存在
if (img.empty() || templ.empty()) {
cout << "Error: Input image or template is empty." << endl;
return;
}
//判断图像是否是彩色图像
if (img.channels() != 3 || templ.channels() != 3) {
cout << "Error: Both input image and template must be color images." << endl;
return;
}
int result_cols = img.cols - templ.cols + 1;//移动的列数
int result_rows = img.rows - templ.rows + 1;//移动的行数
cv::Mat result(result_rows, result_cols, CV_32FC1);//存储平方差结果 CV_32FC1 32bites F 单精度浮点数 C1 通道数1
result.setTo(Scalar::all(0));//初始化为0
for (int y = 0; y < result_rows; ++y) {
for (int x = 0; x < result_cols; ++x) {
float sqdiff = 0.0f;
for (int j = 0; j < templ.rows; ++j) {
for (int i = 0; i < templ.cols; ++i) {
Vec3b img_pixel = img.at<Vec3b>(y + j, x + i);//Vec3b 长度为3的unchar类型向量
Vec3b templ_pixel = templ.at < Vec3b>(j, i);
for (int k = 0; k < 3; ++k) {
float diff = static_cast<float>(img_pixel[k]) - static_cast<float>(templ_pixel[k]);//计算平方差
sqdiff += diff * diff;//累加
}
}
}
result.at<float>(y, x) = sqdiff;//记录结果
}
}
double minVal;//最小值
Point minLoc;//最小值所在位置
minMaxLoc(result, &minVal, nullptr, &minLoc, nullptr);//在数组中寻找全局最大最小值
Mat img_display;
img.copyTo(img_display);
rectangle(img_display, minLoc, Point(minLoc.x + templ.cols, minLoc.y + templ.rows), Scalar(0, 255, 0), 2);
imshow("SQDIFF", img_display);
waitKey(0);
}
归一化平方差匹配法
//归一化平方差匹配法
// Normalized squared-difference matching (TM_SQDIFF_NORMED):
//   R(x,y) = sum((I - T)^2) / sqrt(sum(I^2) * sum(T^2))
// Lower is better; 0 is a perfect match. Displays the best match in a window
// titled "SQDIFF_NORMED". The input images are not modified.
//
// Bug fixes vs. the naive version: the difference and squaring are done in
// CV_32F — doing absdiff/pow on CV_8UC3 saturates at 255, so any channel
// difference above 15 was clipped when squared — and cv::sum() results are
// accumulated over all three channels instead of only channel 0.
void ColorMatch_TM_SQDIFF_NORMED(const cv::Mat& image, const cv::Mat& templ) {
    if (image.empty() || templ.empty()) {
        std::cout << "Error: Input image or template is empty." << std::endl;
        return;
    }
    if (image.cols < templ.cols || image.rows < templ.rows) {
        std::cout << "Error: Template must not be larger than the input image." << std::endl;
        return;
    }
    const int result_cols = image.cols - templ.cols + 1;
    const int result_rows = image.rows - templ.rows + 1;
    cv::Mat result(result_rows, result_cols, CV_32FC1);

    // Convert the template once; its energy sum(T^2) is loop-invariant.
    cv::Mat templ_f;
    templ.convertTo(templ_f, CV_32F);
    const cv::Scalar te = cv::sum(templ_f.mul(templ_f));
    const double templ_energy = te[0] + te[1] + te[2];

    for (int i = 0; i < result_rows; i++) {
        for (int j = 0; j < result_cols; j++) {
            // Float copy of the current window so arithmetic cannot saturate.
            cv::Mat roi_f;
            image(cv::Rect(j, i, templ.cols, templ.rows)).convertTo(roi_f, CV_32F);

            const cv::Mat diff = roi_f - templ_f;
            const cv::Scalar sq = cv::sum(diff.mul(diff));
            const double sum_sqdiff = sq[0] + sq[1] + sq[2];

            const cv::Scalar we = cv::sum(roi_f.mul(roi_f));
            const double window_energy = we[0] + we[1] + we[2];

            const double norm_factor = std::sqrt(window_energy * templ_energy);
            // An all-black window and template differ by 0; avoid 0/0.
            result.at<float>(i, j) = norm_factor > 0.0
                ? static_cast<float>(sum_sqdiff / norm_factor)
                : 0.0f;
        }
    }
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
    // Draw on a copy so the const input stays untouched.
    cv::Mat display = image.clone();
    cv::rectangle(display, minLoc,
                  cv::Point(minLoc.x + templ.cols, minLoc.y + templ.rows),
                  cv::Scalar(0, 255, 0), 2);
    cv::imshow("SQDIFF_NORMED", display);
    cv::waitKey(0);
}
相关匹配法
//相关匹配方法
// Cross-correlation matching (TM_CCORR): R(x,y) = sum(I * T).
// Higher is better; the global maximum marks the best match. Displays the
// result in a window titled "CCORR". The input images are not modified.
//
// Bug fixes vs. the original: the result write was nested inside the
// template-row loop (rewriting the cell once per template row), and the
// accumulator is now double — a float mantissa (24 bits) loses precision
// once the running sum of pixel products exceeds ~1.6e7, which happens for
// realistic template sizes.
void ColorMatch_TM_CCORR(const cv::Mat &img, const cv::Mat &templ)
{
    if (img.empty() || templ.empty()) {
        std::cout << "Error: Input image or template is empty." << std::endl;
        return;
    }
    if (img.cols < templ.cols || img.rows < templ.rows) {
        std::cout << "Error: Template must not be larger than the input image." << std::endl;
        return;
    }
    const int result_cols = img.cols - templ.cols + 1;
    const int result_rows = img.rows - templ.rows + 1;
    cv::Mat result(result_rows, result_cols, CV_32FC1);
    for (int i = 0; i < result_rows; i++) {
        for (int j = 0; j < result_cols; j++) {
            double sum = 0.0;  // double: avoids float precision loss on large windows
            for (int k = 0; k < templ.rows; k++) {
                for (int l = 0; l < templ.cols; l++) {
                    for (int c = 0; c < 3; c++) {
                        sum += static_cast<double>(img.at<cv::Vec3b>(i + k, j + l)[c])
                             * static_cast<double>(templ.at<cv::Vec3b>(k, l)[c]);
                    }
                }
            }
            // Write once per window position, after the full accumulation.
            result.at<float>(i, j) = static_cast<float>(sum);
        }
    }
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
    // For TM_CCORR the *maximum* is the best match. Draw on a copy so the
    // const input stays untouched.
    cv::Mat display = img.clone();
    cv::rectangle(display, maxLoc,
                  cv::Point(maxLoc.x + templ.cols, maxLoc.y + templ.rows),
                  cv::Scalar(0, 255, 0), 2);
    cv::imshow("CCORR", display);
    cv::waitKey(0);
}
归一化相关匹配法
// Normalized cross-correlation matching (TM_CCORR_NORMED):
//   R(x,y) = sum(I * T) / sqrt(sum(I^2) * sum(T^2))
// Scores lie in [0, 1]; higher is better. Displays the best match in a
// window titled "CCORR_NORMED". The input images are not modified.
//
// Bug fixes vs. the original: double accumulators (float loses precision on
// realistic window sizes) and a guard against dividing by zero when the
// window or template is entirely black.
void ColorMatch_TM_CCORR_NORMED(const cv::Mat &img, const cv::Mat &templ)
{
    if (img.empty() || templ.empty()) {
        std::cout << "Error: Input image or template is empty." << std::endl;
        return;
    }
    if (img.cols < templ.cols || img.rows < templ.rows) {
        std::cout << "Error: Template must not be larger than the input image." << std::endl;
        return;
    }
    const int result_cols = img.cols - templ.cols + 1;
    const int result_rows = img.rows - templ.rows + 1;
    cv::Mat result(result_rows, result_cols, CV_32FC1);
    for (int i = 0; i < result_rows; i++) {
        for (int j = 0; j < result_cols; j++) {
            double cross = 0.0;       // sum(I * T)
            double img_energy = 0.0;  // sum(I^2) over the current window
            double templ_energy = 0.0;// sum(T^2)
            for (int k = 0; k < templ.rows; k++) {
                for (int l = 0; l < templ.cols; l++) {
                    for (int c = 0; c < 3; c++) {
                        const double img_val = img.at<cv::Vec3b>(i + k, j + l)[c];
                        const double templ_val = templ.at<cv::Vec3b>(k, l)[c];
                        cross += img_val * templ_val;
                        img_energy += img_val * img_val;
                        templ_energy += templ_val * templ_val;
                    }
                }
            }
            const double denom = std::sqrt(img_energy * templ_energy);
            // All-black window or template: correlation is undefined; score 0.
            result.at<float>(i, j) = denom > 0.0
                ? static_cast<float>(cross / denom)
                : 0.0f;
        }
    }
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
    // Maximum = best match; draw on a copy so the const input stays untouched.
    cv::Mat display = img.clone();
    cv::rectangle(display, maxLoc,
                  cv::Point(maxLoc.x + templ.cols, maxLoc.y + templ.rows),
                  cv::Scalar(0, 255, 0), 2);
    cv::imshow("CCORR_NORMED", display);
    cv::waitKey(0);
}
系数匹配法
//系数匹配法
// Correlation-coefficient matching (TM_CCOEFF): correlates the mean-removed
// template with the mean-removed window. Using the identity
//   sum((I - meanI)(T - meanT)) = sum(I*T) - N * meanI * meanT
// only one pass over each window is needed. Higher is better. Displays the
// best match in a window titled "CCOEFF". The input images are not modified.
//
// Improvements vs. the original: the template sum (loop-invariant) is hoisted
// out of the sliding loops instead of being recomputed for every window, the
// two per-window passes are merged into one, accumulators are double, and the
// misleading "mean" names for variables holding sums are corrected.
void ColorMatch_TM_CCOEFF(const cv::Mat &img, const cv::Mat &templ)
{
    if (img.empty() || templ.empty()) {
        std::cout << "Error: Input image or template is empty." << std::endl;
        return;
    }
    if (img.cols < templ.cols || img.rows < templ.rows) {
        std::cout << "Error: Template must not be larger than the input image." << std::endl;
        return;
    }
    const int result_cols = img.cols - templ.cols + 1;
    const int result_rows = img.rows - templ.rows + 1;
    cv::Mat result(result_rows, result_cols, CV_32FC1);

    // N = number of samples per window (3 channels per pixel).
    const double count = static_cast<double>(templ.rows) * templ.cols * 3;

    // Hoisted: sum of all template sample values (identical for every window).
    double templ_sum = 0.0;
    for (int k = 0; k < templ.rows; k++)
        for (int l = 0; l < templ.cols; l++)
            for (int c = 0; c < 3; c++)
                templ_sum += templ.at<cv::Vec3b>(k, l)[c];

    for (int i = 0; i < result_rows; i++) {
        for (int j = 0; j < result_cols; j++) {
            double win_sum = 0.0;  // sum of window sample values
            double cross = 0.0;    // sum(I * T)
            for (int k = 0; k < templ.rows; k++) {
                for (int l = 0; l < templ.cols; l++) {
                    for (int c = 0; c < 3; c++) {
                        const double img_val = img.at<cv::Vec3b>(i + k, j + l)[c];
                        const double templ_val = templ.at<cv::Vec3b>(k, l)[c];
                        win_sum += img_val;
                        cross += img_val * templ_val;
                    }
                }
            }
            // cross - meanI * sumT  ==  sum(I*T) - N*meanI*meanT
            result.at<float>(i, j) =
                static_cast<float>(cross - (win_sum / count) * templ_sum);
        }
    }
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
    // Maximum = best match; draw on a copy so the const input stays untouched.
    cv::Mat display = img.clone();
    cv::rectangle(display, maxLoc,
                  cv::Point(maxLoc.x + templ.cols, maxLoc.y + templ.rows),
                  cv::Scalar(0, 255, 0), 2);
    cv::imshow("CCOEFF", display);
    cv::waitKey(0);
}
归一化系数匹配法
void ColorMatch_TM_CCOEFF_NORMED(const Mat &img, const Mat &templ) {
int result_cols = img.cols - templ.cols + 1;
int result_rows = img.rows - templ.rows + 1;
Mat result(result_rows, result_cols, CV_32FC1);
for (int i = 0; i < result_rows; i++) {
for (int j = 0; j < result_cols; j++) {
float sum = 0.0;//记录最后结果
float img_mean = 0.0;
float img_mean1 = 0.0;//记录img图像像素之和平方的均值
float templ_mean = 0.0;
float templ_mean1 = 0.0;//记录templ图像像素之和平方的均值
float sum1 = 0.0;//img像素之和*temple像素之和)的均值
float sum2 = 0.0;//
float img_val1 = 0.0;
float templ_val1 = 0.0;
//计算
for (int k = 0; k < templ.rows; k++) {
for (int l = 0; l < templ.cols; l++)
{
for (int c = 0; c < 3; c++) {
img_mean += img.at<cv::Vec3b>(i + k, j + l)[c];//img像素之和
templ_mean += templ.at<cv::Vec3b>(k, l)[c];//templ像素之和
}
}
}
img_mean1 =sqrt( img_mean) / (templ.rows * templ.cols * 3);// img图像像素之和平方的均值
templ_mean1 = sqrt(templ_mean) / (templ.rows * templ.cols * 3);// templ图像像素之和平方的均值
img_mean /= (templ.rows * templ.cols * 3);//计算img像素均值 注意三通道
sum1 = img_mean * templ_mean;//(img像素之和*temple像素之和)的均值
for (int k = 0; k < templ.rows; k++) {
for (int l = 0; l < templ.cols; l++) {
for (int c = 0; c < 3; c++) {
float img_val = img.at<cv::Vec3b>(i + k, j + l)[c];//img像素之和
img_val1 += sqrt(img.at<cv::Vec3b>(i + k, j + l)[c]);//img像素之和的平方
float templ_val = templ.at<cv::Vec3b>(k, l)[c];
templ_val1 += sqrt( templ.at<cv::Vec3b>(k, l)[c]);
sum += img_val * templ_val;//计算img*templ之和
}
}
}
sum2 = std::sqrt(img_val1-img_mean1 )+ std::sqrt(templ_val1 - templ_mean1);
result.at<float>(i, j) = (sum - sum1)/sum2;
}
}
double minVal, maxVal;
Point minLoc, maxLoc;
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
rectangle(img, maxLoc, Point(maxLoc.x + templ.cols, maxLoc.y + templ.rows), Scalar(0, 255, 0), 2);
double bestMatchThresh = 0.95; // 设置匹配度的阈值
if (maxVal >= bestMatchThresh) {
rectangle(img, maxLoc, Point(maxLoc.x + templ.cols, maxLoc.y + templ.rows), Scalar(0, 255, 0), 2);
}
imshow("CCOEFF_NORMED", img);
waitKey(0);
}
结果
原图像
模板图像
平方差匹配法
自己实现
opencv
归一化平方差匹配法method=TM_SQDIFF_NORMED
自己实现
opencv
相关匹配法method=TM_CCORR
自己实现
opencv
归一化相关匹配法method=TM_CCORR_NORMED
自己实现
opencv
系数匹配法method=TM_CCOEFF
自己实现
opencv
归一化相关系数匹配法method=TM_CCOEFF_NORMED
自己实现
文章来源:https://www.toymoban.com/news/detail-764452.html
opencv
文章来源地址https://www.toymoban.com/news/detail-764452.html
自写函数与opencv函数运行时间比较
方法 | 自己实现函数运行时间 单位:秒 | opencv运行时间 |
---|---|---|
平方差匹配法 | 12.0654 | 0.125989 |
归一化平方差匹配法 | 17.5297 | 0.0974526 |
相关匹配法 | 14.7314 | 0.0990451 |
归一化相关系数匹配法 | 19.466 | 0.0715831 |
系数匹配法 | 10.2226 | 0.096816 |
归一化系数匹配法 | 29.1532 | 0.0979519 |
到了这里,关于图像模板匹配 opencv c++实现的文章就介绍完了。如果您还想了解更多内容,请在右上角搜索TOY模板网以前的文章或继续浏览下面的相关文章,希望大家以后多多支持TOY模板网!