Recovers the relative rotation and translation between two cameras from an estimated essential matrix and the corresponding points in the two images, using the cheirality check. Returns the number of inliers that pass the check.
#include <opencv2/calib3d.hpp>
int cv::recoverPose( InputArray E,
                     InputArray points1,
                     InputArray points2,
                     InputArray cameraMatrix,
                     OutputArray R,
                     OutputArray t,
                     InputOutputArray mask = noArray() )
int recoverPose( InputArray E, InputArray points1, InputArray points2,
OutputArray R, OutputArray t, double focal = 1.0,
Point2d pp = Point2d(0, 0), InputOutputArray mask = noArray() );
int recoverPose( InputArray E, InputArray points1, InputArray points2,
InputArray cameraMatrix, OutputArray R, OutputArray t,
double distanceThresh, InputOutputArray mask = noArray(),
OutputArray triangulatedPoints = noArray());
E: the estimated essential matrix, a 3x3 matrix.
points1: the points from the first image.
points2: the points from the second image.
cameraMatrix: the camera intrinsic matrix, a 3x3 matrix.
R: the recovered rotation matrix between the two frames.
t: the recovered translation vector between the two frames.
focal: the camera focal length.
pp: the principal point of the image.
distanceThresh: distance threshold used to filter out far-away points (see the sketch after the official example).
triangulatedPoints: the 3D points recovered by triangulation.
Official example:
// Example. Estimation of the essential matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
    points1[i] = ...;
    points2[i] = ...;
}
// camera matrix with both focal lengths = 1, and principal point = (0, 0)
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
Mat E, R, t, mask;
E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
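The overload with distanceThresh additionally returns the triangulated 3D points, filtering out matches triangulated farther than the threshold. A minimal sketch continuing the example above; the threshold 50.0 is an arbitrary illustrative value, not a recommended setting:

// Recover pose and also obtain the triangulated points; matches whose
// triangulated depth exceeds 50 units are treated as outliers.
Mat triangulatedPoints;
int inliers = recoverPose(E, points1, points2, cameraMatrix, R, t,
                          50.0, mask, triangulatedPoints);
// triangulatedPoints is a 4xN matrix of homogeneous coordinates in the
// first camera's frame; divide each column by its fourth component to
// obtain Euclidean 3D points.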
**Notes:**
1. The $R$, $t$ recovered by this function represent the transform from points1 to points2, i.e. $R_{21}$, $t_{21}$: a point $P_1$ expressed in the first camera's frame maps into the second camera's frame as $P_2 = R_{21} P_1 + t_{21}$.
2. The recovered $R_{21}$, $t_{21}$ are already the correct solution: the function's internal code has discarded the other three invalid solutions of the essential-matrix decomposition.
3. The order in which points1 and points2 are passed to cv::recoverPose() must match the order in which they were passed to cv::findEssentialMat() when estimating the essential matrix.
4. Usage: you can include the corresponding header and call the function directly, copy its internal implementation, or implement it yourself (as VINS does), as shown below:
int recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
OutputArray _R, OutputArray _t, InputOutputArray _mask)
{
Mat points1, points2, cameraMatrix;
_points1.getMat().convertTo(points1, CV_64F);
_points2.getMat().convertTo(points2, CV_64F);
_cameraMatrix.getMat().convertTo(cameraMatrix, CV_64F);
int npoints = points1.checkVector(2);
CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
points1.type() == points2.type());
CV_Assert(cameraMatrix.rows == 3 && cameraMatrix.cols == 3 && cameraMatrix.channels() == 1);
if (points1.channels() > 1)
{
points1 = points1.reshape(1, npoints);
points2 = points2.reshape(1, npoints);
}
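    // Convert pixel coordinates to normalized image coordinates:
    // x' = (x - cx) / fx, y' = (y - cy) / fy.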
double fx = cameraMatrix.at<double>(0,0);
double fy = cameraMatrix.at<double>(1,1);
double cx = cameraMatrix.at<double>(0,2);
double cy = cameraMatrix.at<double>(1,2);
points1.col(0) = (points1.col(0) - cx) / fx;
points2.col(0) = (points2.col(0) - cx) / fx;
points1.col(1) = (points1.col(1) - cy) / fy;
points2.col(1) = (points2.col(1) - cy) / fy;
points1 = points1.t();
points2 = points2.t();
Mat R1, R2, t;
decomposeEssentialMat(E, R1, R2, t);
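    // decomposeEssentialMat() yields two candidate rotations (R1, R2) and a
    // translation t known only up to sign, giving four pose hypotheses:
    // [R1|t], [R2|t], [R1|-t], [R2|-t]. The cheirality check below selects
    // the hypothesis that places the most triangulated points in front of
    // both cameras.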
Mat P0 = Mat::eye(3, 4, R1.type());
Mat P1(3, 4, R1.type()), P2(3, 4, R1.type()), P3(3, 4, R1.type()), P4(3, 4, R1.type());
P1(Range::all(), Range(0, 3)) = R1 * 1.0; P1.col(3) = t * 1.0;
P2(Range::all(), Range(0, 3)) = R2 * 1.0; P2.col(3) = t * 1.0;
P3(Range::all(), Range(0, 3)) = R1 * 1.0; P3.col(3) = -t * 1.0;
P4(Range::all(), Range(0, 3)) = R2 * 1.0; P4.col(3) = -t * 1.0;
// Do the cheirality check.
// Notice here a threshold dist is used to filter
// out far away points (i.e. infinite points) since
// their depth may vary between positive and negative.
double dist = 50.0;
Mat Q;
triangulatePoints(P0, P1, points1, points2, Q);
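    // Q holds homogeneous points (x, y, z, w); z*w > 0 means positive depth
    // in the first camera. Each point must also lie closer than dist, and
    // satisfy the same conditions after being transformed into the second
    // camera's frame by P1.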
Mat mask1 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask1 = (Q.row(2) < dist) & mask1;
Q = P1 * Q;
mask1 = (Q.row(2) > 0) & mask1;
mask1 = (Q.row(2) < dist) & mask1;
triangulatePoints(P0, P2, points1, points2, Q);
Mat mask2 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask2 = (Q.row(2) < dist) & mask2;
Q = P2 * Q;
mask2 = (Q.row(2) > 0) & mask2;
mask2 = (Q.row(2) < dist) & mask2;
triangulatePoints(P0, P3, points1, points2, Q);
Mat mask3 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask3 = (Q.row(2) < dist) & mask3;
Q = P3 * Q;
mask3 = (Q.row(2) > 0) & mask3;
mask3 = (Q.row(2) < dist) & mask3;
triangulatePoints(P0, P4, points1, points2, Q);
Mat mask4 = Q.row(2).mul(Q.row(3)) > 0;
Q.row(0) /= Q.row(3);
Q.row(1) /= Q.row(3);
Q.row(2) /= Q.row(3);
Q.row(3) /= Q.row(3);
mask4 = (Q.row(2) < dist) & mask4;
Q = P4 * Q;
mask4 = (Q.row(2) > 0) & mask4;
mask4 = (Q.row(2) < dist) & mask4;
mask1 = mask1.t();
mask2 = mask2.t();
mask3 = mask3.t();
mask4 = mask4.t();
// If _mask is given, then use it to filter outliers.
if (!_mask.empty())
{
Mat mask = _mask.getMat();
CV_Assert(mask.size() == mask1.size());
bitwise_and(mask, mask1, mask1);
bitwise_and(mask, mask2, mask2);
bitwise_and(mask, mask3, mask3);
bitwise_and(mask, mask4, mask4);
}
if (_mask.empty() && _mask.needed())
{
_mask.create(mask1.size(), CV_8U);
}
CV_Assert(_R.needed() && _t.needed());
_R.create(3, 3, R1.type());
_t.create(3, 1, t.type());
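    // Keep the hypothesis with the most points passing the cheirality check.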
int good1 = countNonZero(mask1);
int good2 = countNonZero(mask2);
int good3 = countNonZero(mask3);
int good4 = countNonZero(mask4);
if (good1 >= good2 && good1 >= good3 && good1 >= good4)
{
R1.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask1.copyTo(_mask);
return good1;
}
else if (good2 >= good1 && good2 >= good3 && good2 >= good4)
{
R2.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask2.copyTo(_mask);
return good2;
}
else if (good3 >= good1 && good3 >= good2 && good3 >= good4)
{
t = -t;
R1.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask3.copyTo(_mask);
return good3;
}
else
{
t = -t;
R2.copyTo(_R);
t.copyTo(_t);
if (_mask.needed()) mask4.copyTo(_mask);
return good4;
}
}
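// Convenience overload: builds the intrinsic matrix from focal and pp and
// delegates to the implementation above.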
int recoverPose( InputArray E, InputArray _points1, InputArray _points2, OutputArray _R,
OutputArray _t, double focal, Point2d pp, InputOutputArray _mask)
{
Mat cameraMatrix = (Mat_<double>(3,3) << focal, 0, pp.x, 0, focal, pp.y, 0, 0, 1);
return cv::recoverPose(E, _points1, _points2, cameraMatrix, _R, _t, _mask);
}
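Both the essential-matrix estimation and the pose recovery also come in focal/pp convenience overloads for a pinhole model with equal focal lengths. A minimal sketch, reusing points1 and points2 from the example above; the intrinsic values are illustrative placeholders, not calibrated ones:

// Sketch: pose recovery using the focal/pp convenience overloads.
// focal and pp below are illustrative, not real calibration values.
double focal = 500.0;
Point2d pp(320.0, 240.0);
Mat E, R, t, mask;
E = findEssentialMat(points1, points2, focal, pp, RANSAC, 0.999, 1.0, mask);
int inliers = recoverPose(E, points1, points2, R, t, focal, pp, mask);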
This concludes the introduction to cv::recoverPose() for 3D reconstruction in OpenCV (part 1).