This is an automatic calibration program for a line-structured-light sensor, written from the principle of laser triangulation. My previous workflow relied on MATLAB plus a few small programs of my own and was fairly convoluted; since C++ is easy to deploy and drop into projects (and since I am lazy), I spent an afternoon and an evening putting this program together.
The calibration target is a chessboard. Before the light-plane equation can be calibrated, the camera itself must be calibrated; the captured images are shown in the figure below.
Capture 30 images as described below and store them in the ./images folder:
Images 1-9: the board lying flat on plane a, translated between shots;
Images 10-13: the board propped up to some height above plane a, in different poses;
Image 14: the laser stripe on plane a;
Image 15: the laser stripe falling on an object placed on plane a;
Image 16: the laser stripe on plane b;
Image 17: the laser stripe falling on an object placed on plane b;
Images 18-26: the board lying flat on plane b, translated between shots;
Images 27-30: the board propped up to some height above plane b, in different poses.
Notes:
The board should span roughly 1/3 to 1/2 of the image height;
Planes a and b should be as flat as possible and as close to perpendicular to the camera's z axis as possible;
Make sure the whole board is inside the camera's field of view and sharply imaged in every shot;
When the board is propped up, avoid tilting it too steeply;
If the relative position of the camera and the laser changes during or after light-plane calibration, or if the sensor suffers a violent impact, recalibrate.
The same instructions also appear as comments inside the program.
Dependencies: OpenCV and Eigen.
The program follows:
global_head.h: global header file
#pragma once
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <iomanip>
#include <opencv2/opencv.hpp>
#include <Eigen/Dense>
#include <Eigen/Eigenvalues>
camera_calibration.h: camera calibration
#include "global_head.h"
#define BOARD_SCALE 2 //side length of a chessboard square (mm)
#define BOARD_HEIGHT 8 //number of inner corners along the board height
#define BOARD_WIDTH 8 //number of inner corners along the board width
/**
* @brief CameraCalibration camera calibration
* @param files image file names
* @param cameraMatrix intrinsic matrix
* @param distCoeffs distortion coefficients
* @param tvecsMat translation vectors
* @param rvecsMat rotation vectors
*/
void CameraCalibration(std::vector<std::string>& files, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
std::vector<cv::Mat>& tvecsMat, std::vector<cv::Mat>& rvecsMat)
{
//read each image, extract the chessboard corners, then refine them to sub-pixel accuracy
int image_count = 0; /* number of valid images */
cv::Size image_size; /* image size */
cv::Size board_size = cv::Size(BOARD_HEIGHT, BOARD_WIDTH); /* inner corners per row and column of the board */
std::vector<cv::Point2f> image_points_buf; /* corners detected in the current image */
std::vector<std::vector<cv::Point2f>> image_points_seq; /* all detected corners */
for (int i = 0; i < files.size(); i++)
{
cv::Mat imageInput = cv::imread(files[i]);
/* extract the corners */
if (0 == findChessboardCorners(imageInput, board_size, image_points_buf))
{
std::cout << "cannot find chessboard corners!\n"; //no corners found
continue;
}
else
{
//a valid image was found
image_count++;
if (image_count == 1) //grab the image width and height from the first image
{
image_size.width = imageInput.cols;
image_size.height = imageInput.rows;
}
cv::Mat view_gray;
cvtColor(imageInput, view_gray, cv::COLOR_BGR2GRAY); //imread loads BGR, not RGB
/* sub-pixel refinement */
cornerSubPix(view_gray, image_points_buf, cv::Size(5, 5), cv::Size(-1, -1),
cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 30, 0.1));
image_points_seq.push_back(image_points_buf); //save the sub-pixel corners
}
}
int total = image_points_seq.size();
std::cout << "共使用了" << total << "幅图片" << std::endl;
std::cout << "角点提取完成!\n";
/*棋盘三维信息*/
cv::Size square_size = cv::Size(BOARD_SCALE, BOARD_SCALE); /* 实际测量得到的标定板上每个棋盘格的大小 */
std::vector<std::vector<cv::Point3f>> object_points; /* 保存标定板上角点的三维坐标 */
cameraMatrix = cv::Mat(3, 3, CV_32FC1, cv::Scalar::all(0)); /* 摄像机内参数矩阵 */
std::vector<int> point_counts; // 每幅图像中角点的数量
distCoeffs = cv::Mat(1, 4, CV_32FC1, cv::Scalar::all(0)); /* 摄像机的畸变系数:k1,k2,p1,p2 */
/* 初始化标定板上角点的三维坐标 */
int i, j, t;
for (t = 0; t < image_count; t++)
{
std::vector<cv::Point3f> tempPointSet;
for (i = 0; i < board_size.height; i++)
{
for (j = 0; j < board_size.width; j++)
{
cv::Point3f realPoint;
/* assume the board lies on the z = 0 plane of the world frame */
realPoint.x = i * square_size.width;
realPoint.y = j * square_size.height;
realPoint.z = 0;
tempPointSet.push_back(realPoint);
}
}
object_points.push_back(tempPointSet);
}
/* initialize the corner count per image, assuming the full board is visible in every shot */
for (i = 0; i < image_count; i++)
{
point_counts.push_back(board_size.width * board_size.height);
}
/* run the calibration */
cv::calibrateCamera(object_points, image_points_seq, image_size, cameraMatrix, distCoeffs, rvecsMat, tvecsMat, cv::CALIB_FIX_K3);
std::cout << "calibration done!\n" << std::endl;
//evaluate the calibration result
std::ofstream fout("calibration_result.txt"); /* file for the calibration results */
double total_err = 0.0; /* sum of the per-image mean errors */
double err = 0.0; /* mean error of one image */
std::vector<cv::Point2f> image_points2; /* reprojected points */
std::cout << "calibration error per image:\n";
fout << "calibration error per image:\n";
for (i = 0; i < image_count; i++)
{
std::vector<cv::Point3f> tempPointSet = object_points[i];
/* reproject the 3D points using the estimated intrinsic and extrinsic parameters */
projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, image_points2);
/* error between the reprojected points and the measured corners */
std::vector<cv::Point2f> tempImagePoint = image_points_seq[i];
cv::Mat tempImagePointMat = cv::Mat(1, tempImagePoint.size(), CV_32FC2);
cv::Mat image_points2Mat = cv::Mat(1, image_points2.size(), CV_32FC2);
for (int j = 0; j < tempImagePoint.size(); j++)
{
image_points2Mat.at<cv::Vec2f>(0, j) = cv::Vec2f(image_points2[j].x, image_points2[j].y);
tempImagePointMat.at<cv::Vec2f>(0, j) = cv::Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
}
err = norm(image_points2Mat, tempImagePointMat, cv::NORM_L2);
total_err += err /= point_counts[i];
std::cout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << std::endl;
fout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << std::endl;
}
std::cout << "总体平均误差:" << total_err / image_count << "像素\n" << std::endl;
fout << "总体平均误差:" << total_err / image_count << "像素" << std::endl << std::endl;
//保存定标结果
cv::Mat rotation_matrix = cv::Mat(3, 3, CV_32FC1, cv::Scalar::all(0)); /* 保存每幅图像的旋转矩阵 */
std::cout << "相机内参数矩阵:" << std::endl;
std::cout << cameraMatrix << std::endl << std::endl;
std::cout << "畸变系数:" << std::endl;
std::cout << distCoeffs << std::endl << std::endl;
fout << "相机内参数矩阵:" << std::endl;
fout << cameraMatrix << std::endl << std::endl;
fout << "畸变系数:" << std::endl;
fout << distCoeffs << std::endl << std::endl;
for (int i = 0; i < image_count; i++)
{
fout << "第" << i + 1 << "幅图像的平移向量:" << std::endl;
fout << tvecsMat[i].t() << std::endl;
/* 将旋转向量转换为相对应的旋转矩阵 */
Rodrigues(rvecsMat[i], rotation_matrix);
fout << "第" << i + 1 << "幅图像的旋转矩阵:" << std::endl;
fout << rotation_matrix << std::endl << std::endl;
}
fout << std::endl;
}
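Calibration only needs to run when the setup changes, so it can be convenient to persist the intrinsics. Here is a minimal sketch of that idea using OpenCV's FileStorage; it is not part of the original program, and the file name calib.yml and both helper functions are my own choices:
//hypothetical helpers: save and reload the intrinsics so later runs can skip calibration
void SaveIntrinsics(const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
cv::FileStorage fs("calib.yml", cv::FileStorage::WRITE); //file name is an assumption
fs << "cameraMatrix" << cameraMatrix;
fs << "distCoeffs" << distCoeffs;
}
bool LoadIntrinsics(cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
cv::FileStorage fs("calib.yml", cv::FileStorage::READ);
if (!fs.isOpened()) return false;
fs["cameraMatrix"] >> cameraMatrix;
fs["distCoeffs"] >> distCoeffs;
return true;
}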
cenline_extraction.h: laser stripe centerline extraction
#include "global_head.h"
/**
* @brief GetImage load the source image
* @param img_src image file name
* @param src_image source image
*/
void GetImage(std::string img_src, cv::Mat& src_image)
{
src_image = cv::imread(img_src, 0); //load as 8-bit grayscale
}
/**
* @brief Correction undistort an image
* @param src_image source image
* @param dst_image undistorted image
* @param cameraMatrix intrinsic matrix
* @param distCoeffs distortion coefficients
*/
void Correction(cv::Mat& src_image, cv::Mat& dst_image, cv::Mat& cameraMatrix, cv::Mat& distCoeffs)
{
//distCoeffs.at<double>(0, 4) = 0;
undistort(src_image, dst_image, cameraMatrix, distCoeffs);
}
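Correction calls cv::undistort, which rebuilds the undistortion map on every call. If many frames share the same intrinsics, as they do once the sensor is running, a common alternative is to build the maps once with cv::initUndistortRectifyMap and apply them per frame with cv::remap. This is a sketch under that assumption, not the author's code:
//hypothetical batch variant: precompute the undistortion maps once, then remap each frame
void CorrectionBatch(const std::vector<cv::Mat>& src_images, std::vector<cv::Mat>& dst_images,
const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
if (src_images.empty()) return;
cv::Mat map1, map2;
cv::initUndistortRectifyMap(cameraMatrix, distCoeffs, cv::Mat(), cameraMatrix,
src_images[0].size(), CV_16SC2, map1, map2); //maps depend only on the intrinsics and image size
dst_images.resize(src_images.size());
for (size_t i = 0; i < src_images.size(); ++i)
cv::remap(src_images[i], dst_images[i], map1, map2, cv::INTER_LINEAR);
}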
/**
* @brief RemoveSmallRegion remove connected components smaller than a given area
* @param InputImage input image
* @param OutputImage output image
* @param pixel area threshold (pixels)
*/
void RemoveSmallRegion(cv::Mat& InputImage, cv::Mat& OutputImage, int pixel)
{
InputImage.copyTo(OutputImage);
std::vector<cv::Vec4i> hierarchy;
std::vector<std::vector<cv::Point>> contours;
cv::findContours(InputImage, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
if (contours.empty()) return; //guard: the hierarchy traversal below assumes at least one contour
//fill small contours with black, which removes their connected components
for (int idx = 0; idx >= 0; idx = hierarchy[idx][0])
{
if (fabs(cv::contourArea(contours[idx])) < pixel)
{
cv::drawContours(OutputImage, contours, idx, cv::Scalar(0), cv::FILLED, 8, hierarchy);
}
}
}
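The same filtering could also be written with cv::connectedComponentsWithStats, which avoids walking the contour hierarchy. This is a sketch of an alternative (the function name RemoveSmallRegionCC is my own), not the code used below:
//hypothetical alternative using connected-component statistics instead of contours
void RemoveSmallRegionCC(const cv::Mat& InputImage, cv::Mat& OutputImage, int pixel)
{
cv::Mat labels, stats, centroids;
int n = cv::connectedComponentsWithStats(InputImage, labels, stats, centroids, 8, CV_32S);
InputImage.copyTo(OutputImage);
for (int label = 1; label < n; ++label) //label 0 is the background
{
if (stats.at<int>(label, cv::CC_STAT_AREA) < pixel)
OutputImage.setTo(0, labels == label); //black out components below the area threshold
}
}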
/**
* @brief GrayCenter gray-centroid centerline extraction
* @param InputImage input image
* @param Pt extracted centerline points (pixel coordinates)
* @param bounding_rect bounding rectangle of the connected component
* @param threshold gray-level threshold
*/
void GrayCenter(cv::Mat& InputImage, std::vector<cv::Point2d>& Pt, cv::Rect bounding_rect, int threshold)
{
std::vector<cv::Point2d> P;
for (int i = bounding_rect.x; i < bounding_rect.x + bounding_rect.width; ++i)
{
int sum = 0; //sum of the gray values in this column
double y = 0; //centroid row of this column
for (int j = bounding_rect.y; j < bounding_rect.y + bounding_rect.height; ++j)
{
int s = InputImage.at<uchar>(j, i);
if (s)
{
sum += s;
y += j*s;
}
}
if (sum)
{
y /= sum;
if (InputImage.at<uchar>((int)round(y), i) > 0) //round instead of truncating the centroid row
{
P.emplace_back(cv::Point2d(i, y));
}
}
}
//smooth the centerline points with a 3-point moving average
if (P.size() >= 3)
{
for (size_t i = 1; i < P.size() - 1; ++i)
{
P[i].y = (P[i - 1].y + P[i].y + P[i + 1].y) / 3;
}
}
//compute the mean gray value along the centerline
if (P.size() > 0)
{
int avg_scalar = 0;
for (size_t i = 0; i < P.size(); ++i)
{
avg_scalar += InputImage.at<uchar>(round(P[i].y), round(P[i].x));
}
avg_scalar /= P.size();
if (avg_scalar < threshold) P.clear();
}
//drop centerline points whose gray value is too low
for (size_t i = 0; i < P.size(); ++i)
{
if (P[i].x >= 0 && P[i].x < InputImage.cols && P[i].y >= 0 && P[i].y < InputImage.rows)
{
if (InputImage.at<uchar>(round(P[i].y), round(P[i].x)) > threshold)
{
Pt.emplace_back(P[i]);
}
}
}
}
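For reference, the per-column centroid that GrayCenter computes is

$$ y_c(i) = \frac{\sum_j j \, I(i, j)}{\sum_j I(i, j)} $$

where $I(i, j)$ is the gray value at column $i$, row $j$, and the sum runs over the rows of the component's bounding rectangle; columns whose gray sum is zero are skipped. The 3-point moving average and the threshold checks then clean up the raw centroids.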
/**
* @brief PointSortRule sort points by ascending x, breaking ties by ascending y
* @param pt1 first point
* @param pt2 second point
*/
bool PointSortRule(const cv::Point2d pt1, const cv::Point2d pt2)
{
if (pt1.x != pt2.x)
return pt1.x < pt2.x;
else
return pt1.y < pt2.y;
}
/**
* @brief CenterLine sub-pixel laser stripe centerline extraction
* @param n image index
* @param correct_image undistorted image
* @param P extracted centerline points
*/
void CenterLine(int n, cv::Mat& correct_image, std::vector<cv::Point2d>& P)
{
cv::Mat dst_image = correct_image.clone();
cv::cvtColor(dst_image, dst_image, cv::COLOR_GRAY2BGR); //3 channels so the centerline can be drawn in red
cv::Mat img1 = correct_image.clone(), img2;
GaussianBlur(img1, img1, cv::Size(0, 0), 1.1, 1.1); //Gaussian smoothing
//find the maximum gray value of each column
uchar *p = img1.data;
std::vector<int> max_col_scalar(img1.cols);
for (int i = 0; i < img1.cols; ++i)
{
for (int j = 0; j < img1.rows; ++j)
{
if (*(p + i + img1.cols * j) > max_col_scalar[i])
{
max_col_scalar[i] = *(p + i + img1.cols * j);
}
}
}
//per-column thresholding: keep only pixels close to the column maximum
p = img1.data;
for (int i = 0; i < img1.cols; ++i)
{
int threshold = std::max(max_col_scalar[i] - 20, 100);
for (int j = 0; j < img1.rows; ++j)
{
if (*(p + i + img1.cols * j) < threshold)
*(p + i + img1.cols * j) = 0;
}
}
RemoveSmallRegion(img1, img2, 10); //area filtering
std::vector<cv::Vec4i> hierarchy;
std::vector<std::vector<cv::Point>> contours; //connected-component contours
findContours(img2, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
std::vector<cv::Rect> bounding_rect; //bounding rectangles of the contours
for (size_t i = 0; i < contours.size(); ++i)
{
bounding_rect.emplace_back(cv::boundingRect(cv::Mat(contours[i])));
}
P.clear();
for (size_t i = 0; i < contours.size(); ++i)
{
cv::Mat img3 = img2.clone();
cv::drawContours(img3, contours, i, cv::Scalar(0), cv::FILLED, 8, hierarchy); //fill the contour with black, which removes that connected component
cv::Mat img4 = img2 - img3; //img4 keeps only the current connected component
GrayCenter(img4, P, bounding_rect[i], 50); //gray-centroid centerline extraction
img2 = img3;
}
std::sort(P.begin(), P.end(), PointSortRule);
std::ofstream fout(std::to_string(n) + "_2d.txt");
for (int i = 0; i < P.size(); i++)
{
fout << P[i].x << " " << P[i].y << std::endl;
cv::circle(dst_image, cv::Point(round(P[i].x), round(P[i].y)), 1, cv::Scalar(0, 0, 255), -1); //draw the centerline (the radius is an int; 0.5 would truncate to 0)
}
fout.close();
cv::imwrite(std::to_string(n) + ".bmp", dst_image);
}
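The per-column threshold used inside CenterLine is

$$ T(i) = \max(M_i - 20,\ 100) $$

where $M_i$ is the maximum gray value of column $i$ after Gaussian smoothing. Each column therefore keeps only pixels close to its own stripe peak, while the floor of 100 suppresses columns that contain no laser light at all.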
/**
* @brief Point2dSperate split the centerline points into plane and object parts
* @param P centerline points
* @param P_plane points lying on the base plane
* @param P_object points lying on the object
*/
void Point2dSperate(std::vector<cv::Point2d>& P, std::vector<cv::Point2d>& P_plane, std::vector<cv::Point2d>& P_object)
{
P_plane.clear();
P_object.clear();
int object_begin = (int)P.size(), object_end = (int)P.size(); //defaults: no jump found, every point belongs to the plane
for (int i = 1; i < P.size(); ++i)
{
if (P[i - 1].y - P[i].y > 10) //y drops sharply: the stripe climbs onto the object
object_begin = i;
if (P[i].y - P[i - 1].y > 10) //y rises sharply: the stripe falls back onto the plane
object_end = i;
}
for (int i = 0; i < object_begin; ++i)
P_plane.push_back(P[i]);
for (int i = object_end; i < P.size(); ++i)
P_plane.push_back(P[i]);
for (int i = object_begin; i < object_end; ++i)
P_object.push_back(P[i]);
}
plane_fitness.h: plane fitting
#include "global_head.h"
/**
* @brief findPlane total-least-squares plane fitting
* @param pts input 3D point set
* @return plane coefficients (a, b, c) with the plane written as a*x + b*y + c*z = 1
*/
std::vector<double> findPlane(std::vector<cv::Point3d>& pts)
{
//least-squares fit
double A, B, C, D;
std::vector<double> parameters;
double meanX = 0, meanY = 0, meanZ = 0;
double meanXX = 0, meanYY = 0, meanZZ = 0;
double meanXY = 0, meanXZ = 0, meanYZ = 0;
for (int i = 0; i < pts.size(); ++i)
{
meanX += pts[i].x;
meanY += pts[i].y;
meanZ += pts[i].z;
meanXX += pts[i].x * pts[i].x;
meanYY += pts[i].y * pts[i].y;
meanZZ += pts[i].z * pts[i].z;
meanXY += pts[i].x * pts[i].y;
meanXZ += pts[i].x * pts[i].z;
meanYZ += pts[i].y * pts[i].z;
}
meanX /= pts.size();
meanY /= pts.size();
meanZ /= pts.size();
meanXX /= pts.size();
meanYY /= pts.size();
meanZZ /= pts.size();
meanXY /= pts.size();
meanXZ /= pts.size();
meanYZ /= pts.size();
/* eigenvector */
Eigen::Matrix3d eMat;
eMat(0, 0) = meanXX - meanX * meanX; eMat(0, 1) = meanXY - meanX * meanY; eMat(0, 2) = meanXZ - meanX * meanZ;
eMat(1, 0) = meanXY - meanX * meanY; eMat(1, 1) = meanYY - meanY * meanY; eMat(1, 2) = meanYZ - meanY * meanZ;
eMat(2, 0) = meanXZ - meanX * meanZ; eMat(2, 1) = meanYZ - meanY * meanZ; eMat(2, 2) = meanZZ - meanZ * meanZ;
Eigen::EigenSolver<Eigen::Matrix3d> xjMat(eMat);
Eigen::Matrix3d eValue = xjMat.pseudoEigenvalueMatrix();
Eigen::Matrix3d eVector = xjMat.pseudoEigenvectors();
/* the eigenvector corresponding to the minimum eigenvalue */
double v1 = eValue(0, 0);
double v2 = eValue(1, 1);
double v3 = eValue(2, 2);
int minNumber = 0;
if ((abs(v2) <= abs(v1)) && (abs(v2) <= abs(v3)))
{
minNumber = 1;
}
if ((abs(v3) <= abs(v1)) && (abs(v3) <= abs(v2)))
{
minNumber = 2;
}
A = eVector(0, minNumber);
B = eVector(1, minNumber);
C = eVector(2, minNumber);
D = -(A * meanX + B * meanY + C * meanZ);
/* result */
if (C < 0) //orient the normal so that C >= 0
{
A *= -1.0;
B *= -1.0;
C *= -1.0;
D *= -1.0;
}
/* rewrite A*x + B*y + C*z + D = 0 as a*x + b*y + c*z = 1 */
parameters.push_back(- A / D);
parameters.push_back(- B / D);
parameters.push_back(- C / D);
return parameters;
}
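In other words, findPlane is a total-least-squares fit: with $\bar{p}$ the centroid of the points, the normal $(A, B, C)^{\mathsf T}$ is the eigenvector of the covariance matrix

$$ \Sigma = \frac{1}{n} \sum_{i=1}^{n} (p_i - \bar{p})(p_i - \bar{p})^{\mathsf T} $$

associated with the smallest eigenvalue, and $D = -(A \bar{x} + B \bar{y} + C \bar{z})$ so that the plane passes through the centroid. The returned coefficients rewrite $A x + B y + C z + D = 0$ in the form used by the rest of the program:

$$ a x + b y + c z = 1, \qquad (a, b, c) = -\frac{1}{D} (A, B, C). $$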
point2dto3d.h: coordinate transformation
#include "global_head.h"
/**
* @brief Point2dto3d back-project 2D pixel coordinates to 3D camera coordinates
* @param n image index
* @param plane plane equation coefficients
* @param cameraMatrix intrinsic matrix
* @param distCoeffs distortion coefficients (unused here: the points come from an already undistorted image)
* @param Pt2ds 2D point set
* @param Pt3ds 3D point set
*/
void Point2dto3d(int n, std::vector<double> plane, cv::Mat& cameraMatrix, cv::Mat& distCoeffs,
std::vector<cv::Point2d>& Pt2ds, std::vector<cv::Point3d>& Pt3ds)
{
//Pt3ds.clear();
double a = plane[0], b = plane[1], c = plane[2];
double u0 = cameraMatrix.at<double>(0, 2), v0 = cameraMatrix.at<double>(1, 2); //principal point
double fx = cameraMatrix.at<double>(0, 0), fy = cameraMatrix.at<double>(1, 1); //focal lengths (pixels)
std::ofstream fout(std::to_string(n) + "_3d.txt");
for (int i = 0; i < Pt2ds.size(); ++i)
{
double u = Pt2ds[i].x, v = Pt2ds[i].y;
double x1 = (double)((u - u0) / fx), y1 = (double)((v - v0) / fy);
cv::Point3d pt;
pt.z = (double)(1 / (a*x1 + b*y1 + c));
pt.x = x1*pt.z;
pt.y = y1*pt.z;
Pt3ds.push_back(pt);
fout << std::setprecision(16) << pt.x << " " << pt.y << " " << pt.z << std::endl;
}
fout.close();
}
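The back-projection follows directly from the pinhole model. A pixel $(u, v)$ defines the normalized ray direction

$$ \hat{x} = \frac{u - u_0}{f_x}, \qquad \hat{y} = \frac{v - v_0}{f_y}, $$

so every point on the ray has the form $(X, Y, Z) = Z (\hat{x}, \hat{y}, 1)$. Substituting into the light-plane equation $a X + b Y + c Z = 1$ gives

$$ Z = \frac{1}{a \hat{x} + b \hat{y} + c}, \qquad X = \hat{x} Z, \qquad Y = \hat{y} Z, $$

which is exactly what the loop above evaluates for each centerline point.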
/**
* @brief PointtoPlaneEvaluation point-to-plane accuracy evaluation
* @param Pt3ds 3D point set
* @param plane plane equation coefficients
*/
void PointtoPlaneEvaluation(const std::vector<cv::Point3d>& Pt3ds, std::vector<double> plane)
{
double a = plane[0], b = plane[1], c = plane[2];
std::vector<double> distance;
double distance_mean = 0;
for (int i = 0; i < Pt3ds.size(); ++i)
{
double dis = abs(a*Pt3ds[i].x + b*Pt3ds[i].y + c*Pt3ds[i].z - 1) / sqrt(a*a + b*b + c*c);
distance.push_back(dis);
distance_mean += dis;
}
distance_mean /= Pt3ds.size();
std::cout << "\n距离平均值:"<< distance_mean << std::endl;
double sigma = 0;
for (int i = 0; i < Pt3ds.size(); ++i)
{
sigma += (distance[i] - distance_mean)*(distance[i] - distance_mean);
}
sigma /= Pt3ds.size();
std::cout << "距离标准差:"<< sqrt(sigma) << std::endl;
}
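The reported figures are the mean and standard deviation of the point-to-plane distances

$$ d_i = \frac{\lvert a x_i + b y_i + c z_i - 1 \rvert}{\sqrt{a^2 + b^2 + c^2}}, $$

which follow from writing the reference plane as $a x + b y + c z = 1$.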
main.cpp: main program
/*This program calibrates the light-plane equation of a line-structured-light sensor and verifies its accuracy.
Capture 30 images as described below and store them in the ./images folder:
Images 1-9: the board lying flat on plane a, translated between shots;
Images 10-13: the board propped up to some height above plane a, in different poses;
Image 14: the laser stripe on plane a;
Image 15: the laser stripe falling on an object placed on plane a;
Image 16: the laser stripe on plane b;
Image 17: the laser stripe falling on an object placed on plane b;
Images 18-26: the board lying flat on plane b, translated between shots;
Images 27-30: the board propped up to some height above plane b, in different poses.
Notes:
The board should span roughly 1/3 to 1/2 of the image height;
Planes a and b should be as flat as possible and as close to perpendicular to the camera's z axis as possible;
Make sure the whole board is inside the camera's field of view and sharply imaged in every shot;
When the board is propped up, avoid tilting it too steeply;
If the relative position of the camera and the laser changes during or after light-plane
calibration, or if the sensor suffers a violent impact, recalibrate.
*/
#include "camera_calibration.h"
#include "cenline_extraction.h"
#include "plane_fitness.h"
#include "point2dto3d.h"
int main(int argc, char* argv[])
{
//camera calibration//
std::vector<std::string> files;
std::vector<cv::Mat> tvecsMat, rvecsMat;
cv::Mat cameraMatrix, distCoeffs;
for (int i = 1; i <= 13; ++i) //images 1-13: chessboard on plane a
files.push_back("images//" + std::to_string(i) + ".bmp");
for (int i = 18; i <= 30; ++i) //images 18-30: chessboard on plane b
files.push_back("images//" + std::to_string(i) + ".bmp");
CameraCalibration(files, cameraMatrix, distCoeffs, tvecsMat, rvecsMat);
//light-plane calibration//
double t = 2.8; //thickness of the calibration board (mm)
std::vector<cv::Point3d> tranMat1, tranMat2;
for (int i = 0; i < 9; ++i) //images 1-9: board flat on plane a; add the board thickness along z
tranMat1.push_back(cv::Point3d(tvecsMat[i].at<double>(0), tvecsMat[i].at<double>(1), tvecsMat[i].at<double>(2) + t));
for (int i = 13; i < 22; ++i) //images 18-26: board flat on plane b
tranMat2.push_back(cv::Point3d(tvecsMat[i].at<double>(0), tvecsMat[i].at<double>(1), tvecsMat[i].at<double>(2) + t));
std::vector<double> plane1 = findPlane(tranMat1), plane2 = findPlane(tranMat2);
//std::cout << plane1[0] << " " << plane1[1] << " " << plane1[2]<< std::endl << plane2[0] << " " << plane2[1] << " " << plane2[2] << std::endl;
int i = 14; //image 14: laser stripe on plane a
std::string filename;
cv::Mat image, image_corrected;
std::vector<cv::Point2d> Pts2d;
std::vector<cv::Point3d> Pts3d;
filename = "images//" + std::to_string(i) + ".bmp";
GetImage(filename, image);
Correction(image, image_corrected, cameraMatrix, distCoeffs);
CenterLine(i, image_corrected, Pts2d);
Point2dto3d(i, plane1, cameraMatrix, distCoeffs, Pts2d, Pts3d);
i = 16; //image 16: laser stripe on plane b
filename = "images//" + std::to_string(i) + ".bmp";
GetImage(filename, image);
Correction(image, image_corrected, cameraMatrix, distCoeffs);
CenterLine(i, image_corrected, Pts2d);
Point2dto3d(i, plane2, cameraMatrix, distCoeffs, Pts2d, Pts3d);
std::vector<double> light_plane = findPlane(Pts3d);
std::cout << "光平面方程系数:\n" << light_plane[0] << " " << light_plane[1] << " " << light_plane[2] << std::endl;
std::vector<cv::Point2d> Pts2d_plane, Pts2d_object;
//light-plane accuracy evaluation//
//not generally applicable; the section below can be removed.
i = 15; //image 15: laser stripe crossing an object on plane a
filename = "images//" + std::to_string(i) + ".bmp";
GetImage(filename, image);
Correction(image, image_corrected, cameraMatrix, distCoeffs);
CenterLine(i, image_corrected, Pts2d);
Point2dSperate(Pts2d, Pts2d_plane, Pts2d_object);
Pts3d.clear();
Point2dto3d(i, light_plane, cameraMatrix, distCoeffs, Pts2d_plane, Pts3d);
PointtoPlaneEvaluation(Pts3d, plane1);
Pts3d.clear();
Point2dto3d(i, light_plane, cameraMatrix, distCoeffs, Pts2d_object, Pts3d);
PointtoPlaneEvaluation(Pts3d, plane1);
system("pause");
return EXIT_SUCCESS;
}
Project files: https://pan.baidu.com/s/1EA-luW8ZZ1cvmi6H3ZIfTQ?pwd=3ktz (extraction code: 3ktz)