0. 简介

作为自动驾驶行业最头疼的问题之一,外参标定一直以来受到广泛的关注,尤其是最常使用的激光雷达与相机的外参标定。之前在文章《3D雷达与相机的标定方法详细教程》《多传感器融合感知——传感器外参标定及在线标定学习》中均有提及,但是作者在调研的时候发现 IROS 2022 年的一篇论文也讲述了一种新型的自动标定方案《Joint Camera Intrinsic and LiDAR-Camera Extrinsic Calibration》。下面我们来详细地分析一下。

1. 常见的联合标定方法

常见的摄像头-激光雷达标定方法主要分为两大类,一种是基于目标的联合标定,还有就是基于无目标的联合标定。

基于目标联合标定:这里所说的目标是指人为设计的参照物,比如棋盘格等,主要的算法是通过检测图像和激光雷达的角点,设计重投影的误差函数来完成外参的获取。

无目标联合标定:一般适用于动态标定中,依据车身周围的环境作为参照,例如匹配车道线等来完成联合标定。

本文介绍的是一种基于目标的联合标定方法,其优点在于精度高,如果需要学习无目标联合标定的同学可以来看一下https://github.com/lovelyyoshino/SensorsCalibration这个项目。

2. 代码详解

文中做了一系列的工作用于实现激光雷达与相机的外参标定。首先文中提出了一种全新的标定板,用于联合标定任务,在这个标定板下可以完成相机内参、畸变因子、LiDAR-Camera外参的联合标定,并设计了一套数据采集、标定数据处理方案。注释版请参考链接:https://github.com/lovelyyoshino/JointCalib

首先通过检测标定板,定位角点和圆孔中心点,通过最小化3d-2d点对的重投影误差(点对源自标定板的圆孔中心和棋盘格的角点)构建摄像头-激光雷达的非线性优化函数。

// Board-only calibration: estimate intrinsics + distortion with OpenCV,
// then, for every frame, project the four circle-hole centers into the
// image, order them (LT, RT, LB, RB) and store the per-frame [R|t].
// Outputs: rvecsMat = per-frame rotation vectors, tvecsMat = per-frame translation vectors.
  double re_error =
      cv::calibrateCamera(_boards_pts_3d, _imgs_pts, image_size, camera_matrix,
                          k, rvecsMat, tvecsMat, CV_CALIB_FIX_PRINCIPAL_POINT); // reprojection RMS over the 3D/2D pairs built by make_board_points
  std::cout << "reprojection is " << re_error << std::endl;

  Eigen::Matrix3d camera_intrinsic; // intrinsic matrix
  // NOTE(review): Eigen's comma initializer fills ROW-major, so feeding
  // m(0,0), m(1,0), m(2,0), m(0,1), ... stores the TRANSPOSE of
  // camera_matrix here. For an upper-triangular K that moves cx/cy off the
  // last column — confirm that downstream consumers really expect K^T.
  camera_intrinsic << camera_matrix.at<double>(0, 0),
      camera_matrix.at<double>(1, 0), camera_matrix.at<double>(2, 0),
      camera_matrix.at<double>(0, 1), camera_matrix.at<double>(1, 1),
      camera_matrix.at<double>(2, 1), camera_matrix.at<double>(0, 2),
      camera_matrix.at<double>(1, 2), camera_matrix.at<double>(2, 2);

  Eigen::VectorXd distort(2);
  distort << k.at<double>(0, 0), k.at<double>(0, 1); // first two distortion coefficients (presumably k1, k2 — only two are modeled)

  std::vector<Eigen::MatrixXd> vec_extrinsics;
  for (size_t i = 0; i < rvecsMat.size(); i++)
  {
    cv::Mat rvec = rvecsMat[i];
    cv::Mat tvec = tvecsMat[i];
    cv::Mat rot;
    cv::Rodrigues(rvec, rot); // rotation vector -> 3x3 rotation matrix
    std::vector<cv::Point2f> imgpoints_cir;
    cv::projectPoints(_boards_pts_cir[i], rvecsMat[i], tvecsMat[i],
                      camera_matrix, k, imgpoints_cir); // project the 3D circle centers of this frame onto the image plane
    // Bounding box of the four projected circle centers.
    // NOTE(review): size_t truncates the float pixel coordinates and would
    // wrap around for negative coordinates — int/float would be safer.
    size_t y_min = imgpoints_cir[0].y;
    size_t y_max = imgpoints_cir[0].y;
    size_t x_min = imgpoints_cir[0].x;
    size_t x_max = imgpoints_cir[0].x;
    for (size_t i = 1; i < 4; i++) // NOTE(review): inner `i` shadows the frame index
    {
      if (imgpoints_cir[i].y > y_max) // track min/max y over the four centers
        y_max = imgpoints_cir[i].y;
      if (imgpoints_cir[i].y < y_min)
        y_min = imgpoints_cir[i].y;
      if (imgpoints_cir[i].x > x_max) // track min/max x over the four centers
        x_max = imgpoints_cir[i].x;
      if (imgpoints_cir[i].x < x_min)
        x_min = imgpoints_cir[i].x;
    }
    std::vector<cv::Point2f> imgpoints_cir1; // scl
    // Re-order the four centers into LT, RT, LB, RB by proximity (within
    // 50 px) to the bounding-box corners. NOTE(review): with a skewed board
    // a point could match two corners (duplicate push) — verify the 50 px
    // tolerance against the expected board size in pixels.
    for (size_t i = 0; i < 4; i++)
    {
      if ((imgpoints_cir[i].x - x_min) <= 50 &&
          (imgpoints_cir[i].y - y_min) <= 50)
        imgpoints_cir1.push_back(imgpoints_cir[i]);
    }
    for (size_t i = 0; i < 4; i++)
    {
      if ((x_max - imgpoints_cir[i].x) <= 50 &&
          (imgpoints_cir[i].y - y_min) <= 50)
        imgpoints_cir1.push_back(imgpoints_cir[i]);
    }
    for (size_t i = 0; i < 4; i++)
    {
      if ((imgpoints_cir[i].x - x_min) <= 50 &&
          (y_max - imgpoints_cir[i].y) <= 50)
        imgpoints_cir1.push_back(imgpoints_cir[i]);
    }
    for (size_t i = 0; i < 4; i++)
    {
      if ((x_max - imgpoints_cir[i].x) <= 50 &&
          (y_max - imgpoints_cir[i].y) <= 50)
        imgpoints_cir1.push_back(imgpoints_cir[i]);
    }
    if (imgpoints_cir1.size() != 4)
    {
      std::cout << "imgpoints_cir1.size() must = 4" << std::endl; // bail out if the corner assignment did not yield exactly four points
      return;
    }
    _imgs_pts_cir2D_true.push_back(imgpoints_cir1); // keep the ordered circle centers for this frame
    Eigen::Vector3d r0, r1, r2, t;
    // Extract the rotation columns and the translation for this frame.
    r0 << rot.at<double>(0, 0), rot.at<double>(1, 0), rot.at<double>(2, 0);
    r1 << rot.at<double>(0, 1), rot.at<double>(1, 1), rot.at<double>(2, 1);
    r2 << rot.at<double>(0, 2), rot.at<double>(1, 2), rot.at<double>(2, 2);
    // NOTE(review): tvec is a 3x1 Mat, so (0,1)/(0,2) rely on contiguous
    // storage to reach elements 1 and 2; the conventional indexing would be
    // (1,0)/(2,0) — works in release, but trips OpenCV's debug assertions.
    t << tvec.at<double>(0, 0), tvec.at<double>(0, 1), tvec.at<double>(0, 2);

    Eigen::MatrixXd RT(3, 4);
    RT.block<3, 1>(0, 0) = r0;
    RT.block<3, 1>(0, 1) = r1;
    RT.block<3, 1>(0, 2) = r2;
    RT.block<3, 1>(0, 3) = t;
    // Store the assembled 3x4 pose [R | t] of the board in this frame.
    vec_extrinsics.push_back(RT);
  }

对于图像使用opencv函数对标定板进行检测,对于点云,先建立一个标定板的mask,通过 RANSAC拟合mask的大致的标定板区域。然后再运用网格搜索算法精确拟合圆孔四个点。

 // Pair each frame's four 2D circle centers (from the image) with the four
 // 3D circle centers detected in the LiDAR cloud.
 for (size_t i = 0; i < _imgs_pts_cir2D_true.size(); i++)
  {
    cv::Mat img = available_imgs[i].clone();
    cv::Mat undistort_img = img; // shares data with `img` (header copy, no deep copy)
    std::vector<cv::Point2f> left_top_src, right_top_src, left_bottom_src,
        right_bottom_src;

    LidarPointPair lidar_point_pair;
    lidar_point_pair.img_index = i;
    // 2D <-> 3D correspondences.
    // NOTE(review): the bottom two LiDAR points are swapped (2D index 2
    // pairs with 3D index 3 and vice versa) — presumably the LiDAR corner
    // detector emits a different winding order; confirm against it.
    //  left top
    lidar_point_pair.lidar_2d_point[0] = _imgs_pts_cir2D_true[i][0];
    lidar_point_pair.lidar_3d_point[0] = lidar_3d_pts_[i][0];
    // right top
    lidar_point_pair.lidar_2d_point[1] = _imgs_pts_cir2D_true[i][1];
    lidar_point_pair.lidar_3d_point[1] = lidar_3d_pts_[i][1];

    // left bottom;
    lidar_point_pair.lidar_2d_point[2] = _imgs_pts_cir2D_true[i][2];
    lidar_point_pair.lidar_3d_point[2] = lidar_3d_pts_[i][3];

    // right bottom
    lidar_point_pair.lidar_2d_point[3] = _imgs_pts_cir2D_true[i][3];
    lidar_point_pair.lidar_3d_point[3] = lidar_3d_pts_[i][2];
    if (false) // debug-only branch: flip to true to dump the circle pixels to disk
    { // Show real LiDAR pixels
      DrawCross(undistort_img, lidar_point_pair.lidar_2d_point[0]);
      DrawCross(undistort_img, lidar_point_pair.lidar_2d_point[1]);
      DrawCross(undistort_img, lidar_point_pair.lidar_2d_point[2]);
      DrawCross(undistort_img, lidar_point_pair.lidar_2d_point[3]);
      std::string save_name = "original" + std::to_string(i) + ".png";
      cv::imwrite(save_name, undistort_img);
    }

    // Skip frames with index > 20: no corresponding circle centers are
    // available beyond 20 frames. NOTE(review): the original (Chinese)
    // comment claimed the opposite condition ("less than 20") — the code
    // drops pairs AFTER index 20; confirm which was intended.
    if (i > 20)
    {
      continue;
    }

    lidar_point_pairs.push_back(lidar_point_pair);
  }

优化两个误差,棋盘格角点的重投影误差,和圆形中心的重投影误差(lidar),最终误差是这两者的加权平均。详细参阅原始论文。

void NonlinearOptimizer::refine_lidar2camera_params(
    const LidarParams &params_,
    const std::vector<std::vector<cv::Point2f>> &imgs_pts_,
    const std::vector<std::vector<cv::Point2f>> &bords_pts_,
    const std::vector<LidarPointPair> lidar_point_pairs,
    LidarParams &refined_params_)
{
  ceres::Problem problem;
  Eigen::Matrix3d camera_matrix = params_.camera_matrix; //相机内参
  Eigen::VectorXd k = params_.k;                         //畸变矩阵
  std::vector<Eigen::MatrixXd> vec_rt = params_.vec_rt;  //旋转平移向量
  Eigen::VectorXd v_camera_matrix(5);
  v_camera_matrix << camera_matrix(0, 0), camera_matrix(0, 1),
      camera_matrix(0, 2), camera_matrix(1, 1), camera_matrix(1, 2);
  double *p_camera = v_camera_matrix.data();
  double *p_k = k.data();

  // 打包所有的旋转平移矩阵
  std::vector<Eigen::VectorXd> packet_rt;
  for (int n = 0; n < params_.vec_rt.size(); ++n)
  {
    Eigen::AngleAxisd r(vec_rt[n].block<3, 3>(0, 0));
    Eigen::VectorXd rot_vec(r.axis() * r.angle());
    Eigen::VectorXd rt(6);
    rt << rot_vec(0), rot_vec(1), rot_vec(2), vec_rt[n](0, 3), vec_rt[n](1, 3),
        vec_rt[n](2, 3);
    packet_rt.push_back(rt);
  }
  for (int n = 0; n < params_.vec_rt.size(); ++n)
  {
    Eigen::MatrixXd x1(2, bords_pts_[n].size());
    Eigen::MatrixXd x2(2, imgs_pts_[n].size());

    for (int i = 0; i < bords_pts_[n].size(); ++i)
    {
      x1(0, i) = bords_pts_[n][i].x; //棋盘格索引
      x1(1, i) = bords_pts_[n][i].y;
    }
    for (int i = 0; i < imgs_pts_[n].size(); ++i)
    {
      x2(0, i) = imgs_pts_[n][i].x; //图像索引
      x2(1, i) = imgs_pts_[n][i].y;
    }

    double *p_rt = &packet_rt[n](0);
    for (int i = 0; i < x1.cols(); i++)
    {
      //将棋盘点加入优化,完成内参的求解
      ReprojectionError *cost_function =
          new ReprojectionError(x2.col(i), x1.col(i));

      problem.AddResidualBlock(
          new ceres::AutoDiffCostFunction<ReprojectionError,
                                          2, // num_residuals
                                          5, 2, 6>(cost_function),
          NULL, p_camera, p_k, p_rt);
    }
  }
  // lidar2camera
  Eigen::Matrix<double, 3, 4> initial_rt = params_.extrinsic;
  Eigen::AngleAxisd r(initial_rt.block<3, 3>(0, 0));
  Eigen::VectorXd rot_vec(r.axis() * r.angle());
  Eigen::VectorXd rt(6);
  rt << rot_vec(0), rot_vec(1), rot_vec(2), initial_rt(0, 3), initial_rt(1, 3),
      initial_rt(2, 3);
  double *p_rt = &rt(0);
  //获取激光雷达的三维点以及投影到图像上的点的残差
  for (size_t i = 0; i < lidar_point_pairs.size(); i++)
  {
    LidarPointPair lidar_pair = lidar_point_pairs[i];
    for (size_t j = 0; j < 4; j++)
    {
      Eigen::Vector3d lidar_3d_point(lidar_pair.lidar_3d_point[j].x,
                                     lidar_pair.lidar_3d_point[j].y,
                                     lidar_pair.lidar_3d_point[j].z);
      Eigen::Vector2d lidar_2d_point(lidar_pair.lidar_2d_point[j].x,
                                     lidar_pair.lidar_2d_point[j].y);

      LidarReprojectionError *cost_function =
          new LidarReprojectionError(lidar_2d_point, lidar_3d_point);

      problem.AddResidualBlock(
          new ceres::AutoDiffCostFunction<LidarReprojectionError,
                                          2, // num_residuals
                                          5, 2, 6>(cost_function),
          NULL, p_camera, p_k, p_rt);
    }
  }

  // Configure the solver.
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_SCHUR;
  options.minimizer_progress_to_stdout = false;

  // Solve!
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);

  // DLOG(INFO) << "Final Brief Report:\n" << summary.BriefReport() <<
  // std::endl;
  this->formate_data(v_camera_matrix, k, packet_rt, rt, refined_params_);
}

(原文此处为插图,转载时图片缺失)

参考链接

https://blog.csdn.net/Yong_Qi2015/article/details/125117907
https://blog.csdn.net/lovely_yoshino/article/details/123777374
https://hermit.blog.csdn.net/article/details/122540267