PCL中的通用ICP算法

问题描述

我正在尝试解决这个练习（幻灯片40）。

我下载了kitchen-small数据集并执行了以下两个步骤。

第一步:将深度图像从数据集中转换为Pointclouds。

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ximgproc.hpp>
#include <stdio.h>
#include <iostream>
#include <string>
#include <fstream>
#include <cstdint>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/gicp.h>
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/features/normal_3d.h>
#include <pcl/kdtree/kdtree.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/features/normal_3d_omp.h>
#include <Eigen/Dense>

using namespace std;
using namespace cv;
Eigen::Matrix3f camera_matrix;

/// Back-projects a depth image (already scaled to meters by the caller) and its
/// RGB image into an organized PointXYZRGB cloud, subsampling every 4th pixel
/// along each axis.
///
/// @param depth_image  CV_32F (or convertible) depth map, one depth per pixel.
/// @param rgb_image    8-bit 3-channel BGR image, same resolution as depth_image.
/// @param _intrinsics  camera matrix — NOTE(review): currently ignored, the
///                     intrinsics below are hard-coded; confirm the dataset
///                     calibration before switching to _intrinsics(0,0) etc.
/// @return cloud of size (cols/4) x (rows/4); pixels with zero depth map to the origin.
pcl::PointCloud<pcl::PointXYZRGB>::Ptr RGBDtoPCL(Mat depth_image,Mat rgb_image,Eigen::Matrix3f& _intrinsics)
{
    pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>);

    const float fx = 512;//_intrinsics(0,0);
    const float fy = 512;//_intrinsics(1,1);
    const float cx = 320;//_intrinsics(0,2);
    const float cy = 240;//_intrinsics(1,2);

    const float factor = 1;

    depth_image.convertTo(depth_image,CV_32F); // ensure float access below is valid

    if (!depth_image.data) {
        std::cerr << "No depth data!!!" << std::endl;
        exit(EXIT_FAILURE);
    }

    // The loop samples every `step`-th pixel, so the output dimensions must be
    // the subsampled ones — the original claimed full resolution and then ALSO
    // push_back'ed points after resize(), doubling the cloud with garbage points.
    const int step = 4;
    const int out_cols = (depth_image.cols + step - 1) / step;
    const int out_rows = (depth_image.rows + step - 1) / step;

    pointcloud->width  = out_cols;
    pointcloud->height = out_rows;
    pointcloud->is_dense = false; // zero-depth pixels collapse to the origin
    pointcloud->resize(static_cast<size_t>(out_cols) * out_rows);

#pragma omp parallel for
    for (int v = 0; v < depth_image.rows; v += step)
    {
        for (int u = 0; u < depth_image.cols; u += step)
        {
            const float Z = depth_image.at<float>(v,u) / factor;

            pcl::PointXYZRGB p;
            p.z = Z;
            p.x = (u - cx) * Z / fx; // standard pinhole back-projection
            p.y = (v - cy) * Z / fy;

            // OpenCV stores BGR; PCL expects RGB.
            const Vec3b& bgr = rgb_image.at<Vec3b>(v,u);
            p.r = bgr[2];
            p.g = bgr[1];
            p.b = bgr[0];

            // Index assignment (not push_back) keeps size == width*height and is
            // race-free under OpenMP: each (u,v) writes a distinct slot.
            (*pointcloud)[(v / step) * out_cols + (u / step)] = p;
        }
    }

    return pointcloud;
}


int main(int argc,char const *argv[])
{   
    pcl::VoxelGrid<pcl::PointXYZRGB> voxel_grid;
    pcl::normalEstimationOMP<pcl::PointXYZRGB,pcl::normal> ne;
    camera_matrix << fx,0.0f,cx,fy,cy,1.0f;

    for(int i=1; i<= 180; i++)
    {
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>);
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr subsamp_cloud(new pcl::PointCloud<pcl::PointXYZRGB>);
        pcl::PointCloud<pcl::PointXYZRGBnormal> dst;

    /*Convert depth image to Pointcloud*/
        Mat input_rgdb_image,input_depth_img,scaled_depth_img;
        input_rgdb_image = imread("/home/aleio/Downloads/rgbd-scenes/kitchen_small/kitchen_small_1/kitchen_small_1_"+to_string(i)+".png");
        input_depth_img = imread("/home/aleio/Downloads/rgbd-scenes/kitchen_small/kitchen_small_1/kitchen_small_1_"+to_string(i)+"_depth.png",IMREAD_ANYDEPTH);
        input_depth_img.convertTo(scaled_depth_img,CV_32F,0.001);

        pointcloud=RGBDtoPCL(scaled_depth_img,input_rgdb_image,camera_matrix);

/*Voxelization*/
        voxel_grid.setInputCloud (pointcloud);
        voxel_grid.setLeafSize (0.005,0.005,0.005); 
        voxel_grid.filter ( *subsamp_cloud ) ;

/*Estimate normals*/     
        ne.setInputCloud (subsamp_cloud);
        pcl::search::KdTree<pcl::PointXYZRGB>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGB> ());
        ne.setSearchMethod (tree);
        pcl::PointCloud<pcl::normal>::Ptr cloud_normals (new pcl::PointCloud<pcl::normal>);
        ne.seTradiusSearch (0.03);
        ne.compute (*cloud_normals);

/*Create <PointXYZRGBnormal> cloud*/
        // Initialization part
        dst.width = subsamp_cloud->width;
        dst.height = subsamp_cloud->height;
        dst.is_dense = true;
        dst.points.resize(dst.width * dst.height);
        
        // Assignment part
        for (int i = 0; i < cloud_normals->points.size(); i++)
        {
        dst.points[i].x = subsamp_cloud->points[i].x;
        dst.points[i].y = subsamp_cloud->points[i].y;
        dst.points[i].z = subsamp_cloud->points[i].z;

        dst.points[i].r = subsamp_cloud->points[i].r;
        dst.points[i].g = subsamp_cloud->points[i].g;
        dst.points[i].b = subsamp_cloud->points[i].b;

        // cloud_normals -> Which you have already have; generated using pcl example code 

        dst.points[i].curvature = cloud_normals->points[i].curvature;

        dst.points[i].normal_x = cloud_normals->points[i].normal_x;
        dst.points[i].normal_y = cloud_normals->points[i].normal_y;
        dst.points[i].normal_z = cloud_normals->points[i].normal_z;
        }
/*Save Pointcloud with normals*/
        pcl::io::savePCDFileASCII ("/home/aleio/Downloads/rgbd-scenes/kitchen_small/Pointcloud/Pointcloud_"+to_string(i)+".pcd",dst);
    }

    return 0;
}

第二步:执行GICP算法。

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ximgproc.hpp>
#include <stdio.h>
#include <iostream>
#include <string>
#include <fstream>
#include <cstdint>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/registration/icp.h>
#include <pcl/registration/gicp.h>
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/filters/extract_indices.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/features/normal_3d.h>
#include <pcl/kdtree/kdtree.h>
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/filters/statistical_outlier_removal.h>
#include <pcl/features/normal_3d_omp.h>
#include <Eigen/Dense>


using namespace std;
using namespace cv;
Eigen::Matrix3f camera_matrix;


int main(int argc,char const *argv[])
{   
    pcl::GeneralizedIterativeClosestPoint<pcl::PointXYZRGBnormal,pcl::PointXYZRGBnormal> gicp; //generalized icp
    
    Eigen::Matrix4f globalTransform;
    globalTransform.setIdentity();

    /* Set GICP parameters — do I need to tune these? */
    gicp.setMaxCorrespondencedistance(0.05);
    gicp.setMaximumIterations(5);
    gicp.setTransformationEpsilon(1e-8);
    
    pcl::PointCloud<pcl::PointXYZRGBnormal>::Ptr cloud_in(new pcl::PointCloud<pcl::PointXYZRGBnormal>);
    pcl::PointCloud<pcl::PointXYZRGBnormal>::Ptr cloud_out(new pcl::PointCloud<pcl::PointXYZRGBnormal>);
    pcl::PointCloud<pcl::PointXYZRGBnormal> Final; //What is that?
    pcl::PointCloud<pcl::PointXYZRGBnormal>::Ptr t_cloud(new pcl::PointCloud<pcl::PointXYZRGBnormal>); //cloud_in transformed according globalTransform

    for(int i=1; i<= 180; i+=1)
    {
        /*Declare some Pointclouds*/
        pcl::PointCloud<pcl::PointXYZRGBnormal>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGBnormal>); //loaded cloud
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr xyz_final (new pcl::PointCloud<pcl::PointXYZRGB>);
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr xyz_transformed (new pcl::PointCloud<pcl::PointXYZRGB>);
        pcl::PointCloud<pcl::normal>::Ptr normal(new pcl::PointCloud<pcl::normal>);
        

        if (pcl::io::loadPCDFile<pcl::PointXYZRGBnormal> ("/home/aleio/Downloads/rgbd-scenes/kitchen_small/Pointcloud/Pointcloud_"+to_string(i)+".pcd",*cloud) == -1) //* load the file
        {
            PCL_ERROR ("Couldn't read file test_pcd.pcd \n");
            return (-1);
        }
        if(i==1)
        {
            pcl::copyPointCloud(*cloud,*cloud_out);  ////////At first step I only copy the first cloud.
            continue;
        }

        pcl::copyPointCloud(*cloud,*cloud_in); ////////copy

        gicp.setInputSource(cloud_in); //New cloud
        gicp.setInputTarget(cloud_out); //Old cloud

        gicp.align(Final);
        cout << "has converged:" << gicp.hasConverged() << " score: " << gicp.getfitnessscore() << endl;
        cout << gicp.getFinalTransformation() << endl;

        globalTransform = globalTransform * gicp.getFinalTransformation();
        pcl::transformPointCloud (*cloud_in,*t_cloud,globalTransform);

/*Now I convert XYZRGBnormal clouds to XYZRGB clouds*/

/*------------------------FINAL_CLOUD-----------------------------------------*/
        // Initialization part
        xyz_final->width = Final.width;
        xyz_final->height = Final.height;
        xyz_final->is_dense = true;
        xyz_final->points.resize(xyz_final->width * xyz_final->height);
        // Assignment part
        for (int i = 0; i < t_cloud->points.size(); i++)
        {
        xyz_final->points[i].x = Final.points[i].x;
        xyz_final->points[i].y = Final.points[i].y;
        xyz_final->points[i].z = Final.points[i].z;
        xyz_final->points[i].r = Final.points[i].r;
        xyz_final->points[i].g = Final.points[i].g;
        xyz_final->points[i].b = Final.points[i].b;
        }
/*------------------------TRANSFORMED_CLOUD-----------------------------------------*/
        // Initialization part
        xyz_transformed->width = t_cloud->width;
        xyz_transformed->height = t_cloud->height;
        xyz_transformed->is_dense = true;
        xyz_transformed->points.resize(xyz_transformed->width * xyz_transformed->height);
        // Assignment part
        for (int i = 0; i < t_cloud->points.size(); i++)
        {
        xyz_transformed->points[i].x = t_cloud->points[i].x;
        xyz_transformed->points[i].y = t_cloud->points[i].y;
        xyz_transformed->points[i].z = t_cloud->points[i].z;
        xyz_transformed->points[i].r = t_cloud->points[i].r;
        xyz_transformed->points[i].g = t_cloud->points[i].g;
        xyz_transformed->points[i].b = t_cloud->points[i].b;
        }

        pcl::copyPointCloud(*cloud,*cloud_out);  ////////copy
        
    }


    return 0;
}

现在我期望：既然 GICP 已经收敛，把所有的 Final（或 Transformed）点云可视化之后，应该能得到场景的完整重建。但事实并非如此，我得到的是一团混乱的场景。

我遗漏了什么？

我不理解 gicp.align() 方法产生的 Final 点云的含义。它是否表示输入点云已经对齐到目标点云，即输入点云按照 gicp.getFinalTransformation() 方法返回的矩阵进行了变换？

解决方法

ICP 用于微调已经大致对齐的点云；粗对齐可以用基于特征的对齐方法（例如 SampleConsensusInitialAlignment 或 SampleConsensusPrerejective）来获得。

这些教程可以指导您: