1、前言
近年来,无人驾驶技术属于比较热门的话题,无人驾驶技术通常采用单目相机进行周围环境信息采集,然后通过目标识别算法对周围物体进行检测和识别。由于单目相机无法精确获取场景中目标物体的距离等深度信息,因此,越来越多的企业和高校选择使用三维激光雷达进行环境信息获取。三维激光雷达采集的数据为3D点云(包含空间物体的XYZ信息),而目前处理点云数据的工具通常只能对单帧点云数据进行处理和显示。本篇博客主要介绍一下如何使用ROS对深度学习目标识别后的3D点云数据进行显示。
2、ROS动态显示3D点云的工具
我们使用ROS自带的工具RVIZ进行点云数据显示,因为我们处理的点云数据集为KITTI官网公开的数据集,即3D点云格式为xxx.bin,标签文件格式为xxx.txt。因此,我们使用ROS对3D点云进行动态显示的一个基本思路是首先使用numpy读取KITTI数据集,然后转化为ROS格式,再使用RVIZ插件进行动态3D点云显示,当然这里我们也会将3D点云的标签(Labels)和3D包围框(3D Bbox)等进行一同显示。
OK!让我们先看一下主函数程序(程序支持扩展,可在此基础上进行二次开发):
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import os.path
import numpy as np
from PIL import Image
# import tensorflow as tf
import rospy
from sensor_msgs.msg import PointCloud2, PointField
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import Image as ImageMsg
from std_msgs.msg import Header
import time
import math
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
from visualization_msgs.msg import Marker, MarkerArray
from kitti_util import * # 相机坐标系和雷达坐标系相互转换, 参考https://github.com/charlesq34/frustum-pointnets其中KITTI下的kitti_utils.py文件
from quaternion_from_euler_convert import * # 欧拉角和四元数计算方法, code from /opt/ros/kinetic/lib/python2.7/dist-packages/tf/transformations.py
from get_kitti_object_data import *
sign_mark = 0 # Number of text markers emitted for the current frame (reset per frame)
max_arr_marker = 0 # Largest marker count seen in any frame so far (used to blank stale markers)
class BIN_TENSORFLOW_TO_ROS():
    """Publish KITTI-format lidar frames (.bin) and their label files as ROS
    messages so they can be visualized in RViz.

    Published topics:
      - /velodyne_points            (sensor_msgs/PointCloud2)  raw point cloud
      - kitti_bbox                  (jsk_recognition_msgs/BoundingBoxArray)  3D boxes
      - visualization_marker_array  (visualization_msgs/MarkerArray)  text labels
    """

    def __init__(self, pub_topic, box_topic, bin_path="", bin_file_list="", label_path="", label_file_list=""):
        # NOTE(review): pub_topic / box_topic are accepted but unused — the
        # topic names below are hard-coded; consider wiring them through.
        self._pub = rospy.Publisher("/velodyne_points", PointCloud2, queue_size=10)
        self.pub_arr_bbox = rospy.Publisher("kitti_bbox", BoundingBoxArray, queue_size=10)
        self.marker_pub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=10)

        self.bin_path = bin_path
        self.bin_file_list = bin_file_list
        self.label_path = label_path
        self.label_file_list = label_file_list

        # Enumerate the .bin scans and matching label files from the KITTI
        # split list (helper from get_kitti_object_data).
        get_data = get_kitti_data(self.bin_path, self.bin_file_list, self.label_path, self.label_file_list)
        self.bin_files, self.len_files_bin = get_data.get_bin_from_lidar_2d(self.bin_path, self.bin_file_list)
        self.label_files, self.len_files_label = get_data.get_label_from_lidar_2d(self.label_path, self.label_file_list)

        self.idx = 0
        while not rospy.is_shutdown():
            self.prediction_publish(self.idx)
            self.idx += 1
            print("self.idx", self.idx)
            # Wrap around BEFORE indexing past the end of the file list.
            # (The original '>' comparison allowed one out-of-range access.)
            if self.idx >= self.len_files_bin:
                self.idx = 0
            time.sleep(0.2)
            # Original demo stops after 40 frames.
            if self.idx > 39:
                break

    def prediction_publish(self, idx):
        """Publish one frame: the point cloud, one BoundingBox per labelled
        object, and one TEXT_VIEW_FACING marker per label.

        :param idx: index into self.bin_files / self.label_files
        """
        global sign_mark, max_arr_marker

        arr_bbox = BoundingBoxArray()
        arr_bbox.header.frame_id = "velodyne"
        arr_marker = MarkerArray()

        # KITTI velodyne scan: flat float32 file, 4 values per point
        # (x, y, z, intensity).
        lidar = np.fromfile(os.path.join(self.bin_path, self.bin_files[idx]),
                            dtype=np.float32).reshape(-1, 4)

        header = Header()
        header.stamp = rospy.Time().now()
        header.frame_id = "velodyne"

        # Large clouds take proportionally longer to convert/publish.
        msg_segment = self.create_cloud_xyzil32(header, lidar)
        self._pub.publish(msg_segment)
        rospy.loginfo("Point cloud processed bin. Took %d", self.idx)

        label_file_frame = os.path.join(self.label_path, self.label_files[idx])
        sign_mark = 0
        try:
            with open(label_file_frame, 'r') as f:
                labels = f.readlines()
            for line in labels:
                fields = line.split()
                if len(fields) < 15:
                    continue  # malformed / empty line
                lab = fields[0]
                # KITTI label columns 8..14 are h, w, l, x, y, z, rotation_y.
                # Slicing (instead of a rigid 16-field unpack) accepts both
                # 15-column ground-truth files and 16-column prediction files
                # that carry a trailing score.
                h, w, l, x, y, z, rot = map(float, fields[8:15])
                if lab != 'DontCare':
                    bbox = BoundingBox()
                    bbox.header.frame_id = "velodyne"
                    # KITTI boxes are anchored at the bottom face, so lift the
                    # centre by h/2 (camera frame: y points down).
                    center = np.vstack([0, -h / 2, 0])
                    # Rotate about the camera Y axis, then translate to (x,y,z)
                    # — still in the camera_0 coordinate frame.
                    R = np.array([[np.cos(rot), 0, np.sin(rot)],
                                  [0, 1, 0],
                                  [-np.sin(rot), 0, np.cos(rot)]])
                    center = np.dot(R, center).T + np.array([x, y, z])
                    # camera_0 -> velodyne: (x, y, z)_velo = (z, -x, -y)_cam
                    center = center[:, [2, 0, 1]] * np.array([[1, -1, -1]])
                    bbox.pose.position.x = float(center[0, 0])
                    bbox.pose.position.y = float(center[0, 1])
                    bbox.pose.position.z = float(center[0, 2])
                    q = quaternion_from_euler(0, 0, -rot)
                    bbox.pose.orientation.x = q[0]
                    bbox.pose.orientation.y = q[1]
                    bbox.pose.orientation.z = q[2]
                    bbox.pose.orientation.w = q[3]
                    bbox.dimensions.x = float(w)
                    bbox.dimensions.y = float(l)
                    bbox.dimensions.z = float(h)
                    arr_bbox.boxes.append(bbox)

                    # Text label floating above the box. RViz cannot delete
                    # text markers once shown, so stale ids are overwritten
                    # with blank markers below.
                    marker = Marker()
                    marker.header.frame_id = 'velodyne'
                    marker.type = Marker.TEXT_VIEW_FACING
                    marker.ns = "test"
                    sign_mark = sign_mark + 1
                    marker.id = sign_mark
                    marker.pose.orientation.w = 1.0
                    marker.color.r = 255
                    marker.color.b = 0
                    marker.color.g = 255
                    marker.color.a = 1.0  # label opacity
                    marker.scale.x = 5.0
                    marker.scale.y = 5.0
                    marker.scale.z = 3.0
                    marker.pose.position.x = float(center[0, 0])
                    marker.pose.position.y = float(center[0, 1])
                    marker.pose.position.z = float(center[0, 2]) + 2.0
                    marker.text = str(lab)  # Marker.text must be a string
                    arr_marker.markers.append(marker)

            # If this frame has fewer labels than the largest frame seen so
            # far, blank out the leftover marker ids so old text disappears.
            if len(arr_marker.markers) <= max_arr_marker:
                for stale_id in range(sign_mark + 1, max_arr_marker + 1):
                    arr_marker.markers.append(self._blank_marker(stale_id))
            if len(arr_marker.markers) > max_arr_marker:
                max_arr_marker = len(arr_marker.markers)

            # Publish boxes / markers only when this frame produced any.
            # (The original 'len(...) is not 0' compared object identity.)
            if len(arr_bbox.boxes) != 0:
                self.pub_arr_bbox.publish(arr_bbox)
                arr_bbox.boxes.clear()
            if len(arr_marker.markers) != 0:
                self.marker_pub.publish(arr_marker)
                arr_marker.markers.clear()
        except (OSError, ValueError) as err:
            # Missing/unreadable label file or malformed numeric field:
            # skip labels for this frame, but say so instead of silently
            # swallowing every exception (original: bare 'except: pass').
            rospy.logwarn("skipping labels for %s: %s", label_file_frame, err)

    def _blank_marker(self, marker_id):
        """Return an invisible, empty-text marker that overwrites a stale id."""
        marker = Marker()
        marker.header.frame_id = 'velodyne'
        marker.type = Marker.TEXT_VIEW_FACING
        marker.ns = "test"
        marker.id = marker_id
        marker.pose.orientation.w = 1.0
        marker.color.r = 0
        marker.color.b = 0
        marker.color.g = 0
        marker.color.a = 0
        marker.scale.x = 0
        marker.scale.y = 0
        marker.scale.z = 0
        marker.pose.position.x = 0
        marker.pose.position.y = 0
        marker.pose.position.z = 0
        marker.text = str(" ")
        return marker

    # Build a PointCloud2 message with 4 fields (x, y, z, intensity).
    def create_cloud_xyzil32(self, header, points):
        """Wrap an (N, 4) float32 array as a sensor_msgs/PointCloud2."""
        fields = [PointField('x', 0, PointField.FLOAT32, 1),
                  PointField('y', 4, PointField.FLOAT32, 1),
                  PointField('z', 8, PointField.FLOAT32, 1),
                  PointField('intensity', 12, PointField.FLOAT32, 1)]
        return pc2.create_cloud(header, fields, points)
if __name__ == '__main__':
    rospy.init_node('bin2rosvis_node')
    # All paths and topic names come from the parameter server
    # (set by the accompanying launch file).
    params = {name: rospy.get_param(name)
              for name in ('bin_path', 'bin_file_list', 'label_path',
                           'label_file_list', 'pub_topic', 'box_topic')}
    # gpu = rospy.get_param('gpu')
    BIN_TENSORFLOW_TO_ROS(pub_topic=params['pub_topic'],
                          box_topic=params['box_topic'],
                          bin_path=params['bin_path'],
                          bin_file_list=params['bin_file_list'],
                          label_path=params['label_path'],
                          label_file_list=params['label_file_list'])
这里我们需要配置几个参数,如3D点云(xxx.bin)文件的路径、标签(Labels)文件的路径等等。为了方便我们运行程序,我们可以制作启动文件(xxx.launch)。
<launch>
<param name="bin_path" type="string" value="$(find bin2rosvis)/src/data/4haolouqian16/velodyne/" />
<param name="bin_file_list" type="string" value="$(find bin2rosvis)/src/data/ImageSet/val_16_vis.txt" />
<param name="label_path" type="string" value="$(find bin2rosvis)/src/data/4haolouqian16/pred/" />
<param name="label_file_list" type="string" value="$(find bin2rosvis)/src/data/ImageSet/val_16_vis.txt" />
<param name="pub_topic" type="string" value="/bin2ros_vis/points" />
<param name="box_topic" type="string" value="/bin2ros_vis/boundingbox" />
<param name="gpu" type="string" value="0" />
<node pkg="bin2rosvis" type="kitti_player.py" name="bin2rosvis_node" output="screen" />
<!-- <node name="rviz" pkg="rviz" type="rviz" args="-d $(find bin2rosvis)/rviz/kitti_player_rviz_car.rviz" />
<include file="$(find rviz_car_model)/launch/default.launch">
<arg name="fixed_frame" value="velodyne"/>
<arg name="transform" value="0 0 -1.5 0 0 0 1"/>
</include> -->
</launch>
最后,我们只需要使用catkin_make命令编译,然后运行就可以看到ROS动态显示点云的效果了,显示效果如下图所示。
欢迎大家在评论区交流哦~
评论(0)
您还未登录,请登录后发表或查看评论