这篇博客主要记录自己对于OriginBot-相机驱动与可视化代码的学习与理解,我会注释写在代码文件中。


文档中,提供了两种驱动摄像头的方法:一个启动之后可以通过页面实时展示画面和人体检测算法的结果,另一种方法启动之后只是通过一个话题来发布图像数据。


可以通过浏览器查看的启动方式

文档里面说的很清楚,用以下命令启动:

ros2 launch originbot_bringup camera_websoket_display.launch.py

启动之后用浏览器打开 http://IP:8000 即可,

这个命令最后执行的代码是originbot.originbot_bringup.launch.camera_websoket_display.launch.py, 具体内容如下:

import os

from launch import LaunchDescription
from launch_ros.actions import Node

from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from ament_index_python import get_package_share_directory
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration


def generate_launch_description():
    """Bring up the full camera + web-display pipeline.

    Chain: mipi_cam (frames) -> hobot_codec (nv12->jpeg) -> websocket
    (browser display), plus the mono2d body-detection node whose results
    the web page overlays.
    """
    # Selects which MIPI camera sensor the driver talks to.
    device_arg = DeclareLaunchArgument(
        'device',
        default_value='GC4663',
        description='mipi camera device')

    # Camera driver: includes mipi_cam.launch.py, which starts the node
    # that actually publishes image frames (explained separately below).
    camera_launch = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(get_package_share_directory('mipi_cam'),
                         'launch/mipi_cam.launch.py')),
        launch_arguments={
            'mipi_image_width': '960',
            'mipi_image_height': '544',
            'mipi_io_method': 'shared_mem',
            'mipi_video_device': LaunchConfiguration('device'),
        }.items())

    # nv12 -> jpeg: TogetheROS.Bot hardware codec module, used here for
    # performance. Reference:
    # https://developer.horizon.cc/documents_tros/quick_demo/hobot_codec
    codec_launch = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(get_package_share_directory('hobot_codec'),
                         'launch/hobot_codec_encode.launch.py')),
        launch_arguments={
            'codec_in_mode': 'shared_mem',
            'codec_out_mode': 'ros',
            'codec_sub_topic': '/hbmem_img',
            'codec_pub_topic': '/image',
        }.items())

    # Web display: backed by an nginx static server. It subscribes to
    # /image for frames and to smart_topic for body-detection results,
    # and ultimately runs websocket.launch.py (explained separately below).
    smart_topic_arg = DeclareLaunchArgument(
        'smart_topic',
        default_value='/hobot_mono2d_body_detection',
        description='websocket smart topic')
    web_launch = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(get_package_share_directory('websocket'),
                         'launch/websocket.launch.py')),
        launch_arguments={
            'websocket_image_topic': '/image',
            'websocket_smart_topic': LaunchConfiguration('smart_topic'),
        }.items())

    # Mono2d body detection from TogetheROS.Bot: consumes /image_raw or
    # /hbmem_img frames and publishes detections on the configured topic.
    body_topic_arg = DeclareLaunchArgument(
        'mono2d_body_pub_topic',
        default_value='/hobot_mono2d_body_detection',
        description='mono2d body ai message publish topic')
    body_det_node = Node(
        package='mono2d_body_detection',
        executable='mono2d_body_detection',
        output='screen',
        parameters=[
            {"ai_msg_pub_topic_name": LaunchConfiguration(
                'mono2d_body_pub_topic')}
        ],
        arguments=['--ros-args', '--log-level', 'warn'])

    return LaunchDescription([
        device_arg,
        # image publish
        camera_launch,
        # image codec
        codec_launch,
        # body detection
        body_topic_arg,
        body_det_node,
        # web display
        smart_topic_arg,
        web_launch,
    ])

上面的代码里面调用了mipi_cam.launch.py和websocket.launch.py, 现在分别来介绍。

以下是originbot.mipi_cam.launch.mipi_cam.launch.py的内容:

from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node


def generate_launch_description():
    """Declare the mipi_cam parameters and start the image-publishing node."""
    # All tunables are exposed as launch arguments so callers (e.g. the
    # websocket display launch file) can override them.
    declared_args = [
        DeclareLaunchArgument(
            'mipi_camera_calibration_file_path',
            default_value='/userdata/dev_ws/src/origineye/mipi_cam/config/SC132GS_calibration.yaml',
            description='mipi camera calibration file path'),
        DeclareLaunchArgument(
            'mipi_out_format',
            default_value='nv12',
            description='mipi camera out format'),
        DeclareLaunchArgument(
            'mipi_image_width',
            default_value='1088',
            description='mipi camera out image width'),
        DeclareLaunchArgument(
            'mipi_image_height',
            default_value='1280',
            description='mipi camera out image height'),
        DeclareLaunchArgument(
            'mipi_io_method',
            default_value='shared_mem',
            description='mipi camera out io_method'),
        DeclareLaunchArgument(
            'mipi_video_device',
            default_value='F37',
            description='mipi camera device'),
    ]

    # The node that actually publishes camera frames. Parameter names here
    # are the ones the mipi_cam executable declares (no 'mipi_' prefix).
    camera_node = Node(
        package='mipi_cam',
        executable='mipi_cam',
        output='screen',
        parameters=[{
            "camera_calibration_file_path": LaunchConfiguration(
                'mipi_camera_calibration_file_path'),
            "out_format": LaunchConfiguration('mipi_out_format'),
            "image_width": LaunchConfiguration('mipi_image_width'),
            "image_height": LaunchConfiguration('mipi_image_height'),
            "io_method": LaunchConfiguration('mipi_io_method'),
            "video_device": LaunchConfiguration('mipi_video_device'),
            "rotate_degree": 90,
        }],
        arguments=['--ros-args', '--log-level', 'error'])

    return LaunchDescription(declared_args + [camera_node])

这段代码其实也很简单,就是一些参数声明,但是如果使用了OriginBot一段时间的小伙伴应该记得,小车启动摄像头后,会通过一个叫做/image_raw的话题发布图像数据,这个话题在这里没有提到。

这一部分在originbot.mipi_cam.src.mipi_cam_node.cpp 里面的236行, 函数如下:

  if (io_method_name_.compare("ros") == 0) {
    image_pub_ =
      this->create_publisher<sensor_msgs::msg::Image>("image_raw", PUB_BUF_NUM);
  }

最后讲一下websocket.launch.py, 这个代码实际上是TogetheROS.Bot的,不是OriginBot自己开发的,位于/opt/tros/share/websocket/launch 目录下,

import os
import subprocess

from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration
from ament_index_python.packages import get_package_prefix


def generate_launch_description():
    """Start the nginx web server (if not already running) and the websocket node."""
    # The static web server shipped inside the websocket package.
    server_name = 'nginx'
    nginx_bin = "./sbin/" + server_name
    server_cmd = nginx_bin + " -p ."

    # Scan the process table: if a matching nginx command line is already
    # present, skip launching a second instance.
    ps_output = subprocess.check_output(['ps', 'ax'], universal_newlines=True)
    already_running = any(server_cmd in line for line in ps_output.split('\n'))

    if already_running:
        print("webserver has launch")
    else:
        print("launch webserver")
        saved_cwd = os.getcwd()
        print("pwd_path is ", saved_cwd)
        # nginx must be started from the package's webservice directory so
        # its relative '-p .' prefix resolves correctly.
        webserver_dir = os.path.join(get_package_prefix('websocket'),
                                     "lib/websocket/webservice")
        print("webserver_path is ", webserver_dir)
        os.chdir(webserver_dir)
        # os.chmod(nginx_bin, stat.S_IRWXU)
        print("launch webserver cmd is ", server_cmd)
        os.system(server_cmd)
        os.chdir(saved_cwd)

    declared_args = [
        DeclareLaunchArgument(
            'websocket_image_topic',
            default_value='/image_jpeg',
            description='image subscribe topic name'),
        DeclareLaunchArgument(
            'websocket_image_type',
            default_value='mjpeg',
            description='image type'),
        DeclareLaunchArgument(
            'websocket_only_show_image',
            default_value='False',
            description='only show image'),
        DeclareLaunchArgument(
            'websocket_output_fps',
            default_value='0',
            description='output fps'),
        DeclareLaunchArgument(
            'websocket_smart_topic',
            default_value='/hobot_mono2d_body_detection',
            description='smart message subscribe topic name'),
    ]

    # Bridges the image and smart-result topics to the browser page.
    websocket_node = Node(
        package='websocket',
        executable='websocket',
        output='screen',
        parameters=[
            {"image_topic": LaunchConfiguration('websocket_image_topic')},
            {"image_type": LaunchConfiguration('websocket_image_type')},
            {"only_show_image": LaunchConfiguration(
                'websocket_only_show_image')},
            {"output_fps": LaunchConfiguration('websocket_output_fps')},
            {"smart_topic": LaunchConfiguration('websocket_smart_topic')}
        ],
        arguments=['--ros-args', '--log-level', 'error'])

    return LaunchDescription(declared_args + [websocket_node])

可以看到,这个launch文件里面其实启动了一个nginx进程和一个websocket Node, 这个websocket的源代码在这里,不过感觉一般开发者没必要深究了,满足自己的需求的话,直接使用即可,毕竟这就是地平线开发TogetheROS.Bot的目的。

通过话题来传输图像数据

官网是通过以下命令来启动的:

ros2 launch originbot_bringup camera.launch.py

其中camera.launch.py位于originbot/originbot_bringup/launch目录下,内容如下:

from launch import LaunchDescription
from launch_ros.actions import Node

def generate_launch_description():
    """Topic-only camera pipeline.

    Starts mipi_cam publishing raw frames over the /image_raw topic
    (io_method 'ros'), plus the transport_img node from originbot_demo
    that republishes a compressed variant.
    """
    return LaunchDescription([
        Node(
            package='mipi_cam',
            executable='mipi_cam',
            output='screen',
            parameters=[
                # FIX: the mipi_cam node declares these parameters without a
                # 'mipi_' prefix (see mipi_cam.launch.py); the previously used
                # keys 'mipi_camera_calibration_file_path' and
                # 'mipi_video_device' were silently ignored by the driver.
                {"camera_calibration_file_path": "/opt/tros/lib/mipi_cam/config/GC4663_calibration.yaml"},
                {"out_format": "bgr8"},
                {"image_width": 960},
                {"image_height": 544},
                # 'ros' io_method => frames go out on the /image_raw topic.
                {"io_method": "ros"},
                {"video_device": "GC4663"}
            ],
            arguments=['--ros-args', '--log-level', 'error']
        ),
        # Republishes /image_raw as compressed_image / bgr8_image.
        Node(
            package='originbot_demo',
            executable='transport_img',
            arguments=['--ros-args', '--log-level', 'error']
        ),
    ])

这个launch文件里面启动了两个Node,一个是mipi_cam,这个在前面已经介绍过了,另一个Node运行的是originbot/originbot_demo/originbot_demo/transport_img.py, 内容如下:

import cv2
import numpy as np
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge


class ImageCompressor(Node):
    """Subscribe to raw camera frames, JPEG-compress them, and republish.

    Subscribes:
        image_raw (sensor_msgs/Image): raw frames from mipi_cam.
    Publishes:
        compressed_image (sensor_msgs/CompressedImage): JPEG-compressed frames.
        bgr8_image (sensor_msgs/Image): the same frames decoded back to bgr8.
    """

    def __init__(self):
        super().__init__('image_compressor')
        self.bridge = CvBridge()
        self.image_sub = self.create_subscription(Image, 'image_raw', self.callback, 10)
        self.compressed_pub = self.create_publisher(CompressedImage, 'compressed_image', 10)
        self.bgr8_pub = self.create_publisher(Image, 'bgr8_image', 10)

    def callback(self, msg):
        """Compress one incoming frame and publish both output topics."""
        cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')

        # JPEG quality 50: trades fidelity for network bandwidth.
        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 50]
        _, compressed = cv2.imencode('.jpg', cv_image, encode_param)

        compressed_msg = CompressedImage()
        compressed_msg.header = msg.header
        compressed_msg.format = 'jpeg'
        # tobytes() replaces the deprecated ndarray.tostring().
        compressed_msg.data = np.array(compressed).tobytes()

        # FIX: publish exactly once per frame (the original published this
        # message twice, doubling bandwidth on compressed_image).
        self.compressed_pub.publish(compressed_msg)

        # Round-trip decode so subscribers can consume a plain bgr8 image.
        decompressed = cv2.imdecode(np.frombuffer(compressed_msg.data, np.uint8), cv2.IMREAD_COLOR)
        bgr8_msg = self.bridge.cv2_to_imgmsg(decompressed, encoding='bgr8')
        self.bgr8_pub.publish(bgr8_msg)

def main(args=None):
    """Entry point: initialize rclpy and spin the compressor node until shutdown."""
    rclpy.init(args=args)
    node = ImageCompressor()
    rclpy.spin(node)
    rclpy.shutdown()


if __name__ == '__main__':
    main()

这段代码里面创建了一个叫做image_compressor的Node,这个Node订阅来自image_raw的图像数据(由前面的mipi_cam发布),然后处理之后再通过compressed_image和bgr8_image两个话题发布出去。中间所做的处理也就是压缩,便于提升网络传输性能。