From c8c91a430e530fb555525e83789f7b213ca00911 Mon Sep 17 00:00:00 2001
From: Jeremy Leibs
Date: Wed, 8 Mar 2023 13:52:29 -0500
Subject: [PATCH] Initial TurtleBot subscriber demo (#1523)

* Initial TurtleBot subscriber demo
* Standard example arg-parsing
* Simplify TF lookups by storing a map
* Use qualified calls for rerun_urdf for clarity
* Add comment about why we're patching points.fields
* Also log scalars for linear and angular velocity
* Links to new github issues
---
 .mypy.ini                            |   2 +-
 examples/python/ros/README.md        |  47 +++++
 examples/python/ros/main.py          | 289 +++++++++++++++++++++++++++
 examples/python/ros/requirements.txt |   4 +
 examples/python/ros/rerun_urdf.py    |  84 ++++++++
 5 files changed, 425 insertions(+), 1 deletion(-)
 create mode 100644 examples/python/ros/README.md
 create mode 100644 examples/python/ros/main.py
 create mode 100644 examples/python/ros/requirements.txt
 create mode 100644 examples/python/ros/rerun_urdf.py

diff --git a/.mypy.ini b/.mypy.ini
index 54cb1f7f20ae..3975c2924ca1 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -1,6 +1,6 @@
 [mypy]
 files = rerun_py/rerun_sdk/rerun, rerun_py/tests, examples/python
-exclude = examples/python/objectron/proto
+exclude = examples/python/objectron/proto|examples/python/ros
 namespace_packages = True
 show_error_codes = True
 strict = True
diff --git a/examples/python/ros/README.md b/examples/python/ros/README.md
new file mode 100644
index 000000000000..5b4345c224fd
--- /dev/null
+++ b/examples/python/ros/README.md
@@ -0,0 +1,47 @@
+# Overview
+
+A minimal example of creating a ROS node that subscribes to topics and converts the messages to rerun log calls.
+
+The solution here is mostly a toy example to show how ROS concepts can be mapped to Rerun. For more information on
+future improvements to ROS support, see the tracking issue: [#1537](https://github.com/rerun-io/rerun/issues/1537)
+
+NOTE: Unlike many of the other examples, this example requires a system installation of ROS
+in addition to the packages from requirements.txt.
+
+# Dependencies
+
+This example was developed and tested on top of [ROS2 Humble Hawksbill](https://docs.ros.org/en/humble/index.html)
+and the [turtlebot3 navigation example](https://navigation.ros.org/getting_started/index.html).
+
+Installing ROS is outside the scope of this example, but you will need the equivalent of the following packages:
+```
+sudo apt install ros-humble-desktop gazebo ros-humble-navigation2 ros-humble-turtlebot3 ros-humble-turtlebot3-gazebo
+```
+
+In addition to installing the dependencies from `requirements.txt` into a venv, you will also need to source the
+ROS setup script:
+```
+source venv/bin/activate
+source /opt/ros/humble/setup.bash
+```
+
+# Running
+
+First, in one terminal launch the nav2 turtlebot demo:
+```
+source /opt/ros/humble/setup.bash
+export TURTLEBOT3_MODEL=waffle
+export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
+
+ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False
+```
+
+As described in the nav demo, use the rviz window to initialize the pose estimate and set a navigation goal.
+
+You can now connect to the running ROS system by running:
+```
+python3 examples/python/ros/main.py
+```
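+
+The script uses Rerun's standard example flags via `rr.script_add_args`. As a
+sketch (exact flag names can vary between SDK versions), you can stream to an
+already-running viewer, or save the session to a file for later viewing:
+```
+# Stream to a viewer listening on the default address
+python3 examples/python/ros/main.py --connect
+
+# Save the log to a file instead
+python3 examples/python/ros/main.py --save turtlebot.rrd
+```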
diff --git a/examples/python/ros/main.py b/examples/python/ros/main.py
new file mode 100644
index 000000000000..15e268a2a4fe
--- /dev/null
+++ b/examples/python/ros/main.py
@@ -0,0 +1,289 @@
+#!/usr/bin/env python3
+"""
+Simple example of a ROS node that republishes some common types to Rerun.
+
+The solution here is mostly a toy example to show how ROS concepts can be
+mapped to Rerun. For more information on future improvements to ROS support,
+see the tracking issue: https://github.com/rerun-io/rerun/issues/1537
+
+NOTE: Unlike many of the other examples, this example requires a system installation of ROS
+in addition to the packages from requirements.txt.
+"""
+
+import argparse
+import sys
+
+import numpy as np
+import rerun as rr
+
+try:
+    import cv_bridge
+    import laser_geometry
+    import rclpy
+    import rerun_urdf
+    import trimesh
+    from image_geometry import PinholeCameraModel
+    from nav_msgs.msg import Odometry
+    from numpy.lib.recfunctions import structured_to_unstructured
+    from rclpy.callback_groups import ReentrantCallbackGroup
+    from rclpy.node import Node
+    from rclpy.qos import QoSDurabilityPolicy, QoSProfile
+    from rclpy.time import Duration, Time
+    from sensor_msgs.msg import CameraInfo, Image, LaserScan, PointCloud2, PointField
+    from sensor_msgs_py import point_cloud2
+    from std_msgs.msg import String
+    from tf2_ros import TransformException
+    from tf2_ros.buffer import Buffer
+    from tf2_ros.transform_listener import TransformListener
+
+except ImportError:
+    print(
+        """
+Could not import the required ROS2 packages.
+
+Make sure you have installed ROS2 (https://docs.ros.org/en/humble/index.html)
+and sourced /opt/ros/humble/setup.bash
+
+See: README.md for more details.
+"""
+    )
+    sys.exit(1)
+
+
+class TurtleSubscriber(Node):  # type: ignore[misc]
+    def __init__(self) -> None:
+        super().__init__("rr_turtlebot")
+
+        # Used for subscribing to latching topics
+        latching_qos = QoSProfile(depth=1, durability=QoSDurabilityPolicy.TRANSIENT_LOCAL)
+
+        # Allow concurrent callbacks
+        self.callback_group = ReentrantCallbackGroup()
+
+        # Subscribe to TF topics
+        self.tf_buffer = Buffer()
+        self.tf_listener = TransformListener(self.tf_buffer, self)
+
+        # Define a mapping for transforms
+        self.path_to_frame = {
+            "map": "map",
+            "map/points": "camera_depth_frame",
+            "map/robot": "base_footprint",
+            "map/robot/scan": "base_scan",
+            "map/robot/camera": "camera_rgb_optical_frame",
+            "map/robot/camera/points": "camera_depth_frame",
+        }
+
+        # Assorted helpers for data conversions
+        self.model = PinholeCameraModel()
+        self.cv_bridge = cv_bridge.CvBridge()
+        self.laser_proj = laser_geometry.laser_geometry.LaserProjection()
+
+        # Log a bounding box as a visual placeholder for the map
+        # TODO(jleibs): Log the real map once [#1531](https://github.com/rerun-io/rerun/issues/1531) is merged
+        rr.log_obb(
+            "map/box",
+            half_size=[6, 6, 2],
+            position=[0, 0, 1],
+            color=[255, 255, 255, 255],
+            timeless=True,
+        )
+
+        # Subscriptions
+        self.info_sub = self.create_subscription(
+            CameraInfo,
+            "/intel_realsense_r200_depth/camera_info",
+            self.cam_info_callback,
+            10,
+            callback_group=self.callback_group,
+        )
+
+        self.odom_sub = self.create_subscription(
+            Odometry,
+            "/odom",
+            self.odom_callback,
+            10,
+            callback_group=self.callback_group,
+        )
+
+        self.img_sub = self.create_subscription(
+            Image,
+            "/intel_realsense_r200_depth/image_raw",
+            self.image_callback,
+            10,
+            callback_group=self.callback_group,
+        )
+
+        self.points_sub = self.create_subscription(
+            PointCloud2,
+            "/intel_realsense_r200_depth/points",
+            self.points_callback,
+            10,
+            callback_group=self.callback_group,
+        )
+
+        self.scan_sub = self.create_subscription(
+            LaserScan,
+            "/scan",
+            self.scan_callback,
+            10,
+            callback_group=self.callback_group,
+        )
+
+        # The urdf is published as latching
+        self.urdf_sub = self.create_subscription(
+            String,
+            "/robot_description",
+            self.urdf_callback,
+            qos_profile=latching_qos,
+            callback_group=self.callback_group,
+        )
+
+    def log_tf_as_rigid3(self, path: str, time: Time) -> None:
+        """
+        Helper to look up a transform with tf and log using `log_rigid3`.
+
+        Note: we do the lookup on the client side instead of re-logging the raw transforms until
+        Rerun has support for Derived Transforms [#1533](https://github.com/rerun-io/rerun/issues/1533)
+        """
+        # Get the parent path
+        parent_path = path.rsplit("/", 1)[0]
+
+        # Find the corresponding frames from the mapping
+        child_frame = self.path_to_frame[path]
+        parent_frame = self.path_to_frame[parent_path]
+
+        # Do the TF lookup to get transform from child (source) -> parent (target)
+        try:
+            tf = self.tf_buffer.lookup_transform(parent_frame, child_frame, time, timeout=Duration(seconds=0.1))
+            t = tf.transform.translation
+            q = tf.transform.rotation
+            rr.log_rigid3(path, parent_from_child=([t.x, t.y, t.z], [q.x, q.y, q.z, q.w]))
+        except TransformException as ex:
+            print("Failed to get transform: {}".format(ex))
+
+    def cam_info_callback(self, info: CameraInfo) -> None:
+        """Log a `CameraInfo` with `log_pinhole`."""
+        time = Time.from_msg(info.header.stamp)
+        rr.set_time_nanos("ros_time", time.nanoseconds)
+
+        self.model.fromCameraInfo(info)
+
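+        # The 3x3 intrinsic matrix recovered from the `CameraInfo` has the standard
+        # pinhole layout:
+        #   [[fx,  0, cx],
+        #    [ 0, fy, cy],
+        #    [ 0,  0,  1]]
+        # which is what `log_pinhole` takes as its `child_from_parent` projection.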
+        rr.log_pinhole(
+            "map/robot/camera/img",
+            child_from_parent=self.model.intrinsicMatrix(),
+            width=self.model.width,
+            height=self.model.height,
+        )
+
+    def odom_callback(self, odom: Odometry) -> None:
+        """Update transforms when odom is updated."""
+        time = Time.from_msg(odom.header.stamp)
+        rr.set_time_nanos("ros_time", time.nanoseconds)
+
+        # Capture time-series data for the linear and angular velocities
+        rr.log_scalar("odometry/vel", odom.twist.twist.linear.x)
+        rr.log_scalar("odometry/ang_vel", odom.twist.twist.angular.z)
+
+        # Update the robot pose itself via TF
+        self.log_tf_as_rigid3("map/robot", time)
+
+    def image_callback(self, img: Image) -> None:
+        """Log an `Image` with `log_image` using `cv_bridge`."""
+        time = Time.from_msg(img.header.stamp)
+        rr.set_time_nanos("ros_time", time.nanoseconds)
+
+        rr.log_image("map/robot/camera/img", self.cv_bridge.imgmsg_to_cv2(img))
+        self.log_tf_as_rigid3("map/robot/camera", time)
+
+    def points_callback(self, points: PointCloud2) -> None:
+        """Log a `PointCloud2` with `log_points`."""
+        time = Time.from_msg(points.header.stamp)
+        rr.set_time_nanos("ros_time", time.nanoseconds)
+
+        pts = point_cloud2.read_points(points, field_names=["x", "y", "z"], skip_nans=True)
+
+        # The realsense driver exposes a float field called 'rgb', but the data is actually stored
+        # as bytes within the payload (not a float at all). Patch points.fields to use the correct
+        # r, g, b offsets so we can extract them with read_points.
+        points.fields = [
+            PointField(name="r", offset=16, datatype=PointField.UINT8, count=1),
+            PointField(name="g", offset=17, datatype=PointField.UINT8, count=1),
+            PointField(name="b", offset=18, datatype=PointField.UINT8, count=1),
+        ]
+
+        colors = point_cloud2.read_points(points, field_names=["r", "g", "b"], skip_nans=True)
+
+        pts = structured_to_unstructured(pts)
+        colors = structured_to_unstructured(colors)
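+        # (Equivalently, one could read the packed 'rgb' float and reinterpret its
+        # bytes with `ndarray.view(np.uint8)`; patching the fields just lets
+        # `read_points` do the byte extraction for us.)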
+
+        # Log points once rigidly under robot/camera/points. This is a robot-centric
+        # view of the world.
+        rr.log_points("map/robot/camera/points", positions=pts, colors=colors)
+        self.log_tf_as_rigid3("map/robot/camera/points", time)
+
+        # Log points a second time after transforming to the map frame. This is a map-centric
+        # view of the world.
+        #
+        # Once Rerun supports fixed-frame aware transforms [#1522](https://github.com/rerun-io/rerun/issues/1522)
+        # this will no longer be necessary.
+        rr.log_points("map/points", positions=pts, colors=colors)
+        self.log_tf_as_rigid3("map/points", time)
+
+    def scan_callback(self, scan: LaserScan) -> None:
+        """
+        Log a LaserScan after transforming it to line-segments.
+
+        Note: we do a client-side transformation of the LaserScan data into Rerun
+        points / lines until Rerun has native support for LaserScan style projections:
+        [#1534](https://github.com/rerun-io/rerun/issues/1534)
+        """
+        time = Time.from_msg(scan.header.stamp)
+        rr.set_time_nanos("ros_time", time.nanoseconds)
+
+        # Project the laser scan to a collection of points
+        points = self.laser_proj.projectLaser(scan)
+        pts = point_cloud2.read_points(points, field_names=["x", "y", "z"], skip_nans=True)
+        pts = structured_to_unstructured(pts)
+
+        # Turn each point into a line segment that starts 0.3m out along its ray
+        # (keeping the segments clear of the robot model) and ends at the point.
+        origin = (pts / np.linalg.norm(pts, axis=1).reshape(-1, 1)) * 0.3
+        segs = np.hstack([origin, pts]).reshape(pts.shape[0] * 2, 3)
+
+        rr.log_line_segments("map/robot/scan", segs, stroke_width=0.005)
+        self.log_tf_as_rigid3("map/robot/scan", time)
+
+    def urdf_callback(self, urdf_msg: String) -> None:
+        """Log a URDF using `log_scene` from `rerun_urdf`."""
+        urdf = rerun_urdf.load_urdf_from_msg(urdf_msg)
+
+        # The turtlebot URDF appears to have scale set incorrectly for the camera-link.
+        # Although rviz loads it properly, `yourdfpy` does not.
+        orig, _ = urdf.scene.graph.get("camera_link")
+        scale = trimesh.transformations.scale_matrix(0.00254)
+        urdf.scene.graph.update(frame_to="camera_link", matrix=orig.dot(scale))
+        scaled = urdf.scene.scaled(1.0)
+
+        rerun_urdf.log_scene(scene=scaled, node=urdf.base_link, path="map/robot/urdf", timeless=True)
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(description="Simple example of a ROS node that republishes to Rerun.")
+    rr.script_add_args(parser)
+    args, unknownargs = parser.parse_known_args()
+    rr.script_setup(args, "turtlebot_viz")
+
+    # Any remaining args go to rclpy
+    rclpy.init(args=unknownargs)
+
+    turtle_subscriber = TurtleSubscriber()
+
+    # Use the MultiThreadedExecutor so that calls to `lookup_transform` don't block the other threads
+    rclpy.spin(turtle_subscriber, executor=rclpy.executors.MultiThreadedExecutor())
+
+    turtle_subscriber.destroy_node()
+    rclpy.shutdown()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/python/ros/requirements.txt b/examples/python/ros/requirements.txt
new file mode 100644
index 000000000000..5c08038be119
--- /dev/null
+++ b/examples/python/ros/requirements.txt
@@ -0,0 +1,4 @@
+numpy
+opencv-python
+rerun-sdk
+yourdfpy
diff --git a/examples/python/ros/rerun_urdf.py b/examples/python/ros/rerun_urdf.py
new file mode 100644
index 000000000000..64449a7e4e77
--- /dev/null
+++ b/examples/python/ros/rerun_urdf.py
@@ -0,0 +1,84 @@
+import io
+import os
+from typing import Optional, cast
+from urllib.parse import urlparse
+
+import numpy as np
+import rerun as rr
+import trimesh
+from ament_index_python.packages import get_package_share_directory
+from std_msgs.msg import String
+from yourdfpy import URDF
+
+
+def ament_locate_package(fname: str) -> str:
+    """Helper to locate urdf resources via ament."""
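+    # For example, a mesh URI such as "package://turtlebot3_description/meshes/base.stl"
+    # (hypothetical path) resolves to that file under the package's ament share directory.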
+    if not fname.startswith("package://"):
+        return fname
+    parsed = urlparse(fname)
+    return os.path.join(get_package_share_directory(parsed.netloc), parsed.path[1:])
+
+
+def load_urdf_from_msg(msg: String) -> URDF:
+    """Load a URDF file using `yourdfpy` and find resources via ament."""
+    f = io.StringIO(msg.data)
+    return URDF.load(f, filename_handler=ament_locate_package)
+
+
+def log_scene(scene: trimesh.Scene, node: str, path: Optional[str] = None, timeless: bool = False) -> None:
+    """Log a trimesh scene to rerun."""
+    path = path + "/" + node if path else node
+
+    parent = scene.graph.transforms.parents.get(node)
+    children = scene.graph.transforms.children.get(node)
+
+    node_data = scene.graph.get(frame_to=node, frame_from=parent)
+
+    if node_data:
+        # Log the transform between this node and its direct parent (if it has one!).
+        if parent:
+            world_from_mesh = node_data[0]
+            t = trimesh.transformations.translation_from_matrix(world_from_mesh)
+            q = trimesh.transformations.quaternion_from_matrix(world_from_mesh)
+            # `trimesh` stores quaternions in `wxyz` format, rerun needs `xyzw`
+            # TODO(jleibs): Remove conversion once [#883](https://github.com/rerun-io/rerun/issues/883) is closed
+            q = np.array([q[1], q[2], q[3], q[0]])
+            rr.log_rigid3(path, parent_from_child=(t, q), timeless=timeless)
+
+        # Log this node's mesh, if it has one.
+        mesh = cast(trimesh.Trimesh, scene.geometry.get(node_data[1]))
+        if mesh:
+            # If vertex colors are set, use the average color as the albedo factor
+            # for the whole mesh.
+            vertex_colors = None
+            try:
+                colors = np.mean(mesh.visual.vertex_colors, axis=0)
+                if len(colors) == 4:
+                    vertex_colors = np.array(colors) / 255.0
+            except Exception:
+                pass
+
+            # If trimesh gives us a single vertex color for the entire mesh, we can interpret that
+            # as an albedo factor for the whole primitive.
+            visual_color = None
+            try:
+                colors = mesh.visual.to_color().vertex_colors
+                if len(colors) == 4:
+                    visual_color = np.array(colors) / 255.0
+            except Exception:
+                pass
+
+            albedo_factor = vertex_colors if vertex_colors is not None else visual_color
+
+            rr.log_mesh(
+                path,
+                mesh.vertices,
+                indices=mesh.faces,
+                normals=mesh.vertex_normals,
+                albedo_factor=albedo_factor,
+                timeless=timeless,
+            )
+
+    if children:
+        for child in children:
+            log_scene(scene, child, path, timeless)
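+
+
+if __name__ == "__main__":
+    # Minimal sketch for exercising `log_scene` outside of ROS. This is not part
+    # of the demo proper: it assumes a URDF file passed as the first CLI argument
+    # and a Rerun viewer already listening at the default address.
+    import sys
+
+    rr.init("rerun_urdf_demo")
+    rr.connect()
+
+    urdf = URDF.load(sys.argv[1], filename_handler=ament_locate_package)
+    log_scene(scene=urdf.scene, node=urdf.base_link, path="urdf", timeless=True)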