diff --git a/docs/index.rst b/docs/index.rst index d5c3ad249e5..7acde411ec1 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -86,6 +86,7 @@ Table of Contents source/overview/developer-guide/index source/overview/core-concepts/index + source/overview/sensors/index source/overview/environments source/overview/reinforcement-learning/index source/overview/teleop_imitation @@ -98,7 +99,7 @@ Table of Contents source/features/hydra source/features/multi_gpu - source/features/tiled_rendering + Tiled Rendering source/features/reproducibility .. toctree:: diff --git a/docs/source/_static/overview/overview_sensors_contact_diagram.png b/docs/source/_static/overview/overview_sensors_contact_diagram.png new file mode 100644 index 00000000000..91b05c51fa8 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_contact_diagram.png differ diff --git a/docs/source/_static/overview/overview_sensors_depth.png b/docs/source/_static/overview/overview_sensors_depth.png new file mode 100644 index 00000000000..5437e0bf8db Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_depth.png differ diff --git a/docs/source/_static/overview/overview_sensors_frame_transformer.png b/docs/source/_static/overview/overview_sensors_frame_transformer.png new file mode 100644 index 00000000000..c16944060e8 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_frame_transformer.png differ diff --git a/docs/source/_static/overview/overview_sensors_ft_visualizer.png b/docs/source/_static/overview/overview_sensors_ft_visualizer.png new file mode 100644 index 00000000000..c16ea4aa052 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_ft_visualizer.png differ diff --git a/docs/source/_static/overview/overview_sensors_instance.png b/docs/source/_static/overview/overview_sensors_instance.png new file mode 100644 index 00000000000..8683e719464 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_instance.png differ diff --git a/docs/source/_static/overview/overview_sensors_instanceID.png b/docs/source/_static/overview/overview_sensors_instanceID.png new file mode 100644 index 00000000000..2a3d4d90361 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_instanceID.png differ diff --git a/docs/source/_static/overview/overview_sensors_normals.png b/docs/source/_static/overview/overview_sensors_normals.png new file mode 100644 index 00000000000..6e3ab7f3cda Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_normals.png differ diff --git a/docs/source/_static/overview/overview_sensors_rc_patterns.png b/docs/source/_static/overview/overview_sensors_rc_patterns.png new file mode 100644 index 00000000000..fc3c001b148 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_rc_patterns.png differ diff --git a/docs/source/_static/overview/overview_sensors_rc_visualizer.png b/docs/source/_static/overview/overview_sensors_rc_visualizer.png new file mode 100644 index 00000000000..ac4a06595eb Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_rc_visualizer.png differ diff --git a/docs/source/_static/overview/overview_sensors_rgb.png b/docs/source/_static/overview/overview_sensors_rgb.png new file mode 100644 index 00000000000..2f17e810392 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_rgb.png differ diff --git a/docs/source/_static/overview/overview_sensors_semantic.png b/docs/source/_static/overview/overview_sensors_semantic.png 
new file mode 100644 index 00000000000..aa14f16e150 Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_semantic.png differ diff --git a/docs/source/features/tiled_rendering.rst b/docs/source/overview/sensors/camera.rst similarity index 68% rename from docs/source/features/tiled_rendering.rst rename to docs/source/overview/sensors/camera.rst index d8ee1d11187..0ef72196c59 100644 --- a/docs/source/features/tiled_rendering.rst +++ b/docs/source/overview/sensors/camera.rst @@ -1,7 +1,15 @@ -Tiled-Camera Rendering -====================== +.. _overview_sensors_camera: -.. currentmodule:: omni.isaac.lab + +Camera +======== + +Camera sensors are uniquely defined by the use of the ``render_product``, a structure for managing data generated by the rendering pipeline (images). Isaac Lab provides the ability to fully control how these renderings are created through camera parameters like focal length, pose, type, etc... and what kind of data you want to render through the use of Annotators, allowing you to record not only RGB, but also Instance segmentation, object pose, object ID, etc... + +Rendered images are unique among the supported data types in Isaac Lab due to the inherently large bandwidth requirements for moving those data. A single 800 x 600 image with 32-bit color (a single float per pixel) clocks in at just under 2 MB. If we render at 60 fps and record every frame, that camera needs to move 120 MB/s. Multiply this by the number of cameras in an environment and environments in a simulation, and you can quickly see how scaling a naive vectorization of camera data could lead to bandwidth challenges. NVIDIA's Isaac Lab leverages our expertise in GPU hardware to provide an API that specifically addresses these scaling challenges in the rendering pipeline. + +Tiled Rendering +~~~~~~~~~~~~~~~~~ .. note:: @@ -10,19 +18,9 @@ Tiled-Camera Rendering Tiled rendering in combination with image processing networks require heavy memory resources, especially at larger resolutions. We recommend running 512 cameras in the scene on RTX 4090 GPUs or similar. +The Tiled Rendering APIs provide a vectorized interface for collecting data from camera sensors. This is useful for reinforcement learning environments where parallelization can be exploited to accelerate data collection and thus the training loop. Tiled rendering works by using a single ``render_product`` for **all** clones of a single camera in the scene. The desired dimensions of a single image and the number of environments are used to compute a much larger ``render_product``, consisting of the tiled individual renders from the separate clones of the camera. When all cameras have populated their buffers the render product is "completed" and can be moved around as a single, large image, dramatically reducing the overhead for moving the data from the host to the device, for example. Only a single call is used to synchronize the device data, instead of one call per camera, and this is a big part of what makes the Tiled Rendering API more efficient for working with vision data. -Tiled rendering APIs provide a vectorized interface for collecting data from camera sensors. -This is useful for reinforcement learning environments requiring vision in the loop. -Tiled rendering works by concatenating camera outputs from multiple cameras and rendering -one single large image instead of multiple smaller images that would have been produced -by each individual camera. 
This reduces the amount of time required for rendering and -provides a more efficient API for working with vision data. - -Isaac Lab provides tiled rendering APIs for RGB, depth, along with other annotators through the :class:`~sensors.TiledCamera` -class. Configurations for the tiled rendering APIs can be defined through the :class:`~sensors.TiledCameraCfg` -class, specifying parameters such as the regex expression for all camera paths, the transform -for the cameras, the desired data type, the type of cameras to add to the scene, and the camera -resolution. +Isaac Lab provides tiled rendering APIs for RGB and depth, along with other annotators, through the :class:`~sensors.TiledCamera` class. Configurations for the tiled rendering APIs can be defined through the :class:`~sensors.TiledCameraCfg` class, specifying parameters such as the regex expression for all camera paths, the transform for the cameras, the desired data type, the type of cameras to add to the scene, and the camera resolution. .. code-block:: python @@ -37,8 +35,7 @@ resolution. height=80, ) -To access the tiled rendering interface, a :class:`~sensors.TiledCamera` object can be created and used -to retrieve data from the cameras. +To access the tiled rendering interface, a :class:`~sensors.TiledCamera` object can be created and used to retrieve data from the cameras. .. code-block:: python @@ -46,19 +43,17 @@ to retrieve data from the cameras. data_type = "rgb" data = tiled_camera.data.output[data_type] -The returned data will be transformed into the shape (num_cameras, height, width, num_channels), which -can be used directly as observation for reinforcement learning. +The returned data will be transformed into the shape (num_cameras, height, width, num_channels), which can be used directly as observations for reinforcement learning. -When working with rendering, make sure to add the ``--enable_cameras`` argument when launching the -environment. For example: +When working with rendering, make sure to add the ``--enable_cameras`` argument when launching the environment. For example: .. code-block:: shell python source/standalone/workflows/rl_games/train.py --task=Isaac-Cartpole-RGB-Camera-Direct-v0 --headless --enable_cameras -Annotators and Data Types -------------------------- +Annotators +~~~~~~~~~~~~~~~~~ Both :class:`~sensors.TiledCamera` and :class:`~sensors.Camera` classes provide APIs for retrieving various types of annotator data from replicator: @@ -76,6 +71,11 @@ Both :class:`~sensors.TiledCamera` and :class:`~sensors.Camera` classes provide RGB and RGBA ~~~~~~~~~~~~ +.. figure:: ../../_static/overview/overview_sensors_rgb.png + :align: center + :figwidth: 100% + :alt: A scene captured in RGB + ``rgb`` data type returns a 3-channel RGB colored image of type ``torch.uint8``, with dimension (B, H, W, 3). ``rgba`` data type returns a 4-channel RGBA colored image of type ``torch.uint8``, with dimension (B, H, W, 4). To convert the ``torch.uint8`` data to ``torch.float32``, divide the buffer by 255.0. @@ -85,6 +85,11 @@ Depth and Distances ~~~~~~~~~~~~~~~~~~~ +.. figure:: ../../_static/overview/overview_sensors_depth.png + :align: center + :figwidth: 100% + :alt: A scene captured in depth + ``distance_to_camera`` returns a single-channel depth image with distance to the camera optical center. The dimension for this annotator is (B, H, W, 1) and has type ``torch.float32``. ``distance_to_image_plane`` returns a single-channel depth image with distances of 3D points from the camera plane along the camera's Z-axis. The dimension for this annotator is (B, H, W, 1) and has type ``torch.float32``.
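As a minimal sketch of consuming this output (assuming ``"distance_to_camera"`` was included in the camera's configured data types, and that pixels which hit nothing may come back as ``inf``), the depth buffer can be sanitized and normalized before being used as an observation:

.. code-block:: python

    import torch

    # (num_cameras, height, width, 1), torch.float32
    depth = tiled_camera.data.output["distance_to_camera"]

    # clamp missing hits to an assumed maximum range and normalize to [0, 1]
    max_range = 10.0  # illustrative value, in meters
    depth = torch.nan_to_num(depth, nan=max_range, posinf=max_range)
    depth_obs = depth.clamp(0.0, max_range) / max_range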
@@ -94,6 +99,11 @@ Depth and Distances Normals ~~~~~~~ +.. figure:: ../../_static/overview/overview_sensors_normals.png + :align: center + :figwidth: 100% + :alt: A scene captured with surface normals + ``normals`` returns an image containing the local surface normal vectors at each pixel. The buffer has dimension (B, H, W, 3), containing the (x, y, z) information for each vector, and has data type ``torch.float32``. Motion Vectors @@ -104,6 +114,11 @@ Motion Vectors Semantic Segmentation ~~~~~~~~~~~~~~~~~~~~~ +.. figure:: ../../_static/overview/overview_sensors_semantic.png + :align: center + :figwidth: 100% + :alt: A scene captured with semantic segmentation + ``semantic_segmentation`` outputs semantic segmentation of each entity in the camera’s viewport that has semantic labels. In addition to the image buffer, an ``info`` dictionary can be retrieved with ``tiled_camera.data.info['semantic_segmentation']`` containing ID to labels information. - If ``colorize_semantic_segmentation=True`` in the camera config, a 4-channel RGBA image will be returned with dimension (B, H, W, 4) and type ``torch.uint8``. The info ``idToLabels`` dictionary will be the mapping from color to semantic labels. @@ -113,6 +128,11 @@ Semantic Segmentation Instance ID Segmentation ~~~~~~~~~~~~~~~~~~~~~~~~ +.. figure:: ../../_static/overview/overview_sensors_instanceID.png + :align: center + :figwidth: 100% + :alt: A scene captured with instance ID segmentation + ``instance_id_segmentation_fast`` outputs instance ID segmentation of each entity in the camera’s viewport. The instance ID is unique for each prim in the scene with different paths. In addition to the image buffer, an ``info`` dictionary can be retrieved with ``tiled_camera.data.info['instance_id_segmentation_fast']`` containing ID to labels information. The main difference between ``instance_id_segmentation_fast`` and ``instance_segmentation_fast`` is that the instance segmentation annotator goes down the hierarchy to the lowest-level prim that has semantic labels, whereas instance ID segmentation always goes down to the leaf prim. @@ -124,11 +144,18 @@ The main difference between ``instance_id_segmentation_fast`` and ``instance_seg Instance Segmentation """"""""""""""""""""" +.. figure:: ../../_static/overview/overview_sensors_instance.png + :align: center + :figwidth: 100% + :alt: A scene captured with instance segmentation + ``instance_segmentation_fast`` outputs instance segmentation of each entity in the camera’s viewport. In addition to the image buffer, an ``info`` dictionary can be retrieved with ``tiled_camera.data.info['instance_segmentation_fast']`` containing ID to labels and ID to semantic information. -- If ``colorize_instance_segmentation=True`` in the camera config, a 4-channel RGBA image will be returned with dimension (B, H, W, 4) and type ``torch.uint8``. The info ``idToLabels`` dictionary will be the mapping from color to USD prim path of that semantic entity. The info ``idToSemantics`` dictionary will be the mapping from color to semantic labels of that semantic entity. +- If ``colorize_instance_segmentation=True`` in the camera config, a 4-channel RGBA image will be returned with dimension (B, H, W, 4) and type ``torch.uint8``. + +- If ``colorize_instance_segmentation=False``, a buffer of dimension (B, H, W, 1) of type ``torch.int32`` will be returned, containing the instance ID of each pixel. -- If ``colorize_instance_segmentation=False``, a buffer of dimension (B, H, W, 1) of type ``torch.int32`` will be returned, containing the instance ID of each pixel. The info ``idToLabels`` dictionary will be the mapping from instance ID to USD prim path of that semantic entity. The info ``idToSemantics`` dictionary will be the mapping from instance ID to semantic labels of that semantic entity. +The info ``idToLabels`` dictionary will be the mapping from color (when colorized) or instance ID to the USD prim path of that semantic entity. The info ``idToSemantics`` dictionary will be the mapping from color (when colorized) or instance ID to the semantic labels of that semantic entity.
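As a rough sketch of reading these buffers (assuming ``"instance_segmentation_fast"`` was requested in the camera's data types and ``colorize_instance_segmentation=False``):

.. code-block:: python

    # (num_cameras, height, width, 1), torch.int32 instance IDs when not colorized
    seg = tiled_camera.data.output["instance_segmentation_fast"]
    info = tiled_camera.data.info["instance_segmentation_fast"]

    # the mappings described above (exact key types may differ between annotators)
    print(info["idToLabels"])     # ID -> USD prim path
    print(info["idToSemantics"])  # ID -> semantic labels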
Current Limitations diff --git a/docs/source/overview/sensors/contact_sensor.rst b/docs/source/overview/sensors/contact_sensor.rst new file mode 100644 index 00000000000..72239fd2875 --- /dev/null +++ b/docs/source/overview/sensors/contact_sensor.rst @@ -0,0 +1,145 @@ +.. _overview_sensors_contact: + +Contact Sensor +================ + +.. figure:: ../../_static/overview/overview_sensors_contact_diagram.png + :align: center + :figwidth: 100% + :alt: A contact sensor with filtering + +The contact sensor is designed to return the net contact force acting on a given rigid body. The sensor is written to behave as a physical object, and so the "scope" of the contact sensor is limited to the body (or bodies) that defines it. There are multiple ways to define this scope, depending on your need to filter the forces coming from the contact. + +By default, the reported force is the total contact force, but your application may only care about contact forces due to specific objects. Retrieving contact forces from specific objects requires filtering, and this can only be done in a "many-to-one" way. A multi-legged robot that needs filterable contact information for its feet would require one sensor per foot to be defined in the environment, but a robotic hand with contact sensors on the tips of each finger can be defined with a single sensor. + +Consider a simple environment with an Anymal Quadruped and a block: + +.. 
code-block:: python + + @configclass + class ContactSensorsSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Rigid Object + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.5,0.5,0.1), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.5, 0.5, 0.05)), + ) + + contact_forces_LF = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"], + ) + + contact_forces_RF = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"], + ) + + contact_forces_H = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*H_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + ) + +We define the sensors on the feet of the robot in two different ways. The front feet are independent sensors (one sensor body per foot) and the "Cube" is placed under the left foot. The hind feet are defined as a single sensor with multiple bodies. + +We can then run the scene and print the data from the sensors + +.. code-block:: python + + def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + . + . + . + # Simulate physics + while simulation_app.is_running(): + . + . + . + # print information from the sensors + print("-------------------------------") + print(scene["contact_forces_LF"]) + print("Received force matrix of: ", scene["contact_forces_LF"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_LF"].data.net_forces_w) + print("-------------------------------") + print(scene["contact_forces_RF"]) + print("Received force matrix of: ", scene["contact_forces_RF"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_RF"].data.net_forces_w) + print("-------------------------------") + print(scene["contact_forces_H"]) + print("Received force matrix of: ", scene["contact_forces_H"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_H"].data.net_forces_w) + +Here, we print both the net contact force and the filtered force matrix for each contact sensor defined in the scene. The front left and front right feet report the following + +.. 
code-block:: bash + + ------------------------------- + Contact sensor @ '/World/envs/env_.*/Robot/LF_FOOT': + view type : + update period (s) : 0.0 + number of bodies : 1 + body names : ['LF_FOOT'] + + Received force matrix of: tensor([[[[-1.3923e-05, 1.5727e-04, 1.1032e+02]]]], device='cuda:0') + Received contact force of: tensor([[[-1.3923e-05, 1.5727e-04, 1.1032e+02]]], device='cuda:0') + ------------------------------- + Contact sensor @ '/World/envs/env_.*/Robot/RF_FOOT': + view type : + update period (s) : 0.0 + number of bodies : 1 + body names : ['RF_FOOT'] + + Received force matrix of: tensor([[[[0., 0., 0.]]]], device='cuda:0') + Received contact force of: tensor([[[1.3529e-05, 0.0000e+00, 1.0069e+02]]], device='cuda:0') + +Notice that even with filtering, both sensors report the net contact force acting on the foot. However, only the left foot has a non-zero "force matrix", because the right foot isn't standing on the filtered body, ``/World/envs/env_.*/Cube``. Now, check out the data coming from the hind feet! + +.. code-block:: bash + + ------------------------------- + Contact sensor @ '/World/envs/env_.*/Robot/.*H_FOOT': + view type : + update period (s) : 0.0 + number of bodies : 2 + body names : ['LH_FOOT', 'RH_FOOT'] + + Received force matrix of: None + Received contact force of: tensor([[[9.7227e-06, 0.0000e+00, 7.2364e+01], + [2.4322e-05, 0.0000e+00, 1.8102e+02]]], device='cuda:0') + +In this case, the contact sensor has two bodies: the left and right hind feet. When the force matrix is queried, the result is ``None`` because this is a many-body sensor, and presently Isaac Lab only supports "many-to-one" contact force filtering. Unlike the single-body contact sensor, the reported force tensor has multiple entries, with each "row" corresponding to the contact force on a single body of the sensor (matching the ordering at construction). + +.. dropdown:: Code for contact_sensor.py + :icon: code + + .. literalinclude:: ../../../../source/standalone/demos/sensors/contact_sensor.py + :language: python + :linenos: diff --git a/docs/source/overview/sensors/frame_transformer.rst b/docs/source/overview/sensors/frame_transformer.rst new file mode 100644 index 00000000000..d60dd9304c0 --- /dev/null +++ b/docs/source/overview/sensors/frame_transformer.rst @@ -0,0 +1,158 @@ +.. _overview_sensors_frame_transformer: + +Frame Transformer +==================== + +.. figure:: ../../_static/overview/overview_sensors_frame_transformer.png + :align: center + :figwidth: 100% + :alt: A diagram outlining the basic geometry of frame transformations + +.. + Do YOU want to know where things are relative to other things at a glance? Then the frame transformer is the sensor for you! + +One of the most common operations that needs to be performed within a physics simulation is the frame transformation: rewriting a vector or quaternion in the basis of an arbitrary Euclidean coordinate system. There are many ways to accomplish this within Isaac and USD, but these methods can be cumbersome to implement within Isaac Lab's GPU-based simulation and cloned environments. To mitigate this problem, we have designed the Frame Transformer Sensor, which tracks and calculates the relative frame transformations for rigid bodies of interest in the scene. + +The sensor is minimally defined by a source frame and a list of target frames. These definitions take the form of a prim path (for the source) and a list of regex-capable prim paths for the rigid bodies to be tracked (for the targets). + +.. 
code-block:: python + + @configclass + class FrameTransformerSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Rigid Object + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(1,1,1), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(5, 0, 0.5)), + ) + + specific_transforms = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT"), + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT"), + ], + debug_vis=True, + ) + + cube_transform = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Cube") + ], + debug_vis=False, + ) + + robot_transforms = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*") + ], + debug_vis=False, + ) + + +We can now run the scene and query the sensor for data + +.. code-block:: python + + def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + . + . + . + # Simulate physics + while simulation_app.is_running(): + . + . + . + + # print information from the sensors + print("-------------------------------") + print(scene["specific_transforms"]) + print("relative transforms:", scene["specific_transforms"].data.target_pos_source) + print("relative orientations:", scene["specific_transforms"].data.target_quat_source) + print("-------------------------------") + print(scene["cube_transform"]) + print("relative transform:", scene["cube_transform"].data.target_pos_source) + print("-------------------------------") + print(scene["robot_transforms"]) + print("relative transforms:", scene["robot_transforms"].data.target_pos_source) + +Let's take a look at the result for tracking specific objects. First, we can take a look at the data coming from the sensors on the feet + +.. code-block:: bash + + ------------------------------- + FrameTransformer @ '/World/envs/env_.*/Robot/base': + tracked body frames: ['base', 'LF_FOOT', 'RF_FOOT'] + number of envs: 1 + source body frame: base + target frames (count: ['LF_FOOT', 'RF_FOOT']): 2 + + relative transforms: tensor([[[ 0.4658, 0.3085, -0.4840], + [ 0.4487, -0.2959, -0.4828]]], device='cuda:0') + relative orientations: tensor([[[ 0.9623, 0.0072, -0.2717, -0.0020], + [ 0.9639, 0.0052, -0.2663, -0.0014]]], device='cuda:0') + +.. figure:: ../../_static/overview/overview_sensors_ft_visualizer.png + :align: center + :figwidth: 100% + :alt: The frame transformer visualizer + +By activating the visualizer, we can see that the frames of the feet are rotated "upward" slightly. 
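These relative translations are also convenient for derived quantities. For example, a minimal sketch (assuming the shapes shown in the printout above) of computing the distance from the base to each tracked foot:

.. code-block:: python

    import torch

    # (num_envs, num_targets, 3): translation of each target frame expressed in the source (base) frame
    foot_pos = scene["specific_transforms"].data.target_pos_source
    # distance from the base to ['LF_FOOT', 'RF_FOOT'], shape (num_envs, num_targets)
    foot_dist = torch.linalg.norm(foot_pos, dim=-1)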
We can also see the explicit relative positions and rotations by querying the sensor for data, which returns these values as a list with the same order as the tracked frames. This becomes even more apparent if we examine the transforms specified by regex. + +.. code-block:: bash + + ------------------------------- + FrameTransformer @ '/World/envs/env_.*/Robot/base': + tracked body frames: ['base', 'LF_FOOT', 'LF_HIP', 'LF_SHANK', 'LF_THIGH', 'LH_FOOT', 'LH_HIP', 'LH_SHANK', 'LH_THIGH', 'RF_FOOT', 'RF_HIP', 'RF_SHANK', 'RF_THIGH', 'RH_FOOT', 'RH_HIP', 'RH_SHANK', 'RH_THIGH', 'base'] + number of envs: 1 + source body frame: base + target frames (count: ['LF_FOOT', 'LF_HIP', 'LF_SHANK', 'LF_THIGH', 'LH_FOOT', 'LH_HIP', 'LH_SHANK', 'LH_THIGH', 'RF_FOOT', 'RF_HIP', 'RF_SHANK', 'RF_THIGH', 'RH_FOOT', 'RH_HIP', 'RH_SHANK', 'RH_THIGH', 'base']): 17 + + relative transforms: tensor([[[ 4.6581e-01, 3.0846e-01, -4.8398e-01], + [ 2.9990e-01, 1.0400e-01, -1.7062e-09], + [ 2.1409e-01, 2.9177e-01, -2.4214e-01], + [ 3.5980e-01, 1.8780e-01, 1.2608e-03], + [-4.8813e-01, 3.0973e-01, -4.5927e-01], + [-2.9990e-01, 1.0400e-01, 2.7044e-09], + [-2.1495e-01, 2.9264e-01, -2.4198e-01], + [-3.5980e-01, 1.8780e-01, 1.5582e-03], + [ 4.4871e-01, -2.9593e-01, -4.8277e-01], + [ 2.9990e-01, -1.0400e-01, -2.7057e-09], + [ 1.9971e-01, -2.8554e-01, -2.3778e-01], + [ 3.5980e-01, -1.8781e-01, -9.1049e-04], + [-5.0090e-01, -2.9095e-01, -4.5746e-01], + [-2.9990e-01, -1.0400e-01, 6.3592e-09], + [-2.1860e-01, -2.8251e-01, -2.5163e-01], + [-3.5980e-01, -1.8779e-01, -1.8792e-03], + [ 0.0000e+00, 0.0000e+00, 0.0000e+00]]], device='cuda:0') + +Here, the sensor is tracking all rigid body children of ``Robot/base``, but this expression is **inclusive**, meaning that the source body itself is also a target. This can be seen both by examining the source and target list, where ``base`` appears twice, and also in the returned data, where the sensor returns the relative transform to itself, (0, 0, 0). + +.. dropdown:: Code for frame_transformer_sensor.py + :icon: code + + .. literalinclude:: ../../../../source/standalone/demos/sensors/frame_transformer_sensor.py + :language: python + :linenos: diff --git a/docs/source/overview/sensors/index.rst b/docs/source/overview/sensors/index.rst new file mode 100644 index 00000000000..496b4464a1d --- /dev/null +++ b/docs/source/overview/sensors/index.rst @@ -0,0 +1,20 @@ +.. _overview_sensors: + +Sensors +========= + +In this section, we will overview the various sensor APIs provided by Isaac Lab. + +Every sensor in Isaac Lab inherits from the ``SensorBase`` abstract class that provides the core functionality inherent to all sensors, which is to provide access to "measurements" of the scene. These measurements can take many forms such as ray-casting results, camera rendered images, or even simply ground truth data queried directly from the simulation (such as poses). Whatever the data may be, we can think of the sensor as having a buffer that is periodically updated with measurements by querying the scene. This ``update_period`` is defined in "simulated" seconds, meaning that even if the flow of time in the simulation is dilated relative to the real world, the sensor will update at the appropriate rate. The ``SensorBase`` is also designed with vectorizability in mind, holding the buffers for all copies of the sensor across cloned environments. + +Updating the buffers is done by overriding the ``_update_buffers_impl`` abstract method of the ``SensorBase`` class. 
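The update scheduling described next can be pictured with a small standalone sketch (plain Python rather than Isaac Lab code; the step size and period are illustrative):

.. code-block:: python

    sim_dt = 0.005          # simulated seconds per physics step (illustrative)
    update_period = 1 / 60  # sensor refresh period, also in simulated seconds

    elapsed = 0.0
    for step in range(12):
        elapsed += sim_dt
        if elapsed >= update_period:
            print(f"step {step}: buffer flagged for update")
            elapsed = 0.0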
On every time-step of the simulation, ``dt``, all sensors are queried for an update. During this query, the total time since the last update is incremented by ``dt`` for every buffer managed by that particular sensor. If the total time is greater than or equal to the ``update_period`` for a buffer, then that buffer is flagged to be updated on the next query. + +The following pages describe the available sensors in more detail: + +.. toctree:: + :maxdepth: 1 + + camera + contact_sensor + frame_transformer + ray_caster diff --git a/docs/source/overview/sensors/ray_caster.rst b/docs/source/overview/sensors/ray_caster.rst new file mode 100644 index 00000000000..b2846c96025 --- /dev/null +++ b/docs/source/overview/sensors/ray_caster.rst @@ -0,0 +1,108 @@ +.. _overview_sensors_ray_caster: + +Ray Caster +============= + +.. figure:: ../../_static/overview/overview_sensors_rc_patterns.png + :align: center + :figwidth: 100% + :alt: A diagram of the available ray casting patterns + +The Ray Caster sensor (and the ray caster camera) is similar to RTX-based rendering in that both involve casting rays. The difference here is that the rays cast by the Ray Caster sensor return only collision information along the cast, and the direction of each individual ray can be specified. They do not bounce, nor are they affected by things like materials or opacity. For each ray specified by the sensor, a line is traced along the path of the ray and the location of the first collision with the specified mesh is returned. This is the method used by some of our quadruped examples to measure the local height field. + +To keep the sensor performant when there are many cloned environments, the line tracing is done directly in `Warp `_. This is the reason why specific meshes need to be identified to cast against: that mesh data is loaded onto the device by Warp when the sensor is initialized. As a consequence, the current iteration of this sensor only works for strictly static meshes (meshes that *are not changed from the defaults specified in their USD file*). This constraint will be removed in future releases. + +Using a ray caster sensor requires a **pattern** and a parent xform to attach to. The pattern defines how the rays are cast, while the prim properties define the orientation and position of the sensor (additional offsets can be specified for more exact placement). Isaac Lab supports a number of ray casting pattern configurations, including generic LIDAR and grid patterns. + +.. 
code-block:: python + + @configclass + class RaycasterSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane with texture for interesting casting results + ground = AssetBaseCfg( + prim_path="/World/Ground", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd", + scale = (1,1,1), + ) + ) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + ray_caster = RayCasterCfg( + prim_path="{ENV_REGEX_NS}/Robot/base/lidar_cage", + update_period = 1/60, + offset=RayCasterCfg.OffsetCfg(pos=(0,0,0.5)), + mesh_prim_paths=["/World/Ground"], + pattern_cfg=patterns.LidarPatternCfg( + channels = 100, + vertical_fov_range=[-90, 90], + horizontal_fov_range = [-90,90], + horizontal_res=1.0), + debug_vis=True, + ) + + +Notice that the units on the pattern config is in degrees! Also, we enable visualization here to explicitly show the pattern in the rendering, but this is not required and should be disabled for performance tuning. + +.. figure:: ../../_static/overview/overview_sensors_rc_visualizer.png + :align: center + :figwidth: 100% + :alt: Lidar Pattern visualized + +Querying the sensor for data can be done at simulation run time like any other sensor. + +.. code-block:: python + + def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + . + . + . + # Simulate physics + while simulation_app.is_running(): + . + . + . + # print information from the sensors + print("-------------------------------") + print(scene["ray_caster"]) + print("Ray cast hit results: ", scene["ray_caster"].data.ray_hits_w) + + +.. code-block:: bash + + ------------------------------- + Ray-caster @ '/World/envs/env_.*/Robot/base/lidar_cage': + view type : + update period (s) : 0.016666666666666666 + number of meshes : 1 + number of sensors : 1 + number of rays/sensor: 18000 + total number of rays : 18000 + Ray cast hit results: tensor([[[-0.3698, 0.0357, 0.0000], + [-0.3698, 0.0357, 0.0000], + [-0.3698, 0.0357, 0.0000], + ..., + [ inf, inf, inf], + [ inf, inf, inf], + [ inf, inf, inf]]], device='cuda:0') + ------------------------------- + +Here we can see the data returned by the sensor itself. Notice first that there are 3 closed brackets at the beginning and the end: this is because the data returned is batched by the number of sensors. The ray cast pattern itself has also been flattened, and so the dimensions of the array are ``[N, B, 3]`` where ``N`` is the number of sensors, ``B`` is the number of cast rays in the pattern, and 3 is the dimension of the casting space. Finally, notice that the first several values in this casting pattern are the same: this is because the lidar pattern is spherical and we have specified our FOV to be hemispherical, which includes the poles. In this configuration, the "flattening pattern" becomes apparent: the first 180 entries will be the same because it's the bottom pole of this hemisphere, and there will be 180 of them because our horizontal FOV is 180 degrees with a resolution of 1 degree. + +You can use this script to experiment with pattern configurations and build an intuition about how the data is stored by altering the ``triggered`` variable on line 99. + +.. dropdown:: Code for raycaster_sensor.py + :icon: code + + .. 
literalinclude:: ../../../../source/standalone/demos/sensors/raycaster_sensor.py + :language: python + :linenos: diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/assets/rigid_object/rigid_object.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/assets/rigid_object/rigid_object.py index e44ee7eddfa..2aa0724df9d 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/assets/rigid_object/rigid_object.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/assets/rigid_object/rigid_object.py @@ -257,6 +257,9 @@ def set_external_force_and_torque( self._external_torque_b[env_ids, body_ids] = torques else: self.has_external_wrench = False + # reset external wrench + self._external_force_b[env_ids] = 0.0 + self._external_torque_b[env_ids] = 0.0 """ Internal helper. diff --git a/source/standalone/demos/sensors/contact_sensor.py b/source/standalone/demos/sensors/contact_sensor.py new file mode 100644 index 00000000000..35de7fe6679 --- /dev/null +++ b/source/standalone/demos/sensors/contact_sensor.py @@ -0,0 +1,176 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from omni.isaac.lab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.") +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.assets import AssetBaseCfg, RigidObjectCfg +from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.lab.sensors import ContactSensorCfg +from omni.isaac.lab.utils import configclass + +## +# Pre-defined configs +## +from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip + + +@configclass +class ContactSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Rigid Object + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.5, 0.5, 0.1), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.5, 0.5, 0.05)), + ) + + contact_forces_LF = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"], + ) + + contact_forces_RF = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"], + ) + + 
contact_forces_H = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*H_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + ) + + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # Simulate physics + while simulation_app.is_running(): + + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = scene["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + scene["robot"].write_root_state_to_sim(root_state) + # set joint positions with some noise + joint_pos, joint_vel = ( + scene["robot"].data.default_joint_pos.clone(), + scene["robot"].data.default_joint_vel.clone(), + ) + joint_pos += torch.rand_like(joint_pos) * 0.1 + scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply default actions to the robot + # -- generate actions/commands + targets = scene["robot"].data.default_joint_pos + # -- apply action to the robot + scene["robot"].set_joint_position_target(targets) + # -- write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + scene.update(sim_dt) + + # print information from the sensors + print("-------------------------------") + print(scene["contact_forces_LF"]) + print("Received force matrix of: ", scene["contact_forces_LF"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_LF"].data.net_forces_w) + print("-------------------------------") + print(scene["contact_forces_RF"]) + print("Received force matrix of: ", scene["contact_forces_RF"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_RF"].data.net_forces_w) + print("-------------------------------") + print(scene["contact_forces_H"]) + print("Received force matrix of: ", scene["contact_forces_H"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_H"].data.net_forces_w) + + +def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) + # design scene + scene_cfg = ContactSensorSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/source/standalone/demos/sensors/frame_transformer_sensor.py b/source/standalone/demos/sensors/frame_transformer_sensor.py new file mode 100644 index 00000000000..e66e59f5007 --- /dev/null +++ b/source/standalone/demos/sensors/frame_transformer_sensor.py @@ -0,0 +1,170 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import argparse + +from omni.isaac.lab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.") +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.assets import AssetBaseCfg, RigidObjectCfg +from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.lab.sensors import FrameTransformerCfg +from omni.isaac.lab.utils import configclass + +## +# Pre-defined configs +## +from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip + + +@configclass +class FrameTransformerSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Rigid Object + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(1, 1, 1), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(5, 0, 0.5)), + ) + + specific_transforms = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT"), + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT"), + ], + debug_vis=True, + ) + + cube_transform = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Cube")], + debug_vis=False, + ) + + robot_transforms = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*")], + debug_vis=False, + ) + + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # Simulate physics + while simulation_app.is_running(): + + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = scene["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + scene["robot"].write_root_state_to_sim(root_state) + # set joint positions with some noise + joint_pos, joint_vel = ( + scene["robot"].data.default_joint_pos.clone(), + scene["robot"].data.default_joint_vel.clone(), + ) + joint_pos += 
torch.rand_like(joint_pos) * 0.1 + scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply default actions to the robot + # -- generate actions/commands + targets = scene["robot"].data.default_joint_pos + # -- apply action to the robot + scene["robot"].set_joint_position_target(targets) + # -- write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + scene.update(sim_dt) + + # print information from the sensors + print("-------------------------------") + print(scene["specific_transforms"]) + print("relative transforms:", scene["specific_transforms"].data.target_pos_source) + print("relative orientations:", scene["specific_transforms"].data.target_quat_source) + print("-------------------------------") + print(scene["cube_transform"]) + print("relative transform:", scene["cube_transform"].data.target_pos_source) + print("-------------------------------") + print(scene["robot_transforms"]) + print("relative transforms:", scene["robot_transforms"].data.target_pos_source) + + +def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) + # design scene + scene_cfg = FrameTransformerSensorSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/source/standalone/demos/sensors/raycaster_sensor.py b/source/standalone/demos/sensors/raycaster_sensor.py new file mode 100644 index 00000000000..db7d74d3de7 --- /dev/null +++ b/source/standalone/demos/sensors/raycaster_sensor.py @@ -0,0 +1,160 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import argparse +import numpy as np + +from omni.isaac.lab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.") +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import omni.isaac.lab.sim as sim_utils +from omni.isaac.lab.assets import AssetBaseCfg +from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg +from omni.isaac.lab.sensors.ray_caster import RayCasterCfg, patterns +from omni.isaac.lab.utils import configclass +from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR + +## +# Pre-defined configs +## +from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip + + +@configclass +class RaycasterSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg( + prim_path="/World/Ground", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd", + scale=(1, 1, 1), + ), + ) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + ray_caster = RayCasterCfg( + prim_path="{ENV_REGEX_NS}/Robot/base/lidar_cage", + update_period=1 / 60, + offset=RayCasterCfg.OffsetCfg(pos=(0, 0, 0.5)), + mesh_prim_paths=["/World/Ground"], + pattern_cfg=patterns.LidarPatternCfg( + channels=100, vertical_fov_range=[-90, 90], horizontal_fov_range=[-90, 90], horizontal_res=1.0 + ), + debug_vis=not args_cli.headless, + ) + + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + triggered = True + countdown = 42 + + # Simulate physics + while simulation_app.is_running(): + + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = scene["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + scene["robot"].write_root_state_to_sim(root_state) + # set joint positions with some noise + joint_pos, joint_vel = ( + scene["robot"].data.default_joint_pos.clone(), + scene["robot"].data.default_joint_vel.clone(), + ) + joint_pos += torch.rand_like(joint_pos) * 0.1 + scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply default actions to the robot + # -- generate actions/commands + targets = scene["robot"].data.default_joint_pos + # -- apply action to the robot + scene["robot"].set_joint_position_target(targets) + # -- write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + scene.update(sim_dt) + + # print information from the sensors + 
print("-------------------------------") + print(scene["ray_caster"]) + print("Ray cast hit results: ", scene["ray_caster"].data.ray_hits_w) + + if not triggered: + if countdown > 0: + countdown -= 1 + continue + data = scene["ray_caster"].data.ray_hits_w.cpu().numpy() + np.save("cast_data.npy", data) + triggered = True + else: + continue + + +def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) + # design scene + scene_cfg = RaycasterSensorSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close()