diff --git a/docs/source/_static/overview/overview_sensors_contact_diagram.png b/docs/source/_static/overview/overview_sensors_contact_diagram.png
new file mode 100644
index 00000000000..91b05c51fa8
Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_contact_diagram.png differ
diff --git a/docs/source/_static/overview/overview_sensors_frame_transformer.png b/docs/source/_static/overview/overview_sensors_frame_transformer.png
new file mode 100644
index 00000000000..c16944060e8
Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_frame_transformer.png differ
diff --git a/docs/source/_static/overview/overview_sensors_ft_visualizer.png b/docs/source/_static/overview/overview_sensors_ft_visualizer.png
new file mode 100644
index 00000000000..c16ea4aa052
Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_ft_visualizer.png differ
diff --git a/docs/source/_static/overview/overview_sensors_rc_patterns.png b/docs/source/_static/overview/overview_sensors_rc_patterns.png
new file mode 100644
index 00000000000..fc3c001b148
Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_rc_patterns.png differ
diff --git a/docs/source/_static/overview/overview_sensors_rc_visualizer.png b/docs/source/_static/overview/overview_sensors_rc_visualizer.png
new file mode 100644
index 00000000000..ac4a06595eb
Binary files /dev/null and b/docs/source/_static/overview/overview_sensors_rc_visualizer.png differ
diff --git a/docs/source/overview/sensors/camera.rst b/docs/source/overview/sensors/camera.rst
index 5e30c1a2084..d0408ab9ed3 100644
--- a/docs/source/overview/sensors/camera.rst
+++ b/docs/source/overview/sensors/camera.rst
@@ -154,4 +154,3 @@ Instance Segmentation
 - If ``colorize_instance_segmentation=True`` in the camera config, a 4-channel RGBA image will be returned with dimension (B, H, W, 4) and type ``torch.uint8``. The info ``idToLabels`` dictionary will be the mapping from color to USD prim path of that semantic entity. The info ``idToSemantics`` dictionary will be the mapping from color to semantic labels of that semantic entity.
 - If ``colorize_instance_segmentation=False``, a buffer of dimension (B, H, W, 1) of type ``torch.int32`` will be returned, containing the instance ID of each pixel. The info ``idToLabels`` dictionary will be the mapping from instance ID to USD prim path of that semantic entity. The info ``idToSemantics`` dictionary will be the mapping from instance ID to semantic labels of that semantic entity.
-
diff --git a/docs/source/overview/sensors/contact_sensor.rst b/docs/source/overview/sensors/contact_sensor.rst
index 91cef3e79bd..cce3c5c2143 100644
--- a/docs/source/overview/sensors/contact_sensor.rst
+++ b/docs/source/overview/sensors/contact_sensor.rst
@@ -1,6 +1,317 @@
+.. _overview_sensors_contact:
+
 Contact Sensor
 ================
-The contact sensor is designed to return the net normal force acting on a given ridged body. The sensor is written to behave as a physical object, and so the "scope" of the contact sensor is limited to the body that defines it. A multi-leged robot that needs contact information for its feet would require one sensor per foot to be defined in the environment.
+.. figure:: ../../_static/overview/overview_sensors_contact_diagram.png
+    :align: center
+    :figwidth: 100%
+    :alt: A contact sensor with filtering
+
+The contact sensor is designed to return the net contact force acting on a given rigid body.
The sensor is written to behave as a physical object, and so the "scope" of the contact sensor is limited to the body (or bodies) defining it. There are multiple ways to define this scope, depending on your need to filter the forces coming from the contact.
+
+By default, the reported force is the total contact force, but your application may only care about contact forces due to specific objects. Retrieving contact forces from specific objects requires filtering, and this can only be done in a "many-to-one" way. A multi-legged robot that needs filterable contact information for its feet would require one sensor per foot to be defined in the environment, but a robotic hand with contact sensors on the tips of each finger can be defined with a single sensor.
+
+Consider a simple environment with an Anymal quadruped and a block.
+
+.. code-block:: python
+
+    @configclass
+    class ContactSensorSceneCfg(InteractiveSceneCfg):
+        """Design the scene with sensors on the robot."""
+
+        # ground plane
+        ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
+
+        # lights
+        dome_light = AssetBaseCfg(
+            prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
+        )
+
+        # robot
+        robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
+
+        # Rigid Object
+        cube = RigidObjectCfg(
+            prim_path="{ENV_REGEX_NS}/Cube",
+            spawn=sim_utils.CuboidCfg(
+                size=(0.5,0.5,0.1),
+                rigid_props=sim_utils.RigidBodyPropertiesCfg(),
+                mass_props=sim_utils.MassPropertiesCfg(mass=100.0),
+                collision_props=sim_utils.CollisionPropertiesCfg(),
+                physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0),
+                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
+            ),
+            init_state=RigidObjectCfg.InitialStateCfg(pos=(0.5, 0.5, 0.05)),
+        )
+
+        contact_forces_LF = ContactSensorCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT",
+            update_period=0.0,
+            history_length=6,
+            debug_vis=True,
+            filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"],
+        )
+
+        contact_forces_RF = ContactSensorCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT",
+            update_period=0.0,
+            history_length=6,
+            debug_vis=True,
+            filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"],
+        )
+
+        contact_forces_H = ContactSensorCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/.*H_FOOT",
+            update_period=0.0,
+            history_length=6,
+            debug_vis=True,
+        )
+
+We define the sensors on the feet of the robot in two different ways. The front feet are independent sensors (one sensor body per foot) and the "Cube" is placed under the left foot. The hind feet are defined as a single sensor with multiple bodies.
+
+We can then run the scene and print the data from the sensors.
+
+.. code-block:: python
+
+    def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
+        .
+        .
+        .
+        # Simulate physics
+        while simulation_app.is_running():
+            .
+            .
+            .
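+            # Illustration only: one way to consume the contact buffers is to threshold
+            # the net force magnitude to get a per-foot contact flag. The 1.0 N threshold
+            # below is an arbitrary value chosen purely for this sketch.
+            lf_forces = scene["contact_forces_LF"].data.net_forces_w      # shape: (num_envs, 1, 3)
+            lf_in_contact = torch.linalg.norm(lf_forces, dim=-1) > 1.0    # shape: (num_envs, 1)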
+
+            # print information from the sensors
+            print("-------------------------------")
+            print(scene["contact_forces_LF"])
+            print("Received force matrix of: ", scene["contact_forces_LF"].data.force_matrix_w)
+            print("Received contact force of: ", scene["contact_forces_LF"].data.net_forces_w)
+            print("-------------------------------")
+            print(scene["contact_forces_RF"])
+            print("Received force matrix of: ", scene["contact_forces_RF"].data.force_matrix_w)
+            print("Received contact force of: ", scene["contact_forces_RF"].data.net_forces_w)
+            print("-------------------------------")
+            print(scene["contact_forces_H"])
+            print("Received force matrix of: ", scene["contact_forces_H"].data.force_matrix_w)
+            print("Received contact force of: ", scene["contact_forces_H"].data.net_forces_w)
+
+Here, we print both the net contact force and the filtered force matrix for each contact sensor defined in the scene. The front left and front right feet report the following:
+
+.. code-block:: bash
+
+    -------------------------------
+    Contact sensor @ '/World/envs/env_.*/Robot/LF_FOOT':
+            view type         :
+            update period (s) : 0.0
+            number of bodies  : 1
+            body names        : ['LF_FOOT']
+
+    Received force matrix of:  tensor([[[[-1.3923e-05,  1.5727e-04,  1.1032e+02]]]], device='cuda:0')
+    Received contact force of:  tensor([[[-1.3923e-05,  1.5727e-04,  1.1032e+02]]], device='cuda:0')
+    -------------------------------
+    Contact sensor @ '/World/envs/env_.*/Robot/RF_FOOT':
+            view type         :
+            update period (s) : 0.0
+            number of bodies  : 1
+            body names        : ['RF_FOOT']
+
+    Received force matrix of:  tensor([[[[0., 0., 0.]]]], device='cuda:0')
+    Received contact force of:  tensor([[[1.3529e-05, 0.0000e+00, 1.0069e+02]]], device='cuda:0')
+
+Notice that even with filtering, both sensors report the net contact force acting on the foot. However, only the left foot has a non-zero "force matrix", because the right foot isn't standing on the filtered body, ``/World/envs/env_.*/Cube``. Now, check out the data coming from the hind feet!
+
+.. code-block:: bash
+
+    -------------------------------
+    Contact sensor @ '/World/envs/env_.*/Robot/.*H_FOOT':
+            view type         :
+            update period (s) : 0.0
+            number of bodies  : 2
+            body names        : ['LH_FOOT', 'RH_FOOT']
+
+    Received force matrix of:  None
+    Received contact force of:  tensor([[[9.7227e-06, 0.0000e+00, 7.2364e+01],
+             [2.4322e-05, 0.0000e+00, 1.8102e+02]]], device='cuda:0')
+
+In this case, the contact sensor has two bodies: the left and right hind feet. When the force matrix is queried, the result is ``None`` because this is a many-body sensor, and presently Isaac Lab only supports "many-to-one" contact force filtering. Unlike the single-body contact sensor, the reported force tensor has multiple entries, with each "row" corresponding to the contact force on a single body of the sensor (matching the ordering at construction).
+
+The complete script used for this example follows.
+
+.. code-block:: python
+
+    # Copyright (c) 2022-2024, The Isaac Lab Project Developers.
+    # All rights reserved.
+ # + # SPDX-License-Identifier: BSD-3-Clause + + """Launch Isaac Sim Simulator first.""" + + import argparse + + from omni.isaac.lab.app import AppLauncher + + # add argparse arguments + parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.") + parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") + # append AppLauncher cli args + AppLauncher.add_app_launcher_args(parser) + # parse the arguments + args_cli = parser.parse_args() + + # launch omniverse app + app_launcher = AppLauncher(args_cli) + simulation_app = app_launcher.app + + """Rest everything follows.""" + + import torch + + import omni.isaac.lab.sim as sim_utils + from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg, RigidObject + from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg + from omni.isaac.lab.sensors import ContactSensorCfg + from omni.isaac.lab.utils import configclass + + ## + # Pre-defined configs + ## + from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip + + + @configclass + class ContactSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Rigid Object + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.5,0.5,0.1), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.5, 0.5, 0.05)), + ) + + contact_forces_LF = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"], + ) + + contact_forces_RF = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + filter_prim_paths_expr=["{ENV_REGEX_NS}/Cube"], + ) + + contact_forces_H = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*H_FOOT", + update_period=0.0, + history_length=6, + debug_vis=True, + ) + + + def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # Simulate physics + while simulation_app.is_running(): + + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = scene["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + scene["robot"].write_root_state_to_sim(root_state) + # set joint positions with some noise + joint_pos, joint_vel = ( + scene["robot"].data.default_joint_pos.clone(), + scene["robot"].data.default_joint_vel.clone(), + ) + joint_pos += torch.rand_like(joint_pos) * 0.1 + 
scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply default actions to the robot + # -- generate actions/commands + targets = scene["robot"].data.default_joint_pos + # -- apply action to the robot + scene["robot"].set_joint_position_target(targets) + # -- write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + scene.update(sim_dt) + + # print information from the sensors + print("-------------------------------") + print(scene["contact_forces_LF"]) + print("Received force matrix of: ", scene["contact_forces_LF"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_LF"].data.net_forces_w) + print("-------------------------------") + print(scene["contact_forces_RF"]) + print("Received force matrix of: ", scene["contact_forces_RF"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_RF"].data.net_forces_w) + print("-------------------------------") + print(scene["contact_forces_H"]) + print("Received force matrix of: ", scene["contact_forces_H"].data.force_matrix_w) + print("Received contact force of: ", scene["contact_forces_H"].data.net_forces_w) + + + def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) + # design scene + scene_cfg = ContactSensorSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + -To define a contact sensor, you must specify both the rigid body of the sensor, but the bodies with wich it may collide. These data are used to index the parts of the GPU simulation state relavent to retrieving contact information, and allows for Isaac Lab \ No newline at end of file + if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/docs/source/overview/sensors/frame_transformer.rst b/docs/source/overview/sensors/frame_transformer.rst index 1593a5d21d9..d26722554ee 100644 --- a/docs/source/overview/sensors/frame_transformer.rst +++ b/docs/source/overview/sensors/frame_transformer.rst @@ -1,2 +1,325 @@ +.. _overview_sensors_frame_transformer: + Frame Transformer -==================== \ No newline at end of file +==================== + +.. figure:: ../../_static/overview/overview_sensors_frame_transformer.png + :align: center + :figwidth: 100% + :alt: A diagram outlining the basic geometry of frame transformations + +.. + Do YOU want to know where things are relative to other things at a glance? Then the frame transformer is the sensor for you!* + +One of the most common operations that needs to be performed within a physics simulation is the frame transformation: rewriting a vector or quaternion in the basis of an arbitrary euclidean coordinate system. There are many ways to accomplish this within Isaac and USD, but these methods can be cumbersome to implement within Isaac Lab's GPU based simulation and cloned environments. 
To mitigate this problem, we have designed the Frame Transformer Sensor, which tracks and calculates the relative frame transformations for rigid bodies of interest in the scene.
+
+The sensor is minimally defined by a source frame and a list of target frames. These definitions take the form of a prim path (for the source) and a list of regex-capable prim paths for the rigid bodies to be tracked (for the targets).
+
+.. code-block:: python
+
+    @configclass
+    class FrameTransformerSensorSceneCfg(InteractiveSceneCfg):
+        """Design the scene with sensors on the robot."""
+
+        # ground plane
+        ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg())
+
+        # lights
+        dome_light = AssetBaseCfg(
+            prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
+        )
+
+        # robot
+        robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
+
+        # Rigid Object
+        cube = RigidObjectCfg(
+            prim_path="{ENV_REGEX_NS}/Cube",
+            spawn=sim_utils.CuboidCfg(
+                size=(1,1,1),
+                rigid_props=sim_utils.RigidBodyPropertiesCfg(),
+                mass_props=sim_utils.MassPropertiesCfg(mass=100.0),
+                collision_props=sim_utils.CollisionPropertiesCfg(),
+                physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0),
+                visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2),
+            ),
+            init_state=RigidObjectCfg.InitialStateCfg(pos=(5, 0, 0.5)),
+        )
+
+        specific_transforms = FrameTransformerCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/base",
+            target_frames=[
+                FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT"),
+                FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT"),
+            ],
+            debug_vis=True,
+        )
+
+        cube_transform = FrameTransformerCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/base",
+            target_frames=[
+                FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Cube")
+            ],
+            debug_vis=False,
+        )
+
+        robot_transforms = FrameTransformerCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/base",
+            target_frames=[
+                FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*")
+            ],
+            debug_vis=False,
+        )
+
+We can now run the scene and query the sensor for data.
+
+.. code-block:: python
+
+    def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
+        .
+        .
+        .
+        # Simulate physics
+        while simulation_app.is_running():
+            .
+            .
+            .
+
+            # print information from the sensors
+            print("-------------------------------")
+            print(scene["specific_transforms"])
+            print("relative transforms:", scene["specific_transforms"].data.target_pos_source)
+            print("relative orientations:", scene["specific_transforms"].data.target_quat_source)
+            print("-------------------------------")
+            print(scene["cube_transform"])
+            print("relative transform:", scene["cube_transform"].data.target_pos_source)
+            print("-------------------------------")
+            print(scene["robot_transforms"])
+            print("relative transforms:", scene["robot_transforms"].data.target_pos_source)
+
+Let's take a look at the results of tracking specific objects. First, we can examine the data for the feet frames tracked by ``specific_transforms``:
+
+.. 
code-block:: bash + + ------------------------------- + FrameTransformer @ '/World/envs/env_.*/Robot/base': + tracked body frames: ['base', 'LF_FOOT', 'RF_FOOT'] + number of envs: 1 + source body frame: base + target frames (count: ['LF_FOOT', 'RF_FOOT']): 2 + + relative transforms: tensor([[[ 0.4658, 0.3085, -0.4840], + [ 0.4487, -0.2959, -0.4828]]], device='cuda:0') + relative orientations: tensor([[[ 0.9623, 0.0072, -0.2717, -0.0020], + [ 0.9639, 0.0052, -0.2663, -0.0014]]], device='cuda:0') + +.. figure:: ../../_static/overview/overview_sensors_ft_visualizer.png + :align: center + :figwidth: 100% + :alt: The frame transformer visualizer + +By activating the visualizer, we can see that the frames of the feet are rotated "upward" slightly. We can also see the explicit relative positions and rotations by querying the sensor for data, which returns these values as a list with the same order as the tracked frames. This becomes even more apparent if we examine the transforms specified by regex. + +.. code-block:: bash + + ------------------------------- + FrameTransformer @ '/World/envs/env_.*/Robot/base': + tracked body frames: ['base', 'LF_FOOT', 'LF_HIP', 'LF_SHANK', 'LF_THIGH', 'LH_FOOT', 'LH_HIP', 'LH_SHANK', 'LH_THIGH', 'RF_FOOT', 'RF_HIP', 'RF_SHANK', 'RF_THIGH', 'RH_FOOT', 'RH_HIP', 'RH_SHANK', 'RH_THIGH', 'base'] + number of envs: 1 + source body frame: base + target frames (count: ['LF_FOOT', 'LF_HIP', 'LF_SHANK', 'LF_THIGH', 'LH_FOOT', 'LH_HIP', 'LH_SHANK', 'LH_THIGH', 'RF_FOOT', 'RF_HIP', 'RF_SHANK', 'RF_THIGH', 'RH_FOOT', 'RH_HIP', 'RH_SHANK', 'RH_THIGH', 'base']): 17 + + relative transforms: tensor([[[ 4.6581e-01, 3.0846e-01, -4.8398e-01], + [ 2.9990e-01, 1.0400e-01, -1.7062e-09], + [ 2.1409e-01, 2.9177e-01, -2.4214e-01], + [ 3.5980e-01, 1.8780e-01, 1.2608e-03], + [-4.8813e-01, 3.0973e-01, -4.5927e-01], + [-2.9990e-01, 1.0400e-01, 2.7044e-09], + [-2.1495e-01, 2.9264e-01, -2.4198e-01], + [-3.5980e-01, 1.8780e-01, 1.5582e-03], + [ 4.4871e-01, -2.9593e-01, -4.8277e-01], + [ 2.9990e-01, -1.0400e-01, -2.7057e-09], + [ 1.9971e-01, -2.8554e-01, -2.3778e-01], + [ 3.5980e-01, -1.8781e-01, -9.1049e-04], + [-5.0090e-01, -2.9095e-01, -4.5746e-01], + [-2.9990e-01, -1.0400e-01, 6.3592e-09], + [-2.1860e-01, -2.8251e-01, -2.5163e-01], + [-3.5980e-01, -1.8779e-01, -1.8792e-03], + [ 0.0000e+00, 0.0000e+00, 0.0000e+00]]], device='cuda:0') + +Here, the sensor is tracking all rigid body children of ``Robot/base``, but this expression is **inclusive**, meaning that the source body itself is also a target. This can be seen both by examining the source and target list, where ``base`` appears twice, and also in the returned data, where the sensor returns the relative transform to itself, (0, 0, 0). + +.. code-block:: python + + # Copyright (c) 2022-2024, The Isaac Lab Project Developers. + # All rights reserved. 
+ # + # SPDX-License-Identifier: BSD-3-Clause + + import argparse + + from omni.isaac.lab.app import AppLauncher + + # add argparse arguments + parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.") + parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") + # append AppLauncher cli args + AppLauncher.add_app_launcher_args(parser) + # parse the arguments + args_cli = parser.parse_args() + + # launch omniverse app + app_launcher = AppLauncher(args_cli) + simulation_app = app_launcher.app + + """Rest everything follows.""" + + import torch + + import omni.isaac.lab.sim as sim_utils + from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg, RigidObject + from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg + from omni.isaac.lab.sensors import FrameTransformer, FrameTransformerCfg, OffsetCfg + from omni.isaac.lab.utils import configclass + + ## + # Pre-defined configs + ## + from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip + + + @configclass + class FrameTransformerSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Rigid Object + cube = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(1,1,1), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=100.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + physics_material=sim_utils.RigidBodyMaterialCfg(static_friction=1.0), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(5, 0, 0.5)), + ) + + specific_transforms = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/LF_FOOT"), + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/RF_FOOT"), + ], + debug_vis=True, + ) + + cube_transform = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Cube") + ], + debug_vis=False, + ) + + robot_transforms = FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/.*") + ], + debug_vis=False, + ) + + def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # Simulate physics + while simulation_app.is_running(): + + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = scene["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + scene["robot"].write_root_state_to_sim(root_state) + # set joint positions with some noise + joint_pos, joint_vel = ( + 
scene["robot"].data.default_joint_pos.clone(), + scene["robot"].data.default_joint_vel.clone(), + ) + joint_pos += torch.rand_like(joint_pos) * 0.1 + scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply default actions to the robot + # -- generate actions/commands + targets = scene["robot"].data.default_joint_pos + # -- apply action to the robot + scene["robot"].set_joint_position_target(targets) + # -- write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + scene.update(sim_dt) + + # print information from the sensors + print("-------------------------------") + print(scene["specific_transforms"]) + print("relative transforms:", scene["specific_transforms"].data.target_pos_source) + print("relative orientations:", scene["specific_transforms"].data.target_quat_source) + print("-------------------------------") + print(scene["cube_transform"]) + print("relative transform:", scene["cube_transform"].data.target_pos_source) + print("-------------------------------") + print(scene["robot_transforms"]) + print("relative transforms:", scene["robot_transforms"].data.target_pos_source) + def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) + # design scene + scene_cfg = FrameTransformerSensorSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + + if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/docs/source/overview/sensors/index.rst b/docs/source/overview/sensors/index.rst index f822087dc17..99efe86fd80 100644 --- a/docs/source/overview/sensors/index.rst +++ b/docs/source/overview/sensors/index.rst @@ -3,11 +3,11 @@ Sensors ========= -In this section, we will overview the various sensor APIs provided by Isaac Lab. +In this section, we will overview the various sensor APIs provided by Isaac Lab. Every sensor in Isaac Lab inherits from the ``SensorBase`` abstract class that provides the core functionality inherent to all sensors, which is to provide access to "measurements" of the scene. These measurements can take many forms such as ray-casting results, camera rendered images, or even simply ground truth data queried directly from the simulation (such as poses). Whatever the data may be, we can think of the sensor as having a buffer that is periodically updated with measurements by querying the scene. This ``update_period`` is defined in "simulated" seconds, meaning that even if the flow of time in the simulation is dilated relative to the real world, the sensor will update at the appropriate rate. For example, a sensor with an update period of 1.0 second will update its data 10 times a second in a simulation running at a rate of 10 seconds / second. The ``SensorBase`` is also designed with vectorizability in mind, holding the buffers for all copies of the sensor across cloned environments. -Updating the buffers is done by overriding the ``_update_buffers_impl`` abstract method of the ``SensorBase`` class. 
On every time-step of the simulation, `dt`, all sensors are queried for an update. During this query, the total time since the last update is incremented by `dt` for every buffer managed by that particular sensor. If the total time is greater than or equal to the ``update_period`` for a buffer, then that buffer is flagged to be updated on the next query.
+Updating the buffers is done by overriding the ``_update_buffers_impl`` abstract method of the ``SensorBase`` class. On every time-step of the simulation, ``dt``, all sensors are queried for an update. During this query, the total time since the last update is incremented by ``dt`` for every buffer managed by that particular sensor. If the total time is greater than or equal to the ``update_period`` for a buffer, then that buffer is flagged to be updated on the next query.
 
 .. toctree::
    :maxdepth: 1
diff --git a/docs/source/overview/sensors/ray_caster.rst b/docs/source/overview/sensors/ray_caster.rst
index 5ebc1a89501..fbf93ec727a 100644
--- a/docs/source/overview/sensors/ray_caster.rst
+++ b/docs/source/overview/sensors/ray_caster.rst
@@ -1,4 +1,268 @@
+.. _overview_sensors_ray_caster:
+
 Ray Caster
 =============
-The Ray Caster sensor (and the ray caster camera) are similar to RTX based rendering in that they both involve casting rays. The difference here is that the rays cast by the Ray Caster sensor return strictly collision information along the cast, and the direction of each individual ray can be specified. They do not bounce, nor are they affected by things like materials or opacity. For each ray specified by the sensor, a line is traced along the path of the ray and the location of first collision with the specified mesh is returned. This is the method used by some of our quadruped examples to measure the local height field
\ No newline at end of file
+.. figure:: ../../_static/overview/overview_sensors_rc_patterns.png
+    :align: center
+    :figwidth: 100%
+    :alt: A diagram outlining the basic geometry of ray cast patterns
+
+The Ray Caster sensor (and the ray caster camera) are similar to RTX-based rendering in that they both involve casting rays. The difference here is that the rays cast by the Ray Caster sensor return strictly collision information along the cast, and the direction of each individual ray can be specified. They do not bounce, nor are they affected by things like materials or opacity. For each ray specified by the sensor, a line is traced along the path of the ray and the location of first collision with the specified mesh is returned. This is the method used by some of our quadruped examples to measure the local height field.
+
+To keep the sensor performant when there are many cloned environments, the line tracing is done directly in `Warp `_. This is the reason why specific meshes need to be identified to cast against: that mesh data is loaded onto the device by Warp when the sensor is initialized. As a consequence, the current iteration of this sensor only works for literally static meshes (meshes that *are not changed from the defaults specified in their USD file*). This constraint will be removed in future releases.
+
+Using a ray caster sensor requires a **pattern** and a parent xform to attach to. The pattern defines how the rays are cast, while the prim properties define the orientation and position of the sensor (additional offsets can be specified for more exact placement). Isaac Lab supports a number of ray casting pattern configurations, including a generic LIDAR and grid pattern.
+
+.. code-block:: python
+
+    @configclass
+    class RaycasterSensorSceneCfg(InteractiveSceneCfg):
+        """Design the scene with sensors on the robot."""
+
+        # ground plane with texture for interesting casting results
+        ground = AssetBaseCfg(
+            prim_path="/World/Ground",
+            spawn=sim_utils.UsdFileCfg(
+                usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd",
+                scale = (1,1,1),
+            )
+        )
+
+        # lights
+        dome_light = AssetBaseCfg(
+            prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
+        )
+
+        # robot
+        robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
+
+        ray_caster = RayCasterCfg(
+            prim_path="{ENV_REGEX_NS}/Robot/base/lidar_cage",
+            update_period = 1/60,
+            offset=RayCasterCfg.OffsetCfg(pos=(0,0,0.5)),
+            mesh_prim_paths=["/World/Ground"],
+            pattern_cfg=patterns.LidarPatternCfg(
+                channels = 100,
+                vertical_fov_range=[-90, 90],
+                horizontal_fov_range = [-90,90],
+                horizontal_res=1.0),
+            debug_vis=True,
+        )
+
+Notice that the units on the pattern config are in degrees! Also, we enable visualization here to explicitly show the pattern in the rendering, but this is not required and should be disabled for performance tuning.
+
+.. figure:: ../../_static/overview/overview_sensors_rc_visualizer.png
+    :align: center
+    :figwidth: 100%
+    :alt: Lidar Pattern visualized
+
+Querying the sensor for data can be done at simulation run time like any other sensor.
+
+.. code-block:: python
+
+    def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
+        .
+        .
+        .
+        # Simulate physics
+        while simulation_app.is_running():
+            .
+            .
+            .
+            # print information from the sensors
+            print("-------------------------------")
+            print(scene["ray_caster"])
+            print("Ray cast hit results: ", scene["ray_caster"].data.ray_hits_w)
+
+.. code-block:: bash
+
+    -------------------------------
+    Ray-caster @ '/World/envs/env_.*/Robot/base/lidar_cage':
+            view type            :
+            update period (s)    : 0.016666666666666666
+            number of meshes     : 1
+            number of sensors    : 1
+            number of rays/sensor: 18000
+            total number of rays : 18000
+
+    Ray cast hit results:  tensor([[[-0.3698,  0.0357,  0.0000],
+            [-0.3698,  0.0357,  0.0000],
+            [-0.3698,  0.0357,  0.0000],
+            ...,
+            [    inf,     inf,     inf],
+            [    inf,     inf,     inf],
+            [    inf,     inf,     inf]]], device='cuda:0')
+    -------------------------------
+
+Here we can see the data returned by the sensor itself. Notice first that there are three nested brackets at the beginning and the end: this is because the data returned is batched by the number of sensors. The ray cast pattern itself has also been flattened, and so the dimensions of the array are ``[N, B, 3]`` where ``N`` is the number of sensors, ``B`` is the number of cast rays in the pattern, and 3 is the dimension of the casting space. Finally, notice that the first several values in this casting pattern are the same: this is because the lidar pattern is spherical and we have specified our FOV to be hemispherical, which includes the poles. In this configuration, the "flattening pattern" becomes apparent: the first 180 entries will be the same because it's the bottom pole of this hemisphere, and there will be 180 of them because our horizontal FOV is 180 degrees with a resolution of 1 degree.
+
+You can use this script to experiment with pattern configurations and build an intuition about how the data is stored by altering the ``triggered`` variable on line 99. A couple of starting points for such experiments are sketched after the full script below.
+
+.. 
code-block:: python + + # Copyright (c) 2022-2024, The Isaac Lab Project Developers. + # All rights reserved. + # + # SPDX-License-Identifier: BSD-3-Clause + + import argparse + import numpy as np + + from omni.isaac.lab.app import AppLauncher + + # add argparse arguments + parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.") + parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") + # append AppLauncher cli args + AppLauncher.add_app_launcher_args(parser) + # parse the arguments + args_cli = parser.parse_args() + + # launch omniverse app + app_launcher = AppLauncher(args_cli) + simulation_app = app_launcher.app + + """Rest everything follows.""" + + import torch + + import omni.isaac.lab.sim as sim_utils + from omni.isaac.lab.assets import ArticulationCfg, AssetBaseCfg + from omni.isaac.lab.scene import InteractiveScene, InteractiveSceneCfg + from omni.isaac.lab.sensors.ray_caster import RayCaster, RayCasterCfg, patterns + from omni.isaac.lab.utils import configclass + from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR + + ## + # Pre-defined configs + ## + from omni.isaac.lab_assets.anymal import ANYMAL_C_CFG # isort: skip + + + @configclass + class RaycasterSensorSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg( + prim_path="/World/Ground", + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd", + scale = (1,1,1), + ) + ) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + ray_caster = RayCasterCfg( + prim_path="{ENV_REGEX_NS}/Robot/base/lidar_cage", + update_period = 1/60, + offset=RayCasterCfg.OffsetCfg(pos=(0,0,0.5)), + mesh_prim_paths=["/World/Ground"], + pattern_cfg=patterns.LidarPatternCfg( + channels = 100, + vertical_fov_range=[-90, 90], + horizontal_fov_range = [-90,90], + horizontal_res=1.0), + debug_vis=not args_cli.headless, + ) + + def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + triggered = True + countdown = 42 + + # Simulate physics + while simulation_app.is_running(): + + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = scene["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + scene["robot"].write_root_state_to_sim(root_state) + # set joint positions with some noise + joint_pos, joint_vel = ( + scene["robot"].data.default_joint_pos.clone(), + scene["robot"].data.default_joint_vel.clone(), + ) + joint_pos += torch.rand_like(joint_pos) * 0.1 + scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply default actions to the robot + # -- generate actions/commands + targets = scene["robot"].data.default_joint_pos + # -- apply action to the robot + scene["robot"].set_joint_position_target(targets) + # -- write data to sim + scene.write_data_to_sim() + # perform step + 
sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + scene.update(sim_dt) + + # print information from the sensors + print("-------------------------------") + print(scene["ray_caster"]) + print("Ray cast hit results: ", scene["ray_caster"].data.ray_hits_w) + + if not triggered: + if countdown > 0: + countdown -= 1 + continue + else: + data = scene["ray_caster"].data.ray_hits_w.cpu().numpy() + np.save("cast_data.npy", data) + triggered = True + else: + continue + + + + + def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) + # design scene + scene_cfg = RaycasterSensorSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + + if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close()
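+
+To experiment with other patterns, the ``LidarPatternCfg`` above can be swapped out for a different pattern configuration. As a rough sketch (the pattern parameter values here are arbitrary choices for illustration; check ``patterns.GridPatternCfg`` for the exact fields), a downward-facing grid pattern could look like:
+
+.. code-block:: python
+
+    ray_caster = RayCasterCfg(
+        prim_path="{ENV_REGEX_NS}/Robot/base/lidar_cage",
+        update_period=1 / 60,
+        offset=RayCasterCfg.OffsetCfg(pos=(0, 0, 0.5)),
+        mesh_prim_paths=["/World/Ground"],
+        # a 2 m x 2 m grid of rays cast straight down, one ray every 10 cm
+        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(2.0, 2.0)),
+        debug_vis=True,
+    )
+
+Similarly, if you let the saving branch in ``run_simulator`` execute (by starting with ``triggered = False``), the ray hits are written to ``cast_data.npy``. A minimal sketch of inspecting that file offline, assuming it was produced by the script above:
+
+.. code-block:: python
+
+    import numpy as np
+
+    data = np.load("cast_data.npy")           # shape: (num_sensors, num_rays, 3)
+    finite = np.isfinite(data).all(axis=-1)   # rays that actually hit the mesh
+    print("hit fraction:", finite.mean())
+    print("lowest hit height:", data[finite][:, 2].min())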