diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 9899632b89c..2e147c7da7a 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -58,6 +58,7 @@ Guidelines for modifications: * Calvin Yu * Cathy Y. Li * Cheng-Rong Lai +* Chengyi Lux Zhang * Chenyu Yang * Connor Smith * CY (Chien-Ying) Chen @@ -157,6 +158,7 @@ Guidelines for modifications: * Yanzi Zhu * Yijie Guo * Yohan Choi +* Yufeng Chi * Yujian Zhang * Yun Liu * Zehao Wang diff --git a/source/isaaclab/isaaclab/actuators/actuator_pd.py b/source/isaaclab/isaaclab/actuators/actuator_pd.py index 6de373f1bc7..a842af5124b 100644 --- a/source/isaaclab/isaaclab/actuators/actuator_pd.py +++ b/source/isaaclab/isaaclab/actuators/actuator_pd.py @@ -358,9 +358,9 @@ def compute( self, control_action: ArticulationActions, joint_pos: torch.Tensor, joint_vel: torch.Tensor ) -> ArticulationActions: # apply delay based on the delay the model for all the setpoints - control_action.joint_positions = self.positions_delay_buffer.compute(control_action.joint_positions) - control_action.joint_velocities = self.velocities_delay_buffer.compute(control_action.joint_velocities) - control_action.joint_efforts = self.efforts_delay_buffer.compute(control_action.joint_efforts) + control_action.joint_positions[:] = self.positions_delay_buffer.compute(control_action.joint_positions) + control_action.joint_velocities[:] = self.velocities_delay_buffer.compute(control_action.joint_velocities) + control_action.joint_efforts[:] = self.efforts_delay_buffer.compute(control_action.joint_efforts) # compte actuator model return super().compute(control_action, joint_pos, joint_vel) diff --git a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py index 8a01ba2a370..0bd7f592d74 100644 --- a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py +++ b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py @@ -37,7 +37,9 @@ def __init__(self, max_len: int, batch_size: int, device: str): 
self._device = device self._ALL_INDICES = torch.arange(batch_size, device=device) - # max length tensor for comparisons + # max length integer for cpu comparisons + self._max_len_int = max_len + # broadcasted max length tensor for gpu tensor comparisons + self._max_len = torch.full((batch_size,), max_len, dtype=torch.int, device=device) # number of data pushes passed since the last call to :meth:`reset` self._num_pushes = torch.zeros(batch_size, dtype=torch.long, device=device) @@ -46,6 +48,8 @@ def __init__(self, max_len: int, batch_size: int, device: str): # the actual buffer for data storage # note: this is initialized on the first call to :meth:`append` self._buffer: torch.Tensor = None # type: ignore + # track if all batches have been initialized + self._need_reset: bool = True """ Properties. """ @@ -64,7 +68,7 @@ def device(self) -> str: @property def max_length(self) -> int: """The maximum length of the ring buffer.""" - return int(self._max_len[0].item()) + return self._max_len_int @property def current_length(self) -> torch.Tensor: @@ -100,6 +104,8 @@ def reset(self, batch_ids: Sequence[int] | None = None): batch_ids = slice(None) # reset the number of pushes for the specified batch indices self._num_pushes[batch_ids] = 0 + # reset is needed on next update to fill entire buffer with initial data + self._need_reset = True if self._buffer is not None: # set buffer at batch_id reset indices to 0.0 so that the buffer() getter returns the cleared circular buffer after reset. 
self._buffer[:, batch_ids, :] = 0.0 @@ -129,9 +135,14 @@ def append(self, data: torch.Tensor): # add the new data to the last layer self._buffer[self._pointer] = data # Check for batches with zero pushes and initialize all values in batch to first append - is_first_push = self._num_pushes == 0 - if torch.any(is_first_push): - self._buffer[:, is_first_push] = data[is_first_push] + # Only check if we haven't confirmed all batches are reset (avoids unnecessary checks if no reset done) + if self._need_reset: + is_first_push = self._num_pushes == 0 + if is_first_push.any().item(): + self._buffer[:, is_first_push] = data[is_first_push] + else: + # mark all the batches to be available + self._need_reset = False # increment number of number of pushes for all batches self._num_pushes += 1 @@ -156,8 +167,9 @@ def __getitem__(self, key: torch.Tensor) -> torch.Tensor: if len(key) != self.batch_size: raise ValueError(f"The argument 'key' has length {key.shape[0]}, while expecting {self.batch_size}") # check if the buffer is empty - if torch.any(self._num_pushes == 0) or self._buffer is None: - raise RuntimeError("Attempting to retrieve data on an empty circular buffer. Please append data first.") + if self._need_reset: + if self._buffer is None or (self._num_pushes == 0).any().item(): + raise RuntimeError("Attempting to retrieve data on an empty circular buffer. Please append data first.") # admissible lag valid_keys = torch.minimum(key, self._num_pushes - 1) diff --git a/source/isaaclab/isaaclab/utils/buffers/delay_buffer.py b/source/isaaclab/isaaclab/utils/buffers/delay_buffer.py index 85332dd87c7..8a1ea3ba9c3 100644 --- a/source/isaaclab/isaaclab/utils/buffers/delay_buffer.py +++ b/source/isaaclab/isaaclab/utils/buffers/delay_buffer.py @@ -174,4 +174,4 @@ def compute(self, data: torch.Tensor) -> torch.Tensor: self._circular_buffer.append(data) # return output delayed_data = self._circular_buffer[self._time_lags] - return delayed_data.clone() + return delayed_data