Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions CONTRIBUTORS.md
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ Guidelines for modifications:
* Calvin Yu
* Cathy Y. Li
* Cheng-Rong Lai
* Chengyi Lux Zhang
* Chenyu Yang
* Connor Smith
* CY (Chien-Ying) Chen
Expand Down Expand Up @@ -157,6 +158,7 @@ Guidelines for modifications:
* Yanzi Zhu
* Yijie Guo
* Yohan Choi
* Yufeng Chi
* Yujian Zhang
* Yun Liu
* Zehao Wang
Expand Down
6 changes: 3 additions & 3 deletions source/isaaclab/isaaclab/actuators/actuator_pd.py
Original file line number Diff line number Diff line change
Expand Up @@ -358,9 +358,9 @@ def compute(
self, control_action: ArticulationActions, joint_pos: torch.Tensor, joint_vel: torch.Tensor
) -> ArticulationActions:
# apply delay based on the delay the model for all the setpoints
control_action.joint_positions = self.positions_delay_buffer.compute(control_action.joint_positions)
control_action.joint_velocities = self.velocities_delay_buffer.compute(control_action.joint_velocities)
control_action.joint_efforts = self.efforts_delay_buffer.compute(control_action.joint_efforts)
control_action.joint_positions[:] = self.positions_delay_buffer.compute(control_action.joint_positions)
control_action.joint_velocities[:] = self.velocities_delay_buffer.compute(control_action.joint_velocities)
control_action.joint_efforts[:] = self.efforts_delay_buffer.compute(control_action.joint_efforts)
# compute actuator model
return super().compute(control_action, joint_pos, joint_vel)

Expand Down
26 changes: 19 additions & 7 deletions source/isaaclab/isaaclab/utils/buffers/circular_buffer.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,9 @@ def __init__(self, max_len: int, batch_size: int, device: str):
self._device = device
self._ALL_INDICES = torch.arange(batch_size, device=device)

# max length tensor for comparisons
# max length integer for cpu comparisons
self._max_len_int = max_len
# broadcasted max length tensor for gpu tensor comparisons
self._max_len = torch.full((batch_size,), max_len, dtype=torch.int, device=device)
# number of data pushes passed since the last call to :meth:`reset`
self._num_pushes = torch.zeros(batch_size, dtype=torch.long, device=device)
Expand All @@ -46,6 +48,8 @@ def __init__(self, max_len: int, batch_size: int, device: str):
# the actual buffer for data storage
# note: this is initialized on the first call to :meth:`append`
self._buffer: torch.Tensor = None # type: ignore
# track if all batches have been initialized
self._all_initialized: bool = False

"""
Properties.
Expand All @@ -64,7 +68,7 @@ def device(self) -> str:
@property
def max_length(self) -> int:
"""The maximum length of the ring buffer."""
return int(self._max_len[0].item())
return self._max_len_int

@property
def current_length(self) -> torch.Tensor:
Expand Down Expand Up @@ -100,6 +104,8 @@ def reset(self, batch_ids: Sequence[int] | None = None):
batch_ids = slice(None)
# reset the number of pushes for the specified batch indices
self._num_pushes[batch_ids] = 0
# re-initialization is required
self._all_initialized = False
if self._buffer is not None:
# set buffer at batch_id reset indices to 0.0 so that the buffer() getter returns the cleared circular buffer after reset.
self._buffer[:, batch_ids, :] = 0.0
Expand Down Expand Up @@ -129,9 +135,14 @@ def append(self, data: torch.Tensor):
# add the new data to the last layer
self._buffer[self._pointer] = data
# Check for batches with zero pushes and initialize all values in batch to first append
is_first_push = self._num_pushes == 0
if torch.any(is_first_push):
self._buffer[:, is_first_push] = data[is_first_push]
# Only check if we haven't confirmed all batches are initialized (avoids GPU sync in hot path)
if not self._all_initialized:
is_first_push = self._num_pushes == 0
if is_first_push.any().item():
self._buffer[:, is_first_push] = data[is_first_push]
else:
# All batches now initialized, skip this check in future calls
self._all_initialized = True
# increment number of pushes for all batches
self._num_pushes += 1

Expand All @@ -156,8 +167,9 @@ def __getitem__(self, key: torch.Tensor) -> torch.Tensor:
if len(key) != self.batch_size:
raise ValueError(f"The argument 'key' has length {key.shape[0]}, while expecting {self.batch_size}")
# check if the buffer is empty
if torch.any(self._num_pushes == 0) or self._buffer is None:
raise RuntimeError("Attempting to retrieve data on an empty circular buffer. Please append data first.")
if not self._all_initialized:
if self._buffer is None or (self._num_pushes == 0).any().item():
raise RuntimeError("Attempting to retrieve data on an empty circular buffer. Please append data first.")

# admissible lag
valid_keys = torch.minimum(key, self._num_pushes - 1)
Expand Down
2 changes: 1 addition & 1 deletion source/isaaclab/isaaclab/utils/buffers/delay_buffer.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,4 +174,4 @@ def compute(self, data: torch.Tensor) -> torch.Tensor:
self._circular_buffer.append(data)
# return output
delayed_data = self._circular_buffer[self._time_lags]
return delayed_data.clone()
return delayed_data