23 changes: 1 addition & 22 deletions genesis/engine/entities/rigid_entity/rigid_entity.py
@@ -1730,27 +1730,6 @@ def get_links_ang(self, links_idx_local=None, envs_idx=None, *, unsafe=False):
links_idx = self._get_idx(links_idx_local, self.n_links, self._link_start, unsafe=True)
return self._solver.get_links_ang(links_idx, envs_idx, unsafe=unsafe)

@gs.assert_built
def get_links_accelerometer_data(self, links_idx_local=None, envs_idx=None, *, imu=False, unsafe=False):
"""
Returns the accelerometer data that would be measured by an IMU rigidly attached to the specified entity's links,
i.e. the true linear acceleration of the links expressed at their respective origin in local frame coordinates.

Parameters
----------
links_idx_local : None | array_like
The indices of the links. Defaults to None.
envs_idx : None | array_like, optional
The indices of the environments. If None, all environments will be considered. Defaults to None.

Returns
-------
acc : torch.Tensor, shape (n_links, 3) or (n_envs, n_links, 3)
The accelerometer data of IMUs rigidly attached to the specified entity's links.
"""
links_idx = self._get_idx(links_idx_local, self.n_links, self._link_start, unsafe=True)
return self._solver.get_links_acc(links_idx, envs_idx, mimick_imu=True, unsafe=unsafe)

@gs.assert_built
def get_links_acc(self, links_idx_local=None, envs_idx=None, *, unsafe=False):
"""
@@ -1770,7 +1749,7 @@ def get_links_acc(self, links_idx_local=None, envs_idx=None, *, unsafe=False):
The linear classical acceleration of the specified entity's links.
"""
links_idx = self._get_idx(links_idx_local, self.n_links, self._link_start, unsafe=True)
return self._solver.get_links_acc(links_idx, envs_idx, mimick_imu=False, unsafe=unsafe)
return self._solver.get_links_acc(links_idx, envs_idx, unsafe=unsafe)

@gs.assert_built
def get_links_acc_ang(self, links_idx_local=None, envs_idx=None, *, unsafe=False):
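With `get_links_accelerometer_data()` removed and the `mimick_imu` path dropped from `get_links_acc()` (the IMU sensor is now exposed through `genesis.sensors`, see below), the old behaviour can still be reconstructed from the remaining getters. A rough sketch of what the deleted branch computed — `get_links_quat`, the `scene.sim.rigid_solver` access path, and the w-first quaternion layout are assumptions, not part of this diff:

```python
import torch

def imu_accel_sketch(scene, entity, links_idx_local=None):
    """Approximate the removed helper: classical linear acceleration, minus
    gravity, rotated into each link's local frame (single-environment case)."""
    acc = entity.get_links_acc(links_idx_local)      # (n_links, 3), world frame
    quat = entity.get_links_quat(links_idx_local)    # (n_links, 4), assumed (w, x, y, z)
    gravity = scene.sim.rigid_solver.get_gravity()   # (3,), getter added by this PR

    acc = acc - gravity                              # subtract gravity, as the old kernel did

    # Rotate by the inverse (conjugate) of each link quaternion.
    w, u = quat[..., :1], quat[..., 1:]
    t = 2.0 * torch.cross(u, acc, dim=-1)
    return acc - w * t + torch.cross(u, t, dim=-1)
```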
7 changes: 5 additions & 2 deletions genesis/engine/scene.py
@@ -2,6 +2,7 @@
import pickle
import sys
import time
from typing import TYPE_CHECKING

import numpy as np
import torch
@@ -26,7 +27,6 @@
PBDOptions,
ProfilingOptions,
RigidOptions,
SensorOptions,
SFOptions,
SimOptions,
SPHOptions,
@@ -43,6 +43,9 @@
from genesis.vis import Visualizer
from genesis.utils.warnings import warn_once

if TYPE_CHECKING:
from genesis.sensors.base_sensor import SensorOptions


@gs.assert_initialized
class Scene(RBC):
@@ -516,7 +519,7 @@ def add_light(
gs.raise_exception("Adding lights is only supported by 'RayTracer' and 'BatchRenderer'.")

@gs.assert_unbuilt
def add_sensor(self, sensor_options: SensorOptions):
def add_sensor(self, sensor_options: "SensorOptions"):
return self._sim._sensor_manager.create_sensor(sensor_options)

@gs.assert_unbuilt
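The `SensorOptions` import in `scene.py` is now deferred behind `TYPE_CHECKING`, and the annotation on `add_sensor` is quoted, so static checkers still see the type while nothing from `genesis.sensors` is imported at module load time (e.g. to sidestep a circular import). A minimal sketch of the pattern in isolation, not the full file:

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime.
    from genesis.sensors.base_sensor import SensorOptions

class Scene:
    def add_sensor(self, sensor_options: "SensorOptions"):
        # The quoted forward reference keeps the hint without a runtime import.
        ...
```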
4 changes: 4 additions & 0 deletions genesis/engine/solvers/base_solver.py
@@ -63,6 +63,10 @@ def _kernel_set_gravity(self, gravity: ti.types.ndarray(), envs_idx: ti.types.nd
for j in ti.static(range(3)):
self._gravity[envs_idx[i_b_]][j] = gravity[i_b_, j]

def get_gravity(self, envs_idx=None, *, unsafe=False):
tensor = ti_field_to_torch(self._gravity, envs_idx, transpose=True, unsafe=unsafe)
return tensor.squeeze(0) if self.n_envs == 0 else tensor

def dump_ckpt_to_numpy(self) -> dict[str, np.ndarray]:
arrays: dict[str, np.ndarray] = {}

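The new `get_gravity()` getter follows the same convention as the other solver getters: the batch dimension is squeezed when the scene is not parallelized. A hedged usage sketch (the `scene.sim.rigid_solver` access path is an assumption):

```python
g = scene.sim.rigid_solver.get_gravity()                        # (3,) when n_envs == 0
g_batch = scene.sim.rigid_solver.get_gravity(envs_idx=[0, 2])   # one row per selected env otherwise
```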
14 changes: 1 addition & 13 deletions genesis/engine/solvers/rigid/rigid_solver_decomp.py
@@ -2043,18 +2043,16 @@ def get_links_ang(self, links_idx=None, envs_idx=None, *, unsafe=False):
tensor = ti_field_to_torch(self.links_state.cd_ang, envs_idx, links_idx, transpose=True, unsafe=unsafe)
return tensor.squeeze(0) if self.n_envs == 0 else tensor

def get_links_acc(self, links_idx=None, envs_idx=None, *, mimick_imu=False, unsafe=False):
def get_links_acc(self, links_idx=None, envs_idx=None, *, unsafe=False):
_tensor, links_idx, envs_idx = self._sanitize_2D_io_variables(
None, links_idx, self.n_links, 3, envs_idx, idx_name="links_idx", unsafe=unsafe
)
tensor = _tensor.unsqueeze(0) if self.n_envs == 0 else _tensor
kernel_get_links_acc(
mimick_imu,
tensor,
links_idx,
envs_idx,
self.links_state,
self._rigid_global_info,
self._static_rigid_sim_config,
)
return _tensor
@@ -6554,12 +6552,10 @@ def kernel_get_links_vel(

@ti.kernel
def kernel_get_links_acc(
mimick_imu: ti.i32,
tensor: ti.types.ndarray(),
links_idx: ti.types.ndarray(),
envs_idx: ti.types.ndarray(),
links_state: array_class.LinksState,
rigid_global_info: array_class.RigidGlobalInfo,
static_rigid_sim_config: ti.template(),
):
ti.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.PARTIAL)
@@ -6577,14 +6573,6 @@ def kernel_get_links_acc(
vel = links_state.cd_vel[i_l, i_b] + ang.cross(cpos)
acc_classic_lin = acc_lin + ang.cross(vel)

# Mimick IMU accelerometer signal if requested
if mimick_imu:
# Subtract gravity
acc_classic_lin -= rigid_global_info.gravity[i_b]

# Move the resulting linear acceleration in local links frame
acc_classic_lin = gu.ti_inv_transform_by_quat(acc_classic_lin, links_state.quat[i_l, i_b])

for i in ti.static(range(3)):
tensor[i_b_, i_l_, i] = acc_classic_lin[i]

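For reference, the kernel still returns the classical linear acceleration at the link origin, while the deleted `mimick_imu` branch additionally subtracted gravity and rotated the result into the link frame:

$$
a_{\text{classic}} = a_{\text{lin}} + \omega \times v, \qquad
a_{\text{imu}} = R_{\text{link}}^{\top}\,(a_{\text{classic}} - g)
$$

where $\omega$ is `cd_ang`, $v$ is the link-origin velocity reconstructed from `cd_vel` and the COM offset, and $R_{\text{link}}$ is the rotation given by `links_state.quat`.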
1 change: 0 additions & 1 deletion genesis/options/__init__.py
@@ -2,6 +2,5 @@
from .solvers import *
from .vis import *
from .profiling import ProfilingOptions
from .sensors import SensorOptions

__all__ = ["ProfilingOptions"]
16 changes: 0 additions & 16 deletions genesis/options/sensors.py

This file was deleted.

1 change: 1 addition & 0 deletions genesis/sensors/__init__.py
@@ -1,4 +1,5 @@
from .base_sensor import Sensor
from .imu import IMU
from .tactile import RigidContactSensor, RigidContactForceSensor, RigidContactForceGridSensor
from .data_recorder import SensorDataRecorder, RecordingOptions
from .data_handlers import (
99 changes: 88 additions & 11 deletions genesis/sensors/base_sensor.py
@@ -1,35 +1,89 @@
from typing import TYPE_CHECKING, Any, List, Type
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, List

import numpy as np
import taichi as ti
import torch

import genesis as gs
from genesis.options import Options
from genesis.repr_base import RBC

if TYPE_CHECKING:
from genesis.options.sensors import SensorOptions
from genesis.utils.ring_buffer import TensorRingBuffer

from .sensor_manager import SensorManager


class SensorOptions(Options):
"""
Base class for all sensor options.
Each sensor should have its own options class that inherits from this class.
The options class should be registered with the SensorManager using the @register_sensor decorator.

Parameters
----------
read_delay : float
The delay in seconds before the sensor data is read.
"""

read_delay: float = 0.0

def validate(self, scene):
"""
Validate the sensor options values before the sensor is added to the scene.
"""
read_delay_hz = self.read_delay / scene._sim.dt
if not np.isclose(read_delay_hz, round(read_delay_hz), atol=1e-6):
gs.logger.warn(
f"Read delay should be a multiple of the simulation time step. Got {self.read_delay}"
f" and {scene._sim.dt}. Actual read delay will be {1/round(read_delay_hz)}."
)


@dataclass
class SharedSensorMetadata:
"""
Shared metadata between all sensors of the same class.
"""

cache_sizes: list[int] = field(default_factory=list)
read_delay_steps: list[int] = field(default_factory=list)


@ti.data_oriented
class Sensor(RBC):
"""
Base class for all types of sensors.

NOTE: The Sensor system is designed to be performant. All sensors of the same type are updated at once and stored
in a cache in SensorManager. Cache size is inferred from the return format and cache length of each sensor.
`read()` and `read_ground_truth()`, the public-facing methods of every Sensor, automatically handle indexing into
the shared cache to return the correct data.
"""

def __init__(self, sensor_options: "SensorOptions", sensor_idx: int, sensor_manager: "SensorManager"):
self._options: "SensorOptions" = sensor_options
self._idx: int = sensor_idx
self._manager: "SensorManager" = sensor_manager
self._shared_metadata: SharedSensorMetadata = sensor_manager._sensors_metadata[type(self)]

self._read_delay_steps = round(self._options.read_delay / self._manager._sim.dt)
self._shared_metadata.read_delay_steps.append(self._read_delay_steps)

# initialized by SensorManager during build
self._read_delay_steps: int = 0
self._shape_indices: list[tuple[int, int]] = []
self._shared_metadata: dict[str, Any] | None = None
self._cache: "TensorRingBuffer" | None = None
return_format = self._get_return_format()
return_shapes = return_format.values() if isinstance(return_format, dict) else (return_format,)
tensor_size = 0
for shape in return_shapes:
data_size = np.prod(shape)
self._shape_indices.append((tensor_size, tensor_size + data_size))
tensor_size += data_size

self._cache_size = self._get_cache_length() * tensor_size
self._shared_metadata.cache_sizes.append(self._cache_size)

self._cache_idx: int = -1 # initialized by SensorManager during build

# =============================== implementable methods ===============================

@@ -71,12 +125,19 @@ def _update_shared_ground_truth_cache(

@classmethod
def _update_shared_cache(
cls, shared_metadata: dict[str, Any], shared_ground_truth_cache: torch.Tensor, shared_cache: "TensorRingBuffer"
cls,
shared_metadata: dict[str, Any],
shared_ground_truth_cache: torch.Tensor,
shared_cache: torch.Tensor,
buffered_data: "TensorRingBuffer",
):
"""
Update the shared sensor cache for all sensors of this class using metadata in SensorManager.

The information in shared_cache should be the final measured sensor data after all noise and post-processing.
NOTE: The implementation should include applying the delay using the `_apply_delay_to_shared_cache()` method.
"""
raise NotImplementedError("Sensors must implement `update_shared_cache()`.")
raise NotImplementedError("Sensors must implement `update_shared_cache_with_noise()`.")

@classmethod
def _get_cache_dtype(cls) -> torch.dtype:
@@ -92,19 +153,35 @@ def read(self, envs_idx: List[int] | None = None):
"""
Read the sensor data (with noise applied if applicable).
"""
return self._get_formatted_data(self._cache.get(self._read_delay_steps), envs_idx)
return self._get_formatted_data(self._manager.get_cloned_from_cache(self), envs_idx)

@gs.assert_built
def read_ground_truth(self, envs_idx: List[int] | None = None):
"""
Read the ground truth sensor data (without noise).
"""
return self._get_formatted_data(self._manager.get_cloned_from_ground_truth_cache(self), envs_idx)
return self._get_formatted_data(self._manager.get_cloned_from_cache(self, is_ground_truth=True), envs_idx)

@classmethod
def _apply_delay_to_shared_cache(
self, shared_metadata: SharedSensorMetadata, shared_cache: torch.Tensor, buffered_data: "TensorRingBuffer"
):
"""
Applies the read delay to the shared cache tensor by copying the buffered data at the appropriate index.
"""
idx = 0
for tensor_size, read_delay_step in zip(shared_metadata.cache_sizes, shared_metadata.read_delay_steps):
shared_cache[:, idx : idx + tensor_size] = buffered_data.at(read_delay_step)[:, idx : idx + tensor_size]
idx += tensor_size

def _get_formatted_data(
self, tensor: torch.Tensor, envs_idx: list[int] | None
) -> torch.Tensor | dict[str, torch.Tensor]:
# Note: This method does not clone the data tensor, it should have been cloned by the caller.
"""
Formats the flattened cache tensor into a dict of tensors using the format specified in `_get_return_format()`.

NOTE: This method does not clone the data tensor, it should have been cloned by the caller.
"""

if envs_idx is None:
envs_idx = self._manager._sim._scene._envs_idx
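A hypothetical sketch of how a concrete options class plugs into this base class and how `read_delay` flows through it — the sensor name, its field, and keyword construction of `Options` are illustrative assumptions, not part of this diff:

```python
from genesis.sensors.base_sensor import SensorOptions

class RangeSensorOptions(SensorOptions):   # hypothetical sensor options
    max_distance: float = 10.0             # hypothetical sensor-specific field

# With scene._sim.dt == 0.01 and read_delay == 0.02, Sensor.__init__ computes
#   read_delay_steps = round(0.02 / 0.01) = 2
# and _apply_delay_to_shared_cache() later copies `buffered_data.at(2)` — the
# measurement buffered two simulation steps ago — into this sensor's slice of
# the shared cache. validate() only warns when read_delay is not a multiple of dt.
opts = RangeSensorOptions(read_delay=0.02)
```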