15 changes: 15 additions & 0 deletions examples/rigid/single_franka_batch_render.py
@@ -15,6 +15,7 @@ def main():
parser.add_argument("-r", "--render_all_cameras", action="store_true", default=False)
parser.add_argument("-o", "--output_dir", type=str, default="img_output/test")
parser.add_argument("-u", "--use_rasterizer", action="store_true", default=False)
parser.add_argument("-d", "--debug", action="store_true", default=False)
args = parser.parse_args()

########################## init ##########################
@@ -37,6 +38,14 @@ def main():
)

########################## cameras ##########################
debug_cam = scene.add_camera(
res=(720, 1280),
pos=(1.5, -0.5, 1.0),
lookat=(0.0, 0.0, 0.5),
fov=60,
GUI=args.vis,
debug=True,
)
cam_0 = scene.add_camera(
res=(512, 512),
pos=(1.5, 0.5, 1.5),
@@ -75,14 +84,20 @@ def main():
# Create an image exporter
exporter = FrameImageExporter(args.output_dir)

if args.debug:
debug_cam.start_recording()
for i in range(args.n_steps):
scene.step()
if args.debug:
debug_cam.render()
if args.render_all_cameras:
rgba, depth, _, _ = scene.render_all_cameras(rgb=True, depth=True)
exporter.export_frame_all_cameras(i, rgb=rgba, depth=depth)
else:
rgba, depth, _, _ = cam_1.render(rgb=True, depth=True)
exporter.export_frame_single_camera(i, cam_1.idx, rgb=rgba, depth=depth)
if args.debug:
debug_cam.stop_recording("debug_cam.mp4")


if __name__ == "__main__":
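With these changes, the example can be run with the new -d flag (python examples/rigid/single_franka_batch_render.py -d) to record the debug camera's view to debug_cam.mp4, while cam_0 / cam_1 keep going through whichever renderer the example configures and are exported as before.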
9 changes: 8 additions & 1 deletion genesis/engine/scene.py
@@ -529,6 +529,7 @@ def add_camera(
spp=256,
denoise=None,
env_idx=None,
debug=False,
):
"""
Add a camera to the scene.
@@ -567,6 +568,12 @@ def add_camera(
Whether to denoise the camera's rendered image. Only available when using the RayTracer renderer. Defaults
to True on Linux, otherwise False. If OptiX denoiser is not available in your platform, consider enabling
the OIDN denoiser option when building the RayTracer.
debug : bool
Whether to mark this camera as a debug camera. Debug cameras can be used to monitor or debug the
simulation without being part of the "sensors": their output is always rendered by the simple Rasterizer,
regardless of whether BatchRenderer or RayTracer is enabled. This makes it possible to record the
simulation with arbitrary resolution and camera pose without interfering with what robots can perceive
from their environment. Defaults to False.

Returns
-------
@@ -576,7 +583,7 @@
if denoise is None:
denoise = sys.platform != "darwin"
return self._visualizer.add_camera(
res, pos, lookat, up, model, fov, aperture, focus_dist, GUI, spp, denoise, env_idx
res, pos, lookat, up, model, fov, aperture, focus_dist, GUI, spp, denoise, env_idx, debug
)

@gs.assert_unbuilt
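A minimal usage sketch of the new debug flag (not taken from the PR; the scene setup below is standard Genesis boilerplate, and the camera poses and resolutions are placeholders mirroring the example script above):

import genesis as gs

gs.init()
scene = gs.Scene(show_viewer=False)
scene.add_entity(gs.morphs.Plane())

# Regular camera: rendered by whichever renderer the scene is configured with
# (e.g. the batch renderer), so it remains part of the robot-facing "sensors".
cam = scene.add_camera(res=(512, 512), pos=(1.5, 0.5, 1.5), lookat=(0.0, 0.0, 0.5), fov=40)

# Debug camera: always rendered by the simple rasterizer and excluded from the batch,
# so resolution and pose can be chosen freely without affecting what robots perceive.
debug_cam = scene.add_camera(res=(720, 1280), pos=(1.5, -0.5, 1.0), lookat=(0.0, 0.0, 0.5), fov=60, debug=True)

scene.build()

debug_cam.start_recording()
for _ in range(100):
    scene.step()
    debug_cam.render()
debug_cam.stop_recording("debug_cam.mp4")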
20 changes: 12 additions & 8 deletions genesis/vis/batch_renderer.py
@@ -85,19 +85,19 @@ def build(self):
if gs.backend != gs.cuda:
gs.raise_exception("BatchRenderer requires CUDA backend.")

cameras = self._visualizer._cameras
self._cameras = gs.List([camera for camera in self._visualizer._cameras if not camera.debug])
lights = self._lights
rigid = self._visualizer.scene.rigid_solver
n_envs = max(self._visualizer.scene.n_envs, 1)
res = cameras[0].res
res = self._cameras[0].res
gpu_id = gs.device.index
use_rasterizer = self._renderer_options.use_rasterizer

# Cameras
n_cameras = len(cameras)
cameras_pos = torch.stack([camera.get_pos() for camera in cameras], dim=1)
cameras_quat = torch.stack([camera.get_quat() for camera in cameras], dim=1)
cameras_fov = torch.tensor([camera.fov for camera in cameras], dtype=gs.tc_float, device=gs.device)
n_cameras = len(self._cameras)
cameras_pos = torch.stack([camera.get_pos() for camera in self._cameras], dim=1)
cameras_quat = torch.stack([camera.get_quat() for camera in self._cameras], dim=1)
cameras_fov = torch.tensor([camera.fov for camera in self._cameras], dtype=gs.tc_float, device=gs.device)

# Build taichi arrays to store light properties once. If later we need to support dynamic lights, we should
# consider storing light properties as taichi fields in Genesis.
@@ -179,8 +179,8 @@ def render(self, rgb=True, depth=False, segmentation=False, normal=False, force_
self.update_scene()

# Render frame
cameras_pos = torch.stack([camera.get_pos() for camera in self._visualizer._cameras], dim=1)
cameras_quat = torch.stack([camera.get_quat() for camera in self._visualizer._cameras], dim=1)
cameras_pos = torch.stack([camera.get_pos() for camera in self._cameras], dim=1)
cameras_quat = torch.stack([camera.get_quat() for camera in self._cameras], dim=1)
render_options = np.array((rgb_, depth_, False, False, aliasing), dtype=np.uint32)
rgba_arr_all, depth_arr_all = self._renderer.render(
self._visualizer.scene.rigid_solver, cameras_pos, cameras_quat, render_options
@@ -217,3 +217,7 @@ def reset(self):
@property
def lights(self):
return self._lights

@property
def cameras(self):
return self._cameras
39 changes: 28 additions & 11 deletions genesis/vis/camera.py
@@ -2,6 +2,7 @@
import os
import time
import math
from functools import lru_cache

import cv2
import numpy as np
@@ -66,6 +67,14 @@ class Camera(Sensor):
The far plane of the camera.
transform : np.ndarray, shape (4, 4), optional
The transform matrix of the camera.
env_idx : int, optional
The index of the environment tracked by the camera.
debug : bool, optional
Whether this camera is a debug camera. Debug cameras can be used to monitor or debug the
simulation without being part of the "sensors": their output is always rendered by the simple Rasterizer,
regardless of whether BatchRenderer or RayTracer is enabled. This makes it possible to record the
simulation with arbitrary resolution and camera pose without interfering with what robots can perceive
from their environment. Defaults to False.
"""

def __init__(
@@ -87,6 +96,7 @@ def __init__(
far=100.0,
transform=None,
env_idx=None,
debug=False,
):
self._idx = idx
self._uid = gs.UID()
@@ -111,6 +121,7 @@ def __init__(
self._is_built = False
self._attached_link = None
self._attached_offset_T = None
self._debug = debug

self._env_idx = env_idx
self._envs_offset = None
@@ -139,11 +150,10 @@ def build(self):

self._envs_offset = torch.as_tensor(self._visualizer._scene.envs_offset, dtype=gs.tc_float, device=gs.device)

self._batch_renderer = self._visualizer.batch_renderer
self._rasterizer = self._visualizer.rasterizer
self._raytracer = self._visualizer.raytracer
self._raytracer = self._visualizer.raytracer if not self._debug else None
self._batch_renderer = self._visualizer.batch_renderer if not self._debug else None

self._rasterizer.add_camera(self)
if self._batch_renderer is not None:
self._rgb_stacked = True
self._other_stacked = True
@@ -157,6 +167,7 @@ def build(self):
self._rgb_stacked = False
self._other_stacked = False
else:
self._rasterizer.add_camera(self)
self._rgb_stacked = self._visualizer._context.env_separate_rigid
self._other_stacked = self._visualizer._context.env_separate_rigid

@@ -323,13 +334,13 @@ def _batch_render(
# If n_envs == 0, the second dimension of the output is camera.
# Only return the current camera's image
if rgb_arr:
rgb_arr = rgb_arr[self._idx]
rgb_arr = rgb_arr[self.idx]
if depth:
depth_arr = depth_arr[self._idx]
depth_arr = depth_arr[self.idx]
if segmentation:
seg_arr = seg_arr[self._idx]
seg_arr = seg_arr[self.idx]
if normal:
normal_arr = normal_arr[self._idx]
normal_arr = normal_arr[self.idx]
return rgb_arr, depth_arr, seg_arr, normal_arr

@gs.assert_built
@@ -533,7 +544,6 @@ def render_pointcloud(self, world_frame=True):
point_cloud = point_cloud[:, :3].reshape((*depth_arr.shape, 3))
return point_cloud, mask

@gs.assert_built
def set_pose(self, transform=None, pos=None, lookat=None, up=None, env_idx=None):
"""
Set the pose of the camera.
@@ -608,9 +618,10 @@ def set_pose(self, transform=None, pos=None, lookat=None, up=None, env_idx=None):
self._multi_env_transform_tensor[env_idx] = transform
self._multi_env_quat_tensor[env_idx] = _T_to_quat_for_madrona(transform)

self._rasterizer.update_camera(self)
if self._raytracer is not None:
self._raytracer.update_camera(self)
elif self._batch_renderer is None:
self._rasterizer.update_camera(self)

@gs.assert_built
def set_params(self, fov=None, aperture=None, focus_dist=None, intrinsics=None):
@@ -651,9 +662,10 @@ def set_params(self, fov=None, aperture=None, focus_dist=None, intrinsics=None):
else:
self._fov = intrinsics_fov

self._rasterizer.update_camera(self)
if self._raytracer is not None:
self._raytracer.update_camera(self)
elif self._batch_renderer is None:
self._rasterizer.update_camera(self)

@gs.assert_built
def start_recording(self):
@@ -753,7 +765,7 @@ def is_built(self):

@property
def idx(self):
"""The integer index of the camera."""
"""The global integer index of the camera."""
return self._idx

@property
@@ -839,6 +851,11 @@ def env_idx(self):
"""Index of the environment being tracked by the camera."""
return self._env_idx

@property
def debug(self):
"""Whether the camera is a debug camera."""
return self._debug

@property
def pos(self):
"""The current position of the camera for the tracked environment."""
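Because a debug camera is detached from the batch renderer and raytracer, set_pose forwards its pose updates straight to the rasterizer, so the camera can be moved freely while recording. A sketch of an orbiting debug camera (assumes the scene and debug_cam from the previous sketch; center, radius and height are arbitrary placeholders):

import math

center = (0.0, 0.0, 0.5)
radius, height, n_frames = 2.0, 1.0, 180

debug_cam.start_recording()
for i in range(n_frames):
    scene.step()
    angle = 2.0 * math.pi * i / n_frames
    # Move the debug camera on a circle around the scene center; the pose update
    # only touches the rasterizer, never the batched camera tensors.
    debug_cam.set_pose(
        pos=(center[0] + radius * math.cos(angle), center[1] + radius * math.sin(angle), height),
        lookat=center,
    )
    debug_cam.render()
debug_cam.stop_recording("debug_cam_orbit.mp4")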
22 changes: 19 additions & 3 deletions genesis/vis/visualizer.py
@@ -127,17 +127,33 @@ def destroy(self):
self.viewer_lock = None
self._renderer = None

def add_camera(self, res, pos, lookat, up, model, fov, aperture, focus_dist, GUI, spp, denoise, env_idx):
cam_idx = len(self._cameras)
def add_camera(self, res, pos, lookat, up, model, fov, aperture, focus_dist, GUI, spp, denoise, env_idx, debug):
cam_idx = len([camera for camera in self._cameras if camera.debug == debug])
camera = Camera(
self, cam_idx, model, res, pos, lookat, up, fov, aperture, focus_dist, GUI, spp, denoise, env_idx=env_idx
self,
cam_idx,
model,
res,
pos,
lookat,
up,
fov,
aperture,
focus_dist,
GUI,
spp,
denoise,
env_idx=env_idx,
debug=debug,
)
self._cameras.append(camera)
return camera

def add_light(self, pos, dir, intensity, directional, castshadow, cutoff):
if self._batch_renderer is not None:
self._batch_renderer.add_light(pos, dir, intensity, directional, castshadow, cutoff)
else:
gs.raise_exception("`add_light` is specifically for batch renderer.")

def reset(self):
self._t = -1