11 changes: 6 additions & 5 deletions examples/drone/hover_env.py
@@ -122,10 +122,11 @@ def _resample_commands(self, envs_idx):
self.commands[envs_idx, 2] = gs_rand_float(*self.command_cfg["pos_z_range"], (len(envs_idx),), gs.device)

def _at_target(self):
at_target = (
(torch.norm(self.rel_pos, dim=1) < self.env_cfg["at_target_threshold"]).nonzero(as_tuple=False).flatten()
return (
(torch.norm(self.rel_pos, dim=1) < self.env_cfg["at_target_threshold"])
.nonzero(as_tuple=False)
.reshape((-1,))
)
return at_target

def step(self, actions):
self.actions = torch.clip(actions, -self.env_cfg["clip_actions"], self.env_cfg["clip_actions"])
@@ -169,11 +170,11 @@ def step(self, actions):
)
self.reset_buf = (self.episode_length_buf > self.max_episode_length) | self.crash_condition

time_out_idx = (self.episode_length_buf > self.max_episode_length).nonzero(as_tuple=False).flatten()
time_out_idx = (self.episode_length_buf > self.max_episode_length).nonzero(as_tuple=False).reshape((-1,))
self.extras["time_outs"] = torch.zeros_like(self.reset_buf, device=gs.device, dtype=gs.tc_float)
self.extras["time_outs"][time_out_idx] = 1.0

self.reset_idx(self.reset_buf.nonzero(as_tuple=False).flatten())
self.reset_idx(self.reset_buf.nonzero(as_tuple=False).reshape((-1,)))

# compute reward
self.rew_buf[:] = 0.0
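Note on examples/drone/hover_env.py: `_at_target` now returns the index tensor directly, and `.flatten()` on the result of `nonzero(as_tuple=False)` is replaced by `.reshape((-1,))`. Both calls yield the same 1-D tensor of environment indices; the standalone PyTorch sketch below (made-up `rel_pos` and threshold values, not Genesis code) only illustrates that equivalence.

```python
import torch

# Toy stand-ins for self.rel_pos and env_cfg["at_target_threshold"].
rel_pos = torch.tensor([[0.0, 0.0, 0.01], [1.0, 2.0, 3.0], [0.02, 0.0, 0.0]])
threshold = 0.05

mask = torch.norm(rel_pos, dim=1) < threshold           # shape (N,)
idx_old = mask.nonzero(as_tuple=False).flatten()        # previous form
idx_new = mask.nonzero(as_tuple=False).reshape((-1,))   # form used in this PR

assert torch.equal(idx_old, idx_new)
print(idx_new)  # tensor([0, 2]) -> environments within the target threshold
```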
6 changes: 3 additions & 3 deletions examples/locomotion/go2_env.py
@@ -144,7 +144,7 @@ def step(self, actions):
envs_idx = (
(self.episode_length_buf % int(self.env_cfg["resampling_time_s"] / self.dt) == 0)
.nonzero(as_tuple=False)
.flatten()
.reshape((-1,))
)
self._resample_commands(envs_idx)

@@ -153,11 +153,11 @@ def step(self, actions):
self.reset_buf |= torch.abs(self.base_euler[:, 1]) > self.env_cfg["termination_if_pitch_greater_than"]
self.reset_buf |= torch.abs(self.base_euler[:, 0]) > self.env_cfg["termination_if_roll_greater_than"]

time_out_idx = (self.episode_length_buf > self.max_episode_length).nonzero(as_tuple=False).flatten()
time_out_idx = (self.episode_length_buf > self.max_episode_length).nonzero(as_tuple=False).reshape((-1,))
self.extras["time_outs"] = torch.zeros_like(self.reset_buf, device=gs.device, dtype=gs.tc_float)
self.extras["time_outs"][time_out_idx] = 1.0

self.reset_idx(self.reset_buf.nonzero(as_tuple=False).flatten())
self.reset_idx(self.reset_buf.nonzero(as_tuple=False).reshape((-1,)))

# compute reward
self.rew_buf[:] = 0.0
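Note on examples/locomotion/go2_env.py: the same `.flatten()` to `.reshape((-1,))` substitution is applied to the command-resampling, time-out, and reset index selections. A standalone PyTorch sketch of the periodic resampling selection (the `dt` and `resampling_time_s` values are made up):

```python
import torch

dt, resampling_time_s = 0.02, 0.5
episode_length_buf = torch.tensor([0, 12, 25, 50, 75, 100])

every_n_steps = int(resampling_time_s / dt)  # resample every 25 control steps
envs_idx = (episode_length_buf % every_n_steps == 0).nonzero(as_tuple=False).reshape((-1,))
print(envs_idx)  # tensor([0, 2, 3, 4, 5]) -> envs due for a new command
```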
49 changes: 22 additions & 27 deletions genesis/engine/bvh.py
@@ -160,7 +160,6 @@ class Node:
# Whether an internal node has been visited during traversal
self.internal_node_active = ti.field(ti.u1, shape=(self.n_batches, self.n_aabbs - 1))
self.internal_node_ready = ti.field(ti.u1, shape=(self.n_batches, self.n_aabbs - 1))
self.updated = ti.field(ti.u1, shape=())

# Query results, vec3 of batch id, self id, query id
self.query_result = ti.field(gs.ti_ivec3, shape=(self.max_n_query_results))
@@ -281,11 +280,7 @@ def build_radix_tree(self):

# Parallel build for every internal node
for i_b, i in ti.ndrange(self.n_batches, self.n_aabbs - 1):
d = ti.select(
self.delta(i, i + 1, i_b) > self.delta(i, i - 1, i_b),
1,
-1,
)
d = ti.select(self.delta(i, i + 1, i_b) > self.delta(i, i - 1, i_b), 1, -1)

delta_min = self.delta(i, i - d, i_b)
l_max = ti.u32(2)
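Note: the `ti.select` above is only collapsed onto a single line; the logic is unchanged. Read in plain Python (an illustration of the LBVH build step, not Taichi code; the delta values are made up):

```python
# d picks the direction in which this internal node's range extends:
# towards the neighbour that shares the longer common prefix (larger delta).
def select(cond, if_true, if_false):
    return if_true if cond else if_false

delta_right, delta_left = 5, 3  # stand-ins for delta(i, i + 1) and delta(i, i - 1)
d = select(delta_right > delta_left, 1, -1)
print(d)  # 1 -> the range grows to the right
```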
@@ -337,12 +332,12 @@ def compute_bounds(self):
Starts from the leaf nodes and works upwards layer by layer.
"""
self._kernel_compute_bounds_init()
while self.updated[None]:
self._kernel_compute_bounds_one_layer()
is_done = False
while not is_done:
is_done = self._kernel_compute_bounds_one_layer()

@ti.kernel
def _kernel_compute_bounds_init(self):
self.updated[None] = True
self.internal_node_active.fill(False)
self.internal_node_ready.fill(False)

@@ -355,26 +350,26 @@ def _kernel_compute_bounds_init(self):
self.internal_node_active[i_b, parent_idx] = True

@ti.kernel
def _kernel_compute_bounds_one_layer(self):
self.updated[None] = False
def _kernel_compute_bounds_one_layer(self) -> ti.u1:
for i_b, i in ti.ndrange(self.n_batches, self.n_aabbs - 1):
if not self.internal_node_active[i_b, i]:
continue
left_bound = self.nodes[i_b, self.nodes[i_b, i].left].bound
right_bound = self.nodes[i_b, self.nodes[i_b, i].right].bound
self.nodes[i_b, i].bound.min = ti.min(left_bound.min, right_bound.min)
self.nodes[i_b, i].bound.max = ti.max(left_bound.max, right_bound.max)
parent_idx = self.nodes[i_b, i].parent
if parent_idx != -1:
self.internal_node_ready[i_b, parent_idx] = True
self.internal_node_active[i_b, i] = False
self.updated[None] = True

if self.internal_node_active[i_b, i]:
left_bound = self.nodes[i_b, self.nodes[i_b, i].left].bound
right_bound = self.nodes[i_b, self.nodes[i_b, i].right].bound
self.nodes[i_b, i].bound.min = ti.min(left_bound.min, right_bound.min)
self.nodes[i_b, i].bound.max = ti.max(left_bound.max, right_bound.max)
parent_idx = self.nodes[i_b, i].parent
if parent_idx != -1:
self.internal_node_ready[i_b, parent_idx] = True
self.internal_node_active[i_b, i] = False

is_done = True
for i_b, i in ti.ndrange(self.n_batches, self.n_aabbs - 1):
if not self.internal_node_ready[i_b, i]:
continue
self.internal_node_active[i_b, i] = True
self.internal_node_ready[i_b, i] = False
if self.internal_node_ready[i_b, i]:
self.internal_node_active[i_b, i] = True
is_done = False
self.internal_node_ready.fill(False)

return is_done

@ti.kernel
def query(self, aabbs: ti.template()):
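Note on genesis/engine/bvh.py: `compute_bounds` no longer polls a global `updated` field; `_kernel_compute_bounds_one_layer` now returns a `ti.u1` done flag and the host loop stops when it is set. The plain-Python sketch below (not Taichi, toy three-node tree) mirrors only that control flow: active nodes merge their children's bounds and mark their parent ready, ready nodes become the next layer's active set, and a pass reports done once nothing was promoted.

```python
def compute_bounds_one_layer(active, ready, parent):
    for i in list(active):
        # The real kernel merges the two children's AABBs here.
        if parent[i] != -1:
            ready.add(parent[i])   # parent now has a freshly bounded child
        active.discard(i)
    is_done = len(ready) == 0      # nothing promoted -> the root is bounded
    active.update(ready)           # ready nodes form the next layer
    ready.clear()
    return is_done

parent = {0: -1, 1: 0, 2: 0}       # toy radix tree: node 0 is the root
active, ready = {1, 2}, set()      # nodes whose children are already bounded

is_done, n_layers = False, 0
while not is_done:
    is_done = compute_bounds_one_layer(active, ready, parent)
    n_layers += 1
print(n_layers)  # 2: one pass for nodes {1, 2}, one final pass for the root
```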
20 changes: 9 additions & 11 deletions genesis/engine/coupler.py
@@ -694,7 +694,7 @@ def init_fem_fields(self):
self.fem_pressure.from_numpy(fem_pressure_np)
self.fem_pressure_gradient = ti.field(gs.ti_vec3, shape=(fem_solver._B, fem_solver.n_elements))
self.fem_floor_contact_pair_type = ti.types.struct(
active=gs.ti_int, # whether the contact pair is active
active=ti.u1, # whether the contact pair is active
batch_idx=gs.ti_int, # batch index
geom_idx=gs.ti_int, # index of the FEM element
intersection_code=gs.ti_int, # intersection code for the element
@@ -904,7 +904,7 @@ def fem_floor_detection(self, f: ti.i32):
# Compute data for each contact pair
for i_c in range(self.n_fem_floor_contact_pairs[None]):
pair = self.fem_floor_contact_pairs[i_c]
self.fem_floor_contact_pairs[i_c].active = 1 # mark the contact pair as active
self.fem_floor_contact_pairs[i_c].active = True # mark the contact pair as active
i_b = pair.batch_idx
i_e = pair.geom_idx
intersection_code = pair.intersection_code
@@ -956,7 +956,7 @@ def fem_floor_detection(self, f: ti.i32):
rigid_g = self.fem_pressure_gradient[i_b, i_e].z
# TODO A better way to handle corner cases where pressure and pressure gradient are ill defined
if total_area < gs.EPS or rigid_g < gs.EPS:
self.fem_floor_contact_pairs[i_c].active = 0
self.fem_floor_contact_pairs[i_c].active = False
continue
g = self.default_deformable_g * rigid_g / (self.default_deformable_g + rigid_g) # harmonic average
rigid_k = total_area * g
@@ -1004,7 +1004,7 @@ def compute_fem_floor_regularization(self, f: ti.i32):
fem_solver = self.fem_solver

for i_c in range(self.n_fem_floor_contact_pairs[None]):
if pairs[i_c].active == 0:
if not pairs[i_c].active:
continue
i_b = pairs[i_c].batch_idx
i_e = pairs[i_c].geom_idx
@@ -1103,7 +1103,7 @@ def compute_contact_gradient_hessian_diag_prec(self, f: ti.i32):
fem_solver = self.fem_solver

for i_c in range(self.n_fem_floor_contact_pairs[None]):
if pairs[i_c].active == 0:
if not pairs[i_c].active:
continue
i_b = pairs[i_c].batch_idx
i_e = pairs[i_c].geom_idx
@@ -1171,7 +1171,7 @@ def compute_contact_energy(self, f: ti.i32):
fem_solver = self.fem_solver

for i_c in range(self.n_fem_floor_contact_pairs[None]):
if pairs[i_c].active == 0:
if not pairs[i_c].active:
continue
i_b = pairs[i_c].batch_idx
if not self.batch_linesearch_active[i_b]:
@@ -1284,7 +1284,7 @@ def compute_Ap(self):

pairs = ti.static(self.fem_floor_contact_pairs)
for i_c in range(self.n_fem_floor_contact_pairs[None]):
if pairs[i_c].active == 0:
if not pairs[i_c].active:
continue
i_b = pairs[i_c].batch_idx
i_e = pairs[i_c].geom_idx
@@ -1444,20 +1444,18 @@ def compute_total_energy(self, f, energy):
for i_c in range(self.n_fem_floor_contact_pairs[None]):
pair = self.fem_floor_contact_pairs[i_c]
i_b = pair.batch_idx
if not self.batch_linesearch_active[i_b] or pair.active == 0:
if not self.batch_linesearch_active[i_b] or not pair.active:
continue
energy[i_b] += pair.energy

@ti.kernel
def init_linesearch(self, f: ti.i32):
fem_solver = self.fem_solver
dt = ti.static(self.sim._substep_dt)
dt2 = dt**2
for i_b in ti.ndrange(self._B):
self.batch_linesearch_active[i_b] = self.batch_active[i_b]
if not self.batch_linesearch_active[i_b]:
continue
self.linesearch_state[i_b].step_size = 1.0 / ti.static(self._linesearch_tau)
self.linesearch_state[i_b].step_size = 1.0 / self._linesearch_tau
self.linesearch_state[i_b].m = 0.0

# x_prev, m
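Note on genesis/engine/coupler.py: the contact pair's `active` member becomes a `ti.u1` boolean (tested with `if not pairs[i_c].active:` instead of `== 0`), and the `ti.static(...)` wrapper around `self._linesearch_tau` is removed. For context, the contact stiffness in `fem_floor_detection` is the harmonic average of the deformable and rigid pressure gradients, which is dominated by the softer side; a plain-Python illustration with made-up values:

```python
def harmonic_average(a, b):
    return a * b / (a + b)

default_deformable_g = 1.0e6  # illustrative deformable pressure gradient
rigid_g = 1.0e4               # illustrative rigid pressure gradient
g = harmonic_average(default_deformable_g, rigid_g)
print(g)  # ~9901 -> close to the smaller (softer) of the two gradients
```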
12 changes: 6 additions & 6 deletions genesis/engine/entities/emitter.py
@@ -101,14 +101,14 @@ def emit(
else:
gs.raise_exception(f"Unsupported nozzle shape: {droplet_shape}.")

direction = np.asarray(direction, dtype=gs.np_float)
if np.linalg.norm(direction) < gs.EPS:
gs.raise_exception("Zero-length direction.")
else:
direction = gu.normalize(direction)

p_size = self._solver.particle_size if p_size is None else p_size

pos = np.array(pos)
if droplet_length is None:
# Use the speed to determine the length of the droplet in the emitting direction
droplet_length = speed * self._solver.substep_dt * self._sim.substeps + self._acc_droplet_len
@@ -148,10 +148,10 @@ def emit(
gs.raise_exception()

positions = gu.transform_by_trans_R(
positions,
pos,
positions.astype(gs.np_float, copy=False),
np.asarray(pos, dtype=gs.np_float),
gu.z_up_to_R(direction) @ gu.axis_angle_to_R(np.array([0.0, 0.0, 1.0], dtype=gs.np_float), theta),
).astype(gs.np_float)
)

positions = np.tile(positions[np.newaxis], (self._sim._B, 1, 1))

@@ -161,8 +161,8 @@
n_particles = positions.shape[1]

# Expand vels with batch dimension
vels = np.tile(direction * speed, (n_particles, 1)).astype(gs.np_float)
vels = np.tile(vels[np.newaxis], (self._sim._B, 1, 1))
vels = speed * direction
vels = np.tile(vels.reshape((1, 1, -1)), (self._sim._B, n_particles, 1))

if n_particles > self._entity.n_particles:
gs.logger.warning(
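Note on genesis/engine/entities/emitter.py: the emit direction is cast to the float dtype and rejected if it is (near) zero-length before normalization, `pos` is passed through `np.asarray` when transforming the droplet, and the velocity batch is built by broadcasting a single `speed * direction` vector instead of tiling an intermediate `(n_particles, 3)` array. A standalone NumPy sketch of those two pieces (shapes and values are made up):

```python
import numpy as np

direction = np.asarray([0.0, 0.0, -2.0], dtype=np.float32)
if np.linalg.norm(direction) < 1e-10:      # stand-in for gs.EPS
    raise ValueError("Zero-length direction.")
direction = direction / np.linalg.norm(direction)

speed, n_batches, n_particles = 3.0, 2, 4
vels = np.tile((speed * direction).reshape((1, 1, -1)), (n_batches, n_particles, 1))
print(vels.shape)  # (2, 4, 3), every row equal to (0, 0, -3)
```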
34 changes: 17 additions & 17 deletions genesis/engine/entities/fem_entity.py
@@ -8,12 +8,12 @@
import genesis.utils.element as eu
import genesis.utils.geom as gu
import genesis.utils.mesh as mu
from genesis.engine.coupler import SAPCoupler
from genesis.engine.states.cache import QueriedStates
from genesis.engine.states.entities import FEMEntityState
from genesis.utils.misc import to_gs_tensor
from genesis.utils.misc import to_gs_tensor, tensor_to_array

from .base_entity import Entity
from genesis.engine.coupler import SAPCoupler


@ti.data_oriented
@@ -62,28 +62,26 @@ def __init__(self, scene, solver, material, morph, surface, idx, v_start=0, el_s
el2tri = np.array(
[ # follow the order with correct normal
[[v[0], v[2], v[1]], [v[1], v[2], v[3]], [v[0], v[1], v[3]], [v[0], v[3], v[2]]] for v in self.elems
]
],
dtype=gs.np_int,
)
all_tri = el2tri.reshape(-1, 3)
all_tri = el2tri.reshape((-1, 3))
all_tri_sorted = np.sort(all_tri, axis=1)
_, unique_idcs, cnt = np.unique(all_tri_sorted, axis=0, return_counts=True, return_index=True)
unique_tri = all_tri[unique_idcs]
surface_tri = unique_tri[cnt == 1]

self._surface_tri_np = surface_tri.astype(gs.np_int)
self._surface_tri_np = surface_tri
self._n_surfaces = len(self._surface_tri_np)

if self._n_surfaces > 0:
self._n_surface_vertices = len(np.unique(self._surface_tri_np))
else:
self._n_surface_vertices = 0

tri2el = np.repeat(np.arange(self.elems.shape[0])[:, None], 4, axis=-1)
all_el = tri2el.reshape(
-1,
)
unique_el = all_el[unique_idcs]
self._surface_el_np = unique_el[cnt == 1].astype(gs.np_int)
tri2el = np.repeat(np.arange(self.elems.shape[0], dtype=gs.np_int)[:, np.newaxis], 4, axis=1)
unique_el = tri2el.flat[unique_idcs]
self._surface_el_np = unique_el[cnt == 1]

if isinstance(self.sim.coupler, SAPCoupler):
self.compute_pressure_field()
@@ -259,7 +257,7 @@ def set_muscle(self, muscle_group=None, muscle_direction=None):
if muscle_direction is not None:
muscle_direction = to_gs_tensor(muscle_direction)
assert muscle_direction.shape == (self.n_elements, 3)
assert torch.allclose(muscle_direction.norm(dim=-1), torch.Tensor([1.0]).to(muscle_direction))
assert ((1.0 - muscle_direction.norm(dim=-1)).abs() < gs.EPS).all()

self.set_muscle_direction(muscle_direction)
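Note: the muscle-direction assertion above now checks unit norm against the library-wide `gs.EPS` tolerance instead of `torch.allclose` with its default tolerances. A standalone PyTorch sketch (the `eps` value is only a stand-in):

```python
import torch

eps = 1e-6  # stand-in for gs.EPS
muscle_direction = torch.tensor([[2.0, 0.0, 0.0],
                                 [0.0, 3.0, 4.0]])
muscle_direction = muscle_direction / muscle_direction.norm(dim=-1, keepdim=True)
assert ((1.0 - muscle_direction.norm(dim=-1)).abs() < eps).all()  # all rows are unit vectors
```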

@@ -308,8 +306,8 @@ def instantiate(self, verts, elems):
Exception
If no vertices are provided.
"""
verts = verts.astype(gs.np_float)
elems = elems.astype(gs.np_int)
verts = verts.astype(gs.np_float, copy=False)
elems = elems.astype(gs.np_int, copy=False)

# rotate
R = gu.quat_to_R(np.array(self.morph.quat, dtype=gs.np_float))
@@ -368,8 +366,8 @@ def _add_to_solver(self, in_backward=False):
)

# Convert to appropriate numpy array types
elems_np = self.elems.astype(gs.np_int)
verts_numpy = self.init_positions.cpu().numpy().astype(gs.np_float)
elems_np = self.elems.astype(gs.np_int, copy=False)
verts_numpy = tensor_to_array(self.init_positions, dtype=gs.np_float)

self._solver._kernel_add_elements(
f=self._sim.cur_substep_local,
@@ -401,8 +399,8 @@ def compute_pressure_field(self):
TODO: Add margin support
Drake's implementation of margin seems buggy.
"""
init_positions = self.init_positions.cpu().numpy()
init_positions = tensor_to_array(self.init_positions)
signed_distance, *_ = igl.signed_distance(init_positions, init_positions, self._surface_tri_np)
signed_distance = signed_distance.astype(gs.np_float, copy=False)

unsigned_distance = np.abs(signed_distance)
max_distance = np.max(unsigned_distance)
if max_distance < gs.EPS:
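Note on genesis/engine/entities/fem_entity.py: the surface extraction builds `el2tri` with `dtype=gs.np_int` up front and derives `surface_el` from `tri2el.flat[unique_idcs]`, while host arrays are converted with `copy=False` and device tensors go through `tensor_to_array`. The standalone NumPy sketch below replays the face-counting idea on a made-up two-tetrahedron mesh: each tet contributes four faces, the shared face appears twice, and faces with count 1 form the surface.

```python
import numpy as np

elems = np.array([[0, 1, 2, 3],
                  [1, 2, 3, 4]], dtype=np.int32)   # two tets sharing face (1, 2, 3)

el2tri = np.array(
    [[[v[0], v[2], v[1]], [v[1], v[2], v[3]], [v[0], v[1], v[3]], [v[0], v[3], v[2]]] for v in elems],
    dtype=np.int32,
)
all_tri = el2tri.reshape((-1, 3))                  # (n_elems * 4, 3) candidate faces
all_tri_sorted = np.sort(all_tri, axis=1)
_, unique_idcs, cnt = np.unique(all_tri_sorted, axis=0, return_counts=True, return_index=True)

surface_tri = all_tri[unique_idcs][cnt == 1]       # boundary faces keep their winding
tri2el = np.repeat(np.arange(elems.shape[0], dtype=np.int32)[:, np.newaxis], 4, axis=1)
surface_el = tri2el.flat[unique_idcs][cnt == 1]    # owning element per surface face -> [0, 0, 0, 1, 1, 1]

print(len(surface_tri), len(all_tri))              # 6 of the 8 faces are on the surface
```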