diff --git a/.gitignore b/.gitignore
index 08d2e8dee5a..4d371632cab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -69,3 +69,9 @@ tests/
 # Docker history
 .isaac-lab-docker-history
+
+# install logs, VS Code settings, and CLI logs
+**/install
+**/.vscode
+**/log
+CLI.txt
diff --git a/A10_Robot/adapters/__init__.py b/A10_Robot/adapters/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/A10_Robot/adapters/joint_mapper.py b/A10_Robot/adapters/joint_mapper.py
new file mode 100644
index 00000000000..d9805420344
--- /dev/null
+++ b/A10_Robot/adapters/joint_mapper.py
@@ -0,0 +1,40 @@
+# adapters/joint_mapper.py
+
+import numpy as np
+
+class JointMapper12to14:
+    """
+    Automatic joint mapper.
+    Input:  pi0.5 (dual-arm Franka, 12 DOF)
+            Franka_L[0..5], Franka_R[0..5]
+    Output: our robot, 14 DOF
+            X7 left arm  Joint1~Joint7
+            X7 right arm Joint8~Joint14
+    """
+
+    def __init__(self):
+        # Default 6-to-7 mapping; the last joint of each arm is padded with 0 or a fixed angle.
+        self.left_mapping = [0, 1, 2, 3, 4, 5]     # -> Joint1..Joint6
+        self.right_mapping = [6, 7, 8, 9, 10, 11]  # -> Joint8..Joint13
+
+        # Value used for the 7th DOF of each arm.
+        self.left_extra = 0.0   # for Joint7
+        self.right_extra = 0.0  # for Joint14
+
+    def map(self, frankas_12dof):
+        """
+        Input: a 12-D Franka joint vector (numpy or torch both work).
+        Returns: a 14-D X7 joint vector.
+        """
+        frankas_12dof = np.asarray(frankas_12dof).reshape(-1)
+
+        # Left X7 arm (Joint1~7)
+        left = [frankas_12dof[i] for i in self.left_mapping]
+        left.append(self.left_extra)
+
+        # Right X7 arm (Joint8~14)
+        right = [frankas_12dof[i] for i in self.right_mapping]
+        right.append(self.right_extra)
+
+        return np.array(left + right)
diff --git a/A10_Robot/assets/.asset_hash b/A10_Robot/assets/.asset_hash
new file mode 100644
index 00000000000..7246fdc7a31
--- /dev/null
+++ b/A10_Robot/assets/.asset_hash
@@ -0,0 +1 @@
+655aab74ee236b7891d8f41242e86987
\ No newline at end of file
diff --git a/A10_Robot/assets/config.yaml b/A10_Robot/assets/config.yaml
new file mode 100644
index 00000000000..c1039f8529f
--- /dev/null
+++ b/A10_Robot/assets/config.yaml
@@ -0,0 +1,23 @@
+asset_path: /home/ircl/Allen/IsaacLab/A10_Robot/assets/dualarm/urdf/dualarm.urdf
+usd_dir: /home/ircl/Allen/IsaacLab/A10_Robot/assets
+usd_file_name: dualarm
+force_usd_conversion: true
+make_instanceable: true
+fix_base: false
+root_link_name: null
+link_density: 0.0
+merge_fixed_joints: false
+convert_mimic_joints_to_normal_joints: false
+joint_drive:
+  drive_type: force
+  target_type: position
+  gains:
+    stiffness: 100.0
+    damping: 1.0
+collider_type: convex_hull
+self_collision: false
+replace_cylinders_with_capsules: false
+collision_from_visuals: false
+##
+# Generated by UrdfConverter on 2025-11-27 at 13:33:57.
+##
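# Editor's note: a minimal usage sketch for JointMapper12to14 above; the input is
# illustrative (zeros), and the import assumes A10_Robot is on sys.path.
import numpy as np
from adapters.joint_mapper import JointMapper12to14

mapper = JointMapper12to14()
q12 = np.zeros(12)     # Franka_L[0..5] + Franka_R[0..5]
q14 = mapper.map(q12)  # X7 Joint1..Joint14; slot 7 of each arm gets left_extra/right_extra
assert q14.shape == (14,)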
diff --git a/A10_Robot/assets/dualarm/meshes/Arm1_ee.STL b/A10_Robot/assets/dualarm/meshes/Arm1_ee.STL
new file mode 100644
index 00000000000..bbcb89072d0
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm1_ee.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm1_link1.STL b/A10_Robot/assets/dualarm/meshes/Arm1_link1.STL
new file mode 100644
index 00000000000..33279906556
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm1_link1.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm1_link2.STL b/A10_Robot/assets/dualarm/meshes/Arm1_link2.STL
new file mode 100644
index 00000000000..50a0664b2eb
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm1_link2.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm1_link3.STL b/A10_Robot/assets/dualarm/meshes/Arm1_link3.STL
new file mode 100644
index 00000000000..3360b8e02e8
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm1_link3.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm1_link4.STL b/A10_Robot/assets/dualarm/meshes/Arm1_link4.STL
new file mode 100644
index 00000000000..9711ff31b59
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm1_link4.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm1_link5.STL b/A10_Robot/assets/dualarm/meshes/Arm1_link5.STL
new file mode 100644
index 00000000000..cb4af85676b
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm1_link5.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm1_link6.STL b/A10_Robot/assets/dualarm/meshes/Arm1_link6.STL
new file mode 100644
index 00000000000..460b4c8123b
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm1_link6.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm2_ee.STL b/A10_Robot/assets/dualarm/meshes/Arm2_ee.STL
new file mode 100644
index 00000000000..ff250d435ba
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm2_ee.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm2_link1.STL b/A10_Robot/assets/dualarm/meshes/Arm2_link1.STL
new file mode 100644
index 00000000000..7deef5f4c0c
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm2_link1.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm2_link2.STL b/A10_Robot/assets/dualarm/meshes/Arm2_link2.STL
new file mode 100644
index 00000000000..c2d9a99dea1
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm2_link2.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm2_link3.STL b/A10_Robot/assets/dualarm/meshes/Arm2_link3.STL
new file mode 100644
index 00000000000..df324cf6337
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm2_link3.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm2_link4.STL b/A10_Robot/assets/dualarm/meshes/Arm2_link4.STL
new file mode 100644
index 00000000000..daa76b6267a
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm2_link4.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm2_link5.STL b/A10_Robot/assets/dualarm/meshes/Arm2_link5.STL
new file mode 100644
index 00000000000..44ca8044ae9
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm2_link5.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/Arm2_link6.STL b/A10_Robot/assets/dualarm/meshes/Arm2_link6.STL
new file mode 100644
index 00000000000..7c9b6ada80b
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/Arm2_link6.STL differ
diff --git a/A10_Robot/assets/dualarm/meshes/base_link.STL b/A10_Robot/assets/dualarm/meshes/base_link.STL
new file mode 100644
index 00000000000..46b3b76999c
Binary files /dev/null and b/A10_Robot/assets/dualarm/meshes/base_link.STL differ
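# Editor's note: a sketch of the URDF -> USD conversion that produced
# assets/config.yaml above; the field names mirror config.yaml, and this assumes it
# is run inside an already-launched SimulationApp (e.g. via AppLauncher).
from isaaclab.sim.converters import UrdfConverter, UrdfConverterCfg

cfg = UrdfConverterCfg(
    asset_path="A10_Robot/assets/dualarm/urdf/dualarm.urdf",
    usd_dir="A10_Robot/assets",
    usd_file_name="dualarm",
    force_usd_conversion=True,
    make_instanceable=True,
    fix_base=False,
)
UrdfConverter(cfg)  # writes A10_Robot/assets/dualarm.usd, as referenced by a10_cfg.py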
diff --git a/A10_Robot/assets/dualarm/urdf/dualarm.urdf b/A10_Robot/assets/dualarm/urdf/dualarm.urdf
new file mode 100644
index 00000000000..dce585454cc
--- /dev/null
+++ b/A10_Robot/assets/dualarm/urdf/dualarm.urdf
@@ -0,0 +1,849 @@
+[849-line URDF body not recoverable here: the XML markup was stripped during extraction, leaving only bare "+" markers.]
diff --git a/A10_Robot/control/__init__.py b/A10_Robot/control/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/A10_Robot/control/controller.py b/A10_Robot/control/controller.py
new file mode 100644
index 00000000000..bacb23bf23c
--- /dev/null
+++ b/A10_Robot/control/controller.py
@@ -0,0 +1,27 @@
+import numpy as np
+import torch
+from .observation import get_observation
+from openpi_client import websocket_client_policy
+
+
+def simple_swing_control(scene, sim_time: float):
+    """Swing both arms with a simple synchronized motion."""
+    action = scene["robot"].data.default_joint_pos.clone()
+
+    # Left arm, Joint1~7
+    action[:, 0:7] = 0.4 * np.sin(2 * np.pi * 0.5 * sim_time)
+
+    # Right arm, Joint8~14
+    action[:, 7:14] = 0.4 * np.sin(2 * np.pi * 0.5 * sim_time)
+
+    scene["robot"].set_joint_position_target(action)
+
+
+def pi_control(scene, sim_time, client: websocket_client_policy.WebsocketClientPolicy):
+    # 1) Build the observation.
+    obs = get_observation(scene)
+
+    # 2) Fetch one action chunk from the server.
+    action_chunk = client.infer(obs)["actions"]  # (10, 14)
+    return action_chunk
+
diff --git a/A10_Robot/control/mapping.py b/A10_Robot/control/mapping.py
new file mode 100644
index 00000000000..d9cf3b1a70a
--- /dev/null
+++ b/A10_Robot/control/mapping.py
@@ -0,0 +1,48 @@
+import numpy as np
+
+class ArmMapper:
+    def __init__(self, arm: str = "left"):
+        assert arm in ("left", "right")
+        self.arm = arm
+
+    def aloha_delta_to_x7(self, q_aloha_curr: np.ndarray, delta_q_aloha: np.ndarray, q_x7_curr: np.ndarray):
+        q_aloha_curr = np.asarray(q_aloha_curr, dtype=float)
+        delta_q_aloha = np.asarray(delta_q_aloha, dtype=float)
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+        assert q_aloha_curr.shape == (7,) and delta_q_aloha.shape == (7,) and q_x7_curr.shape == (7,)
+        # Identity placeholder: treat X7 as same 7-DOF layout per arm
+        q_aloha_next = q_aloha_curr + delta_q_aloha
+        q_x7_next = q_x7_curr.copy()
+        q_x7_next[:] = q_aloha_next[:]
+        return q_x7_next, q_aloha_next
+
+    def x7_state_to_aloha(self, q_x7_curr: np.ndarray, q_aloha_guess: np.ndarray):
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+        q_aloha_guess = np.asarray(q_aloha_guess, dtype=float)
+        assert q_x7_curr.shape == (7,) and q_aloha_guess.shape == (7,)
+        # Identity placeholder
+        q_aloha_curr = q_x7_curr.copy()
+        return q_aloha_curr
+
+
+class BiArmMapper:
+    def __init__(self):
+        self.left = ArmMapper("left")
+        self.right = ArmMapper("right")
+
+    def aloha_delta_to_x7(self, q_aloha_curr: np.ndarray, delta_q_aloha: np.ndarray, q_x7_curr: np.ndarray):
+        q_aloha_curr = np.asarray(q_aloha_curr, dtype=float)
+        delta_q_aloha = np.asarray(delta_q_aloha, dtype=float)
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+        assert q_aloha_curr.shape == (14,) and delta_q_aloha.shape == (14,) and q_x7_curr.shape == (14,)
+        L_x7, L_aloha = self.left.aloha_delta_to_x7(q_aloha_curr[:7], delta_q_aloha[:7], q_x7_curr[:7])
+        R_x7, R_aloha = self.right.aloha_delta_to_x7(q_aloha_curr[7:], delta_q_aloha[7:], q_x7_curr[7:])
+        return np.concatenate([L_x7, R_x7]), np.concatenate([L_aloha, R_aloha])
+
+    def x7_state_to_aloha(self, q_x7_curr: np.ndarray, q_aloha_guess: np.ndarray):
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+        q_aloha_guess = np.asarray(q_aloha_guess, dtype=float)
+        assert q_x7_curr.shape == (14,) and q_aloha_guess.shape == (14,)
+        L = self.left.x7_state_to_aloha(q_x7_curr[:7], q_aloha_guess[:7])
+        R = self.right.x7_state_to_aloha(q_x7_curr[7:], q_aloha_guess[7:])
+        return np.concatenate([L, R])
diff --git a/A10_Robot/control/observation.py b/A10_Robot/control/observation.py
new file mode 100644
index 00000000000..c76fa936bdb
--- /dev/null
+++ b/A10_Robot/control/observation.py
@@ -0,0 +1,84 @@
+import einops
+import numpy as np
+import torch
+from openpi_client import image_tools
+
+
+def get_observation(scene):
+    # ---- Gather the cameras ----
+    cam_high = scene["head_camera"].data.output["rgb"]
+    cam_left_wrist = scene["left_camera"].data.output["rgb"]
+    cam_right_wrist = scene["right_camera"].data.output["rgb"]
+
+    # ---- Tensor -> numpy ----
+    def to_numpy(img):
+        if isinstance(img, torch.Tensor):
+            img = img.detach().cpu().numpy()
+        return img[..., :3]  # keep only the 3 RGB channels
+
+    cam_high = to_numpy(cam_high)
+    cam_left_wrist = to_numpy(cam_left_wrist)
+    cam_right_wrist = to_numpy(cam_right_wrist)
+
+    # ---- There is no bottom camera: fill cam_low with a zero image ----
+    cam_low = np.zeros_like(cam_high)
+
+    # ---- resize + uint8 + CHW ----
+    def process(img):
+        # remove batch dim if present: (1, H, W, C) -> (H, W, C)
+        if img.ndim == 4 and img.shape[0] == 1:
+            img = img[0]
+
+        img = image_tools.convert_to_uint8(
+            image_tools.resize_with_pad(img, 224, 224)
+        )
+        return einops.rearrange(img, "h w c -> c h w")
+
+    obs_images = {
+        "cam_high": process(cam_high),
+        "cam_low": process(cam_low),
+        "cam_left_wrist": process(cam_left_wrist),
+        "cam_right_wrist": process(cam_right_wrist),
+    }
+
+    # Use the current joint state (not the default initial values).
+    qpos = scene["robot"].data.joint_pos
+    if isinstance(qpos, torch.Tensor):
+        qpos = qpos.detach().cpu().numpy()
+
+    # Drop a possible batch dimension: (1, 14) -> (14,)
+    if isinstance(qpos, np.ndarray) and qpos.ndim == 2 and qpos.shape[0] == 1:
+        qpos = qpos[0]
+
+    # Convert 12-D (left arm 6 + right arm 6) to 14-D
+    # (left arm 6 + left gripper + right arm 6 + right gripper); grippers padded with 0.
+    if qpos.shape[0] == 12:
+        left = qpos[0:6]
+        right = qpos[6:12]
+        qpos = np.concatenate([left, np.array([0.0], dtype=left.dtype), right, np.array([0.0], dtype=right.dtype)])
+    elif qpos.shape[0] > 14:
+        qpos = qpos[:14]
+    elif qpos.shape[0] < 14:
+        pad = np.zeros(14 - qpos.shape[0], dtype=qpos.dtype)
+        qpos = np.concatenate([qpos, pad])
+
+    qpos = qpos.astype(np.float32)
+
+    # ---- Return the standard ALOHA format ----
+    return {
+        "state": qpos,
+        "images": obs_images,
+        "prompt": "Grasp the red cube on the table.",
+    }
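# Editor's note: the dict contract that get_observation() above returns, sketched
# with dummy arrays; the real values come from the Isaac Lab scene cameras/robot.
import numpy as np

obs = {
    "state": np.zeros(14, dtype=np.float32),  # qpos, gripper slots padded with 0
    "images": {
        name: np.zeros((3, 224, 224), dtype=np.uint8)  # CHW after resize_with_pad
        for name in ("cam_high", "cam_low", "cam_left_wrist", "cam_right_wrist")
    },
    "prompt": "Grasp the red cube on the table.",
}
assert obs["state"].shape == (14,) and obs["images"]["cam_high"].shape == (3, 224, 224)
diff --git a/A10_Robot/control/robot_reset.py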
b/A10_Robot/control/robot_reset.py
new file mode 100644
index 00000000000..c34f40b8d2e
--- /dev/null
+++ b/A10_Robot/control/robot_reset.py
@@ -0,0 +1,36 @@
+import numpy as np
+
+
+def reset_robot(scene):
+    """Reset the robot to its default state; with a 12-joint (no-gripper) setup, mask the gripper slots.
+
+    Conventions:
+    - With 14 joints, indices 6 and 13 are the left/right grippers and are zeroed on reset.
+    - With 12 joints (no grippers), the default joint positions/velocities are used directly.
+    """
+
+    robot = scene["robot"]
+
+    # Reset the root state to the default, shifted by the environment origins.
+    root = robot.data.default_root_state.clone()
+    root[:, :3] += scene.env_origins
+    robot.write_root_pose_to_sim(root[:, :7])
+    robot.write_root_velocity_to_sim(root[:, 7:])
+
+    # Joint state.
+    joint_pos = robot.data.default_joint_pos.clone()
+    joint_vel = robot.data.default_joint_vel.clone()
+
+    # With 14 joints, mask the gripper channels (indices 6 and 13).
+    if hasattr(joint_pos, "numel") and joint_pos.shape[-1] == 14:
+        joint_pos[..., 6] = 0.0
+        joint_pos[..., 13] = 0.0
+    if hasattr(joint_vel, "numel") and joint_vel.shape[-1] == 14:
+        joint_vel[..., 6] = 0.0
+        joint_vel[..., 13] = 0.0
+
+    # Write back to the simulation.
+    robot.write_joint_state_to_sim(joint_pos, joint_vel)
+
+    # Reset the scene components (sensors, etc.) in sync.
+    scene.reset()
diff --git a/A10_Robot/control/test_openpi.py b/A10_Robot/control/test_openpi.py
new file mode 100644
index 00000000000..d167eb48c23
--- /dev/null
+++ b/A10_Robot/control/test_openpi.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims.
+
+.. code-block:: bash
+
+    # Usage
+    ./isaaclab.sh -p scripts/tutorials/02_scene/create_scene.py --num_envs 32
+
+"""
+
+"""Launch Isaac Sim Simulator first."""
+
+import argparse
+
+from isaaclab.app import AppLauncher
+
+# add argparse arguments
+parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.")
+parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
+# append AppLauncher cli args
+AppLauncher.add_app_launcher_args(parser)
+# parse the arguments
+args_cli = parser.parse_args()
+
+# launch omniverse app
+app_launcher = AppLauncher(args_cli)
+simulation_app = app_launcher.app
+
+"""Rest everything follows."""
+
+import torch
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets import ArticulationCfg, AssetBaseCfg
+from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
+from isaaclab.sim import SimulationContext
+from isaaclab.utils import configclass
+# add the parent directory to the import path
+import sys
+sys.path.append("..")
+from robot.x7_duo_cfg import X7_DUO_CFG
+from scene.x7_scene_cfg import X7SceneCfg
+from controller import pi_control
+from observation import get_observation
+
+from utils.robot_reset import reset_robot
+
+##
+# Pre-defined configs
+##
+from isaaclab_assets import CARTPOLE_CFG  # isort:skip
+
+
+def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
+    """Runs the simulation loop."""
+    # Extract scene entities
+    # note: we only do this here for readability.
+    robot = scene["X7_duo"]
+    # Define simulation stepping
+    sim_dt = sim.get_physics_dt()
+    count = 0
+    # Simulation loop
+    while simulation_app.is_running():
+        # Reset
+        if count % 500 == 0:
+            # reset counter
+            count = 0
+            # reset the scene entities
+            # root state
+            # we offset the root state by the origin since the states are written in simulation world frame
+            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
+            root_state = robot.data.default_root_state.clone()
+            root_state[:, :3] += scene.env_origins
+
+            robot.write_root_pose_to_sim(root_state[:, :7])
+            robot.write_root_velocity_to_sim(root_state[:, 7:])
+            # set joint positions with some noise
+            joint_pos = robot.data.default_joint_pos.clone()
+            print(joint_pos.shape)
+
+            observation = get_observation(scene)
+            print(observation["images"]["cam_high"].shape)
+            print(observation["images"]["cam_left_wrist"].shape)
+            print(observation["state"].shape)
+
+            # robot.write_joint_state_to_sim(joint_pos, joint_vel)
+            # # clear internal buffers
+            # scene.reset()
+            # print("[INFO]: Resetting robot state...")
+        # # Apply random action
+        # # -- generate random joint efforts
+        # efforts = torch.randn_like(robot.data.joint_pos) * 5.0
+        # # -- apply action to the robot
+        # robot.set_joint_effort_target(efforts)
+        # -- write data to sim
+        scene.write_data_to_sim()
+        # Perform step
+        sim.step()
+        # Increment counter
+        count += 1
+        # Update buffers
+        scene.update(sim_dt)
+
+
+def main():
+    """Main function."""
+    # Load kit helper
+    # sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    # sim = SimulationContext(sim_cfg)
+    # # Set main camera
+    # sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
+    # # Design scene
+    # scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    # scene = InteractiveScene(scene_cfg)
+    sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    sim = sim_utils.SimulationContext(sim_cfg)
+    sim.set_camera_view([4.0, 0.0, 2.5], [0.0, 0.0, 0.5])  # type: ignore
+
+    scene_cfg = X7SceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    scene = InteractiveScene(scene_cfg)
+
+    # Play the simulator
+    sim.reset()
+    # Now we are ready!
+ print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/A10_Robot/interfaces/_init_.py b/A10_Robot/interfaces/_init_.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/A10_Robot/interfaces/pi_client.py b/A10_Robot/interfaces/pi_client.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/A10_Robot/load_dualarm.py b/A10_Robot/load_dualarm.py new file mode 100644 index 00000000000..010d401e95d --- /dev/null +++ b/A10_Robot/load_dualarm.py @@ -0,0 +1,106 @@ +import argparse + +from isaaclab.app import AppLauncher +import torch + +# ========= CLI ========= +parser = argparse.ArgumentParser(description="Load A10 dual-arm robot inside IsaacLab.") +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") +AppLauncher.add_app_launcher_args(parser) +args = parser.parse_args() + +# ========= 启动 GUI / SimulationApp ========= +app_launcher = AppLauncher(args) +simulation_app = app_launcher.app + +# ========= 在 app 启动后再导入 ========= +from isaaclab.scene import InteractiveScene +import isaaclab.sim as sim_utils +from scene.a10_scene_cfg import A10SceneCfg +from control.controller import simple_swing_control, pi_control +from openpi_client import websocket_client_policy +from control.robot_reset import reset_robot + +# ========= WebSocket client(必须全局初始化一次)========= +from openpi_client import websocket_client_policy + + + +def run(sim: sim_utils.SimulationContext, scene: InteractiveScene, client: websocket_client_policy.WebsocketClientPolicy): + robot=scene["robot"] + + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + action_chunk = None + + print("[INFO] Running simulator ...") + + robot=scene["robot"] + + while simulation_app.is_running(): + # Reset + if count > 0 and count % 500 == 0: + reset_robot(scene) # 根姿态 + 关节位姿 + scene.reset() + scene.write_data_to_sim() + # 给一小步让状态落地 + sim.step() + scene.update(sim_dt) + # 重新计时,避免首帧就再触发 + count = 0 + sim_time = 0.0 + # 跳过本轮控制,避免立刻被新动作覆盖 + continue + + # 控制器:双臂简单双摆 + action_chunk = pi_control(scene, sim_time, client) + for i in range(10): + a = action_chunk[i] # 14-dim + #print(f"Action step {i}: {a}") + + # -------------- ALOHA 动作结构 -------------- + left_arm = a[0:6] + #left_grip = a[6] + right_arm = a[7:13] + #right_grip = a[13] + # --------------------------------------------- + + # 4)当前机器人状态 + cur_q = scene["robot"].data.joint_pos[0].clone() + #print(f" Current qpos: {cur_q}") + + # 5)更新目标动作:这里用「增量控制」更稳定 + cur_q[0:6] += torch.tensor(left_arm, device=cur_q.device) + cur_q[6:12] += torch.tensor(right_arm, device=cur_q.device) + + + # 6)发给 A10 双臂 + scene["robot"].set_joint_position_target(cur_q.unsqueeze(0)) + + # 7)步进1次仿真 + scene.write_data_to_sim() + sim.step() + scene.update(sim_dt) + #sim.render() + count += 1 + sim_time += sim_dt + + +def main(): + # 初始化仿真 + sim_cfg = sim_utils.SimulationCfg(device=args.device) + sim = sim_utils.SimulationContext(sim_cfg) + sim.set_camera_view([4.0, 0.0, 2.5], [0.0, 0.0, 0.5]) # type: ignore + scene_cfg = A10SceneCfg(num_envs=args.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + sim.reset() + print("[INFO]: Setup complete...") + + client = websocket_client_policy.WebsocketClientPolicy(host="localhost", port=8000) + + run(sim, scene, client) + + +if __name__ == "__main__": + main() + simulation_app.close() \ No newline at end of file diff --git 
a/A10_Robot/packages/openpi-client/pyproject.toml b/A10_Robot/packages/openpi-client/pyproject.toml
new file mode 100644
index 00000000000..fba7b66f62b
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/pyproject.toml
@@ -0,0 +1,23 @@
+[project]
+name = "openpi-client"
+version = "0.1.0"
+requires-python = ">=3.7"
+dependencies = [
+    "dm-tree>=0.1.8",
+    "msgpack>=1.0.5",
+    "numpy>=1.22.4,<2.0.0",
+    "pillow>=9.0.0",
+    "tree>=0.2.4",
+    "websockets>=11.0",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.uv]
+dev-dependencies = ["pytest>=8.3.4"]
+
+[tool.ruff]
+line-length = 120
+target-version = "py37"
diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/__init__.py b/A10_Robot/packages/openpi-client/src/openpi_client/__init__.py
new file mode 100644
index 00000000000..3dc1f76bc69
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/src/openpi_client/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/action_chunk_broker.py b/A10_Robot/packages/openpi-client/src/openpi_client/action_chunk_broker.py
new file mode 100644
index 00000000000..8fa9d83d023
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/src/openpi_client/action_chunk_broker.py
@@ -0,0 +1,50 @@
+from typing import Dict
+
+import numpy as np
+import tree
+from typing_extensions import override
+
+from openpi_client import base_policy as _base_policy
+
+
+class ActionChunkBroker(_base_policy.BasePolicy):
+    """Wraps a policy to return action chunks one-at-a-time.
+
+    Assumes that the first dimension of all action fields is the chunk size.
+
+    A new inference call to the inner policy is only made when the current
+    list of chunks is exhausted.
+    """
+
+    def __init__(self, policy: _base_policy.BasePolicy, action_horizon: int):
+        self._policy = policy
+        self._action_horizon = action_horizon
+        self._cur_step: int = 0
+
+        self._last_results: Dict[str, np.ndarray] | None = None
+
+    @override
+    def infer(self, obs: Dict) -> Dict:  # noqa: UP006
+        if self._last_results is None:
+            self._last_results = self._policy.infer(obs)
+            self._cur_step = 0
+
+        def slicer(x):
+            if isinstance(x, np.ndarray):
+                return x[self._cur_step, ...]
+            else:
+                return x
+
+        results = tree.map_structure(slicer, self._last_results)
+        self._cur_step += 1
+
+        if self._cur_step >= self._action_horizon:
+            self._last_results = None
+
+        return results
+
+    @override
+    def reset(self) -> None:
+        self._policy.reset()
+        self._last_results = None
+        self._cur_step = 0
diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/base_policy.py b/A10_Robot/packages/openpi-client/src/openpi_client/base_policy.py
new file mode 100644
index 00000000000..2f4290651b1
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/src/openpi_client/base_policy.py
@@ -0,0 +1,12 @@
+import abc
+from typing import Dict
+
+
+class BasePolicy(abc.ABC):
+    @abc.abstractmethod
+    def infer(self, obs: Dict) -> Dict:
+        """Infer actions from observations."""
+
+    def reset(self) -> None:
+        """Reset the policy to its initial state."""
+        pass
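# Editor's note: a minimal, runnable sketch of ActionChunkBroker above, using a
# stand-in policy that always returns one (10, 14) chunk of zeros.
import numpy as np
from openpi_client import action_chunk_broker, base_policy


class _ConstantPolicy(base_policy.BasePolicy):
    def infer(self, obs):
        return {"actions": np.zeros((10, 14))}  # one chunk: 10 steps of 14-D actions


broker = action_chunk_broker.ActionChunkBroker(_ConstantPolicy(), action_horizon=10)
step_action = broker.infer({})["actions"]  # step 0 sliced from the cached chunk
assert step_action.shape == (14,)          # a fresh inner infer() happens every 10 calls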
diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/image_tools.py b/A10_Robot/packages/openpi-client/src/openpi_client/image_tools.py
new file mode 100644
index 00000000000..7a971b9d5f6
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/src/openpi_client/image_tools.py
@@ -0,0 +1,58 @@
+import numpy as np
+from PIL import Image
+
+
+def convert_to_uint8(img: np.ndarray) -> np.ndarray:
+    """Converts an image to uint8 if it is a float image.
+
+    This is important for reducing the size of the image when sending it over the network.
+    """
+    if np.issubdtype(img.dtype, np.floating):
+        img = (255 * img).astype(np.uint8)
+    return img
+
+
+def resize_with_pad(images: np.ndarray, height: int, width: int, method=Image.BILINEAR) -> np.ndarray:
+    """Replicates tf.image.resize_with_pad for multiple images using PIL. Resizes a batch of images to a target height.
+
+    Args:
+        images: A batch of images in [..., height, width, channel] format.
+        height: The target height of the image.
+        width: The target width of the image.
+        method: The interpolation method to use. Default is bilinear.
+
+    Returns:
+        The resized images in [..., height, width, channel].
+    """
+    # If the images are already the correct size, return them as is.
+    if images.shape[-3:-1] == (height, width):
+        return images
+
+    original_shape = images.shape
+
+    images = images.reshape(-1, *original_shape[-3:])
+    resized = np.stack([_resize_with_pad_pil(Image.fromarray(im), height, width, method=method) for im in images])
+    return resized.reshape(*original_shape[:-3], *resized.shape[-3:])
+
+
+def _resize_with_pad_pil(image: Image.Image, height: int, width: int, method: int) -> Image.Image:
+    """Replicates tf.image.resize_with_pad for one image using PIL. Resizes an image to a target height and
+    width without distortion by padding with zeros.
+
+    Unlike the jax version, note that PIL uses [width, height, channel] ordering instead of [batch, h, w, c].
+    """
+    cur_width, cur_height = image.size
+    if cur_width == width and cur_height == height:
+        return image  # No need to resize if the image is already the correct size.
+
+    ratio = max(cur_width / width, cur_height / height)
+    resized_height = int(cur_height / ratio)
+    resized_width = int(cur_width / ratio)
+    resized_image = image.resize((resized_width, resized_height), resample=method)
+
+    zero_image = Image.new(resized_image.mode, (width, height), 0)
+    pad_height = max(0, int((height - resized_height) / 2))
+    pad_width = max(0, int((width - resized_width) / 2))
+    zero_image.paste(resized_image, (pad_width, pad_height))
+    assert zero_image.size == (width, height)
+    return zero_image
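# Editor's note: resize_with_pad above keeps the aspect ratio and zero-pads, so a
# 480x640 camera frame becomes 224x224 with black bands; the input is a dummy batch.
import numpy as np
from openpi_client import image_tools

frames = np.random.randint(0, 255, (1, 480, 640, 3), dtype=np.uint8)
out = image_tools.convert_to_uint8(image_tools.resize_with_pad(frames, 224, 224))
assert out.shape == (1, 224, 224, 3) and out.dtype == np.uint8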
diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/image_tools_test.py b/A10_Robot/packages/openpi-client/src/openpi_client/image_tools_test.py
new file mode 100644
index 00000000000..8d4b4b92030
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/src/openpi_client/image_tools_test.py
@@ -0,0 +1,37 @@
+import numpy as np
+
+import openpi_client.image_tools as image_tools
+
+
+def test_resize_with_pad_shapes():
+    # Test case 1: Resize image with larger dimensions
+    images = np.zeros((2, 10, 10, 3), dtype=np.uint8)  # Input images of shape (batch_size, height, width, channels)
+    height = 20
+    width = 20
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (2, height, width, 3)
+    assert np.all(resized_images == 0)
+
+    # Test case 2: Resize image with smaller dimensions
+    images = np.zeros((3, 30, 30, 3), dtype=np.uint8)
+    height = 15
+    width = 15
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (3, height, width, 3)
+    assert np.all(resized_images == 0)
+
+    # Test case 3: Resize image with the same dimensions
+    images = np.zeros((1, 50, 50, 3), dtype=np.uint8)
+    height = 50
+    width = 50
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (1, height, width, 3)
+    assert np.all(resized_images == 0)
+
+    # Test case 4: Resize image with odd-numbered padding
+    images = np.zeros((1, 256, 320, 3), dtype=np.uint8)
+    height = 60
+    width = 80
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (1, height, width, 3)
+    assert np.all(resized_images == 0)
diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy.py b/A10_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy.py
new file mode 100644
index 00000000000..007f755edf5
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy.py
@@ -0,0 +1,57 @@
+"""Adds NumPy array support to msgpack.
+
+msgpack is good for (de)serializing data over a network for multiple reasons:
+- msgpack is secure (as opposed to pickle/dill/etc which allow for arbitrary code execution)
+- msgpack is widely used and has good cross-language support
+- msgpack does not require a schema (as opposed to protobuf/flatbuffers/etc) which is convenient in dynamically typed
+  languages like Python and JavaScript
+- msgpack is fast and efficient (as opposed to readable formats like JSON/YAML/etc); I found that msgpack was ~4x faster
+  than pickle for serializing large arrays using the below strategy
+
+The code below is adapted from https://github.com/lebedov/msgpack-numpy. The reason not to use that library directly is
+that it falls back to pickle for object arrays.
+"""
+
+import functools
+
+import msgpack
+import numpy as np
+
+
+def pack_array(obj):
+    if (isinstance(obj, (np.ndarray, np.generic))) and obj.dtype.kind in ("V", "O", "c"):
+        raise ValueError(f"Unsupported dtype: {obj.dtype}")
+
+    if isinstance(obj, np.ndarray):
+        return {
+            b"__ndarray__": True,
+            b"data": obj.tobytes(),
+            b"dtype": obj.dtype.str,
+            b"shape": obj.shape,
+        }
+
+    if isinstance(obj, np.generic):
+        return {
+            b"__npgeneric__": True,
+            b"data": obj.item(),
+            b"dtype": obj.dtype.str,
+        }
+
+    return obj
+
+
+def unpack_array(obj):
+    if b"__ndarray__" in obj:
+        return np.ndarray(buffer=obj[b"data"], dtype=np.dtype(obj[b"dtype"]), shape=obj[b"shape"])
+
+    if b"__npgeneric__" in obj:
+        return np.dtype(obj[b"dtype"]).type(obj[b"data"])
+
+    return obj
+
+
+Packer = functools.partial(msgpack.Packer, default=pack_array)
+packb = functools.partial(msgpack.packb, default=pack_array)
+
+Unpacker = functools.partial(msgpack.Unpacker, object_hook=unpack_array)
+unpackb = functools.partial(msgpack.unpackb, object_hook=unpack_array)
diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py b/A10_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py
new file mode 100644
index 00000000000..4c774ba468a
--- /dev/null
+++ b/A10_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py
@@ -0,0 +1,45 @@
+import numpy as np
+import pytest
+import tree
+
+from openpi_client import msgpack_numpy
+
+
+def _check(expected, actual):
+    if isinstance(expected, np.ndarray):
+        assert expected.shape == actual.shape
+        assert expected.dtype == actual.dtype
+        assert np.array_equal(expected, actual, equal_nan=expected.dtype.kind == "f")
+    else:
+        assert expected == actual
+
+
+@pytest.mark.parametrize(
+    "data",
+    [
+        1,  # int
+        1.0,  # float
+        "hello",  # string
+        np.bool_(True),  # boolean scalar
+        np.array([1, 2, 3])[0],  # int scalar
+        np.str_("asdf"),  # string scalar
+        [1, 2, 3],  # list
+        {"key": "value"},  # dict
+        {"key": [1, 2, 3]},  # nested dict
+        np.array(1.0),  # 0D array
+        np.array([1, 2, 3], dtype=np.int32),  # 1D integer array
+        np.array(["asdf", "qwer"]),  # string
array + np.array([True, False]), # boolean array + np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32), # 2D float array + np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.int16), # 3D integer array + np.array([np.nan, np.inf, -np.inf]), # special float values + {"arr": np.array([1, 2, 3]), "nested": {"arr": np.array([4, 5, 6])}}, # nested dict with arrays + [np.array([1, 2]), np.array([3, 4])], # list of arrays + np.zeros((3, 4, 5), dtype=np.float32), # 3D zeros + np.ones((2, 3), dtype=np.float64), # 2D ones with double precision + ], +) +def test_pack_unpack(data): + packed = msgpack_numpy.packb(data) + unpacked = msgpack_numpy.unpackb(packed) + tree.map_structure(_check, data, unpacked) diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/runtime/agent.py b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/agent.py new file mode 100644 index 00000000000..a2c3ab66ef6 --- /dev/null +++ b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/agent.py @@ -0,0 +1,17 @@ +import abc + + +class Agent(abc.ABC): + """An Agent is the thing with agency, i.e. the entity that makes decisions. + + Agents receive observations about the state of the world, and return actions + to take in response. + """ + + @abc.abstractmethod + def get_action(self, observation: dict) -> dict: + """Query the agent for the next action.""" + + @abc.abstractmethod + def reset(self) -> None: + """Reset the agent to its initial state.""" diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py new file mode 100644 index 00000000000..65227c44dae --- /dev/null +++ b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py @@ -0,0 +1,18 @@ +from typing_extensions import override + +from openpi_client import base_policy as _base_policy +from openpi_client.runtime import agent as _agent + + +class PolicyAgent(_agent.Agent): + """An agent that uses a policy to determine actions.""" + + def __init__(self, policy: _base_policy.BasePolicy) -> None: + self._policy = policy + + @override + def get_action(self, observation: dict) -> dict: + return self._policy.infer(observation) + + def reset(self) -> None: + self._policy.reset() diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/runtime/environment.py b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/environment.py new file mode 100644 index 00000000000..664ac4678aa --- /dev/null +++ b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/environment.py @@ -0,0 +1,32 @@ +import abc + + +class Environment(abc.ABC): + """An Environment represents the robot and the environment it inhabits. + + The primary contract of environments is that they can be queried for observations + about their state, and have actions applied to them to change that state. + """ + + @abc.abstractmethod + def reset(self) -> None: + """Reset the environment to its initial state. + + This will be called once before starting each episode. + """ + + @abc.abstractmethod + def is_episode_complete(self) -> bool: + """Allow the environment to signal that the episode is complete. + + This will be called after each step. It should return `True` if the episode is + complete (either successfully or unsuccessfully), and `False` otherwise. 
+ """ + + @abc.abstractmethod + def get_observation(self) -> dict: + """Query the environment for the current state.""" + + @abc.abstractmethod + def apply_action(self, action: dict) -> None: + """Take an action in the environment.""" diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/runtime/runtime.py b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/runtime.py new file mode 100644 index 00000000000..9552be091a2 --- /dev/null +++ b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/runtime.py @@ -0,0 +1,92 @@ +import logging +import threading +import time + +from openpi_client.runtime import agent as _agent +from openpi_client.runtime import environment as _environment +from openpi_client.runtime import subscriber as _subscriber + + +class Runtime: + """The core module orchestrating interactions between key components of the system.""" + + def __init__( + self, + environment: _environment.Environment, + agent: _agent.Agent, + subscribers: list[_subscriber.Subscriber], + max_hz: float = 0, + num_episodes: int = 1, + max_episode_steps: int = 0, + ) -> None: + self._environment = environment + self._agent = agent + self._subscribers = subscribers + self._max_hz = max_hz + self._num_episodes = num_episodes + self._max_episode_steps = max_episode_steps + + self._in_episode = False + self._episode_steps = 0 + + def run(self) -> None: + """Runs the runtime loop continuously until stop() is called or the environment is done.""" + for _ in range(self._num_episodes): + self._run_episode() + + # Final reset, this is important for real environments to move the robot to its home position. + self._environment.reset() + + def run_in_new_thread(self) -> threading.Thread: + """Runs the runtime loop in a new thread.""" + thread = threading.Thread(target=self.run) + thread.start() + return thread + + def mark_episode_complete(self) -> None: + """Marks the end of an episode.""" + self._in_episode = False + + def _run_episode(self) -> None: + """Runs a single episode.""" + logging.info("Starting episode...") + self._environment.reset() + self._agent.reset() + for subscriber in self._subscribers: + subscriber.on_episode_start() + + self._in_episode = True + self._episode_steps = 0 + step_time = 1 / self._max_hz if self._max_hz > 0 else 0 + last_step_time = time.time() + + while self._in_episode: + self._step() + self._episode_steps += 1 + + # Sleep to maintain the desired frame rate + now = time.time() + dt = now - last_step_time + if dt < step_time: + time.sleep(step_time - dt) + last_step_time = time.time() + else: + last_step_time = now + + logging.info("Episode completed.") + for subscriber in self._subscribers: + subscriber.on_episode_end() + + def _step(self) -> None: + """A single step of the runtime loop.""" + observation = self._environment.get_observation() + action = self._agent.get_action(observation) + self._environment.apply_action(action) + + for subscriber in self._subscribers: + subscriber.on_step(observation, action) + + if self._environment.is_episode_complete() or ( + self._max_episode_steps > 0 and self._episode_steps >= self._max_episode_steps + ): + self.mark_episode_complete() diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py new file mode 100644 index 00000000000..7c69edaa8e8 --- /dev/null +++ b/A10_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py @@ -0,0 +1,20 @@ +import abc + + +class Subscriber(abc.ABC): + """Subscribes 
to events in the runtime. + + Subscribers can be used to save data, visualize, etc. + """ + + @abc.abstractmethod + def on_episode_start(self) -> None: + """Called when an episode starts.""" + + @abc.abstractmethod + def on_step(self, observation: dict, action: dict) -> None: + """Append a step to the episode.""" + + @abc.abstractmethod + def on_episode_end(self) -> None: + """Called when an episode ends.""" diff --git a/A10_Robot/packages/openpi-client/src/openpi_client/websocket_client_policy.py b/A10_Robot/packages/openpi-client/src/openpi_client/websocket_client_policy.py new file mode 100644 index 00000000000..d6244f0f780 --- /dev/null +++ b/A10_Robot/packages/openpi-client/src/openpi_client/websocket_client_policy.py @@ -0,0 +1,55 @@ +import logging +import time +from typing import Dict, Optional, Tuple + +from typing_extensions import override +import websockets.sync.client + +from openpi_client import base_policy as _base_policy +from openpi_client import msgpack_numpy + + +class WebsocketClientPolicy(_base_policy.BasePolicy): + """Implements the Policy interface by communicating with a server over websocket. + + See WebsocketPolicyServer for a corresponding server implementation. + """ + + def __init__(self, host: str = "0.0.0.0", port: Optional[int] = None, api_key: Optional[str] = None) -> None: + self._uri = f"ws://{host}" + if port is not None: + self._uri += f":{port}" + self._packer = msgpack_numpy.Packer() + self._api_key = api_key + self._ws, self._server_metadata = self._wait_for_server() + + def get_server_metadata(self) -> Dict: + return self._server_metadata + + def _wait_for_server(self) -> Tuple[websockets.sync.client.ClientConnection, Dict]: + logging.info(f"Waiting for server at {self._uri}...") + while True: + try: + headers = {"Authorization": f"Api-Key {self._api_key}"} if self._api_key else None + conn = websockets.sync.client.connect( + self._uri, compression=None, max_size=None, additional_headers=headers + ) + metadata = msgpack_numpy.unpackb(conn.recv()) + return conn, metadata + except ConnectionRefusedError: + logging.info("Still waiting for server...") + time.sleep(5) + + @override + def infer(self, obs: Dict) -> Dict: # noqa: UP006 + data = self._packer.pack(obs) + self._ws.send(data) + response = self._ws.recv() + if isinstance(response, str): + # we're expecting bytes; if the server sends a string, it's an error. 
+            raise RuntimeError(f"Error in inference server:\n{response}")
+        return msgpack_numpy.unpackb(response)
+
+    @override
+    def reset(self) -> None:
+        pass
diff --git a/A10_Robot/policy/__init__.py b/A10_Robot/policy/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/A10_Robot/robot/__init__.py b/A10_Robot/robot/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/A10_Robot/robot/a10_cfg.py b/A10_Robot/robot/a10_cfg.py
new file mode 100644
index 00000000000..07004882fb7
--- /dev/null
+++ b/A10_Robot/robot/a10_cfg.py
@@ -0,0 +1,54 @@
+import math
+import torch
+import isaaclab.sim as sim_utils
+from isaaclab.assets.articulation import ArticulationCfg
+from isaaclab.actuators import ImplicitActuatorCfg
+from isaaclab.utils import math as math_utils
+from isaaclab.scene.interactive_scene_cfg import InteractiveSceneCfg
+from isaaclab.scene import InteractiveScene
+
+# Note: this path is relative to the IsaacLab root directory.
+A10_USD_PATH = "A10_Robot/assets/dualarm.usd"
+
+
+# ============================
+# A10 dual-arm robot configuration (nominally 14 DOF: 7 per arm;
+# only 6 joints per arm are actuated here, the ee joints are commented out)
+# ============================
+
+A10_CFG = ArticulationCfg(
+    spawn=sim_utils.UsdFileCfg(
+        usd_path=A10_USD_PATH,
+        rigid_props=sim_utils.RigidBodyPropertiesCfg(disable_gravity=False),
+        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
+            enabled_self_collisions=True,
+            solver_position_iteration_count=8,
+            solver_velocity_iteration_count=0,
+            fix_root_link=True,
+        ),
+    ),
+
+    # init_state=ArticulationCfg.InitialStateCfg(
+    #     joint_pos={f"Joint{i}": 0.0 for i in range(1, 15)},
+    # ),
+
+    # Implicit joint drives are set per joint name (regex matching is supported);
+    # the left/right arms can be split out later by their specific names.
+    actuators={
+        "A1": ImplicitActuatorCfg(joint_names_expr=["Arm1_joint1"], stiffness=3000.0, damping=80.0),
+        "A2": ImplicitActuatorCfg(joint_names_expr=["Arm1_joint2"], stiffness=3000.0, damping=80.0),
+        "A3": ImplicitActuatorCfg(joint_names_expr=["Arm1_joint3"], stiffness=3000.0, damping=80.0),
+        "A4": ImplicitActuatorCfg(joint_names_expr=["Arm1_joint4"], stiffness=3000.0, damping=80.0),
+        "A5": ImplicitActuatorCfg(joint_names_expr=["Arm1_joint5"], stiffness=3000.0, damping=80.0),
+        "A6": ImplicitActuatorCfg(joint_names_expr=["Arm1_joint6"], stiffness=3000.0, damping=80.0),
+        # "A7": ImplicitActuatorCfg(joint_names_expr=["Arm1_ee"], stiffness=3000.0, damping=80.0),
+
+        # Right arm, Joint8~Joint14
+        "B1": ImplicitActuatorCfg(joint_names_expr=["Arm2_joint1"], stiffness=3000.0, damping=80.0),
+        "B2": ImplicitActuatorCfg(joint_names_expr=["Arm2_joint2"], stiffness=3000.0, damping=80.0),
+        "B3": ImplicitActuatorCfg(joint_names_expr=["Arm2_joint3"], stiffness=3000.0, damping=80.0),
+        "B4": ImplicitActuatorCfg(joint_names_expr=["Arm2_joint4"], stiffness=3000.0, damping=80.0),
+        "B5": ImplicitActuatorCfg(joint_names_expr=["Arm2_joint5"], stiffness=3000.0, damping=80.0),
+        "B6": ImplicitActuatorCfg(joint_names_expr=["Arm2_joint6"], stiffness=3000.0, damping=80.0),
+        # "B7": ImplicitActuatorCfg(joint_names_expr=["Arm2_ee"], stiffness=3000.0, damping=80.0),
+    },
+)
diff --git a/A10_Robot/scene/__init__.py b/A10_Robot/scene/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/A10_Robot/scene/a10_scene_cfg.py b/A10_Robot/scene/a10_scene_cfg.py
new file mode 100644
index 00000000000..ff3748fd5cd
--- /dev/null
+++ b/A10_Robot/scene/a10_scene_cfg.py
@@ -0,0 +1,67 @@
+import math
+import torch
+import numpy as np
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets.articulation import ArticulationCfg
+from isaaclab.assets import AssetBaseCfg
+from isaaclab.actuators import ImplicitActuatorCfg
+from isaaclab.scene import InteractiveSceneCfg
+from isaaclab.utils import math as math_utils
+from isaaclab.utils import configclass
+from robot.a10_cfg import A10_CFG
+from isaaclab.sensors import CameraCfg
+
+
+@configclass
+class A10SceneCfg(InteractiveSceneCfg):
+    ground = AssetBaseCfg(
+        prim_path="/World/defaultGroundPlane",
+        spawn=sim_utils.GroundPlaneCfg(),
+    )
+
+    robot = A10_CFG.replace(  # type: ignore
+        prim_path="{ENV_REGEX_NS}/Robot"
+    )
+
+    dome_light = AssetBaseCfg(
+        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
+    )
+
+    # Note: the prim_path entries below must match the links the cameras are mounted on in your URDF.
+    head_camera = CameraCfg(
+        prim_path="{ENV_REGEX_NS}/Robot/base_link/head_cam",
+        update_period=0.1,
+        height=480,
+        width=640,
+        data_types=["rgb", "distance_to_image_plane"],
+        spawn=sim_utils.PinholeCameraCfg(
+            focal_length=12.4, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
+        ),
+        offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 2.3), rot=(0.98481, 0.17365, 0.0, 0.0), convention="opengl"),
+    )
+
+    right_camera = CameraCfg(
+        prim_path="{ENV_REGEX_NS}/Robot/Arm2_ee/right_cam",
+        update_period=0.1,
+        height=480,
+        width=640,
+        data_types=["rgb", "distance_to_image_plane"],
+        spawn=sim_utils.PinholeCameraCfg(
+            focal_length=18.14756, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
+        ),
+        offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, -1.0, 0.0, 0.0), convention="opengl"),
+    )
+
+    left_camera = CameraCfg(
+        prim_path="{ENV_REGEX_NS}/Robot/Arm1_ee/left_cam",
+        update_period=0.1,
+        height=480,
+        width=640,
+        data_types=["rgb", "distance_to_image_plane"],
+        spawn=sim_utils.PinholeCameraCfg(
+            focal_length=12.4, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
+        ),
+        offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(0.0, -1.0, 0.0, 0.0), convention="opengl"),
+    )
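# Editor's note: the twelve per-joint actuator entries in A10_CFG above could be
# collapsed via the regex matching that joint_names_expr already supports; a sketch
# only, assuming the Arm1_joint*/Arm2_joint* names from the URDF.
from isaaclab.actuators import ImplicitActuatorCfg

arm_actuators = {
    "left_arm": ImplicitActuatorCfg(joint_names_expr=["Arm1_joint[1-6]"], stiffness=3000.0, damping=80.0),
    "right_arm": ImplicitActuatorCfg(joint_names_expr=["Arm2_joint[1-6]"], stiffness=3000.0, damping=80.0),
}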
diff --git a/A10_Robot/test_openpi.py b/A10_Robot/test_openpi.py
new file mode 100644
index 00000000000..a69f379fcd5
--- /dev/null
+++ b/A10_Robot/test_openpi.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims.
+
+.. code-block:: bash
+
+    # Usage
+    ./isaaclab.sh -p scripts/tutorials/02_scene/create_scene.py --num_envs 32
+
+"""
+
+"""Launch Isaac Sim Simulator first."""
+
+import argparse
+
+from isaaclab.app import AppLauncher
+
+# add argparse arguments
+parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.")
+parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.")
+# append AppLauncher cli args
+AppLauncher.add_app_launcher_args(parser)
+# parse the arguments
+args_cli = parser.parse_args()
+
+# launch omniverse app
+app_launcher = AppLauncher(args_cli)
+simulation_app = app_launcher.app
+
+"""Rest everything follows."""
+
+import torch
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets import ArticulationCfg, AssetBaseCfg
+from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
+from isaaclab.sim import SimulationContext
+from isaaclab.utils import configclass
+from robot.x7_duo_cfg import X7_DUO_CFG
+from scene.x7_scene_cfg import X7SceneCfg
+from control.observation import get_observation
+
+from utils.robot_reset import reset_robot
+
+##
+# Pre-defined configs
+##
+from isaaclab_assets import CARTPOLE_CFG  # isort:skip
+
+
+def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
+    """Runs the simulation loop."""
+    # Extract scene entities
+    # note: we only do this here for readability.
+    robot = scene["robot"]
+    # Define simulation stepping
+    sim_dt = sim.get_physics_dt()
+    count = 0
+    # Simulation loop
+    while simulation_app.is_running():
+        # Reset
+        if count % 500 == 0:
+            # reset counter
+            count = 0
+            # reset the scene entities
+            # root state
+            # we offset the root state by the origin since the states are written in simulation world frame
+            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
+            root_state = robot.data.default_root_state.clone()
+            root_state[:, :3] += scene.env_origins
+
+            robot.write_root_pose_to_sim(root_state[:, :7])
+            robot.write_root_velocity_to_sim(root_state[:, 7:])
+            # set joint positions with some noise
+            joint_pos = robot.data.default_joint_pos.clone()
+            print(joint_pos.shape)
+
+            observation = get_observation(scene)
+            # print(observation["observation/image"].shape)
+            # print(observation["observation/wrist_image"].shape)
+            # print(observation["observation/state"].shape)
+
+            # robot.write_joint_state_to_sim(joint_pos, joint_vel)
+            # # clear internal buffers
+            # scene.reset()
+            # print("[INFO]: Resetting robot state...")
+        # # Apply random action
+        # # -- generate random joint efforts
+        # efforts = torch.randn_like(robot.data.joint_pos) * 5.0
+        # # -- apply action to the robot
+        # robot.set_joint_effort_target(efforts)
+        # -- write data to sim
+        scene.write_data_to_sim()
+        # Perform step
+        sim.step()
+        # Increment counter
+        count += 1
+        # Update buffers
+        scene.update(sim_dt)
+
+
+def main():
+    """Main function."""
+    # Load kit helper
+    # sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    # sim = SimulationContext(sim_cfg)
+    # # Set main camera
+    # sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
+    # # Design scene
+    # scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    # scene = InteractiveScene(scene_cfg)
+    sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    sim = sim_utils.SimulationContext(sim_cfg)
+    sim.set_camera_view([4.0, 0.0, 2.5], [0.0, 0.0, 0.5])  # type: ignore
+
+    scene_cfg = X7SceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    scene = InteractiveScene(scene_cfg)
+
+    # Play the simulator
+    sim.reset()
+    # Now we are ready!
+    print("[INFO]: Setup complete...")
+    # Run the simulator
+    run_simulator(sim, scene)
+
+
+if __name__ == "__main__":
+    # run the main function
+    main()
+    # close sim app
+    simulation_app.close()
diff --git a/A10_Robot/utils/__init__.py b/A10_Robot/utils/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/adapters/__init__.py b/X7_Robot/adapters/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/adapters/joint_mapper.py b/X7_Robot/adapters/joint_mapper.py
new file mode 100644
index 00000000000..d9805420344
--- /dev/null
+++ b/X7_Robot/adapters/joint_mapper.py
@@ -0,0 +1,40 @@
+# adapters/joint_mapper.py
+
+import numpy as np
+
+class JointMapper12to14:
+    """
+    Automatic joint mapper.
+    Input:  pi0.5 (dual-arm Franka, 12 DOF)
+            Franka_L[0..5], Franka_R[0..5]
+    Output: our robot, 14 DOF
+            X7 left arm  Joint1~Joint7
+            X7 right arm Joint8~Joint14
+    """
+
+    def __init__(self):
+        # Default 6-to-7 mapping; the last joint of each arm is padded with 0 or a fixed angle.
+        self.left_mapping = [0, 1, 2, 3, 4, 5]     # -> Joint1..Joint6
+        self.right_mapping = [6, 7, 8, 9, 10, 11]  # -> Joint8..Joint13
+
+        # Value used for the 7th DOF of each arm.
+        self.left_extra = 0.0   # for Joint7
+        self.right_extra = 0.0  # for Joint14
+
+    def map(self, frankas_12dof):
+        """
+        Input: a 12-D Franka joint vector (numpy or torch both work).
+        Returns: a 14-D X7 joint vector.
+        """
+        frankas_12dof = np.asarray(frankas_12dof).reshape(-1)
+
+        # Left X7 arm (Joint1~7)
+        left = [frankas_12dof[i] for i in self.left_mapping]
+        left.append(self.left_extra)
+
+        # Right X7 arm (Joint8~14)
+        right = [frankas_12dof[i] for i in self.right_mapping]
+        right.append(self.right_extra)
+
+        return np.array(left + right)
diff --git a/X7_Robot/assets/X7-duo(2)/CMakeLists.txt b/X7_Robot/assets/X7-duo(2)/CMakeLists.txt
new file mode 100644
index 00000000000..351e1c16467
--- /dev/null
+++ b/X7_Robot/assets/X7-duo(2)/CMakeLists.txt
@@ -0,0 +1,17 @@
+cmake_minimum_required(VERSION 3.5)
+project(x7_duo)
+
+# Use ament_cmake instead of catkin
+find_package(ament_cmake REQUIRED)
+
+# If the package only contains launch, urdf, rviz, etc. files (nothing to compile),
+# no add_executable or target_link_libraries is needed.
+
+# Install the resource directories
+install(
+  DIRECTORY launch urdf meshes config
+  DESTINATION share/${PROJECT_NAME}
+)
+
+# ament package declaration
+ament_package()
diff --git a/X7_Robot/assets/X7-duo(2)/config/joint_names_X7-duo(2).yaml b/X7_Robot/assets/X7-duo(2)/config/joint_names_X7-duo(2).yaml
new file mode 100644
index 00000000000..54d4a0a26e1
--- /dev/null
+++ b/X7_Robot/assets/X7-duo(2)/config/joint_names_X7-duo(2).yaml
@@ -0,0 +1 @@
+controller_joint_names: ['', 'Joint1', 'Joint2', 'Joint3', 'Joint4', 'Joint5', 'Joint6', 'Joint7', 'Joint8', 'Joint9', 'Joint10', 'Joint11', 'Joint12', 'Joint13', 'Joint14', ]
executable='joint_state_publisher_gui', + name='joint_state_publisher_gui' + ) + + robot_state_publisher_node = Node( + package='robot_state_publisher', + executable='robot_state_publisher', + name='robot_state_publisher', + parameters=[{'robot_description': robot_desc}] + ) + + rviz2_node = Node( + package='rviz2', + executable='rviz2', + name='rviz2', + output='screen', + # '-d' 指定 rviz 配置文件;移除不受支持的 '--opengl 20' 参数 + arguments=['-d', rviz_file], + # 不再强制 LD_PRELOAD,默认环境通常更兼容。如果确实需要预加载库,请 + # 使用 env=os.environ.copy() 并合并键值后传入 Node 的 env 参数。 + ) + + # --- 创建 LaunchDescription 并添加节点 --- + ld = LaunchDescription() + ld.add_action(joint_state_publisher_node) + ld.add_action(robot_state_publisher_node) + ld.add_action(rviz2_node) + + return ld diff --git a/X7_Robot/assets/X7-duo(2)/launch/gazebo.launch.py b/X7_Robot/assets/X7-duo(2)/launch/gazebo.launch.py new file mode 100644 index 00000000000..41b6a64f5fc --- /dev/null +++ b/X7_Robot/assets/X7-duo(2)/launch/gazebo.launch.py @@ -0,0 +1,51 @@ +from launch import LaunchDescription +from launch.actions import IncludeLaunchDescription, ExecuteProcess +from launch.launch_description_sources import PythonLaunchDescriptionSource +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + +def generate_launch_description(): + # 包路径 + pkg_path = get_package_share_directory('x7_duo') + urdf_file = os.path.join(pkg_path, 'urdf', 'x7_duo.urdf') + + # Gazebo 仿真启动(空世界) + gazebo_pkg_path = get_package_share_directory('gazebo_ros') + gazebo_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join(gazebo_pkg_path, 'launch', 'gazebo.launch.py') + ) + ) + + # 静态坐标变换 base_link <-> base_footprint + static_tf = Node( + package='tf2_ros', + executable='static_transform_publisher', + name='tf_footprint_base', + arguments=['0', '0', '0', '0', '0', '0', 'base_link', 'base_footprint'] + ) + + # 载入机器人模型到Gazebo + spawn_model = Node( + package='gazebo_ros', + executable='spawn_entity.py', + arguments=[ + '-file', urdf_file, + '-entity', 'x7_duo' + ], + output='screen' + ) + + # 模拟 joint calibration (发布一个 /calibrated 话题) + fake_joint_calibration = ExecuteProcess( + cmd=['ros2', 'topic', 'pub', '/calibrated', 'std_msgs/msg/Bool', 'data: true'], + output='screen' + ) + + return LaunchDescription([ + gazebo_launch, + static_tf, + spawn_model, + fake_joint_calibration + ]) diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link1.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link1.STL new file mode 100644 index 00000000000..e52f1b94565 Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link1.STL differ diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link10.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link10.STL new file mode 100644 index 00000000000..fed75ad5cfa Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link10.STL differ diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link11.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link11.STL new file mode 100644 index 00000000000..58e5e1fe6f1 Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link11.STL differ diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link12.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link12.STL new file mode 100644 index 00000000000..4d7cc51ec07 Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link12.STL differ diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link13.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link13.STL new file mode 100644 index 00000000000..0cb3b34057d Binary files /dev/null and 
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link14.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link14.STL
new file mode 100644
index 00000000000..12496d0f16a
--- /dev/null
+++ b/X7_Robot/assets/X7-duo(2)/meshes/Link14.STL
@@ -0,0 +1 @@
+solid Link14
\ No newline at end of file
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link2.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link2.STL
new file mode 100644
index 00000000000..710f64a0543
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link2.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link3.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link3.STL
new file mode 100644
index 00000000000..b6d592602fa
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link3.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link4.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link4.STL
new file mode 100644
index 00000000000..54d3c86d7ac
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link4.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link5.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link5.STL
new file mode 100644
index 00000000000..1c32425d9ba
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link5.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link6.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link6.STL
new file mode 100644
index 00000000000..e0a62e7d438
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link6.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link7.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link7.STL
new file mode 100644
index 00000000000..05e0c47b7ca
--- /dev/null
+++ b/X7_Robot/assets/X7-duo(2)/meshes/Link7.STL
@@ -0,0 +1 @@
+solid Link7
\ No newline at end of file
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link8.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link8.STL
new file mode 100644
index 00000000000..296f573c32f
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link8.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/Link9.STL b/X7_Robot/assets/X7-duo(2)/meshes/Link9.STL
new file mode 100644
index 00000000000..f57b17b5bda
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/Link9.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/meshes/base_link.STL b/X7_Robot/assets/X7-duo(2)/meshes/base_link.STL
new file mode 100644
index 00000000000..0dc1cc4d525
Binary files /dev/null and b/X7_Robot/assets/X7-duo(2)/meshes/base_link.STL differ
diff --git a/X7_Robot/assets/X7-duo(2)/package.xml b/X7_Robot/assets/X7-duo(2)/package.xml
new file mode 100644
index 00000000000..bf00dd959d6
--- /dev/null
+++ b/X7_Robot/assets/X7-duo(2)/package.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0"?>
+<package format="3">
+  <name>x7_duo</name>
+  <version>1.0.0</version>
+
+  <description>
+    URDF Description package for x7_duo.
+
+    This package contains configuration data, 3D models, and launch files
+    for the x7_duo robot.
+  </description>
+
+  <maintainer email="TODO@example.com">TODO</maintainer>
+  <license>BSD</license>
+
+  <buildtool_depend>ament_cmake</buildtool_depend>
+
+  <exec_depend>rclcpp</exec_depend>
+  <exec_depend>robot_state_publisher</exec_depend>
+  <exec_depend>joint_state_publisher_gui</exec_depend>
+  <exec_depend>rviz2</exec_depend>
+  <exec_depend>gazebo_ros</exec_depend>
+
+  <exec_depend>launch</exec_depend>
+  <exec_depend>launch_ros</exec_depend>
+
+  <export>
+    <build_type>ament_cmake</build_type>
+  </export>
+</package>
diff --git a/X7_Robot/assets/X7-duo(2)/rviz/rviz_config.rviz b/X7_Robot/assets/X7-duo(2)/rviz/rviz_config.rviz new file mode 100644 index 00000000000..72f7e815be3 --- /dev/null +++ b/X7_Robot/assets/X7-duo(2)/rviz/rviz_config.rviz @@ -0,0 +1,26 @@ +Panels: + - Class: rviz_common/Displays + Name: Displays + - Class: rviz_common/Views + Name: Views +Visualization Manager: + Class: "" + Displays: + - Class: rviz_default_plugins/RobotModel + Name: RobotModel + Enabled: true + Robot Description: robot_description + - Class: rviz_default_plugins/TF + Name: TF + Enabled: true + Global Options: + Fixed Frame: base_link + Views: + Current: + Class: rviz_default_plugins/Orbit + Distance: 2.0 + Pitch: 0.785 + Yaw: 0.785 +Window Geometry: + Height: 800 + Width: 1200 diff --git a/X7_Robot/assets/X7-duo(2)/urdf/X7_duo.csv b/X7_Robot/assets/X7-duo(2)/urdf/X7_duo.csv new file mode 100644 index 00000000000..8045c593c20 --- /dev/null +++ b/X7_Robot/assets/X7-duo(2)/urdf/X7_duo.csv @@ -0,0 +1,16 @@ +Link Name,Center of Mass X,Center of Mass Y,Center of Mass Z,Center of Mass Roll,Center of Mass Pitch,Center of Mass Yaw,Mass,Moment Ixx,Moment Ixy,Moment Ixz,Moment Iyy,Moment Iyz,Moment Izz,Visual X,Visual Y,Visual Z,Visual Roll,Visual Pitch,Visual Yaw,Mesh Filename,Color Red,Color Green,Color Blue,Color Alpha,Collision X,Collision Y,Collision Z,Collision Roll,Collision Pitch,Collision Yaw,Collision Mesh Filename,Material Name,SW Components,Coordinate System,Axis Name,Joint Name,Joint Type,Joint Origin X,Joint Origin Y,Joint Origin Z,Joint Origin Roll,Joint Origin Pitch,Joint Origin Yaw,Parent,Joint Axis X,Joint Axis Y,Joint Axis Z,Limit Effort,Limit Velocity,Limit Lower,Limit Upper,Calibration rising,Calibration falling,Dynamics Damping,Dynamics Friction,Safety Soft Upper,Safety Soft Lower,Safety K Position,Safety K Velocity +base_link,-0.362230924510549,0.292936995026824,-0.00346514121300403,0,0,0,841.059374418799,46.1239466552901,-0.0323358134675047,0.00321101498386625,40.9088285659479,0.0607333639766209,42.4118451236677,0,0,0,0,0,0,package://X7-duo(2)/meshes/base_link.STL,0.792156862745098,0.819607843137255,0.933333333333333,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/base_link.STL,,Oasis-300EL机器人(带顶板)2023-5-16-1;躯干-1;524SQ111-00-01-A-1;524SQ111-00-02-A-1;ZT21LC-AJ01-2-750-450 最高位置-1;524SQ111-00-03-01-A-视觉工控机支架-1;视觉工控机-1;机器人控制器#controller-1;示教器#HT0804外形模型-1;524SQ111-00-03-02-A-电路安装板-1;型材框架-1;Oasis-300EL-顶板-1;立柱风琴罩-1;524SQ111-00-04-A-头部装配总成-1;X7S-730-5V0.1外观-2/X7S-730-5V0.1-001 圆柱底座-1;X7S-730-5V0.1外观-1/X7S-730-5V0.1-001 圆柱底座-1,coordinate15,,,,0,0,0,0,0,0,,0,0,0,,,,,,,,,,,, +Link1,-1.1683E-05,0.050407,-0.049668,0,0,0,2.092,0.003644,7.1624E-07,9.6043E-07,0.0038992,-0.00080245,0.0029499,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link1.STL,0.89804,0.91765,0.92941,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link1.STL,,X7S-730-5V0.1外观-1/X7S-730-5V0.1-002 连杆1-1,coordinate1,Axis 1,Joint1,revolute,0,0,0.4345,0,0,-1.5251,base_link,0,0,1,190,1,-3.14,3.14,,,,,,,, +Link2,-0.00011256,0.041285,-0.0022275,0,0,0,0.83875,0.0025419,-6.5906E-06,-1.19E-06,0.0012397,8.6273E-05,0.0028224,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link2.STL,0.66667,0.69804,0.76863,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link2.STL,,X7S-730-5V0.1外观-1/X7S-730-5V0.1-003 连杆2-2,coordinate2,Axis 2,Joint2,revolute,0,0,0,1.5708,0,3.1416,Link1,0,0,-1,45,1,-3.14,3.14,,,,,,,, 
+Link3,2.2102E-06,0.05514,-0.031556,0,0,0,1.3226,0.0017673,1.4213E-08,5.1267E-08,0.0019097,-0.0004238,0.001322,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link3.STL,0.66667,0.69804,0.76863,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link3.STL,,X7S-730-5V0.1外观-1/X7S-730-5V0.1-004 连杆3-2,coordinate3,Axis 3,Joint3,revolute,0,0.2525,0,1.5708,0,3.1416,Link2,0,0,-1,45,1,-3.14,3.14,,,,,,,, +Link4,-0.00011256,0.041285,-0.0022275,0,0,0,0.83875,0.0025419,-6.5906E-06,-1.19E-06,0.0012397,8.6273E-05,0.0028224,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link4.STL,0.66667,0.69804,0.76863,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link4.STL,,X7S-730-5V0.1外观-1/X7S-730-5V0.1-003 连杆2-3,coordinate4,Axis 4,Joint4,revolute,0,0,0,1.5708,0,3.1416,Link3,0,0,-1,45,1,-3.14,3.14,,,,,,,, +Link5,2.2093E-06,0.05514,-0.031556,0,0,0,1.3226,0.0017673,1.4176E-08,5.1222E-08,0.0019097,-0.0004238,0.001322,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link5.STL,0.66667,0.69804,0.76863,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link5.STL,,X7S-730-5V0.1外观-1/X7S-730-5V0.1-004 连杆3-3,coordinate5,Axis 5,Joint5,revolute,0,0.2525,0,1.5708,0,3.1416,Link4,0,0,-1,45,1,-3.14,3.14,,,,,,,, +Link6,-0.00011256,0.041285,-0.0022275,0,0,0,0.83875,0.0025419,-6.5906E-06,-1.19E-06,0.0012397,8.6273E-05,0.0028224,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link6.STL,0.66667,0.69804,0.76863,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link6.STL,,X7S-730-5V0.1外观-1/X7S-730-5V0.1-003 连杆2-4,coordinate6,Axis 6,Joint6,revolute,0,0,0,1.5708,0,3.1416,Link5,0,0,-1,45,1,-3.14,3.14,,,,,,,, +Link7,-9.975E-05,-0.0001078,0.12704,0,0,0,1.4581,0.00073198,9.7501E-08,-8.7769E-07,0.00073784,-1.5995E-06,0.0011127,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link7.STL,0.75294,0.75294,0.75294,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link7.STL,,X7S-730-5V0.1外观-1/GJ17IV0.1 17关节I型-1,coordinate7,Axis 7,Joint7,revolute,0,0,0,1.5708,0,3.1416,Link6,0,0,1,45,1,-3.14,3.14,,,,,,,, +Link8,-1.13429334526738E-05,0.0504075893092015,-0.0496678513924143,0,0,0,2.09194606226853,0.00364399275445242,7.06884111213726E-07,9.50748744270944E-07,0.00389918388947431,-0.000802438458553338,0.00294987442129985,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link8.STL,0.898039215686275,0.917647058823529,0.929411764705882,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link8.STL,,X7S-730-5V0.1外观-2/X7S-730-5V0.1-002 连杆1-1,coordinate8,Axis 8,Joint8,revolute,0,0,-0.4385,3.1416,0,1.6376,base_link,0,0,1,190,1,-3.14,3.14,,,,,,,, +Link9,-0.00011256522461553,0.0412854149831438,-0.00222750466992383,0,0,0,0.838751218757198,0.00254192859839716,-6.59062205953789E-06,-1.18998893514286E-06,0.00123965768796234,8.62734670087217E-05,0.00282242746807384,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link9.STL,0.666666666666667,0.698039215686274,0.768627450980392,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link9.STL,,X7S-730-5V0.1外观-2/X7S-730-5V0.1-003 连杆2-2,coordinate9,Axis 9,Joint9,revolute,0,0,0,1.5708,0,3.1416,Link8,0,0,-1,45,1,-3.14,3.14,,,,,,,, +Link10,2.29691597714021E-06,0.0551396722696096,-0.0315561431611154,0,0,0,1.3225700524055,0.00176734181201217,1.63915647814682E-08,5.31028401832918E-08,0.00190967793104957,-0.000423798348497842,0.00132199056092953,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link10.STL,0.666666666666667,0.698039215686274,0.768627450980392,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link10.STL,,X7S-730-5V0.1外观-2/X7S-730-5V0.1-004 连杆3-2,coordinate10,Axis 10,Joint10,revolute,0,0.2525,0,1.5708,0,3.1416,Link9,0,0,-1,45,1,-3.14,3.14,,,,,,,, 
+Link11,-0.00011256522461528,0.041285414983144,-0.0022275046699245,0,0,0,0.838751218757202,0.00254192859839718,-6.590622059533E-06,-1.18998893513755E-06,0.00123965768796235,8.62734670087314E-05,0.00282242746807385,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link11.STL,0.666666666666667,0.698039215686274,0.768627450980392,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link11.STL,,X7S-730-5V0.1外观-2/X7S-730-5V0.1-003 连杆2-3,coordinate11,Axis 11,Joint11,revolute,0,0,0,1.5708,0,3.1416,Link10,0,0,-1,45,1,-3.14,3.14,,,,,,,,
+Link12,2.2985648719942E-06,0.0551396722468784,-0.0315561431430874,0,0,0,1.32257004871916,0.00176734180715147,1.64572833134978E-08,5.31860931740121E-08,0.00190967791616066,-0.00042379834597667,0.00132199054882672,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link12.STL,0.666666666666667,0.698039215686274,0.768627450980392,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link12.STL,,X7S-730-5V0.1外观-2/X7S-730-5V0.1-004 连杆3-3,coordinate12,Axis 12,Joint12,revolute,0,0.2525,0,1.5708,0,3.1416,Link11,0,0,-1,45,1,-3.14,3.14,,,,,,,,
+Link13,-0.000112565224615863,0.0412854149831442,-0.0022275046699245,0,0,0,0.838751218757205,0.00254192859839716,-6.59062205954451E-06,-1.1899889351498E-06,0.00123965768796235,8.62734670087266E-05,0.00282242746807385,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link13.STL,0.666666666666667,0.698039215686274,0.768627450980392,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link13.STL,,X7S-730-5V0.1外观-2/X7S-730-5V0.1-003 连杆2-4,coordinate13,Axis 13,Joint13,revolute,0,0,0,1.5708,0,3.1416,Link12,0,0,-1,45,1,-3.14,3.14,,,,,,,,
+Link14,-9.9749788228215E-05,-0.000107798040414941,0.127038003995972,0,0,0,1.45808397711983,0.000731984271854175,9.75004468583981E-08,-8.77688033919086E-07,0.000737839455210335,-1.59953622040243E-06,0.00111266330572851,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link14.STL,0.752941176470588,0.752941176470588,0.752941176470588,1,0,0,0,0,0,0,package://X7-duo(2)/meshes/Link14.STL,,X7S-730-5V0.1外观-2/GJ17IV0.1 17关节I型-1,coordinate14,Axis 14,Joint14,revolute,0,0,0,1.5708,0,-3.1416,Link13,0,0,1,45,1,-3.14,3.14,,,,,,,,
diff --git a/X7_Robot/assets/X7-duo(2)/urdf/x7_duo.urdf b/X7_Robot/assets/X7-duo(2)/urdf/x7_duo.urdf
new file mode 100644
index 00000000000..d238b226227
--- /dev/null
+++ b/X7_Robot/assets/X7-duo(2)/urdf/x7_duo.urdf
@@ -0,0 +1,859 @@
[x7_duo.urdf: 859 lines of URDF XML; the tag markup was stripped during extraction, so the file body is not recoverable here.]
\ No newline at end of file
diff --git a/X7_Robot/assets/X7/.asset_hash b/X7_Robot/assets/X7/.asset_hash
new file mode 100644
index 00000000000..e5aacf0bf22
--- /dev/null
+++ b/X7_Robot/assets/X7/.asset_hash
@@ -0,0 +1 @@
+8b3e13811bf3fcf4073a73e4bba72e5b
\ No newline at end of file
diff --git a/X7_Robot/assets/X7/config.yaml b/X7_Robot/assets/X7/config.yaml
new file mode 100644
index 00000000000..584c7fe06ea
--- /dev/null
+++ b/X7_Robot/assets/X7/config.yaml
@@ -0,0 +1,23 @@
+asset_path: /home/ircl/Allen/IsaacLab/X7_Robot/assets/X7-duo(2)/urdf/x7_duo.urdf
+usd_dir: /home/ircl/Allen/IsaacLab/X7_Robot/assets/X7
+usd_file_name: x7_duo.usd
+force_usd_conversion: true
+make_instanceable: true
+fix_base: false
+root_link_name: null
+link_density: 0.0
+merge_fixed_joints: false
+convert_mimic_joints_to_normal_joints: false
+joint_drive:
+  drive_type: force
+  target_type: position
+  gains:
+    stiffness: 100.0
+    damping: 1.0
+collider_type: convex_hull
+self_collision: false
+replace_cylinders_with_capsules: false
+collision_from_visuals: false
+##
+# Generated by UrdfConverter on 2025-11-18 at 19:26:46.
+##
diff --git a/X7_Robot/control/__init__.py b/X7_Robot/control/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/control/aloha_kinematics.py b/X7_Robot/control/aloha_kinematics.py
new file mode 100644
index 00000000000..34f074964d5
--- /dev/null
+++ b/X7_Robot/control/aloha_kinematics.py
@@ -0,0 +1,258 @@
+import numpy as np
+
+def rot_x(theta):
+    c, s = np.cos(theta), np.sin(theta)
+    return np.array([
+        [1, 0, 0, 0],
+        [0, c,-s, 0],
+        [0, s, c, 0],
+        [0, 0, 0, 1],
+    ], dtype=float)
+
+def rot_y(theta):
+    c, s = np.cos(theta), np.sin(theta)
+    return np.array([
+        [ c, 0, s, 0],
+        [ 0, 1, 0, 0],
+        [-s, 0, c, 0],
+        [ 0, 0, 0, 1],
+    ], dtype=float)
+
+def rot_z(theta):
+    c, s = np.cos(theta), np.sin(theta)
+    return np.array([
+        [c,-s, 0, 0],
+        [s, c, 0, 0],
+        [0, 0, 1, 0],
+        [0, 0, 0, 1],
+    ], dtype=float)
+
+def trans(x, y, z):
+    T = np.eye(4, dtype=float)
+    T[:3, 3] = [x, y, z]
+    return T
+
+def aloha_left_fk(q):
+    """
+    q: iterable with 6 joint angles [q1..q6] in radians
+    return: 4x4 homogeneous transform base -> gripper_link
+    """
+    q1, q2, q3, q4, q5, q6 = q
+
+    T = np.eye(4, dtype=float)
+
+    # base -> shoulder_link
+    T = T @ trans(0.0, 0.0, 0.079)
+    T = T @ rot_z(q1)
+    # shoulder_link -> upper_arm_link
+    T = T @ trans(0.0, 0.0, 0.04805)
+
+    # upper_arm_link -> upper_forearm_link
+    T = T @ rot_y(q2)
+    T = T @ trans(0.05955, 0.0, 0.3)
+
+    # upper_forearm_link -> lower_forearm_link
+    T = T @ rot_y(q3)
+    T = T @ trans(0.2, 0.0, 0.0)
+
+    # lower_forearm_link -> wrist_link
+    T = T @ rot_x(q4)
+    T = T @ trans(0.1, 0.0, 0.0)
+
+    # wrist_link -> gripper_link
+    T = T @ rot_y(q5)
+    T = T @ trans(0.069744, 0.0, 0.0)
+
+    # gripper_link -> wrist_rotate frame
+    T = T @ rot_x(q6)
+
+    return T
+
+def fk_pos_only(q):
+    """Return only the end-effector position (x, y, z)."""
+    T = aloha_left_fk(q)
+    return T[:3, 3]
+
+def pose_to_vec(T):
+    """
+    Convert a 4x4 homogeneous transform into a 6-vector [p(3), w(3)],
+    where w is an so(3) vector used for the small-angle approximation
+    of the orientation error.
+    """
+    R = T[:3, :3]
+    p = T[:3, 3]
+
+    # Take the skew-symmetric part, approximating the rotation vector
+    w = 0.5 * np.array([
+        R[2, 1] - R[1, 2],
+        R[0, 2] - R[2, 0],
+        R[1, 0] - R[0, 1],
+    ], dtype=float)
+    return np.concatenate([p, w], axis=0)
+
+def aloha_left_ik(
+    T_target,
+    q_init=None,
+    max_iters=100,
+    tol_pos=1e-3,
+    tol_rot=1e-3,
+    damping=1e-3,
+):
+    """
+    Numeric IK: given a target pose T_target (4x4), solve for the ALOHA
+    left-arm joint angles q (6,).
+
+    Returns:
+        q_sol, success
+    """
+    if q_init is None:
+        q = np.zeros(6, dtype=float)
+    else:
+        q = np.array(q_init, dtype=float).copy()
+
+    pose_target = pose_to_vec(T_target)
+    for it in range(max_iters):
+        T_cur = aloha_left_fk(q)
+        pose_cur = pose_to_vec(T_cur)
+
+        err = pose_target - pose_cur
+        pos_err_norm = np.linalg.norm(err[:3])
+        rot_err_norm = np.linalg.norm(err[3:])
+
+        if pos_err_norm < tol_pos and rot_err_norm < tol_rot:
+            return q, True
+
+        # Numeric Jacobian: 6x6 (central differences)
+        J = np.zeros((6, 6), dtype=float)
+        eps = 1e-4
+        for i in range(6):
+            dq = np.zeros(6, dtype=float)
+            dq[i] = eps
+            pose_plus = pose_to_vec(aloha_left_fk(q + dq))
+            pose_minus = pose_to_vec(aloha_left_fk(q - dq))
+            J[:, i] = (pose_plus - pose_minus) / (2 * eps)
+
+        # Damped least squares: dq = J^T (J J^T + λ^2 I)^(-1) err
+        JJt = J @ J.T
+        dq = J.T @ np.linalg.solve(
+            JJt + (damping ** 2) * np.eye(6, dtype=float),
+            err
+        )
+
+        q += dq
+
+        # Optional: simple clamping (based on the range in the XML, roughly [-pi, pi])
+        q = np.clip(q, -np.pi, np.pi)
+
+    return q, False
+
+def aloha_right_fk(q):
+    q1, q2, q3, q4, q5, q6 = q
+
+    # Right-arm base: differs from the left arm
+    T = trans(0.469, 0.5, 0) @ rot_z(np.pi)
+
+    # base -> shoulder_link
+    T = T @ trans(0, 0, 0.079)
+    T = T @ rot_z(q1)
+
+    # shoulder_link -> upper_arm_link
+    T = T @ trans(0, 0, 0.04805)
+    T = T @ rot_y(q2)
+
+    # upper_arm_link -> upper_forearm_link
+    T = T @ trans(0.05955, 0, 0.3)
+    T = T @ rot_y(q3)
+
+    # upper_forearm_link -> lower_forearm_link
+    T = T @ trans(0.2, 0, 0)
+    T = T @ rot_x(q4)
+
+    # lower_forearm_link -> wrist_link
+    T = T @ trans(0.1, 0, 0)
+    T = T @ rot_y(q5)
+
+    # wrist_link -> gripper_link
+    T = T @ trans(0.069744, 0, 0)
+    T = T @ rot_x(q6)
+
+    return T
+
+def fk_right_pos(q):
+    return aloha_right_fk(q)[:3,3]
+
+def aloha_right_ik(T_target, q_init=None):
+    """Placeholder: prefer the class-based ik method below, which uses the right-arm FK."""
+    q, ok = AlohaKinematics(arm="right").ik(T_target, q_init)
+    return q, ok
+
+
+class AlohaKinematics:
+    """
+    Lightweight wrapper exposing fk(q6) and ik(T, q_init6) for the ALOHA left/right arm.
+    - fk(q): returns the 4x4 base-to-end-effector pose
+    - ik(T, q_init): numeric 6-axis IK, returns (q_sol(6,), success)
+    """
+    def __init__(self, arm: str = "left"):
+        assert arm in ("left", "right"), "arm must be 'left' or 'right'"
+        self.arm = arm
+
+    def fk(self, q):
+        q = np.asarray(q, dtype=float).reshape(6)
+        if self.arm == "left":
+            return aloha_left_fk(q)
+        else:
+            return aloha_right_fk(q)
+
+    def ik(self, T_target, q_init=None,
+           max_iters: int = 100, tol_pos: float = 1e-3, tol_rot: float = 1e-3, damping: float = 1e-3):
+        """Numeric 6-axis IK toward a 4x4 target pose.
+        Returns: (q_sol(6,), success)
+        """
+        # Initialization
+        if q_init is None:
+            q = np.zeros(6, dtype=float)
+        else:
+            q = np.array(q_init, dtype=float).reshape(6).copy()
+
+        pose_target = pose_to_vec(T_target)
+        for _ in range(max_iters):
+            # Current pose and error
+            T_cur = self.fk(q)
+            pose_cur = pose_to_vec(T_cur)
+            err = pose_target - pose_cur
+            pos_err_norm = np.linalg.norm(err[:3])
+            rot_err_norm = np.linalg.norm(err[3:])
+            if pos_err_norm < tol_pos and rot_err_norm < tol_rot:
+                return q, True
+
+            # Numeric 6x6 Jacobian (central differences)
+            J = np.zeros((6, 6), dtype=float)
+            eps = 1e-4
+            for i in range(6):
+                dq = np.zeros(6, dtype=float)
+                dq[i] = eps
+                pose_plus = pose_to_vec(self.fk(q + dq))
+                pose_minus = pose_to_vec(self.fk(q - dq))
+                J[:, i] = (pose_plus - pose_minus) / (2 * eps)
+
+            # Damped least squares
+            JJt = J @ J.T
+            dq = J.T @ np.linalg.solve(JJt + (damping ** 2) * np.eye(6), err)
+            q += dq
+            q = np.clip(q, -np.pi, np.pi)
+        return q, False
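A minimal FK→IK roundtrip sketch for the module above (illustrative only, not part of the commit; it assumes the X7_Robot package root is on PYTHONPATH):

    # roundtrip_sketch.py -- hypothetical helper, not in this diff
    import numpy as np
    from control.aloha_kinematics import AlohaKinematics

    kin = AlohaKinematics(arm="left")
    q_ref = np.array([0.3, -0.2, 0.4, 0.1, -0.3, 0.2])  # arbitrary test configuration
    T_ref = kin.fk(q_ref)                                # 4x4 base -> gripper pose

    # The IK is a local damped least-squares iteration, so seed it near the answer.
    q_sol, ok = kin.ik(T_ref, q_init=q_ref + 0.05)
    if ok:
        err = np.linalg.norm(kin.fk(q_sol)[:3, 3] - T_ref[:3, 3])
        print(f"position error after roundtrip: {err:.2e} m")  # < 1e-3 by tol_pos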
diff --git a/X7_Robot/control/controller.py b/X7_Robot/control/controller.py
new file mode 100644
index 00000000000..73d3a5de73e
--- /dev/null
+++ b/X7_Robot/control/controller.py
@@ -0,0 +1,160 @@
+import numpy as np
+import torch
+from control.observation import get_observation, get_observation_with_mapping
+from control.mapping import BiArmMapper
+from openpi_client import websocket_client_policy
+
+
+def simple_swing_control(scene, sim_time: float):
+    """Swing both arms in a simple synchronized motion."""
+    action = scene["robot"].data.default_joint_pos.clone()
+
+    # Left arm Joint1~7
+    action[:, 0:7] = 0.4 * np.sin(2 * np.pi * 0.5 * sim_time)
+
+    # Right arm Joint8~14
+    action[:, 7:14] = 0.4 * np.sin(2 * np.pi * 0.5 * sim_time)
+
+    scene["robot"].set_joint_position_target(action)
+
+
+def pi_control(scene, sim_time, client: websocket_client_policy.WebsocketClientPolicy):
+    # 1) Fetch the observation
+    obs = get_observation(scene)
+
+    # 2) Fetch one chunk from the server
+    action_chunk = client.infer(obs)["actions"]  # (10, 14)
+    return action_chunk
+
+
+def pi_control_mapped(
+    scene,
+    sim_time,
+    client: websocket_client_policy.WebsocketClientPolicy,
+    aloha_state: np.ndarray | None = None,
+):
+    """
+    Get actions (delta ALOHA joints) from the ALOHA VLA model and map them
+    into a 14-dim sequence of absolute joint targets usable by the X7.
+
+    Returns:
+        x7_action_chunk: (T, 14) np.ndarray, absolute X7 joint angles in time order
+        aloha_state_out: (14,) np.ndarray, final state of the mapped "virtual ALOHA"
+                         (to seed the next call)
+    """
+
+    # 1) Build the observation for the ALOHA model (its state is already in ALOHA space)
+    obs = get_observation_with_mapping(scene, aloha_guess=aloha_state)
+
+    # 2) Run inference to get the ALOHA action sequence (T, 14)
+    out = client.infer(obs)
+    actions = out.get("actions", None)
+    if actions is None:
+        raise RuntimeError("Policy result missing 'actions'.")
+    actions = np.asarray(actions)
+    assert actions.ndim == 2 and actions.shape[1] == 14, f"expect (T,14), got {actions.shape}"
+
+    # 3) Current X7 joints (14-dim); strip the batch dim
+    #    (note: this reads default_joint_pos; data.joint_pos would give the live state)
+    q_x7 = scene["robot"].data.default_joint_pos
+    if isinstance(q_x7, torch.Tensor):
+        q_x7 = q_x7.detach().cpu().numpy()
+    if isinstance(q_x7, np.ndarray) and q_x7.ndim == 2 and q_x7.shape[0] == 1:
+        q_x7 = q_x7[0]
+    if q_x7.shape[0] >= 14:
+        q_x7 = q_x7[:14]
+    else:
+        q_x7 = np.concatenate([q_x7, np.zeros(14 - q_x7.shape[0])])
+    q_x7 = q_x7.astype(np.float64, copy=False)
+
+    # 4) Initial ALOHA state: prefer the state in obs (already aligned with the X7), else zeros
+    q_aloha = obs.get("state", None)
+    if q_aloha is None:
+        q_aloha = np.zeros(14, dtype=np.float64)
+    else:
+        q_aloha = np.asarray(q_aloha, dtype=np.float64)
+
+    # 5) Configure the dual-arm mapper (static frame alignment can be set here)
+    bi = BiArmMapper()  # static transforms are defined/configured inside the mapping layer
+
+    # 6) Map each ALOHA delta-action frame into absolute X7 joints
+    T = actions.shape[0]
+    x7_actions = np.zeros((T, 14), dtype=np.float32)
+    q_x7_curr = q_x7.copy()
+    q_aloha_curr = q_aloha.copy()
+
+    for t in range(T):
+        delta_q_aloha = actions[t]
+        q_x7_next, q_aloha_next = bi.aloha_delta_to_x7(
+            q_aloha_curr=q_aloha_curr,
+            delta_q_aloha=delta_q_aloha,
+            q_x7_curr=q_x7_curr,
+        )
+        x7_actions[t] = q_x7_next.astype(np.float32)
+        q_x7_curr = q_x7_next
+        q_aloha_curr = q_aloha_next
+
+    return x7_actions, q_aloha_curr.astype(np.float32)
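+
+# Usage sketch (illustrative, not a fixed API): callers typically execute a
+# chunk open-loop and re-infer once it is exhausted, e.g.
+#
+#     chunk = pi_control(scene, t, client)        # (10, 14)
+#     for a in chunk:
+#         target = torch.as_tensor(a).unsqueeze(0)
+#         scene["robot"].set_joint_position_target(target)
+#         sim.step(); scene.update(sim_dt)
+#
+# load_x7_duo.py later in this diff follows this pattern, and openpi's
+# ActionChunkBroker (vendored further down) wraps the same idea behind the
+# BasePolicy interface.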
+
+
+def pi_control_mapped_stateless(
+    scene,
+    sim_time,
+    client: websocket_client_policy.WebsocketClientPolicy,
+):
+    """
+    Variant that keeps no internal ALOHA state:
+    - derive an ALOHA state from the current X7 joints via IK and use it as the initial value,
+    - query the ALOHA VLA model for delta ALOHA actions,
+    - map them into a sequence of absolute X7 joint actions.
+
+    Returns:
+        x7_action_chunk: (T, 14) np.ndarray
+    """
+    # Current X7 joints (14-dim)
+    q_x7 = scene["robot"].data.default_joint_pos
+    if isinstance(q_x7, torch.Tensor):
+        q_x7 = q_x7.detach().cpu().numpy()
+    if isinstance(q_x7, np.ndarray) and q_x7.ndim == 2 and q_x7.shape[0] == 1:
+        q_x7 = q_x7[0]
+    if q_x7.shape[0] >= 14:
+        q_x7 = q_x7[:14]
+    else:
+        q_x7 = np.concatenate([q_x7, np.zeros(14 - q_x7.shape[0])])
+    q_x7 = q_x7.astype(np.float64, copy=False)
+
+    # Set up the mapper and static transforms, used for the ALOHA initial value and later mapping
+    bi = BiArmMapper()  # static transforms are defined/configured inside the mapping layer
+
+    # Infer an ALOHA initial value from the current X7 state (no history kept)
+    aloha_guess = np.zeros(14, dtype=np.float64)
+    aloha_init = bi.x7_state_to_aloha(q_x7_curr=q_x7, q_aloha_guess=aloha_guess)
+
+    # Build the observation fed to the ALOHA model (using the initial value above)
+    obs = get_observation_with_mapping(scene, aloha_guess=aloha_init)
+
+    # Run inference to get the ALOHA action sequence (T, 14)
+    out = client.infer(obs)
+    actions = out.get("actions", None)
+    if actions is None:
+        raise RuntimeError("Policy result missing 'actions'.")
+    actions = np.asarray(actions)
+    assert actions.ndim == 2 and actions.shape[1] == 14, f"expect (T,14), got {actions.shape}"
+
+    # Map step by step into absolute X7 joint actions
+    T = actions.shape[0]
+    x7_actions = np.zeros((T, 14), dtype=np.float32)
+    q_x7_curr = q_x7.copy()
+    q_aloha_curr = np.asarray(obs["state"], dtype=np.float64)
+
+    for t in range(T):
+        delta_q_aloha = actions[t]
+        q_x7_next, q_aloha_next = bi.aloha_delta_to_x7(
+            q_aloha_curr=q_aloha_curr,
+            delta_q_aloha=delta_q_aloha,
+            q_x7_curr=q_x7_curr,
+        )
+        x7_actions[t] = q_x7_next.astype(np.float32)
+        q_x7_curr = q_x7_next
+        q_aloha_curr = q_aloha_next
+
+    return x7_actions
diff --git a/X7_Robot/control/mapping.py b/X7_Robot/control/mapping.py
new file mode 100644
index 00000000000..06c4d132b11
--- /dev/null
+++ b/X7_Robot/control/mapping.py
@@ -0,0 +1,268 @@
+import numpy as np
+
+from control.aloha_kinematics import AlohaKinematics
+from control.x7_kinematics import X7Kinematics
+
+
+class ArmMapper:
+    """
+    Single-arm mapping:
+    - ALOHA delta-q → X7 absolute joint angles (via ALOHA FK + X7 IK)
+    - X7 joint angles → ALOHA joint angles (via X7 FK + ALOHA IK)
+
+    Conventions (important):
+    - ALOHA: 7 dims per arm; the first 6 are arm joints, the 7th is the gripper.
+      FK/IK use only the first 6.
+    - X7: 7 dims per arm, all of them arm joints (no gripper dim). FK/IK use all 7.
+    """
+
+    def __init__(self, arm: str = "left"):
+        assert arm in ("left", "right")
+        self.arm = arm
+        self.aloha = AlohaKinematics(arm=arm)
+        self.x7 = X7Kinematics(arm=arm)
+        # Static frame transforms (identity by default):
+        # map the ALOHA end-effector pose into the X7 end-effector frame, and the reverse.
+        # Both are (4,4).
+        self.T_aloha_to_x7 = np.eye(4, dtype=float)
+        self.T_x7_to_aloha = np.eye(4, dtype=float)
+
+    def set_static_transform(self, T_aloha_to_x7: 'np.ndarray | None' = None, T_x7_to_aloha: 'np.ndarray | None' = None):
+        """
+        Set the static frame-alignment transforms for this arm.
+        - `T_aloha_to_x7`: 4x4 transform mapping an ALOHA end-effector pose to an X7 end-effector pose.
+        - `T_x7_to_aloha`: 4x4 transform mapping an X7 end-effector pose to an ALOHA end-effector pose.
+        If only one is provided, the other is derived via the matrix inverse (when invertible).
+        """
+        if T_aloha_to_x7 is not None:
+            T_aloha_to_x7 = np.asarray(T_aloha_to_x7, dtype=float)
+            assert T_aloha_to_x7.shape == (4, 4)
+            self.T_aloha_to_x7 = T_aloha_to_x7
+            # If the reverse transform was not given, use the inverse
+            if T_x7_to_aloha is None:
+                # compute the inverse
+                R = T_aloha_to_x7[:3, :3]
+                t = T_aloha_to_x7[:3, 3]
+                R_inv = R.T
+                t_inv = -R_inv @ t
+                T_inv = np.eye(4)
+                T_inv[:3, :3] = R_inv
+                T_inv[:3, 3] = t_inv
+                self.T_x7_to_aloha = T_inv
+        if T_x7_to_aloha is not None:
+            T_x7_to_aloha = np.asarray(T_x7_to_aloha, dtype=float)
+            assert T_x7_to_aloha.shape == (4, 4)
+            self.T_x7_to_aloha = T_x7_to_aloha
+            # If the forward transform was not given, use the inverse
+            if T_aloha_to_x7 is None:
+                R = T_x7_to_aloha[:3, :3]
+                t = T_x7_to_aloha[:3, 3]
+                R_inv = R.T
+                t_inv = -R_inv @ t
+                T_inv = np.eye(4)
+                T_inv[:3, :3] = R_inv
+                T_inv[:3, 3] = t_inv
+                self.T_aloha_to_x7 = T_inv
+
+    # ------------------------------------------------------------------
+    # 1. ALOHA delta-q → X7 absolute joint angles
+    # ------------------------------------------------------------------
+    def aloha_delta_to_x7(
+        self,
+        q_aloha_curr: np.ndarray,
+        delta_q_aloha: np.ndarray,
+        q_x7_curr: np.ndarray,
+    ):
+        """
+        Inputs:
+            q_aloha_curr : (7,) current ALOHA joint angles
+            delta_q_aloha: (7,) ALOHA delta action (policy output)
+            q_x7_curr    : (7,) current X7 joint angles
+
+        Outputs:
+            q_x7_next    : (7,) mapped absolute X7 joint angles
+            q_aloha_next : (7,) updated ALOHA joint angles (the internal "virtual ALOHA" state)
+        """
+        q_aloha_curr = np.asarray(q_aloha_curr, dtype=float)
+        delta_q_aloha = np.asarray(delta_q_aloha, dtype=float)
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+
+        assert q_aloha_curr.shape == (7,)
+        assert delta_q_aloha.shape == (7,)
+        assert q_x7_curr.shape == (7,)
+
+        # 1) Accumulate the action on the "virtual ALOHA" to get the new ALOHA joints
+        q_aloha_next = q_aloha_curr + delta_q_aloha
+
+        # FK/IK only use the first 6 dims (the gripper does not affect the pose)
+        q_aloha_arm = q_aloha_next[:6]
+
+        # 2) ALOHA FK gives the end-effector pose (4x4)
+        T_target_aloha = self.aloha.fk(q_aloha_arm)
+        # Frame alignment: ALOHA → X7
+        T_target = self.T_aloha_to_x7 @ T_target_aloha
+
+        # 3) X7 IK solves for the corresponding X7 joints (the X7 arm is 7-dim)
+        q_x7_arm_init = q_x7_curr[:7]
+        q_x7_arm_next = self.x7.ik(T_target, q_init=q_x7_arm_init)
+        # Accept either (q, success, iters) or (q, success) return shapes
+        if isinstance(q_x7_arm_next, (tuple, list)):
+            q_x7_arm_next = q_x7_arm_next[0]
+
+        # If IK fails, handle it inside ik (return None or raise);
+        # keep it simple here: fall back to the current value on None
+        if q_x7_arm_next is None:
+            q_x7_arm_next = q_x7_arm_init
+
+        q_x7_arm_next = np.asarray(q_x7_arm_next, dtype=float)
+        assert q_x7_arm_next.shape == (7,)
+
+        # 4) The X7 arm is 7-dim with no gripper, so this is the full per-arm output
+        q_x7_next = q_x7_arm_next.copy()
+
+        return q_x7_next, q_aloha_next
+
+    # ------------------------------------------------------------------
+    # 2. X7 joint state → ALOHA joint state
+    # ------------------------------------------------------------------
+    def x7_state_to_aloha(
+        self,
+        q_x7_curr: np.ndarray,
+        q_aloha_guess: np.ndarray,
+    ):
+        """
+        Inputs:
+            q_x7_curr    : (7,) current X7 joint angles
+            q_aloha_guess: (7,) initial ALOHA guess for IK (usually the previous ALOHA state)
+
+        Outputs:
+            q_aloha_curr : (7,) corresponding ALOHA joint angles
+        """
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+        q_aloha_guess = np.asarray(q_aloha_guess, dtype=float)
+
+        assert q_x7_curr.shape == (7,)
+        assert q_aloha_guess.shape == (7,)
+
+        # 1) X7 FK gives the current end-effector pose (the X7 arm is 7-dim)
+        q_x7_arm = q_x7_curr[:7]
+        T_curr_x7 = self.x7.fk(q_x7_arm)
+        # Frame alignment: X7 → ALOHA
+        T_curr = self.T_x7_to_aloha @ T_curr_x7
+
+        # 2) ALOHA IK solves for the corresponding ALOHA joints
+        q_aloha_arm_init = q_aloha_guess[:6]
+        q_aloha_arm = self.aloha.ik(T_curr, q_init=q_aloha_arm_init)
+        if isinstance(q_aloha_arm, (tuple, list)):
+            q_aloha_arm = q_aloha_arm[0]
+
+        if q_aloha_arm is None:
+            q_aloha_arm = q_aloha_arm_init
+
+        q_aloha_arm = np.asarray(q_aloha_arm, dtype=float)
+        assert q_aloha_arm.shape == (6,)
+
+        q_aloha_curr = np.zeros(7, dtype=float)
+        q_aloha_curr[:6] = q_aloha_arm
+        # Gripper: the X7 has no gripper dim, so keep the 7th entry of the guess
+        # (or let a higher-level policy decide)
+        q_aloha_curr[6] = q_aloha_guess[6]
+
+        return q_aloha_curr
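+
+# Example (illustrative): aligning frames before mapping. If the X7 end-effector
+# frame were rotated 180 deg about Z relative to ALOHA's, one direction suffices;
+# the inverse is derived automatically by set_static_transform():
+#
+#     T = np.eye(4)
+#     T[:3, :3] = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
+#     mapper = ArmMapper(arm="left")
+#     mapper.set_static_transform(T_aloha_to_x7=T)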
+
+
+# ======================================================================
+# Dual-arm interface: accepts 14-dim vectors directly.
+# ALOHA: [L0..L6, R0..R6]
+# X7   : [L0..L6, R0..R6]
+# ======================================================================
+
+class BiArmMapper:
+    """
+    Dual-arm mapper:
+    - assumes both ALOHA and X7 are 14-dim: [left 7, right 7]
+    - internally uses one ArmMapper(left) and one ArmMapper(right)
+    """
+
+    def __init__(self):
+        self.left = ArmMapper(arm="left")
+        self.right = ArmMapper(arm="right")
+
+    def aloha_delta_to_x7(
+        self,
+        q_aloha_curr: np.ndarray,
+        delta_q_aloha: np.ndarray,
+        q_x7_curr: np.ndarray,
+    ):
+        """
+        Inputs:
+            q_aloha_curr : (14,)
+            delta_q_aloha: (14,)
+            q_x7_curr    : (14,)
+
+        Outputs:
+            q_x7_next    : (14,)
+            q_aloha_next : (14,)
+        """
+        q_aloha_curr = np.asarray(q_aloha_curr, dtype=float)
+        delta_q_aloha = np.asarray(delta_q_aloha, dtype=float)
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+
+        assert q_aloha_curr.shape == (14,)
+        assert delta_q_aloha.shape == (14,)
+        assert q_x7_curr.shape == (14,)
+
+        # Split into left/right
+        q_a_L = q_aloha_curr[:7]
+        q_a_R = q_aloha_curr[7:]
+        dq_a_L = delta_q_aloha[:7]
+        dq_a_R = delta_q_aloha[7:]
+        q_x7_L = q_x7_curr[:7]
+        q_x7_R = q_x7_curr[7:]
+
+        # Left arm
+        q_x7_L_next, q_a_L_next = self.left.aloha_delta_to_x7(
+            q_aloha_curr=q_a_L,
+            delta_q_aloha=dq_a_L,
+            q_x7_curr=q_x7_L,
+        )
+
+        # Right arm
+        q_x7_R_next, q_a_R_next = self.right.aloha_delta_to_x7(
+            q_aloha_curr=q_a_R,
+            delta_q_aloha=dq_a_R,
+            q_x7_curr=q_x7_R,
+        )
+
+        q_x7_next = np.concatenate([q_x7_L_next, q_x7_R_next], axis=0)
+        q_aloha_next = np.concatenate([q_a_L_next, q_a_R_next], axis=0)
+
+        return q_x7_next, q_aloha_next
+
+    def x7_state_to_aloha(
+        self,
+        q_x7_curr: np.ndarray,
+        q_aloha_guess: np.ndarray,
+    ):
+        """
+        Inputs:
+            q_x7_curr    : (14,)
+            q_aloha_guess: (14,)
+
+        Outputs:
+            q_aloha_curr : (14,)
+        """
+        q_x7_curr = np.asarray(q_x7_curr, dtype=float)
+        q_aloha_guess = np.asarray(q_aloha_guess, dtype=float)
+
+        assert q_x7_curr.shape == (14,)
+        assert q_aloha_guess.shape == (14,)
+
+        q_x7_L = q_x7_curr[:7]
+        q_x7_R = q_x7_curr[7:]
+        q_a_L_guess = q_aloha_guess[:7]
+        q_a_R_guess = q_aloha_guess[7:]
+
+        q_a_L_curr = self.left.x7_state_to_aloha(q_x7_L, q_a_L_guess)
+        q_a_R_curr = self.right.x7_state_to_aloha(q_x7_R, q_a_R_guess)
+
+        q_aloha_curr = np.concatenate([q_a_L_curr, q_a_R_curr], axis=0)
+        return q_aloha_curr
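A compact end-to-end sketch of the dual-arm mapper (illustrative only; it mirrors the test_mapping_sanity.py file further down in this diff and assumes the X7_Robot package root is on PYTHONPATH):

    # biarm_sketch.py -- hypothetical helper, not in this diff
    import numpy as np
    from control.mapping import BiArmMapper

    bi = BiArmMapper()            # identity frame alignment by default
    q_aloha = np.zeros(14)        # [left 7 | right 7], grippers at indices 6 and 13
    dq = 0.02 * np.ones(14)       # one policy step of delta joints
    q_x7 = np.zeros(14)           # current X7 joints, all arm DOFs

    # One step: accumulate the delta on the virtual ALOHA, FK to a pose,
    # IK onto the X7, and keep both states for the next step.
    q_x7_next, q_aloha_next = bi.aloha_delta_to_x7(q_aloha, dq, q_x7)
    print(q_x7_next.shape, q_aloha_next.shape)   # (14,) (14,)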
diff --git a/X7_Robot/control/observation.py b/X7_Robot/control/observation.py
new file mode 100644
index 00000000000..5e5cee8e6fa
--- /dev/null
+++ b/X7_Robot/control/observation.py
@@ -0,0 +1,185 @@
+import numpy as np
+import torch
+import einops
+from openpi_client import image_tools
+
+from control.mapping import BiArmMapper
+
+
+def get_observation(scene):
+    # ---- Gather the cameras ----
+    cam_high = scene["head_camera"].data.output["rgb"]
+    cam_left_wrist = scene["left_camera"].data.output["rgb"]
+    cam_right_wrist = scene["right_camera"].data.output["rgb"]
+
+    # ---- Tensor → numpy ----
+    def to_numpy(img):
+        if isinstance(img, torch.Tensor):
+            img = img.detach().cpu().numpy()
+        return img[..., :3]  # keep only the 3 RGB channels
+
+    cam_high = to_numpy(cam_high)
+    cam_left_wrist = to_numpy(cam_left_wrist)
+    cam_right_wrist = to_numpy(cam_right_wrist)
+
+    # ---- The missing cam_low view → fill with a zero image ----
+    cam_low = np.zeros_like(cam_high)
+
+    # ---- resize + uint8 + CHW ----
+    def process(img):
+        # remove batch dim if present: (1, H, W, C) -> (H, W, C)
+        if img.ndim == 4 and img.shape[0] == 1:
+            img = img[0]
+
+        img = image_tools.convert_to_uint8(
+            image_tools.resize_with_pad(img, 224, 224)
+        )
+        return einops.rearrange(img, "h w c -> c h w")
+
+    obs_images = {
+        "cam_high": process(cam_high),
+        "cam_low": process(cam_low),
+        "cam_left_wrist": process(cam_left_wrist),
+        "cam_right_wrist": process(cam_right_wrist),
+    }
+
+    # ---- Joints: the X7 Duo needs a 14-dim state ----
+    # Presumably 7+7; if not, mapping takes care of it later.
+    # (Note: this reads default_joint_pos; data.joint_pos would give the live state.)
+    qpos = scene["robot"].data.default_joint_pos
+    if isinstance(qpos, torch.Tensor):
+        qpos = qpos.detach().cpu().numpy()
+
+    # strip a possible batch dim: (1, 14) -> (14,)
+    if isinstance(qpos, np.ndarray) and qpos.ndim == 2 and qpos.shape[0] == 1:
+        qpos = qpos[0]
+
+    # ALOHA expects 14 dims; truncate or pad for now (mapping refines this later)
+    if qpos.shape[0] >= 14:
+        qpos = qpos[:14]
+    else:
+        pad = np.zeros(14 - qpos.shape[0])
+        qpos = np.concatenate([qpos, pad])
+
+    qpos = qpos.astype(np.float32)
+
+    # ---- Return the standard ALOHA format ----
+    return {
+        "state": qpos,
+        "images": obs_images,
+        "prompt": "Grasp the red cube on the table.",
+    }
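+
+# For reference, get_observation returns:
+#   {"state":  float32 (14,),
+#    "images": {"cam_high" | "cam_low" | "cam_left_wrist" | "cam_right_wrist":
+#               uint8 (3, 224, 224)},
+#    "prompt": str}
+# i.e. the input layout the ALOHA-style openpi policy server is assumed to expect here.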
+
+
+def get_observation_mapped(scene, aloha_guess: np.ndarray | None = None, static_transforms: dict | None = None):
+    """
+    Build the observation and map the X7 joint state into an ALOHA joint state (14-dim),
+    returning the standard ALOHA format:
+        {"state": (14,), "images": {CHW uint8}, "prompt": str}
+
+    Args:
+    - scene: simulation scene/device handle (must contain the robot and three cameras: head/left/right)
+    - aloha_guess: (14,) optional initial guess for the ALOHA IK
+      (ideally the ALOHA state produced by the previous mapping).
+    - static_transforms: optional dict of static frame-alignment matrices, e.g.:
+        {
+            "left":  {"T_aloha_to_x7": 4x4, "T_x7_to_aloha": 4x4},
+            "right": {"T_aloha_to_x7": 4x4, "T_x7_to_aloha": 4x4},
+        }
+      If only one direction is provided, the other uses the matrix inverse.
+    """
+
+    # ---- Camera processing (identical to get_observation) ----
+    cam_high = scene["head_camera"].data.output["rgb"]
+    cam_left_wrist = scene["left_camera"].data.output["rgb"]
+    cam_right_wrist = scene["right_camera"].data.output["rgb"]
+
+    def to_numpy(img):
+        if isinstance(img, torch.Tensor):
+            img = img.detach().cpu().numpy()
+        return img[..., :3]
+
+    cam_high = to_numpy(cam_high)
+    cam_left_wrist = to_numpy(cam_left_wrist)
+    cam_right_wrist = to_numpy(cam_right_wrist)
+
+    cam_low = np.zeros_like(cam_high)
+
+    def process(img):
+        if img.ndim == 4 and img.shape[0] == 1:
+            img = img[0]
+        img = image_tools.convert_to_uint8(
+            image_tools.resize_with_pad(img, 224, 224)
+        )
+        return einops.rearrange(img, "h w c -> c h w")
+
+    obs_images = {
+        "cam_high": process(cam_high),
+        "cam_low": process(cam_low),
+        "cam_left_wrist": process(cam_left_wrist),
+        "cam_right_wrist": process(cam_right_wrist),
+    }
+
+    # ---- Read the current X7 joints (expect 14 dims: left 7 + right 7) ----
+    q_x7 = scene["robot"].data.default_joint_pos
+    if isinstance(q_x7, torch.Tensor):
+        q_x7 = q_x7.detach().cpu().numpy()
+    if isinstance(q_x7, np.ndarray) and q_x7.ndim == 2 and q_x7.shape[0] == 1:
+        q_x7 = q_x7[0]
+
+    # normalize to 14 dims
+    if q_x7.shape[0] >= 14:
+        q_x7 = q_x7[:14]
+    else:
+        q_x7 = np.concatenate([q_x7, np.zeros(14 - q_x7.shape[0])])
+    q_x7 = q_x7.astype(np.float64, copy=False)
+
+    # ---- Initial guess for the ALOHA IK ----
+    if aloha_guess is None:
+        aloha_guess = np.zeros(14, dtype=np.float64)
+    else:
+        aloha_guess = np.asarray(aloha_guess, dtype=np.float64)
+        if aloha_guess.shape != (14,):
+            aloha_guess = np.zeros(14, dtype=np.float64)
+
+    # ---- Mapping: X7 → ALOHA ----
+    bi = BiArmMapper()
+    # static frame transforms (if provided)
+    if static_transforms is not None:
+        left_cfg = static_transforms.get("left") if isinstance(static_transforms, dict) else None
+        right_cfg = static_transforms.get("right") if isinstance(static_transforms, dict) else None
+        if left_cfg:
+            bi.left.set_static_transform(
+                T_aloha_to_x7=left_cfg.get("T_aloha_to_x7"),
+                T_x7_to_aloha=left_cfg.get("T_x7_to_aloha"),
+            )
+        if right_cfg:
+            bi.right.set_static_transform(
+                T_aloha_to_x7=right_cfg.get("T_aloha_to_x7"),
+                T_x7_to_aloha=right_cfg.get("T_x7_to_aloha"),
+            )
+
+    q_aloha = bi.x7_state_to_aloha(q_x7_curr=q_x7, q_aloha_guess=aloha_guess)
+    q_aloha = np.asarray(q_aloha, dtype=np.float32)
+
+    # ---- Return the standard ALOHA format ----
+    return {
+        "state": q_aloha,
+        "images": obs_images,
+        "prompt": "Grasp the red cube on the table.",
+    }
+
+
+# Short alias for upstream callers
+def get_observation_with_mapping(scene, aloha_guess: np.ndarray | None = None, static_transforms: dict | None = None):
+    return get_observation_mapped(scene, aloha_guess=aloha_guess, static_transforms=static_transforms)
diff --git a/X7_Robot/control/robot_reset.py b/X7_Robot/control/robot_reset.py
new file mode 100644
index 00000000000..62a2d8e5b8e
--- /dev/null
+++ b/X7_Robot/control/robot_reset.py
@@ -0,0 +1,34 @@
+import numpy as np
+
+
+def reset_robot(scene):
+    """Reset the dual-arm robot to its default state.
+
+    Convention: if the model is 14-dim (two 7-axis arms incl. grippers), zero
+    the gripper indices 6 and 13; if it is 12-dim (two 6-axis arms without
+    grippers), simply restore the default joint state.
+    """
+
+    robot = scene["robot"]
+
+    # Reset the root state (offset by the environment origins)
+    root = robot.data.default_root_state.clone()
+    root[:, :3] += scene.env_origins
+    robot.write_root_pose_to_sim(root[:, :7])
+    robot.write_root_velocity_to_sim(root[:, 7:])
+
+    # Reset the joint state
+    joint_pos = robot.data.default_joint_pos.clone()
+    joint_vel = robot.data.default_joint_vel.clone()
+
+    # If 14-dim, zero the gripper entries
+    if hasattr(joint_pos, "shape") and joint_pos.shape[-1] == 14:
+        joint_pos[..., 6] = 0.0
+        joint_pos[..., 13] = 0.0
+    if hasattr(joint_vel, "shape") and joint_vel.shape[-1] == 14:
+        joint_vel[..., 6] = 0.0
+        joint_vel[..., 13] = 0.0
+
+    robot.write_joint_state_to_sim(joint_pos, joint_vel)
+
+    # Reset scene components in sync (sensors, etc.)
+    scene.reset()
diff --git a/X7_Robot/control/test_mapping_sanity.py b/X7_Robot/control/test_mapping_sanity.py
new file mode 100644
index 00000000000..a48a523456e
--- /dev/null
+++ b/X7_Robot/control/test_mapping_sanity.py
@@ -0,0 +1,60 @@
+import numpy as np
+from mapping import ArmMapper, BiArmMapper
+
+
+def random_T():
+    # Placeholder "random" transform: currently just the identity; a stronger
+    # test could perturb R and t slightly around it.
+    R = np.eye(3)
+    t = np.zeros(3)
+    T = np.eye(4)
+    T[:3, :3] = R
+    T[:3, 3] = t
+    return T
+
+
+def test_single_arm(arm="left"):
+    mapper = ArmMapper(arm=arm)
+    # Example static transform: rotate 180deg around Z between frames
+    T = np.eye(4)
+    T[:3, :3] = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
+    mapper.set_static_transform(T_aloha_to_x7=T)
+
+    q_aloha_curr = np.zeros(7)
+    delta_q_aloha = 0.05 * np.ones(7)
+    q_x7_curr = np.zeros(7)
+
+    q_x7_next, q_aloha_next = mapper.aloha_delta_to_x7(q_aloha_curr, delta_q_aloha, q_x7_curr)
+    assert q_x7_next.shape == (7,)
+    assert q_aloha_next.shape == (7,)
+
+    # Roundtrip X7 -> ALOHA
+    q_aloha_guess = q_aloha_curr
+    q_aloha_curr2 = mapper.x7_state_to_aloha(q_x7_next, q_aloha_guess)
+    assert q_aloha_curr2.shape == (7,)
+
+
+def test_bi_arm():
+    bi = BiArmMapper()
+    # Set different transforms per arm if needed
+    T = np.eye(4)
+    bi.left.set_static_transform(T_aloha_to_x7=T)
+    bi.right.set_static_transform(T_aloha_to_x7=T)
+
+    q_aloha_curr = np.zeros(14)
+    delta_q_aloha = 0.03 * np.ones(14)
+    q_x7_curr = np.zeros(14)
+
+    q_x7_next, q_aloha_next = bi.aloha_delta_to_x7(q_aloha_curr, delta_q_aloha, q_x7_curr)
+    assert q_x7_next.shape == (14,)
+    assert q_aloha_next.shape == (14,)
+
+    q_aloha_guess = q_aloha_curr
+    q_aloha_curr2 = bi.x7_state_to_aloha(q_x7_next, q_aloha_guess)
+    assert q_aloha_curr2.shape == (14,)
+
+
+if __name__ == "__main__":
+    test_single_arm("left")
+    test_single_arm("right")
+    test_bi_arm()
+    print("Mapping sanity tests passed.")
diff --git a/X7_Robot/control/test_openpi.py b/X7_Robot/control/test_openpi.py
new file mode 100644
index 00000000000..d167eb48c23
--- /dev/null
+++ b/X7_Robot/control/test_openpi.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims.
+
+.. code-block:: bash
+
+    # Usage
+    ./isaaclab.sh -p scripts/tutorials/02_scene/create_scene.py --num_envs 32
+
+"""
+
+"""Launch Isaac Sim Simulator first."""
+
+import argparse
+
+from isaaclab.app import AppLauncher
+
+# add argparse arguments
+parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.")
+parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.")
+# append AppLauncher cli args
+AppLauncher.add_app_launcher_args(parser)
+# parse the arguments
+args_cli = parser.parse_args()
+
+# launch omniverse app
+app_launcher = AppLauncher(args_cli)
+simulation_app = app_launcher.app
+
+"""Rest everything follows."""
+
+import torch
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets import ArticulationCfg, AssetBaseCfg
+from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
+from isaaclab.sim import SimulationContext
+from isaaclab.utils import configclass
+# add the parent directory to sys.path
+import sys
+sys.path.append("..")
+from robot.x7_duo_cfg import X7_DUO_CFG
+from scene.x7_scene_cfg import X7SceneCfg
+from controller import pi_control
+from observation import get_observation
+
+from utils.robot_reset import reset_robot
+
+##
+# Pre-defined configs
+##
+from isaaclab_assets import CARTPOLE_CFG  # isort:skip
+
+
+def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
+    """Runs the simulation loop."""
+    # Extract scene entities
+    # note: we only do this here for readability.
+    robot = scene["X7_duo"]
+    # Define simulation stepping
+    sim_dt = sim.get_physics_dt()
+    count = 0
+    # Simulation loop
+    while simulation_app.is_running():
+        # Reset
+        if count % 500 == 0:
+            # reset counter
+            count = 0
+            # reset the scene entities
+            # root state
+            # we offset the root state by the origin since the states are written in simulation world frame
+            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
+            root_state = robot.data.default_root_state.clone()
+            root_state[:, :3] += scene.env_origins
+
+            robot.write_root_pose_to_sim(root_state[:, :7])
+            robot.write_root_velocity_to_sim(root_state[:, 7:])
+            # set joint positions with some noise
+            joint_pos = robot.data.default_joint_pos.clone()
+            print(joint_pos.shape)
+
+            observation = get_observation(scene)
+            # get_observation returns {"state", "images", "prompt"}, so index it accordingly
+            print(observation["images"]["cam_high"].shape)
+            print(observation["images"]["cam_left_wrist"].shape)
+            print(observation["state"].shape)
+
+            # robot.write_joint_state_to_sim(joint_pos, joint_vel)
+            # # clear internal buffers
+            # scene.reset()
+            # print("[INFO]: Resetting robot state...")
+        # # Apply random action
+        # # -- generate random joint efforts
+        # efforts = torch.randn_like(robot.data.joint_pos) * 5.0
+        # # -- apply action to the robot
+        # robot.set_joint_effort_target(efforts)
+        # -- write data to sim
+        scene.write_data_to_sim()
+        # Perform step
+        sim.step()
+        # Increment counter
+        count += 1
+        # Update buffers
+        scene.update(sim_dt)
+
+
+def main():
+    """Main function."""
+    # Load kit helper
+    # sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    # sim = SimulationContext(sim_cfg)
+    # # Set main camera
+    # sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
+    # # Design scene
+    # scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    # scene = InteractiveScene(scene_cfg)
+    sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    sim = sim_utils.SimulationContext(sim_cfg)
+    sim.set_camera_view([4.0, 0.0, 2.5], [0.0, 0.0, 0.5])  # type: ignore
+
+    scene_cfg = X7SceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    scene = InteractiveScene(scene_cfg)
+
+    # Play the simulator
+    sim.reset()
+    # Now we are ready!
+    print("[INFO]: Setup complete...")
+    # Run the simulator
+    run_simulator(sim, scene)
+
+
+if __name__ == "__main__":
+    # run the main function
+    main()
+    # close sim app
+    simulation_app.close()
diff --git a/X7_Robot/control/x7_kinematics.py b/X7_Robot/control/x7_kinematics.py
new file mode 100644
index 00000000000..6b1994d4601
--- /dev/null
+++ b/X7_Robot/control/x7_kinematics.py
@@ -0,0 +1,358 @@
+# x7_kinematics.py
+#
+# Kinematics for the X7-duo(2) robot described by the given URDF.
+# - Left arm  : Joint1 .. Joint7,  end-effector = Link7 origin
+# - Right arm : Joint8 .. Joint14, end-effector = Link14 origin
+#
+# Provides:
+#   fk_left(q)  -> (pos[3], R[3,3], T[4,4])
+#   fk_right(q) -> (pos[3], R[3,3], T[4,4])
+#   ik_left(target_pos, target_R, q_init, ...)
+#   ik_right(...)
+#
+# The orientation error in IK is expressed as a 3D rotation vector
+# (axis * angle), so the solver does full 6D control.
+
+import numpy as np
+
+
+# ---------- basic rotation / transform helpers ----------
+
+def rot_from_axis_angle(axis, angle):
+    """Rodrigues formula, axis is 3D unit vector, angle in rad."""
+    axis = np.asarray(axis, dtype=float)
+    axis = axis / (np.linalg.norm(axis) + 1e-12)
+    x, y, z = axis
+    c = np.cos(angle)
+    s = np.sin(angle)
+    C = 1.0 - c
+    R = np.array([
+        [c + x * x * C,     x * y * C - z * s, x * z * C + y * s],
+        [y * x * C + z * s, c + y * y * C,     y * z * C - x * s],
+        [z * x * C - y * s, z * y * C + x * s, c + z * z * C]
+    ])
+    return R
+
+
+def rot_x(a):
+    ca, sa = np.cos(a), np.sin(a)
+    return np.array([
+        [1, 0, 0],
+        [0, ca, -sa],
+        [0, sa, ca],
+    ])
+
+
+def rot_y(a):
+    ca, sa = np.cos(a), np.sin(a)
+    return np.array([
+        [ca, 0, sa],
+        [0, 1, 0],
+        [-sa, 0, ca],
+    ])
+
+
+def rot_z(a):
+    ca, sa = np.cos(a), np.sin(a)
+    return np.array([
+        [ca, -sa, 0],
+        [sa, ca, 0],
+        [0, 0, 1],
+    ])
+
+
+def rpy_to_rot(rpy):
+    """URDF-style RPY: roll (X) -> pitch (Y) -> yaw (Z)."""
+    r, p, y = rpy
+    return rot_z(y) @ rot_y(p) @ rot_x(r)
+
+
+def make_transform(R, t):
+    T = np.eye(4)
+    T[:3, :3] = R
+    T[:3, 3] = t
+    return T
+
+
+def axis_angle_from_rot(R):
+    """R ∈ SO(3) -> 3D axis-angle vector (axis * angle)."""
+    R = np.asarray(R, dtype=float)
+    trace = np.clip(np.trace(R), -1.0, 3.0)
+    cos_theta = (trace - 1.0) / 2.0
+    cos_theta = np.clip(cos_theta, -1.0, 1.0)
+    theta = np.arccos(cos_theta)
+
+    if theta < 1e-8:
+        # very small angle -> approximate as zero
+        return np.zeros(3)
+
+    wx = R[2, 1] - R[1, 2]
+    wy = R[0, 2] - R[2, 0]
+    wz = R[1, 0] - R[0, 1]
+    v = np.array([wx, wy, wz])
+    v = v / (2.0 * np.sin(theta))
+    return v * theta
+
+
+# ---------- X7 joint definitions (from URDF) ----------
+
+# Each element: (origin_xyz, origin_rpy, axis)
+# Note: these are the joint origin and axis for parent_link -> child_link
+LEFT_JOINTS = [
+    # Joint1
+    (np.array([0.0, 0.0, 0.4345]),
+     np.array([0.0, 0.0, -1.5251]),
+     np.array([0.0, 0.0, 1.0])),
+    # Joint2
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint3
+    (np.array([0.0, 0.2525, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint4
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint5
+    (np.array([0.0, 0.2525, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint6
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint7
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, 1.0])),
+]
+
+RIGHT_JOINTS = [
+    # Joint8
+    (np.array([0.0, 0.0, -0.4385]),
+     np.array([3.1416, 0.0, 1.6376]),
+     np.array([0.0, 0.0, 1.0])),
+    # Joint9
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint10
+    (np.array([0.0, 0.2525, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint11
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint12
+    (np.array([0.0, 0.2525, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint13
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, 3.1416]),
+     np.array([0.0, 0.0, -1.0])),
+    # Joint14
+    (np.array([0.0, 0.0, 0.0]),
+     np.array([1.5708, 0.0, -3.1416]),
+     np.array([0.0, 0.0, 1.0])),
+]
+
+# Joint limits (identical for both arms)
+# lower="-3.14" upper="3.14"
+JOINT_LIMITS = [(-3.14, 3.14)] * 7
+
+
+# ---------- FK ----------
+
+def _fk_chain(q, joint_defs):
+    """Generic FK: q shape (7,), joint_defs is LEFT_JOINTS or RIGHT_JOINTS.
+    Returns:
+        pos (3,), R (3,3), T (4,4) in base_link frame.
+    """
+    assert len(q) == 7
+    T = np.eye(4)
+    for angle, (xyz, rpy, axis) in zip(q, joint_defs):
+        # parent_link -> joint frame
+        R0 = rpy_to_rot(rpy)
+        T0 = make_transform(R0, xyz)
+        # joint rotation about given axis
+        Rj = rot_from_axis_angle(axis, angle)
+        Tj = make_transform(Rj, np.zeros(3))
+        T = T @ T0 @ Tj
+
+    R = T[:3, :3]
+    pos = T[:3, 3]
+    return pos, R, T
+
+
+def fk_left(q):
+    """Forward kinematics for left arm (Joint1..Joint7 -> Link7 origin)."""
+    q = np.asarray(q, dtype=float).reshape(7)
+    return _fk_chain(q, LEFT_JOINTS)
+
+
+def fk_right(q):
+    """Forward kinematics for right arm (Joint8..Joint14 -> Link14 origin)."""
+    q = np.asarray(q, dtype=float).reshape(7)
+    return _fk_chain(q, RIGHT_JOINTS)
+
+
+# ---------- numeric Jacobian ----------
+
+def _numeric_jacobian(q, joint_defs, eps=1e-5):
+    """6x7 Jacobian by finite differences on [pos; orient_vec]."""
+    q = np.asarray(q, dtype=float).reshape(7)
+    pos0, R0, _ = _fk_chain(q, joint_defs)
+    J = np.zeros((6, 7))
+
+    for i in range(7):
+        dq = np.zeros(7)
+        dq[i] = eps
+        pos1, R1, _ = _fk_chain(q + dq, joint_defs)
+
+        dp = (pos1 - pos0) / eps
+        # orientation difference from R0 -> R1
+        R_rel = R1 @ R0.T
+        dw = axis_angle_from_rot(R_rel) / eps
+
+        J[:, i] = np.concatenate([dp, dw])
+
+    return J
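+
+# Note: this Jacobian uses one-sided (forward) differences, whereas
+# aloha_kinematics.py uses central differences; central differences are
+# O(eps^2) accurate but cost twice as many FK evaluations per column.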
+
+
+# ---------- IK (iterative, 6D) ----------
+
+def _ik_numeric(target_pos, target_R, q_init, joint_defs,
+                max_iters=100, tol=1e-4, step=0.5, damping=1e-4):
+    """
+    target_pos: (3,)
+    target_R: (3,3)
+    q_init: (7,)
+    Returns:
+        q_sol (7,), success (bool), iters (int)
+    """
+    q = np.asarray(q_init, dtype=float).reshape(7)
+    target_pos = np.asarray(target_pos, dtype=float).reshape(3)
+    target_R = np.asarray(target_R, dtype=float).reshape(3, 3)
+
+    for it in range(max_iters):
+        pos, R, _ = _fk_chain(q, joint_defs)
+
+        # position error
+        e_pos = target_pos - pos
+        # orientation error: R_err = R_target * R_current^T
+        R_err = target_R @ R.T
+        e_ori = axis_angle_from_rot(R_err)
+
+        e = np.concatenate([e_pos, e_ori])
+
+        if np.linalg.norm(e) < tol:
+            # clamp to joint limits before return
+            for j in range(7):
+                lo, hi = JOINT_LIMITS[j]
+                q[j] = np.clip(q[j], lo, hi)
+            return q, True, it + 1
+
+        J = _numeric_jacobian(q, joint_defs)
+
+        # Damped least squares: dq = (J^T J + λI)^-1 J^T e
+        JT = J.T
+        H = JT @ J + damping * np.eye(7)
+        g = JT @ e
+        dq = np.linalg.solve(H, g)
+
+        q = q + step * dq
+
+        # clamp to limits
+        for j in range(7):
+            lo, hi = JOINT_LIMITS[j]
+            q[j] = np.clip(q[j], lo, hi)
+
+    # not converged
+    return q, False, max_iters
+
+
+def ik_left(target_pos, target_R, q_init,
+            max_iters=100, tol=1e-4, step=0.5, damping=1e-4):
+    """
+    IK for left arm (Joint1..Joint7 -> Link7 origin).
+
+    target_pos: (3,) target end-effector position (base_link frame)
+    target_R:   (3,3) target end-effector orientation as a rotation matrix
+    q_init:     (7,) initial joint angles
+    """
+    return _ik_numeric(target_pos, target_R, q_init, LEFT_JOINTS,
+                       max_iters=max_iters, tol=tol,
+                       step=step, damping=damping)
+
+
+def ik_right(target_pos, target_R, q_init,
+             max_iters=100, tol=1e-4, step=0.5, damping=1e-4):
+    """
+    IK for right arm (Joint8..Joint14 -> Link14 origin).
+    """
+    return _ik_numeric(target_pos, target_R, q_init, RIGHT_JOINTS,
+                       max_iters=max_iters, tol=tol,
+                       step=step, damping=damping)
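+
+# Tuning note: `step` scales each damped-least-squares update and `damping`
+# regularizes it near singularities. For example, ik_left(p, R, q0, step=0.3,
+# damping=1e-2) trades convergence speed for robustness when the target is
+# near the workspace boundary; the defaults here are a reasonable middle ground.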
+
+
+# ---------- Example usage (for quick test) ----------
+if __name__ == "__main__":
+    # zero config test
+    q0 = np.zeros(7)
+    pos_l, R_l, T_l = fk_left(q0)
+    pos_r, R_r, T_r = fk_right(q0)
+    print("Left FK @ zero:", pos_l)
+    print("Right FK @ zero:", pos_r)
+
+    # try IK back to the same pose
+    q_sol_l, ok_l, it_l = ik_left(pos_l, R_l, q0)
+    print("IK left success:", ok_l, "iters:", it_l, "q:", q_sol_l)
+
+    q_sol_r, ok_r, it_r = ik_right(pos_r, R_r, q0)
+    print("IK right success:", ok_r, "iters:", it_r, "q:", q_sol_r)
+
+"""
+Lightweight OO wrapper to align with mapping.py usage.
+Provides X7Kinematics(arm).fk(q) and .ik(T_target, q_init) using the function implementations above.
+"""
+
+
+class X7Kinematics:
+    def __init__(self, arm: str = "left"):
+        assert arm in ("left", "right"), "arm must be 'left' or 'right'"
+        self.arm = arm
+
+    def fk(self, q):
+        """Return 4x4 homogeneous transform from base_link to EE origin.
+        q: (7,) X7 joint vector (no gripper DOF).
+        """
+        if self.arm == "left":
+            pos, R, T = fk_left(q)
+        else:
+            pos, R, T = fk_right(q)
+        return T
+
+    def ik(self, T_target, q_init,
+           max_iters: int = 100, tol: float = 1e-4, step: float = 0.5, damping: float = 1e-4):
+        """Solve X7 IK to reach T_target using the numeric IK functions above.
+        Inputs:
+          - T_target: (4,4) target homogeneous transform
+          - q_init: (7,) initial joint guess
+        Returns:
+          - q_sol: (7,) joint solution (clamped to limits)
+          - success: bool
+          - iters: int
+        """
+        T_target = np.asarray(T_target, dtype=float)
+        target_pos = T_target[:3, 3]
+        target_R = T_target[:3, :3]
+        if self.arm == "left":
+            q_sol, ok, iters = ik_left(target_pos, target_R, q_init,
+                                       max_iters=max_iters, tol=tol, step=step, damping=damping)
+        else:
+            q_sol, ok, iters = ik_right(target_pos, target_R, q_init,
+                                        max_iters=max_iters, tol=tol, step=step, damping=damping)
+        return q_sol, ok, iters
diff --git a/X7_Robot/interfaces/__init__.py b/X7_Robot/interfaces/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/interfaces/pi_client.py b/X7_Robot/interfaces/pi_client.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/load_x7_duo.py b/X7_Robot/load_x7_duo.py
new file mode 100644
index 00000000000..be5ed169977
--- /dev/null
+++ b/X7_Robot/load_x7_duo.py
@@ -0,0 +1,110 @@
+import argparse
+
+from isaaclab.app import AppLauncher  # safe to import before the app is launched
+
+# ========= CLI =========
+parser = argparse.ArgumentParser(description="Load X7 Duo dual-arm robot inside IsaacLab.")
+parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.")
+AppLauncher.add_app_launcher_args(parser)
+args = parser.parse_args()
+
+# ========= Launch the GUI / SimulationApp =========
+app_launcher = AppLauncher(args)
+simulation_app = app_launcher.app
+
+# ========= These imports must happen only after the app is launched =========
+from isaaclab.scene.interactive_scene_cfg import InteractiveSceneCfg
+import isaaclab.sim as sim_utils
+from isaacsim.core.simulation_manager import SimulationManager
+import time
+from isaaclab.scene import InteractiveScene
+
+from robot.x7_duo_cfg import X7_DUO_CFG
+from scene.x7_scene_cfg import X7SceneCfg
+from control.controller import pi_control
+
+from control.robot_reset import reset_robot
+import torch
+
+# ========= WebSocket client (must be initialized once, globally) =========
+from openpi_client import websocket_client_policy
+
+
+def run(sim: sim_utils.SimulationContext, scene: InteractiveScene, client: websocket_client_policy.WebsocketClientPolicy):
+    robot = scene["robot"]
+
+    sim_dt = sim.get_physics_dt()
+    sim_time = 0.0
+    count = 0
+    action_chunk = None
+
+    print("[INFO] Running simulator ...")
+
+    while simulation_app.is_running():
+        # Reset
+        if count > 0 and count % 700 == 0:
+            reset_robot(scene)  # root pose + joint state
+            scene.reset()
+            scene.write_data_to_sim()
+            # take one small step so the state settles
+            sim.step()
+            scene.update(sim_dt)
+            # restart the counters so the first frame does not retrigger a reset
+            count = 0
+            sim_time = 0.0
+            # skip this control round so the reset is not immediately overwritten
+            continue
+
+        # Controller: query the policy server for an action chunk
+        action_chunk = pi_control(scene, sim_time, client)
+        for i in range(10):
+            a = action_chunk[i]  # 14-dim
+
+            # -------------- ALOHA action layout --------------
+            left_arm = a[0:6]
+            # left_grip = a[6]
+            right_arm = -a[7:13]
+            # right_grip = a[13]
+            # --------------------------------------------------
+
+            # 4) Current robot state
+            cur_q = scene["robot"].data.joint_pos[0].clone()  # shape (13,) or (14,)
+
+            # 5) Update the target action: incremental control is more stable here
+            cur_q[0:6] += 0.05 * torch.tensor(left_arm, device=cur_q.device)
+            cur_q[6] = 0
+            cur_q[7:13] += 0.05 * torch.tensor(right_arm, device=cur_q.device)
+            cur_q[13] = 0
+
+            # 6) Send to both X7 arms
+            scene["robot"].set_joint_position_target(cur_q.unsqueeze(0))
+
+            # 7) Step the simulation once
+            scene.write_data_to_sim()
+            sim.step()
+            scene.update(sim_dt)
+            # sim.render()
+            count += 1
+            sim_time += sim_dt
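+
+# Note on run(): the right-arm slice is negated (right_arm = -a[7:13]), which
+# appears to mirror the ALOHA right-arm joint directions onto the X7, and the
+# deltas are applied with an empirical 0.05 gain rather than as absolute
+# targets; both look like tuning choices, not requirements of the policy server.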
+def main():
+    # initialize the simulation
+    sim_cfg = sim_utils.SimulationCfg(device=args.device)
+    sim = sim_utils.SimulationContext(sim_cfg)
+    sim.set_camera_view([4.0, 0.0, 2.5], [0.0, 0.0, 0.5])  # type: ignore
+    scene_cfg = X7SceneCfg(num_envs=args.num_envs, env_spacing=2.0)
+    scene = InteractiveScene(scene_cfg)
+    sim.reset()
+    print("[INFO]: Setup complete...")
+
+    client = websocket_client_policy.WebsocketClientPolicy(host="localhost", port=8000)
+
+    run(sim, scene, client)
+
+
+if __name__ == "__main__":
+    main()
+    simulation_app.close()
\ No newline at end of file
diff --git a/X7_Robot/packages/openpi-client/pyproject.toml b/X7_Robot/packages/openpi-client/pyproject.toml
new file mode 100644
index 00000000000..fba7b66f62b
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/pyproject.toml
@@ -0,0 +1,23 @@
+[project]
+name = "openpi-client"
+version = "0.1.0"
+requires-python = ">=3.7"
+dependencies = [
+    "dm-tree>=0.1.8",
+    "msgpack>=1.0.5",
+    "numpy>=1.22.4,<2.0.0",
+    "pillow>=9.0.0",
+    "tree>=0.2.4",
+    "websockets>=11.0",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.uv]
+dev-dependencies = ["pytest>=8.3.4"]
+
+[tool.ruff]
+line-length = 120
+target-version = "py37"
diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/__init__.py b/X7_Robot/packages/openpi-client/src/openpi_client/__init__.py
new file mode 100644
index 00000000000..3dc1f76bc69
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/src/openpi_client/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/action_chunk_broker.py b/X7_Robot/packages/openpi-client/src/openpi_client/action_chunk_broker.py
new file mode 100644
index 00000000000..8fa9d83d023
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/src/openpi_client/action_chunk_broker.py
@@ -0,0 +1,50 @@
+from typing import Dict
+
+import numpy as np
+import tree
+from typing_extensions import override
+
+from openpi_client import base_policy as _base_policy
+
+
+class ActionChunkBroker(_base_policy.BasePolicy):
+    """Wraps a policy to return action chunks one-at-a-time.
+
+    Assumes that the first dimension of all action fields is the chunk size.
+
+    A new inference call to the inner policy is only made when the current
+    list of chunks is exhausted.
+    """
+
+    def __init__(self, policy: _base_policy.BasePolicy, action_horizon: int):
+        self._policy = policy
+        self._action_horizon = action_horizon
+        self._cur_step: int = 0
+
+        self._last_results: Dict[str, np.ndarray] | None = None
+
+    @override
+    def infer(self, obs: Dict) -> Dict:  # noqa: UP006
+        if self._last_results is None:
+            self._last_results = self._policy.infer(obs)
+            self._cur_step = 0
+
+        def slicer(x):
+            if isinstance(x, np.ndarray):
+                return x[self._cur_step, ...]
+            else:
+                return x
+
+        results = tree.map_structure(slicer, self._last_results)
+        self._cur_step += 1
+
+        if self._cur_step >= self._action_horizon:
+            self._last_results = None
+
+        return results
+
+    @override
+    def reset(self) -> None:
+        self._policy.reset()
+        self._last_results = None
+        self._cur_step = 0
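+
+# Composition sketch (WebsocketClientPolicy is defined elsewhere in this
+# package; the horizon value below is illustrative):
+#
+#     policy = ActionChunkBroker(WebsocketClientPolicy(host="localhost", port=8000),
+#                                action_horizon=10)
+#     action = policy.infer(obs)   # returns one slice of the chunk per call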
diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/base_policy.py b/X7_Robot/packages/openpi-client/src/openpi_client/base_policy.py
new file mode 100644
index 00000000000..2f4290651b1
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/src/openpi_client/base_policy.py
@@ -0,0 +1,12 @@
+import abc
+from typing import Dict
+
+
+class BasePolicy(abc.ABC):
+    @abc.abstractmethod
+    def infer(self, obs: Dict) -> Dict:
+        """Infer actions from observations."""
+
+    def reset(self) -> None:
+        """Reset the policy to its initial state."""
+        pass
diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/image_tools.py b/X7_Robot/packages/openpi-client/src/openpi_client/image_tools.py
new file mode 100644
index 00000000000..7a971b9d5f6
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/src/openpi_client/image_tools.py
@@ -0,0 +1,58 @@
+import numpy as np
+from PIL import Image
+
+
+def convert_to_uint8(img: np.ndarray) -> np.ndarray:
+    """Converts an image to uint8 if it is a float image.
+
+    This is important for reducing the size of the image when sending it over the network.
+    """
+    if np.issubdtype(img.dtype, np.floating):
+        img = (255 * img).astype(np.uint8)
+    return img
+
+
+def resize_with_pad(images: np.ndarray, height: int, width: int, method=Image.BILINEAR) -> np.ndarray:
+    """Replicates tf.image.resize_with_pad for multiple images using PIL. Resizes a batch of images to a target
+    height and width without distortion by padding with zeros.
+
+    Args:
+        images: A batch of images in [..., height, width, channel] format.
+        height: The target height of the image.
+        width: The target width of the image.
+        method: The interpolation method to use. Default is bilinear.
+
+    Returns:
+        The resized images in [..., height, width, channel].
+    """
+    # If the images are already the correct size, return them as is.
+    if images.shape[-3:-1] == (height, width):
+        return images
+
+    original_shape = images.shape
+
+    images = images.reshape(-1, *original_shape[-3:])
+    resized = np.stack([_resize_with_pad_pil(Image.fromarray(im), height, width, method=method) for im in images])
+    return resized.reshape(*original_shape[:-3], *resized.shape[-3:])
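+
+# Typical use when shipping camera frames to a policy server (shapes are
+# illustrative; any [..., H, W, C] batch works):
+#
+#     frames = np.random.rand(2, 480, 640, 3)      # float images in [0, 1]
+#     frames = convert_to_uint8(frames)            # -> uint8, smaller on the wire
+#     small = resize_with_pad(frames, 224, 224)    # -> (2, 224, 224, 3), zero-padded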
+
+
+def _resize_with_pad_pil(image: Image.Image, height: int, width: int, method: int) -> Image.Image:
+    """Replicates tf.image.resize_with_pad for one image using PIL. Resizes an image to a target height and
+    width without distortion by padding with zeros.
+
+    Unlike the jax version, note that PIL uses [width, height, channel] ordering instead of [batch, h, w, c].
+    """
+    cur_width, cur_height = image.size
+    if cur_width == width and cur_height == height:
+        return image  # No need to resize if the image is already the correct size.
+
+    ratio = max(cur_width / width, cur_height / height)
+    resized_height = int(cur_height / ratio)
+    resized_width = int(cur_width / ratio)
+    resized_image = image.resize((resized_width, resized_height), resample=method)
+
+    zero_image = Image.new(resized_image.mode, (width, height), 0)
+    pad_height = max(0, int((height - resized_height) / 2))
+    pad_width = max(0, int((width - resized_width) / 2))
+    zero_image.paste(resized_image, (pad_width, pad_height))
+    assert zero_image.size == (width, height)
+    return zero_image
diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/image_tools_test.py b/X7_Robot/packages/openpi-client/src/openpi_client/image_tools_test.py
new file mode 100644
index 00000000000..8d4b4b92030
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/src/openpi_client/image_tools_test.py
@@ -0,0 +1,37 @@
+import numpy as np
+
+import openpi_client.image_tools as image_tools
+
+
+def test_resize_with_pad_shapes():
+    # Test case 1: Resize image with larger dimensions
+    images = np.zeros((2, 10, 10, 3), dtype=np.uint8)  # Input images of shape (batch_size, height, width, channels)
+    height = 20
+    width = 20
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (2, height, width, 3)
+    assert np.all(resized_images == 0)
+
+    # Test case 2: Resize image with smaller dimensions
+    images = np.zeros((3, 30, 30, 3), dtype=np.uint8)
+    height = 15
+    width = 15
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (3, height, width, 3)
+    assert np.all(resized_images == 0)
+
+    # Test case 3: Resize image with the same dimensions
+    images = np.zeros((1, 50, 50, 3), dtype=np.uint8)
+    height = 50
+    width = 50
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (1, height, width, 3)
+    assert np.all(resized_images == 0)
+
+    # Test case 4: Resize image with odd-numbered padding
+    images = np.zeros((1, 256, 320, 3), dtype=np.uint8)
+    height = 60
+    width = 80
+    resized_images = image_tools.resize_with_pad(images, height, width)
+    assert resized_images.shape == (1, height, width, 3)
+    assert np.all(resized_images == 0)
+""" + +import functools + +import msgpack +import numpy as np + + +def pack_array(obj): + if (isinstance(obj, (np.ndarray, np.generic))) and obj.dtype.kind in ("V", "O", "c"): + raise ValueError(f"Unsupported dtype: {obj.dtype}") + + if isinstance(obj, np.ndarray): + return { + b"__ndarray__": True, + b"data": obj.tobytes(), + b"dtype": obj.dtype.str, + b"shape": obj.shape, + } + + if isinstance(obj, np.generic): + return { + b"__npgeneric__": True, + b"data": obj.item(), + b"dtype": obj.dtype.str, + } + + return obj + + +def unpack_array(obj): + if b"__ndarray__" in obj: + return np.ndarray(buffer=obj[b"data"], dtype=np.dtype(obj[b"dtype"]), shape=obj[b"shape"]) + + if b"__npgeneric__" in obj: + return np.dtype(obj[b"dtype"]).type(obj[b"data"]) + + return obj + + +Packer = functools.partial(msgpack.Packer, default=pack_array) +packb = functools.partial(msgpack.packb, default=pack_array) + +Unpacker = functools.partial(msgpack.Unpacker, object_hook=unpack_array) +unpackb = functools.partial(msgpack.unpackb, object_hook=unpack_array) diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py b/X7_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py new file mode 100644 index 00000000000..4c774ba468a --- /dev/null +++ b/X7_Robot/packages/openpi-client/src/openpi_client/msgpack_numpy_test.py @@ -0,0 +1,45 @@ +import numpy as np +import pytest +import tree + +from openpi_client import msgpack_numpy + + +def _check(expected, actual): + if isinstance(expected, np.ndarray): + assert expected.shape == actual.shape + assert expected.dtype == actual.dtype + assert np.array_equal(expected, actual, equal_nan=expected.dtype.kind == "f") + else: + assert expected == actual + + +@pytest.mark.parametrize( + "data", + [ + 1, # int + 1.0, # float + "hello", # string + np.bool_(True), # boolean scalar + np.array([1, 2, 3])[0], # int scalar + np.str_("asdf"), # string scalar + [1, 2, 3], # list + {"key": "value"}, # dict + {"key": [1, 2, 3]}, # nested dict + np.array(1.0), # 0D array + np.array([1, 2, 3], dtype=np.int32), # 1D integer array + np.array(["asdf", "qwer"]), # string array + np.array([True, False]), # boolean array + np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32), # 2D float array + np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.int16), # 3D integer array + np.array([np.nan, np.inf, -np.inf]), # special float values + {"arr": np.array([1, 2, 3]), "nested": {"arr": np.array([4, 5, 6])}}, # nested dict with arrays + [np.array([1, 2]), np.array([3, 4])], # list of arrays + np.zeros((3, 4, 5), dtype=np.float32), # 3D zeros + np.ones((2, 3), dtype=np.float64), # 2D ones with double precision + ], +) +def test_pack_unpack(data): + packed = msgpack_numpy.packb(data) + unpacked = msgpack_numpy.unpackb(packed) + tree.map_structure(_check, data, unpacked) diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/runtime/agent.py b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/agent.py new file mode 100644 index 00000000000..a2c3ab66ef6 --- /dev/null +++ b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/agent.py @@ -0,0 +1,17 @@ +import abc + + +class Agent(abc.ABC): + """An Agent is the thing with agency, i.e. the entity that makes decisions. + + Agents receive observations about the state of the world, and return actions + to take in response. 
+ """ + + @abc.abstractmethod + def get_action(self, observation: dict) -> dict: + """Query the agent for the next action.""" + + @abc.abstractmethod + def reset(self) -> None: + """Reset the agent to its initial state.""" diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py new file mode 100644 index 00000000000..65227c44dae --- /dev/null +++ b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/agents/policy_agent.py @@ -0,0 +1,18 @@ +from typing_extensions import override + +from openpi_client import base_policy as _base_policy +from openpi_client.runtime import agent as _agent + + +class PolicyAgent(_agent.Agent): + """An agent that uses a policy to determine actions.""" + + def __init__(self, policy: _base_policy.BasePolicy) -> None: + self._policy = policy + + @override + def get_action(self, observation: dict) -> dict: + return self._policy.infer(observation) + + def reset(self) -> None: + self._policy.reset() diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/runtime/environment.py b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/environment.py new file mode 100644 index 00000000000..664ac4678aa --- /dev/null +++ b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/environment.py @@ -0,0 +1,32 @@ +import abc + + +class Environment(abc.ABC): + """An Environment represents the robot and the environment it inhabits. + + The primary contract of environments is that they can be queried for observations + about their state, and have actions applied to them to change that state. + """ + + @abc.abstractmethod + def reset(self) -> None: + """Reset the environment to its initial state. + + This will be called once before starting each episode. + """ + + @abc.abstractmethod + def is_episode_complete(self) -> bool: + """Allow the environment to signal that the episode is complete. + + This will be called after each step. It should return `True` if the episode is + complete (either successfully or unsuccessfully), and `False` otherwise. 
+ """ + + @abc.abstractmethod + def get_observation(self) -> dict: + """Query the environment for the current state.""" + + @abc.abstractmethod + def apply_action(self, action: dict) -> None: + """Take an action in the environment.""" diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/runtime/runtime.py b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/runtime.py new file mode 100644 index 00000000000..9552be091a2 --- /dev/null +++ b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/runtime.py @@ -0,0 +1,92 @@ +import logging +import threading +import time + +from openpi_client.runtime import agent as _agent +from openpi_client.runtime import environment as _environment +from openpi_client.runtime import subscriber as _subscriber + + +class Runtime: + """The core module orchestrating interactions between key components of the system.""" + + def __init__( + self, + environment: _environment.Environment, + agent: _agent.Agent, + subscribers: list[_subscriber.Subscriber], + max_hz: float = 0, + num_episodes: int = 1, + max_episode_steps: int = 0, + ) -> None: + self._environment = environment + self._agent = agent + self._subscribers = subscribers + self._max_hz = max_hz + self._num_episodes = num_episodes + self._max_episode_steps = max_episode_steps + + self._in_episode = False + self._episode_steps = 0 + + def run(self) -> None: + """Runs the runtime loop continuously until stop() is called or the environment is done.""" + for _ in range(self._num_episodes): + self._run_episode() + + # Final reset, this is important for real environments to move the robot to its home position. + self._environment.reset() + + def run_in_new_thread(self) -> threading.Thread: + """Runs the runtime loop in a new thread.""" + thread = threading.Thread(target=self.run) + thread.start() + return thread + + def mark_episode_complete(self) -> None: + """Marks the end of an episode.""" + self._in_episode = False + + def _run_episode(self) -> None: + """Runs a single episode.""" + logging.info("Starting episode...") + self._environment.reset() + self._agent.reset() + for subscriber in self._subscribers: + subscriber.on_episode_start() + + self._in_episode = True + self._episode_steps = 0 + step_time = 1 / self._max_hz if self._max_hz > 0 else 0 + last_step_time = time.time() + + while self._in_episode: + self._step() + self._episode_steps += 1 + + # Sleep to maintain the desired frame rate + now = time.time() + dt = now - last_step_time + if dt < step_time: + time.sleep(step_time - dt) + last_step_time = time.time() + else: + last_step_time = now + + logging.info("Episode completed.") + for subscriber in self._subscribers: + subscriber.on_episode_end() + + def _step(self) -> None: + """A single step of the runtime loop.""" + observation = self._environment.get_observation() + action = self._agent.get_action(observation) + self._environment.apply_action(action) + + for subscriber in self._subscribers: + subscriber.on_step(observation, action) + + if self._environment.is_episode_complete() or ( + self._max_episode_steps > 0 and self._episode_steps >= self._max_episode_steps + ): + self.mark_episode_complete() diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py new file mode 100644 index 00000000000..7c69edaa8e8 --- /dev/null +++ b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py @@ -0,0 +1,20 @@ +import abc + + +class Subscriber(abc.ABC): + """Subscribes to 
diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py
new file mode 100644
index 00000000000..7c69edaa8e8
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/src/openpi_client/runtime/subscriber.py
@@ -0,0 +1,20 @@
+import abc
+
+
+class Subscriber(abc.ABC):
+    """Subscribes to events in the runtime.
+
+    Subscribers can be used to save data, visualize, etc.
+    """
+
+    @abc.abstractmethod
+    def on_episode_start(self) -> None:
+        """Called when an episode starts."""
+
+    @abc.abstractmethod
+    def on_step(self, observation: dict, action: dict) -> None:
+        """Append a step to the episode."""
+
+    @abc.abstractmethod
+    def on_episode_end(self) -> None:
+        """Called when an episode ends."""
diff --git a/X7_Robot/packages/openpi-client/src/openpi_client/websocket_client_policy.py b/X7_Robot/packages/openpi-client/src/openpi_client/websocket_client_policy.py
new file mode 100644
index 00000000000..d6244f0f780
--- /dev/null
+++ b/X7_Robot/packages/openpi-client/src/openpi_client/websocket_client_policy.py
@@ -0,0 +1,55 @@
+import logging
+import time
+from typing import Dict, Optional, Tuple
+
+from typing_extensions import override
+import websockets.sync.client
+
+from openpi_client import base_policy as _base_policy
+from openpi_client import msgpack_numpy
+
+
+class WebsocketClientPolicy(_base_policy.BasePolicy):
+    """Implements the Policy interface by communicating with a server over websocket.
+
+    See WebsocketPolicyServer for a corresponding server implementation.
+    """
+
+    def __init__(self, host: str = "0.0.0.0", port: Optional[int] = None, api_key: Optional[str] = None) -> None:
+        self._uri = f"ws://{host}"
+        if port is not None:
+            self._uri += f":{port}"
+        self._packer = msgpack_numpy.Packer()
+        self._api_key = api_key
+        self._ws, self._server_metadata = self._wait_for_server()
+
+    def get_server_metadata(self) -> Dict:
+        return self._server_metadata
+
+    def _wait_for_server(self) -> Tuple[websockets.sync.client.ClientConnection, Dict]:
+        logging.info(f"Waiting for server at {self._uri}...")
+        while True:
+            try:
+                headers = {"Authorization": f"Api-Key {self._api_key}"} if self._api_key else None
+                conn = websockets.sync.client.connect(
+                    self._uri, compression=None, max_size=None, additional_headers=headers
+                )
+                metadata = msgpack_numpy.unpackb(conn.recv())
+                return conn, metadata
+            except ConnectionRefusedError:
+                logging.info("Still waiting for server...")
+                time.sleep(5)
+
+    @override
+    def infer(self, obs: Dict) -> Dict:  # noqa: UP006
+        data = self._packer.pack(obs)
+        self._ws.send(data)
+        response = self._ws.recv()
+        if isinstance(response, str):
+            # we're expecting bytes; if the server sends a string, it's an error.
+            raise RuntimeError(f"Error in inference server:\n{response}")
+        return msgpack_numpy.unpackb(response)
+
+    @override
+    def reset(self) -> None:
+        pass
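+
+# Connection sketch (blocks until a policy server is reachable; the obs dict
+# keys are whatever the server expects -- the keys shown are illustrative):
+#
+#     client = WebsocketClientPolicy(host="localhost", port=8000)
+#     result = client.infer({"observation/state": state, "prompt": "fold the towel"})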
diff --git a/X7_Robot/policy/_init_.py b/X7_Robot/policy/_init_.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/robot/_init_.py b/X7_Robot/robot/_init_.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/robot/x7_duo_cfg.py b/X7_Robot/robot/x7_duo_cfg.py
new file mode 100644
index 00000000000..fd84f77c4e7
--- /dev/null
+++ b/X7_Robot/robot/x7_duo_cfg.py
@@ -0,0 +1,75 @@
+import math
+import torch
+from isaaclab.scene.interactive_scene_cfg import InteractiveSceneCfg
+import isaaclab.sim as sim_utils
+from isaaclab.assets.articulation import ArticulationCfg
+from isaaclab.actuators import ImplicitActuatorCfg
+from isaaclab.utils import math as math_utils
+from isaacsim.sensors.camera import Camera
+
+# NOTE: this path is relative to the IsaacLab root directory
+X7_USD_PATH = "X7_Robot/assets/X7/x7_duo.usd"
+
+
+# ============================
+# Robot configuration (14 DOF)
+# ============================
+
+X7_DUO_CFG = ArticulationCfg(
+    spawn=sim_utils.UsdFileCfg(
+        usd_path=X7_USD_PATH,
+        rigid_props=sim_utils.RigidBodyPropertiesCfg(disable_gravity=False),
+        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
+            enabled_self_collisions=True,
+            solver_position_iteration_count=8,
+            solver_velocity_iteration_count=0,
+            fix_root_link=True,
+        ),
+    ),
+
+    init_state=ArticulationCfg.InitialStateCfg(
+        joint_pos={f"Joint{i}": 0.0 for i in range(1, 15)},
+        pos=(0.0, 0.0, 1.8),
+        # rotate -90° about the Y axis
+        rot=tuple(
+            math_utils.quat_from_euler_xyz(
+                torch.tensor([0.0]),           # roll
+                torch.tensor([-math.pi / 2]),  # pitch
+                torch.tensor([0.0]),           # yaw
+            )[0].tolist()
+        ),
+    ),
+
+    actuators={
+        # Left arm Joint1~Joint7
+        "A1": ImplicitActuatorCfg(joint_names_expr=["Joint1"], stiffness=3000.0, damping=80.0),
+        "A2": ImplicitActuatorCfg(joint_names_expr=["Joint2"], stiffness=3000.0, damping=80.0),
+        "A3": ImplicitActuatorCfg(joint_names_expr=["Joint3"], stiffness=3000.0, damping=80.0),
+        "A4": ImplicitActuatorCfg(joint_names_expr=["Joint4"], stiffness=3000.0, damping=80.0),
+        "A5": ImplicitActuatorCfg(joint_names_expr=["Joint5"], stiffness=3000.0, damping=80.0),
+        "A6": ImplicitActuatorCfg(joint_names_expr=["Joint6"], stiffness=3000.0, damping=80.0),
+        "A7": ImplicitActuatorCfg(joint_names_expr=["Joint7"], stiffness=3000.0, damping=80.0),
+
+        # Right arm Joint8~Joint14
+        "B1": ImplicitActuatorCfg(joint_names_expr=["Joint8"], stiffness=3000.0, damping=80.0),
+        "B2": ImplicitActuatorCfg(joint_names_expr=["Joint9"], stiffness=3000.0, damping=80.0),
+        "B3": ImplicitActuatorCfg(joint_names_expr=["Joint10"], stiffness=3000.0, damping=80.0),
+        "B4": ImplicitActuatorCfg(joint_names_expr=["Joint11"], stiffness=3000.0, damping=80.0),
+        "B5": ImplicitActuatorCfg(joint_names_expr=["Joint12"], stiffness=3000.0, damping=80.0),
+        "B6": ImplicitActuatorCfg(joint_names_expr=["Joint13"], stiffness=3000.0, damping=80.0),
+        "B7": ImplicitActuatorCfg(joint_names_expr=["Joint14"], stiffness=3000.0, damping=80.0),
+    },
+)
+
+# class X7DuoSceneCfg(InteractiveSceneCfg):
+#     # ground
+#     ground = AssetBaseCfg(
+#         prim_path="/World/defaultGroundPlane",
+#         spawn=sim_utils.GroundPlaneCfg(),
+#     )
+
+#     # robot
+#     robot = X7_DUO_CFG.replace(
+#         prim_path="{ENV_REGEX_NS}/X7_duo"
+#     )
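+
+# For reference, the init rotation above evaluates to the constant quaternion
+# (w, x, y, z) ≈ (0.70711, 0.0, -0.70711, 0.0), i.e. rot could equivalently be
+# written as rot=(0.70711, 0.0, -0.70711, 0.0) without the torch round-trip
+# (assuming Isaac Lab's usual w-first quaternion convention).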
diff --git a/X7_Robot/scene/_init_.py b/X7_Robot/scene/_init_.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/X7_Robot/scene/x7_scene_cfg.py b/X7_Robot/scene/x7_scene_cfg.py
new file mode 100644
index 00000000000..3d450b771d0
--- /dev/null
+++ b/X7_Robot/scene/x7_scene_cfg.py
@@ -0,0 +1,70 @@
+import math
+import torch
+import numpy as np
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets.articulation import ArticulationCfg
+from isaaclab.assets import AssetBaseCfg
+from isaaclab.actuators import ImplicitActuatorCfg
+from isaaclab.scene import InteractiveSceneCfg
+from isaaclab.utils import math as math_utils
+from isaaclab.utils import configclass
+from robot.x7_duo_cfg import X7_DUO_CFG
+from isaaclab.sensors import CameraCfg
+
+
+@configclass
+class X7SceneCfg(InteractiveSceneCfg):
+    # ground
+    ground = AssetBaseCfg(
+        prim_path="/World/defaultGroundPlane",
+        spawn=sim_utils.GroundPlaneCfg(),
+    )
+
+    # robot
+    robot = X7_DUO_CFG.replace(  # type: ignore
+        prim_path="{ENV_REGEX_NS}/Robot"
+    )
+
+    # lighting
+    dome_light = AssetBaseCfg(
+        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
+    )
+
+    # sensors
+    head_camera = CameraCfg(
+        prim_path="{ENV_REGEX_NS}/Robot/base_link/head_cam",
+        update_period=0.1,
+        height=480,
+        width=640,
+        data_types=["rgb", "distance_to_image_plane"],
+        spawn=sim_utils.PinholeCameraCfg(
+            focal_length=12.4, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
+        ),
+        offset=CameraCfg.OffsetCfg(pos=(0.4, 0.1, 0.0), rot=(0.67409, 0.21355, 0.67409, -0.213555), convention="opengl"),
+    )
+
+    left_camera = CameraCfg(
+        prim_path="{ENV_REGEX_NS}/Robot/Link7/left_cam",
+        update_period=0.1,
+        height=480,
+        width=640,
+        data_types=["rgb", "distance_to_image_plane"],
+        spawn=sim_utils.PinholeCameraCfg(
+            focal_length=18.14756, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
+        ),
+        offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.3), rot=(0.0, -0.0, -1.0, -0.0), convention="opengl"),
+    )
+
+    right_camera = CameraCfg(
+        prim_path="{ENV_REGEX_NS}/Robot/Link14/right_cam",
+        update_period=0.1,
+        height=480,
+        width=640,
+        data_types=["rgb", "distance_to_image_plane"],
+        spawn=sim_utils.PinholeCameraCfg(
+            focal_length=12.4, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5)
+        ),
+        offset=CameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.3), rot=(0.0, -0.0, -1.0, -0.0), convention="opengl"),
+    )
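+
+# Access sketch for the sensors above (Isaac Lab camera buffers; key names
+# follow the requested data_types, shapes are approximate and version-dependent):
+#
+#     rgb = scene["head_camera"].data.output["rgb"]                       # ~(N, 480, 640, 3)
+#     depth = scene["head_camera"].data.output["distance_to_image_plane"]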
diff --git a/X7_Robot/test_openpi.py b/X7_Robot/test_openpi.py
new file mode 100644
index 00000000000..a69f379fcd5
--- /dev/null
+++ b/X7_Robot/test_openpi.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""This script sets up the X7 interactive scene and queries observations for the openpi policy.
+
+.. code-block:: bash
+
+    # Usage
+    ./isaaclab.sh -p X7_Robot/test_openpi.py --num_envs 1
+
+"""
+
+"""Launch Isaac Sim Simulator first."""
+
+import argparse
+
+from isaaclab.app import AppLauncher
+
+# add argparse arguments
+parser = argparse.ArgumentParser(description="Test the X7 scene and openpi observation pipeline.")
+parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.")
+# append AppLauncher cli args
+AppLauncher.add_app_launcher_args(parser)
+# parse the arguments
+args_cli = parser.parse_args()
+
+# launch omniverse app
+app_launcher = AppLauncher(args_cli)
+simulation_app = app_launcher.app
+
+"""Rest everything follows."""
+
+import torch
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets import ArticulationCfg, AssetBaseCfg
+from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
+from isaaclab.sim import SimulationContext
+from isaaclab.utils import configclass
+from robot.x7_duo_cfg import X7_DUO_CFG
+from scene.x7_scene_cfg import X7SceneCfg
+from control.observation import get_observation
+
+from utils.robot_reset import reset_robot
+
+##
+# Pre-defined configs
+##
+from isaaclab_assets import CARTPOLE_CFG  # isort:skip
+
+
+def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
+    """Runs the simulation loop."""
+    # Extract scene entities
+    # note: we only do this here for readability.
+    robot = scene["robot"]
+    # Define simulation stepping
+    sim_dt = sim.get_physics_dt()
+    count = 0
+    # Simulation loop
+    while simulation_app.is_running():
+        # Reset
+        if count % 500 == 0:
+            # reset counter
+            count = 0
+            # reset the scene entities
+            # root state
+            # we offset the root state by the origin since the states are written in simulation world frame
+            # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
+            root_state = robot.data.default_root_state.clone()
+            root_state[:, :3] += scene.env_origins
+
+            robot.write_root_pose_to_sim(root_state[:, :7])
+            robot.write_root_velocity_to_sim(root_state[:, 7:])
+            # set joint positions with some noise
+            joint_pos = robot.data.default_joint_pos.clone()
+            print(joint_pos.shape)
+
+            observation = get_observation(scene)
+            # print(observation["observation/image"].shape)
+            # print(observation["observation/wrist_image"].shape)
+            # print(observation["observation/state"].shape)
+
+            # robot.write_joint_state_to_sim(joint_pos, joint_vel)
+            # # clear internal buffers
+            # scene.reset()
+            # print("[INFO]: Resetting robot state...")
+        # # Apply random action
+        # # -- generate random joint efforts
+        # efforts = torch.randn_like(robot.data.joint_pos) * 5.0
+        # # -- apply action to the robot
+        # robot.set_joint_effort_target(efforts)
+        # -- write data to sim
+        scene.write_data_to_sim()
+        # Perform step
+        sim.step()
+        # Increment counter
+        count += 1
+        # Update buffers
+        scene.update(sim_dt)
+
+
+def main():
+    """Main function."""
+    # Load kit helper
+    # sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    # sim = SimulationContext(sim_cfg)
+    # # Set main camera
+    # sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
+    # # Design scene
+    # scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    # scene = InteractiveScene(scene_cfg)
+    sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
+    sim = sim_utils.SimulationContext(sim_cfg)
+    sim.set_camera_view([4.0, 0.0, 2.5], [0.0, 0.0, 0.5])  # type: ignore
+
+    scene_cfg = X7SceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
+    scene = InteractiveScene(scene_cfg)
+
+    # Play the simulator
+    sim.reset()
+    # Now we are ready!
+    print("[INFO]: Setup complete...")
+    # Run the simulator
+    run_simulator(sim, scene)
+
+
+if __name__ == "__main__":
+    # run the main function
+    main()
+    # close sim app
+    simulation_app.close()
diff --git a/X7_Robot/utils/_init_.py b/X7_Robot/utils/_init_.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/pyproject.toml b/pyproject.toml
index aa5574018eb..44ab386fb32 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -63,6 +63,14 @@ known_task_firstparty = "isaaclab_tasks"
 # Imports from the local folder
 known_local_folder = "config"
 
+[build-system]
+requires = ["setuptools>=61.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[tool.setuptools.packages.find]
+where = ["source"]
+include = ["isaaclab*", "isaaclab_rl*", "isaaclab_mimic*", "isaaclab_assets*", "isaaclab_tasks*"]
+
 [tool.pyright]
 include = ["source", "scripts"]
diff --git a/scripts/simulation/load_x7_duo.py b/scripts/simulation/load_x7_duo.py
new file mode 100644
index 00000000000..561a7339ead
--- /dev/null
+++ b/scripts/simulation/load_x7_duo.py
@@ -0,0 +1,225 @@
+import argparse
+from isaaclab.app import AppLauncher
+
+# CLI arguments
+parser = argparse.ArgumentParser(description="Load X7 Duo dual-arm robot inside IsaacLab.")
+parser.add_argument("--num_envs", type=int, default=1)
+AppLauncher.add_app_launcher_args(parser)
+args = parser.parse_args()
+
+# Launch GUI
+app_launcher = AppLauncher(args)
+simulation_app = app_launcher.app
+
+
+# --- Imports AFTER simulation app launched ---
+import numpy as np
+import torch
+
+import isaaclab.sim as sim_utils
+from isaaclab.assets.articulation import ArticulationCfg
+from isaaclab.assets import AssetBaseCfg
+from isaaclab.actuators import ImplicitActuatorCfg
+from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
+import math
+from isaaclab.utils import math as math_utils
+from isaaclab.utils import configclass  # needed so the scene entities below register as config fields
+
+
+# =====================================================================
+# 1) CONFIGURE THE X7-DUO ROBOT (14 DOF)
+# =====================================================================
+
+X7_DUO_CFG = ArticulationCfg(
+    spawn=sim_utils.UsdFileCfg(
+        usd_path="X7_Robot/assets/X7/x7_duo.usd",
+        rigid_props=sim_utils.RigidBodyPropertiesCfg(disable_gravity=False),
+        articulation_props=sim_utils.ArticulationRootPropertiesCfg(
+            enabled_self_collisions=True,
+            solver_position_iteration_count=8,
+            solver_velocity_iteration_count=0,
+            fix_root_link=True,
+        ),
+    ),
+
+    init_state=ArticulationCfg.InitialStateCfg(
+        joint_pos={f"Joint{i}": 0.0 for i in range(1, 15)},
+        pos=(0.0, 0.0, 1.8),
+        # key point: rotate -90° about the Y axis (in radians)
+        rot=tuple(
+            math_utils.quat_from_euler_xyz(
+                torch.tensor([0.0]),           # roll = 0
+                torch.tensor([-math.pi / 2]),  # pitch = -90°
+                torch.tensor([0.0]),           # yaw = 0
+            )[0].tolist()
+        ),
+    ),
+
+    actuators={
+        # Left Arm Joint1~Joint7
+        "A1": ImplicitActuatorCfg(
+            joint_names_expr=["Joint1"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "A2": ImplicitActuatorCfg(
+            joint_names_expr=["Joint2"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "A3": ImplicitActuatorCfg(
+            joint_names_expr=["Joint3"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "A4": ImplicitActuatorCfg(
+            joint_names_expr=["Joint4"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "A5": ImplicitActuatorCfg(
+            joint_names_expr=["Joint5"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "A6": ImplicitActuatorCfg(
+            joint_names_expr=["Joint6"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "A7": ImplicitActuatorCfg(
+            joint_names_expr=["Joint7"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+
+        # Right Arm Joint8~Joint14
+        "B1": ImplicitActuatorCfg(
+            joint_names_expr=["Joint8"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "B2": ImplicitActuatorCfg(
+            joint_names_expr=["Joint9"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "B3": ImplicitActuatorCfg(
+            joint_names_expr=["Joint10"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "B4": ImplicitActuatorCfg(
+            joint_names_expr=["Joint11"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "B5": ImplicitActuatorCfg(
+            joint_names_expr=["Joint12"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "B6": ImplicitActuatorCfg(
+            joint_names_expr=["Joint13"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+        "B7": ImplicitActuatorCfg(
+            joint_names_expr=["Joint14"],
+            stiffness=3000.0,
+            damping=80.0
+        ),
+    },
+)
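+
+# Equivalent, more compact form of the actuator table above (a sketch only --
+# the explicit per-joint dict is kept for readability):
+#
+#     actuators = {
+#         f"{'A' if i <= 7 else 'B'}{(i - 1) % 7 + 1}": ImplicitActuatorCfg(
+#             joint_names_expr=[f"Joint{i}"], stiffness=3000.0, damping=80.0)
+#         for i in range(1, 15)
+#     }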
+
+
+# =====================================================================
+# 2) DEFINE SCENE (WITH GROUND + ROBOT)
+# =====================================================================
+
+@configclass
+class X7DuoSceneCfg(InteractiveSceneCfg):
+    ground = AssetBaseCfg(
+        prim_path="/World/defaultGroundPlane",
+        spawn=sim_utils.GroundPlaneCfg()
+    )
+
+    robot = X7_DUO_CFG.replace(
+        prim_path="{ENV_REGEX_NS}/X7_duo"
+    )
+
+
+# =====================================================================
+# 3) SIMULATION LOOP
+# =====================================================================
+
+def run(sim, scene):
+    sim_dt = sim.get_physics_dt()
+    sim_time = 0.0
+    count = 0
+
+    print("[INFO] Running simulator ...")
+
+    while simulation_app.is_running():
+
+        # Every 500 frames -> reset robot
+        if count % 500 == 0:
+            print("[INFO] Reset robot state ...")
+
+            # Reset root pose
+            root = scene["robot"].data.default_root_state.clone()
+            root[:, :3] += scene.env_origins  # place at env origin
+            scene["robot"].write_root_pose_to_sim(root[:, :7])
+            scene["robot"].write_root_velocity_to_sim(root[:, 7:])
+
+            # Reset joints
+            jp = scene["robot"].data.default_joint_pos.clone()
+            jv = scene["robot"].data.default_joint_vel.clone()
+            scene["robot"].write_joint_state_to_sim(jp, jv)
+
+            scene.reset()
+
+        # ----------------------------------------------------
+        # ACTION: make both arms swing periodically
+        # ----------------------------------------------------
+        action = scene["robot"].data.default_joint_pos.clone()
+
+        # Left Arm (Joint1~7)
+        action[:, 0:7] = 0.4 * np.sin(2 * np.pi * 0.5 * sim_time)
+
+        # Right Arm (Joint8~14)
+        action[:, 7:14] = 0.4 * np.sin(2 * np.pi * 0.5 * sim_time)
+
+        # Send command
+        scene["robot"].set_joint_position_target(action)
+
+        # Step
+        scene.write_data_to_sim()
+        sim.step()
+        scene.update(sim_dt)
+
+        count += 1
+        sim_time += sim_dt
+
+
+# =====================================================================
+# 4) MAIN
+# =====================================================================
+
+def main():
+    sim_cfg = sim_utils.SimulationCfg(device=args.device)
+    sim = sim_utils.SimulationContext(sim_cfg)
+    sim.set_camera_view([4.0, 0.0, 2.5], [0, 0, 0.5])
+
+    scene_cfg = X7DuoSceneCfg(args.num_envs, env_spacing=2.0)
+    scene = InteractiveScene(scene_cfg)
+
+    sim.reset()
+
+    print("[INFO] Scene ready.")
+    run(sim, scene)
+
+
+if __name__ == "__main__":
+    main()
+    simulation_app.close()
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 00000000000..9431a635b11
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,3 @@
+version = 1
+revision = 3
+requires-python = ">=3.11"