-
Notifications
You must be signed in to change notification settings - Fork 9
Dev external occupancy map generation #23
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
ade8ec8
c421cd8
ea95492
6fbed37
5b4ecf8
b50f84a
2966cd4
5842398
7b200a9
35c8251
ff83733
baa0d1b
c09cdae
8694182
3ffd751
cdfeb65
16b76b7
4318aee
f9a9acc
95ba0e5
3c0071d
500d0a8
febf0de
65f5b4f
1a670a5
35e1d77
40a39f8
1e57a1a
9c01359
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -10,4 +10,5 @@ _*/ | |
/.vs | ||
|
||
/app | ||
data | ||
data | ||
/.venv |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,111 @@ | ||
import argparse | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why there is a "05" in the file title? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is given because this script is currently listed as an example (number 5). The purpose of the numbering is to provide developers with a rough progression to follow. I find this pattern helpful when I see it in other projects, like NVISII. Maybe we should instead place this under "scripts", which is unordered, and meant more for direct usage, not modification |
||
import pandas | ||
from reader import Reader | ||
import numpy as np | ||
import tqdm | ||
import PIL.Image | ||
import io | ||
|
||
import os | ||
import subprocess | ||
import glob | ||
import argparse | ||
|
||
|
||
def numpy_array_to_flattened_columns(key: str, value: np.ndarray):
    """Flatten *value* into a single column keyed by *key*.

    Multi-dimensional arrays additionally get a ``"<key>.shape"`` column
    holding the original shape, so the array can be reconstructed later.
    """
    result = {key: value.flatten()}
    if value.ndim > 1:
        # 1-D arrays need no reconstruction info, so only record the
        # original shape for higher-rank inputs.
        result[key + ".shape"] = tuple(value.shape)
    return result
|
||
|
||
def numpy_array_to_jpg_columns(key: str, value: np.ndarray):
    """Compress *value* (an image array) to JPEG bytes in a single column.

    Returns a one-entry dict mapping *key* to the encoded JPEG payload.
    """
    encoded = io.BytesIO()
    PIL.Image.fromarray(value).save(encoded, format="JPEG")
    return {key: encoded.getvalue()}
|
||
|
||
# Root data directory: the MOBILITY_GEN_DATA environment variable wins,
# otherwise fall back to ~/MobilityGenData.
DATA_DIR = os.environ.get("MOBILITY_GEN_DATA", os.path.expanduser("~/MobilityGenData"))
|
||
if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--input_dir", type=str, default=None,
                        help="Directory of recorded replays (default: <DATA_DIR>/replays).")
    parser.add_argument("--output_dir", type=str, default=None,
                        help="Directory for parquet output (default: <DATA_DIR>/parquet).")
    args = parser.parse_args()

    if args.input_dir is None:
        args.input_dir = os.path.join(DATA_DIR, "replays")

    if args.output_dir is None:
        args.output_dir = os.path.join(DATA_DIR, "parquet")

    # exist_ok avoids a crash when the directory already exists.
    os.makedirs(args.output_dir, exist_ok=True)

    input_recordings = glob.glob(os.path.join(args.input_dir, "*"))

    for processed_count, input_recording_path in enumerate(input_recordings, start=1):
        print(f"Processing {processed_count} / {len(input_recordings)}")

        recording_name = os.path.basename(input_recording_path)
        output_path = os.path.join(args.output_dir, recording_name + ".pqt")

        reader = Reader(recording_path=input_recording_path)

        # Accumulate one dict per frame and build the DataFrame once at the
        # end; assigning rows with DataFrame.loc inside the loop is quadratic.
        rows = []

        for index in tqdm.tqdm(range(len(reader))):

            data_dict = {}

            # Common data (saved as raw arrays)
            state_common = reader.read_state_dict_common(index=index)
            state_common.update(reader.read_state_dict_depth(index=index))
            state_common.update(reader.read_state_dict_segmentation(index=index))
            # TODO: handle normals

            for k, v in state_common.items():
                if isinstance(v, np.ndarray):
                    columns = numpy_array_to_flattened_columns(k, v)
                else:
                    columns = {k: v}
                data_dict.update(columns)

            # RGB data (saved as jpg)
            state_rgb = reader.read_state_dict_rgb(index=index)
            for k, v in state_rgb.items():
                if isinstance(v, np.ndarray):
                    columns = numpy_array_to_jpg_columns(k, v)
                else:
                    columns = {k: v}
                data_dict.update(columns)

            rows.append(data_dict)

        if not rows:
            # An empty recording previously crashed here: output stayed None
            # and None.to_parquet raised AttributeError. Skip it instead.
            print(f"Skipping empty recording: {recording_name}")
            continue

        output = pandas.DataFrame(rows)
        output.to_parquet(output_path, engine="pyarrow")
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,19 @@ | ||
"""Plot the robot linear velocity stored in a MobilityGen parquet recording.

Usage: python <script> <recording.pqt>
"""
import argparse

import matplotlib.pyplot as plt
import numpy as np
import pandas


def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("parquet_path", help="Path to a .pqt recording file.")
    args = parser.parse_args()

    data = pandas.read_parquet(args.parquet_path, engine="pyarrow")

    print(data.columns)
    # Each row holds a flattened velocity vector; stack into an (N, dim) array.
    vel = np.stack(data['robot.linear_velocity'].to_numpy())

    # Bug fix: both components were drawn with the same 'r-' style and were
    # indistinguishable; use distinct colors and a legend instead.
    plt.plot(vel[:, 0], 'r-', label='x')
    plt.plot(vel[:, 1], 'g-', label='y')
    plt.legend()
    plt.show()


if __name__ == "__main__":
    main()
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,28 @@ | ||
# Parquet Data Format | ||
|
||
Below is a description of the fields in a typical MobilityGen recording, common to all scenarios. | ||
|
||
| Field | Type | Shape | Description | | ||
|-------|------|-------|-------------| | ||
| robot.action | array | 2 | The command linear, angular velocity in the robot frame. | | ||
| robot.position | array | 3 | The xyz position of the robot in the world frame. | | ||
| robot.orientation | array | 4 | The quaternion of the robot in the world frame. | | ||
| robot.joint_positions| array | N | The joint positions of the robot. | | ||
| robot.joint_velocities | array | N | The joint velocities of the robot. | | ||
| robot.linear_velocity | array | 3 | The linear velocity of the robot in the world frame. (Retrieved by robot.get_linear_velocity() in isaac sim) | | ||
| robot.angular_velocity | array | 3 | The angular velocity of the robot in the world frame. (Retrieved by robot.get_angular_velocity() in isaac sim) | | ||
| robot.front_camera.left.segmentation_info | dict | | The segmentation info dictionary as retrieved by the Isaac Sim replicator annotator. | | ||
| robot.front_camera.left.segmentation_image | array | (HxW) flattened | The segmentation image as retrieved by the Isaac Sim replicator annotator. Flattened | | ||
| robot.front_camera.left.segmentation_image.shape | tuple | 2 | The segmentation image shape. | | ||
| robot.front_camera.left.rgb_image | bytes | | The RGB camera image compressed to JPG. | | ||
| robot.front_camera.left.depth_image | array | (HxW) | The depth image (in meters) flattened into an array. | | ||
| robot.front_camera.left.depth_image.shape | tuple | 2 | The shape of the depth image. | | ||
|
||
> Note, there are additional fields with similar semantics for other cameras, which we have excluded for brevity. | ||
|
||
Below are fields specific to the path following scenario | ||
|
||
| Field | Type | Shape | Description | | ||
|-------|------|-------|-------------| | ||
| target_path | array | (Nx2) flattened | The target path generated by the path planner in world coordinates. This is updated whenever the path planner is called, which occurs when the robot reaches a goal (or at the beginning of a new recording) | | ||
| target_path.shape | tuple | 2 | The shape of the target path array. | |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
"scene" seems more commonly used than "stage"?