Skip to content

Commit 4448d7d

Browse files
add initial version of metrics
1 parent 73d8f0a commit 4448d7d

File tree

9 files changed

+552
-26
lines changed

9 files changed

+552
-26
lines changed

README.md

Lines changed: 19 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -13,18 +13,25 @@
1313
# Omega-Prime: Data Model, Data Format and Python Library for Handling Ground Truth Traffic Data
1414

1515
Data Model, Format and Python Library for ground truth data containing information on dynamic objects, map and environmental factors optimized for representing urban traffic. The repository contains:
16-
- **Sepcification Document:** [./docs/omega_prime_specification.md](https://github.com/ika-rwth-aachen/omega-prime/tree/main/docs/omega_prime_specification.md)
17-
- **Data Model**: What signals exists and how these are defined.
18-
- **Data Format Specification**: How to exchange and store those signals.
19-
- **Python Library**:
20-
- **Creation** of omega-prime files from
21-
- ASAM OSI GroundTruth trace (e.g., output of esmini)
22-
- Table of moving object data (e.g., csv data)
23-
- ASAM OpenDRIVE map
24-
- [LevelXData datasets](https://levelxdata.com/) through [lxd-io](https://github.com/lenvt/lxd-io)
25-
- **Plotting** of data
26-
- **Validation** of data
27-
- **Interpolation** of data
16+
### Data Model and Specification
17+
see [./docs/omega_prime_specification.md](https://github.com/ika-rwth-aachen/omega-prime/tree/main/docs/omega_prime_specification.md)
18+
19+
- 🌍 **Data Model**: What signals exist and how they are defined.
20+
- 🧾 **Data Format Specification**: How to exchange and store those signals.
21+
22+
### Python Library
23+
- 🔨 **Create** omega-prime files from many sources (see [./tutorial.ipynb](https://github.com/ika-rwth-aachen/omega-prime/blob/main/tutorial.ipynb)):
24+
- ASAM OSI GroundTruth trace (e.g., output of esmini)
25+
- Table of moving object data (e.g., csv data)
26+
- ASAM OpenDRIVE map
27+
- [LevelXData datasets](https://levelxdata.com/) through [lxd-io](https://github.com/lenvt/lxd-io)
28+
- extend yourself by subclassing [DatasetConverter](omega_prime/converters/converter.py)
29+
- 🗺️ **Map Association**: Associate Object Location with Lanes from OpenDRIVE or OSI Maps (see [tutorial_locator.ipynb](https://github.com/ika-rwth-aachen/omega-prime/tree/main/tutorial_locator.ipynb))
30+
- 📺 **Plotting** of data: interactive top view plots using [altair](https://altair-viz.github.io/)
31+
- **Validation** of data: check if your data conforms to the omega-prime specification (e.g., correct yaw) using [pandera](https://pandera.readthedocs.io/en/stable/)
32+
- 📐 **Interpolation** of data: bring your data into a fixed frequency
33+
- 📈 **Metrics**: compute interaction metrics like PET, TTC, THW (see [tutorial_metrics.ipynb](https://github.com/ika-rwth-aachen/omega-prime/tree/main/tutorial_metrics.ipynb))
34+
- 🚀 **Fast Processing** directly on DataFrames using [polars](https://pola.rs/), [polars-st](https://oreilles.github.io/polars-st/)
2835

2936
The data model and format utilize [ASAM OpenDRIVE](https://publications.pages.asam.net/standards/ASAM_OpenDRIVE/ASAM_OpenDRIVE_Specification/latest/specification/index.html#) and [ASAM Open-Simulation-Interface GroundTruth messages](https://opensimulationinterface.github.io/osi-antora-generator/asamosi/V3.7.0/specification/index.html). omega-prime sets requirements on presence and quality of ASAM OSI GroundTruth messages and ASAM OpenDRIVE files and defines a file format for the exchange and storage of these.
3037

omega_prime/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
__pdoc__ = {}
22
__pdoc__["converters"] = False
33
""" .. include:: ./../README.md """
4-
from . import converters
4+
from . import converters, metrics
55
from .map_odr import MapOdr
66
from .locator import LaneRelation, Locator
77
from .map import Lane, LaneBoundary, Map, MapOsi
@@ -18,4 +18,5 @@
1818
"Locator",
1919
"LaneRelation",
2020
"converters",
21+
"metrics",
2122
]

omega_prime/metrics.py

Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
import polars as pl
2+
from dataclasses import dataclass, field
3+
from collections.abc import Callable
4+
import polars_st as st
5+
from .recording import Recording
6+
import graphlib
7+
8+
9+
@dataclass
class Metric:
    """A single metric: a compute function plus declarations of which
    dataframe columns and named property frames it produces and requires.

    The declarations are consumed by ``MetricManager`` to topologically
    order metric execution.
    """

    # Called as compute_func(df, **required_properties); returns the
    # (possibly extended) LazyFrame and a dict of new property LazyFrames.
    # NOTE: the previous annotation `Callable[[pl.LazyFrame, ...], ...]`
    # placed an Ellipsis inside the parameter list, which is invalid typing;
    # `Callable[..., X]` is the correct spelling for a variadic signature.
    compute_func: Callable[..., tuple[pl.LazyFrame, dict[str, pl.LazyFrame]]]
    computes_columns: list[str] = field(default_factory=list)
    computes_properties: list[str] = field(default_factory=list)
    requires_columns: list[str] = field(default_factory=list)
    requires_properties: list[str] = field(default_factory=list)

    def compute_lazy(self, df: pl.LazyFrame, **kwargs) -> tuple[pl.LazyFrame, dict[str, pl.LazyFrame]]:
        """Run the metric lazily (nothing is collected here); ``kwargs``
        carries the required property frames by name."""
        return self.compute_func(df, **kwargs)
19+
20+
21+
@dataclass
class MetricManager:
    """Orders and executes a collection of ``Metric`` objects.

    Builds a dependency graph from each metric's declared required/computed
    columns and properties, validates that every requirement is produced by
    some metric, and topologically sorts the metrics for execution.
    """

    metrics: list[Metric]
    # Graph nodes are metric indices (int) or "column_*"/"property_*" names
    # (str); edges point from a node to the nodes it depends on.
    _dependencies: dict[int | str, list[int | str]] = field(init=False)
    _ordered_metrics: list[Metric] = field(init=False)

    def __post_init__(self):
        # Each produced column/property depends on the metric that computes
        # it; each metric depends on the columns/properties it requires.
        self._dependencies = {
            val: [i]
            for i, m in enumerate(self.metrics)
            for val in [f"column_{n}" for n in m.computes_columns] + [f"property_{n}" for n in m.computes_properties]
        } | {
            i: [f"column_{n}" for n in m.requires_columns] + [f"property_{n}" for n in m.requires_properties]
            for i, m in enumerate(self.metrics)
        }

        # Collect ALL requirements that no metric computes. The previous
        # dict comprehension keyed repeatedly on `k`, so only the LAST
        # missing dependency per node survived and the error message could
        # under-report what was missing.
        unresolved_dependencies = {
            k: missing
            for k, deps in self._dependencies.items()
            if (missing := [d for d in deps if d not in self._dependencies])
        }
        if len(unresolved_dependencies) > 0:
            error_dict = {f"self.metrics[{k}]": v for k, v in unresolved_dependencies.items()}
            raise RuntimeError(
                f"There are columns and properties required by metrics, that are never computed: {error_dict}"
            )

        ts = graphlib.TopologicalSorter(self._dependencies)
        # static_order() yields both metric indices and column/property name
        # nodes; keep only the metric indices, in dependency order.
        self._ordered_metrics = [self.metrics[o] for o in ts.static_order() if isinstance(o, int)]

    def __repr__(self):
        return f"computes columns: {[c for m in self._ordered_metrics for c in m.computes_columns]} - computes properties {[p for m in self._ordered_metrics for p in m.computes_properties]}"

    def compute(self, r: Recording, *args, **kwargs) -> tuple[pl.DataFrame, dict[str, pl.DataFrame]]:
        """Run all metrics on a recording and collect the results.

        Ensures polygon/geometry columns exist on the recording, threads the
        LazyFrame through each metric in dependency order, then collects the
        frame and all property frames in one ``collect_all`` pass.
        """
        if "polygon" not in r._df.columns:
            r._df = r._add_polygons(r._df)
        if "geometry" not in r._df.columns:
            r._df = r._df.with_columns(geometry=st.from_shapely("polygon"))

        df = pl.LazyFrame(r._df)
        properties = {}
        for m in self._ordered_metrics:
            # Pass only the properties this metric declared as required.
            df, new_p = m.compute_lazy(df, *args, **{k: properties[k] for k in m.requires_properties}, **kwargs)
            properties |= new_p
        # Single collect_all so polars can share work between the plans.
        res = pl.collect_all([df] + list(properties.values()))
        df, computed_props = res[0], res[1:]
        return df, dict(zip(properties.keys(), computed_props))
66+
67+
68+
def add_driven_distance_and_vel(df, *args, **kwargs) -> tuple[pl.DataFrame, dict[str, pl.DataFrame]]:
    """Add per-object cumulative driven distance and absolute velocity.

    Expects columns ``x``, ``y``, ``vel_x``, ``vel_y`` and the object
    identifier ``idx``. Extra ``*args``/``**kwargs`` are accepted (and
    ignored) to satisfy the generic ``Metric.compute_func`` convention.

    Returns the extended frame and an empty property dict.
    """
    # Euclidean step length between consecutive samples of the same object.
    step = (pl.col("x").diff() ** 2 + pl.col("y").diff() ** 2).sqrt()
    return df.with_columns(
        # Window the WHOLE chain (diff, fill_null, cum_sum) per object:
        # previously only the diff was windowed, so cum_sum accumulated
        # across the full frame and every object after the first started
        # at the previous object's running total instead of 0.
        step.fill_null(0.0).cum_sum().over("idx").alias("distance_traveled"),
        (pl.col("vel_x") ** 2 + pl.col("vel_y") ** 2).sqrt().alias("vel"),
    ), {}
78+
79+
80+
# Base kinematics metric: produces the columns other metrics build on.
# NOTE(review): the name keeps its original (misspelled, camelCase) form for
# backward compatibility with existing imports.
drivenDistancenAndVel = Metric(
    compute_func=add_driven_distance_and_vel,
    computes_columns=["distance_traveled", "vel"],
)
81+
82+
83+
def get_timegaps(df, ego_id, *args, time_buffer=2e9, **kwargs):
    """Compute timegap tables between one ego object and all other objects.

    For every pair (ego, other) whose geometries spatially intersect within
    ``time_buffer`` nanoseconds of each other, the timegap is the signed time
    difference (other minus ego) in seconds. A "projected" timegap
    (``p_timegap``) is also derived by extrapolating each object's arrival at
    the overlap location from its current velocity and remaining distance.
    These resemble PET/THW-style interaction measures — presumably; confirm
    against the omega-prime metrics tutorial.

    Parameters
    ----------
    df:
        LazyFrame with columns ``idx``, ``total_nanos``, ``geometry``,
        ``distance_traveled``, ``vel`` (shapes assumed from usage — TODO
        confirm against Recording schema).
    ego_id:
        ``idx`` of the ego object.
    time_buffer:
        Half-width of the temporal search window in nanoseconds
        (default 2e9 = 2 s).

    Returns
    -------
    The unchanged ``df`` and a dict of lazy property frames:
    ``timegaps``, ``min_timegaps``, ``p_timegaps``, ``min_p_timegaps``.
    """
    ego_df = df.filter(idx=ego_id)

    # Pair every row with every ego row; ego-side columns get the "_ego" suffix.
    crossed = df.join(ego_df, how="cross", suffix="_ego")

    # Keep only pairs within +/- time_buffer of the ego timestamp, and drop
    # ego-vs-ego pairs.
    crossed = crossed.filter(
        (pl.col("total_nanos_ego") - time_buffer) <= pl.col("total_nanos"),
        (pl.col("total_nanos_ego") + time_buffer) >= pl.col("total_nanos"),
        pl.col("idx_ego") != pl.col("idx"),
    )

    # All pairs whose footprints spatially intersect; timegap in seconds,
    # signed: positive means the other object is there AFTER the ego.
    all_timegaps = (
        crossed.filter(pl.col("geometry").st.intersects(pl.col("geometry_ego")))
        .with_columns(timegap=(pl.col("total_nanos") - pl.col("total_nanos_ego")) / 1e9)
        .select(
            "idx_ego", "idx", "total_nanos_ego", "total_nanos", "timegap", "distance_traveled", "distance_traveled_ego"
        )
    )

    # Per (other, ego, ego-time): the intersection row with the smallest
    # absolute timegap.
    timegaps = (
        all_timegaps.group_by("idx", "idx_ego", "total_nanos_ego")
        .agg(
            pl.col("timegap", "total_nanos", "distance_traveled", "distance_traveled_ego").get(
                pl.col("timegap").abs().arg_min()
            ),
        )
        .sort("idx_ego", "idx", "total_nanos_ego")
        .select(
            "idx_ego", "idx", "total_nanos_ego", "timegap", "total_nanos", "distance_traveled", "distance_traveled_ego"
        )
    )
    # Per object pair: the overall minimum-|timegap| value.
    min_timegaps = timegaps.group_by("idx_ego", "idx").agg(
        pl.col("timegap").get(pl.col("timegap").abs().arg_min()).alias("min_timegap")
    )

    # Projected timegaps: for rows BEFORE the overlap, estimate time-to-overlap
    # from remaining distance / current speed; for rows at/after the overlap,
    # use the actual (non-positive) time difference.
    # NOTE(review): the `otherwise` branches divide by `vel` / `vel_ego` —
    # a standing object (vel == 0) would yield inf/NaN; verify upstream data.
    p_timegaps = (
        crossed.join(timegaps, how="right", suffix="_overlap", on=["idx", "idx_ego"])
        .with_columns(
            pl.when(pl.col("total_nanos") >= pl.col("total_nanos_overlap"))
            .then((pl.col("total_nanos_overlap") - pl.col("total_nanos")) / 1e9)
            .otherwise((pl.col("distance_traveled_overlap") - pl.col("distance_traveled")) / pl.col("vel"))
            .alias("time_to_overlap"),
            pl.when(pl.col("total_nanos_ego") >= pl.col("total_nanos_ego_overlap"))
            .then((pl.col("total_nanos_ego_overlap") - pl.col("total_nanos_ego")) / 1e9)
            .otherwise((pl.col("distance_traveled_ego_overlap") - pl.col("distance_traveled_ego")) / pl.col("vel_ego"))
            .alias("time_to_overlap_ego"),
        )
        .with_columns(
            # Difference of projected arrival times, shifted by the sampling
            # offset between the two rows; negated so the sign convention
            # matches `timegap`.
            -(
                pl.col("time_to_overlap_ego")
                - pl.col("time_to_overlap")
                + (pl.col("total_nanos_ego") - pl.col("total_nanos")) / 1e9
            ).alias("p_timegap")
        )
        .group_by("idx_ego", "idx", "total_nanos_ego")
        .agg(
            # Smallest |p_timegap| per (pair, ego-time); nulls pushed last so
            # a valid value wins when present.
            pl.col("p_timegap", "total_nanos")
            .sort_by(pl.col("p_timegap").abs(), descending=False, nulls_last=True)
            .first()
        )
        .sort("idx_ego", "idx", "total_nanos_ego")
    )

    # Per object pair: the overall minimum-|p_timegap| value.
    min_p_timegaps = p_timegaps.group_by("idx_ego", "idx").agg(
        pl.col("p_timegap").sort_by(pl.col("p_timegap").abs(), descending=False).first()
    )

    # df is returned unchanged: this metric contributes only property frames.
    return df, {
        "timegaps": timegaps,
        "min_timegaps": min_timegaps,
        "p_timegaps": p_timegaps,
        "min_p_timegaps": min_p_timegaps,
    }
156+
157+
158+
# Timegap / projected-timegap metric: consumes the columns produced by
# `drivenDistancenAndVel` and yields only property tables (no new columns).
timegaps_and_p_timegaps = Metric(
    compute_func=get_timegaps,
    requires_columns=["distance_traveled", "vel"],
    computes_columns=[],
    computes_properties=["timegaps", "min_timegaps", "p_timegaps", "min_p_timegaps"],
)

omega_prime/recording.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -604,6 +604,7 @@ def plot_altair(self, start_frame=0, end_frame=-1, plot_map=True, metric_column=
604604
]
605605
)
606606
self._map_df = self._map_df.with_columns(geometry=st.from_shapely("polygon"))
607+
self._map_df.with_columns(pl.col("geometry").st.simplify(tolerance=1))
607608

608609
if end_frame != -1:
609610
df = self._df.filter(pl.col("frame") < end_frame, pl.col("frame") >= start_frame)
@@ -631,11 +632,12 @@ def plot_altair(self, start_frame=0, end_frame=-1, plot_map=True, metric_column=
631632
},
632633
"properties": {},
633634
}
634-
map = (
635-
self._map_df["geometry", "idx", "type"]
636-
.st.plot(color="green", fillOpacity=0.4)
637-
.encode(tooltip=["properties.idx:N", "properties.type:O"])
638-
)
635+
if plot_map:
636+
map = (
637+
self._map_df["geometry", "idx", "type"]
638+
.st.plot(color="green", fillOpacity=0.4)
639+
.encode(tooltip=["properties.idx:N", "properties.type:O"])
640+
)
639641
mvs = (
640642
df["geometry", "idx", "frame", "type"]
641643
.st.plot()
@@ -651,7 +653,7 @@ def plot_altair(self, start_frame=0, end_frame=-1, plot_map=True, metric_column=
651653
)
652654

653655
map_view = (
654-
(map + mvs)
656+
((map + mvs) if plot_map else mvs)
655657
.project("identity", reflectY=True, fit=pov)
656658
.properties(height=int(ymax - ymin) * 3, width=int(xmax - xmin) * 3, title="Map")
657659
)

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ testpaths = [
8989
"tests",
9090
"tutorial.ipynb",
9191
"tutorial_locator.ipynb",
92+
# "tutorial_metrics.ipynb",
9293
"README.md"
9394
]
9495
log_cli = true

requirements.txt

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ betterproto2==0.3.1
1414
# via betterosi
1515
betterproto2-rust-codec==0.1.3
1616
# via betterproto2
17-
certifi==2025.1.31
17+
certifi==2025.4.26
1818
# via
1919
# pyogrio
2020
# pyproj
@@ -81,7 +81,7 @@ multidict==6.4.3
8181
# via grpclib
8282
mypy-extensions==1.1.0
8383
# via typing-inspect
84-
narwhals==1.36.0
84+
narwhals==1.37.0
8585
# via altair
8686
networkx==3.4.2
8787
# via omega-prime (./pyproject.toml)
@@ -115,7 +115,7 @@ pandera==0.23.1
115115
# via omega-prime (./pyproject.toml)
116116
pillow==11.2.1
117117
# via matplotlib
118-
polars==1.27.1
118+
polars==1.28.1
119119
# via
120120
# omega-prime (./pyproject.toml)
121121
# lxd-io
@@ -127,7 +127,7 @@ protobuf==6.30.2
127127
# via
128128
# betterosi
129129
# mcap-protobuf-support
130-
pyarrow==19.0.1
130+
pyarrow==20.0.0
131131
# via
132132
# pandas
133133
# polars-st
@@ -186,7 +186,7 @@ tqdm-joblib==0.0.4
186186
# via omega-prime (./pyproject.toml)
187187
typeguard==4.4.2
188188
# via pandera
189-
typer==0.15.2
189+
typer==0.15.3
190190
# via
191191
# omega-prime (./pyproject.toml)
192192
# betterosi

tutorial.ipynb

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -243,7 +243,8 @@
243243
"execution_count": null,
244244
"metadata": {
245245
"tags": [
246-
"nbval-ignore-output"
246+
"nbval-ignore-output",
247+
"nbval-skip"
247248
]
248249
},
249250
"outputs": [],

0 commit comments

Comments
 (0)