-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathvisualize.py
More file actions
217 lines (185 loc) · 8.2 KB
/
visualize.py
File metadata and controls
217 lines (185 loc) · 8.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
import cv2
import numpy as np
from camera import view_frame_from_device_frame
def fisheye_project_points(points, distortion_coeffs, fx, fy, cx, cy, image_width, image_height):
    """
    Project an array of 3D points onto the image plane with fisheye distortion.

    Points with non-positive depth (Z <= 0) are filtered out, as are points
    whose projected pixel coordinates fall outside the image boundaries.

    Parameters
    ----------
    points : ndarray of shape (N, 3)
        3D points, one (X, Y, Z) row per point.
    distortion_coeffs : sequence of 4 floats
        Fisheye distortion coefficients [k1, k2, k3, k4].
    fx, fy : float
        Focal lengths in pixel units.
    cx, cy : float
        Principal point coordinates in pixels.
    image_width : int
        Width of the image in pixels.
    image_height : int
        Height of the image in pixels.

    Returns
    -------
    projected_points : ndarray of shape (M, 2)
        Pixel coordinates after fisheye distortion for valid points.
    valid_indices : ndarray of shape (M,)
        Indices into `points` corresponding to each projected row.
    """
    points = np.asarray(points, dtype=float)

    # Keep only points in front of the camera.
    depth_mask = points[:, 2] > 0
    valid_points = points[depth_mask]
    valid_indices = np.nonzero(depth_mask)[0]
    if valid_points.shape[0] == 0:
        return np.empty((0, 2)), np.empty((0,), dtype=int)

    # Step 1: pinhole projection to normalized image coordinates.
    x_norm = valid_points[:, 0] / valid_points[:, 2]
    y_norm = valid_points[:, 1] / valid_points[:, 2]

    # Step 2: polar radius and incidence angle of each normalized point.
    r = np.sqrt(x_norm**2 + y_norm**2)
    theta = np.arctan(r)

    # Step 3: fisheye distortion polynomial on the angle.
    k1, k2, k3, k4 = distortion_coeffs
    theta_d = theta * (1 + k1 * theta**2 + k2 * theta**4 + k3 * theta**6 + k4 * theta**8)

    # Steps 4-5: radial scale theta_d / theta, applied to the normalized
    # coordinates.  Computed only where r > 0 so on-axis points (r == 0,
    # hence theta == 0) don't perform a 0/0 division -- the original
    # np.where formulation evaluated both branches and raised
    # RuntimeWarnings there.  On-axis points keep scale 1 (x_norm and
    # y_norm are 0 there, so the scale is irrelevant anyway).
    scale = np.ones_like(r)
    nonzero = r > 0
    scale[nonzero] = theta_d[nonzero] / theta[nonzero]
    x_d = x_norm * scale
    y_d = y_norm * scale

    # Step 6: normalized distorted coordinates -> pixel coordinates.
    u = fx * x_d + cx
    v = fy * y_d + cy
    projected_points_all = np.column_stack((u, v))

    # Step 7: drop points that land outside the image.
    in_image = (u >= 0) & (u < image_width) & (v >= 0) & (v < image_height)
    return projected_points_all[in_image], valid_indices[in_image]
def draw_path(
    device_path,
    img,
    K,
    R=None,
    D=None,
    width=0.25,
    height=0.41,
    fill_color=None,  # e.g. (128, 0, 255)
    line_color=None,  # e.g. (0, 255, 0)
    line_width=None,  # e.g. 5
    pt_color=(255, 0, 255),
    pt_size=0,
):
    """
    Draw a projected path onto an image.

    The path is lifted by `height` and offset left/right by `width` in the
    device frame (presumably meters -- TODO confirm units), rotated into the
    view frame, projected through a fisheye camera model, and rendered as a
    quad strip (optionally filled and/or outlined) plus optional centerline
    points.

    Parameters
    ----------
    device_path : ndarray of shape (N, 3)
        Path points in the device frame.
    img : ndarray
        Image to draw on; the (possibly blended) image is returned.
    K : ndarray of shape (3, 3)
        Camera intrinsic matrix.
    R : ndarray of shape (3, 3), optional
        Device rotation; identity when omitted.
    D : sequence of 4 floats, optional
        Fisheye distortion coefficients [k1, k2, k3, k4]; zeros when omitted.
    width : float
        Half-width of the drawn strip.
    height : float
        Vertical offset added to the path.
    fill_color, line_color : BGR tuple or None
        Enable translucent fill / polygon outline when given.
    line_width : int or None
        Outline thickness (used only with line_color).
    pt_color : BGR tuple or None
        Color of the centerline points; None disables them.
    pt_size : int
        Radius of the centerline points.

    Returns
    -------
    ndarray
        The image with the path drawn.
    """
    # Sensible defaults so the optional geometry inputs may be omitted
    # (previously R=None / D=None crashed on first use).
    if R is None:
        R = np.eye(3)
    if D is None:
        D = np.zeros(4)

    # Lift the path and build left/center/right polylines in the device frame.
    device_path_l = device_path + np.array([0, 0, height])
    device_path_c = device_path + np.array([0, 0, height])
    device_path_r = device_path + np.array([0, 0, height])
    device_path_l[:, 1] -= width
    device_path_r[:, 1] += width

    # Rotate into the camera view frame.
    # TODO(Brad): add mounting angles
    path_l_view_frame = (view_frame_from_device_frame @ R @ device_path_l.T).T
    path_r_view_frame = (view_frame_from_device_frame @ R @ device_path_r.T).T
    path_c_view_frame = (view_frame_from_device_frame @ R @ device_path_c.T).T

    H, W = img.shape[0:2]
    fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
    img_pts_l, _ = fisheye_project_points(path_l_view_frame, D, fx, fy, cx, cy, image_height=H, image_width=W)
    img_pts_r, _ = fisheye_project_points(path_r_view_frame, D, fx, fy, cx, cy, image_height=H, image_width=W)
    img_pts_c, _ = fisheye_project_points(path_c_view_frame, D, fx, fy, cx, cy, image_height=H, image_width=W)

    def u_at_bottom(poly, H):
        """Return u where the polyline intersects v = H-1. If no segment crosses,
        linearly extrapolate from the first segment. Returns None if impossible."""
        if len(poly) < 2:
            return None
        v_bot = H - 1
        # 1) Try to find a *segment* that crosses (or touches) v = H-1.
        for (u0, v0), (u1, v1) in zip(poly[:-1], poly[1:]):
            # If either endpoint is exactly on the bottom row, that's the intersection.
            if np.isfinite(v0) and abs(v0 - v_bot) < 1e-6:
                return float(u0)
            if np.isfinite(v1) and abs(v1 - v_bot) < 1e-6:
                return float(u1)
            # A sign change of (v - v_bot) across the segment means it crosses.
            if np.isfinite(v0) and np.isfinite(v1):
                d0 = v0 - v_bot
                d1 = v1 - v_bot
                if d0 == 0 or d1 == 0 or (d0 < 0) != (d1 < 0):
                    denom = v1 - v0
                    if abs(denom) < 1e-9:
                        continue
                    t = (v_bot - v0) / denom  # in [0, 1] when the crossing lies on the segment
                    return float(u0 + t * (u1 - u0))
        # 2) No crossing found: extrapolate from the first segment.
        (u0, v0), (u1, v1) = poly[0], poly[1]
        if not (np.isfinite(v0) and np.isfinite(v1)) or abs(v1 - v0) < 1e-9:
            return None
        t = (v_bot - v0) / (v1 - v0)
        return float(u0 + t * (u1 - u0))

    def valid_and_clamped(pts, W, H):
        """Drop non-finite points and clamp the rest into the image bounds."""
        pts = np.asarray(pts, dtype=np.float32)
        pts = pts[np.isfinite(pts).all(axis=1)]
        if pts.size == 0:
            return pts
        pts[:, 0] = np.clip(pts[:, 0], 0, W - 1)
        pts[:, 1] = np.clip(pts[:, 1], 0, H - 1)
        return pts

    img_pts_l = valid_and_clamped(img_pts_l, W, H)
    img_pts_r = valid_and_clamped(img_pts_r, W, H)
    img_pts_c = valid_and_clamped(img_pts_c, W, H)
    n_pts = min(len(img_pts_l), len(img_pts_r), len(img_pts_c))
    if n_pts < 2:
        return img  # not enough projected points to draw anything

    # Build the strip: one quad joining each consecutive left/right pair.
    pts_list = []
    for i in range(1, n_pts):
        u1, v1 = img_pts_l[i - 1]
        u2, v2 = img_pts_r[i - 1]
        u3, v3 = img_pts_l[i]
        u4, v4 = img_pts_r[i]
        quad = np.array([[u1, v1], [u2, v2], [u4, v4], [u3, v3]], np.int32).reshape((-1, 1, 2))
        pts_list.append(quad)

    # Anchor a base polygon exactly where the path edges hit the bottom scanline.
    uL_bot = u_at_bottom(img_pts_l, H)
    uR_bot = u_at_bottom(img_pts_r, H)
    if uL_bot is not None and uR_bot is not None:
        uL_bot = int(np.clip(uL_bot, 0, W - 1))
        uR_bot = int(np.clip(uR_bot, 0, W - 1))
        base_poly = np.array(
            [[uL_bot, H - 1], [uR_bot, H - 1], [img_pts_r[0, 0], img_pts_r[0, 1]], [img_pts_l[0, 0], img_pts_l[0, 1]]],
            np.int32,
        ).reshape((-1, 1, 2))
        # Insert before the rest so the blending order looks natural.
        pts_list.insert(0, base_poly)

    # Translucent fill of all polygons at once.
    if fill_color is not None:
        overlay = cv2.fillPoly(img.copy(), pts_list, color=fill_color)
        alpha = 0.5  # transparency factor; adjust as needed
        img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
    # Polygon outlines.
    if line_color is not None:
        img = cv2.polylines(img, pts_list, isClosed=True, color=line_color, thickness=line_width)
    # Centerline points.  NOTE: pt_color was previously ignored (the color was
    # hard-coded to magenta); it is honored now, and the center is passed as a
    # tuple as cv2.circle expects.
    if pt_color is not None:
        for img_pt in img_pts_c:
            cv2.circle(img, (int(img_pt[0]), int(img_pt[1])), pt_size, pt_color, thickness=-1)  # filled circle
    return img