|
36 | 36 | from .metadata import read_metadata, PanoramaMetadata |
37 | 37 |
|
38 | 38 |
|
| 39 | +_MAX_SCALE = np.iinfo(np.int16).max // (2 * np.pi) |
| 40 | + |
| 41 | + |
39 | 42 | def rot_x(angle: float) -> np.typing.NDArray[np.float64]: |
40 | 43 | return np.array( |
41 | 44 | ( |
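A quick note on the new `_MAX_SCALE` constant: the `np.iinfo(np.int16)` term suggests the clamp exists so that the full-turn pixel width `round(scale * 2 * np.pi)` stays inside the `int16` range (the blender output is `int16`, per the comment near the end of this diff). A minimal sketch of that bound, with hypothetical values:

```python
import numpy as np

# Hedged sketch of the bound _MAX_SCALE appears to enforce (assumption:
# the full-turn pixel width must fit into an int16).
_MAX_SCALE = np.iinfo(np.int16).max // (2 * np.pi)  # int // float -> ~5215.0

scale = 6000.0                        # hypothetical oversized scale
scale = min(scale, _MAX_SCALE)        # clamp, as run_annotate now does
full_360 = round(scale * 2 * np.pi)   # pixel width of a full 360° turn

assert full_360 <= np.iinfo(np.int16).max  # 32766 <= 32767
```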
@@ -224,6 +227,8 @@ def run_annotate( |
224 | 227 | if scale is None: |
225 | 228 | scale = (f_w + f_h) / 2 |
226 | 229 |
|
| 230 | + scale = min(scale, _MAX_SCALE) |
| 231 | + |
227 | 232 | instrinsics: npt.NDArray[np.float32] = np.array( |
228 | 233 | ( |
229 | 234 | (f_w, 0.0, width/2), |
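The intrinsics matrix is cut off by the hunk boundary above. For orientation only, here is a hedged sketch of the conventional pinhole layout the visible first row suggests; the last two rows and all values are assumptions, not taken from this diff:

```python
import numpy as np
import numpy.typing as npt

# Hypothetical focal lengths and image size, just to make the sketch runnable.
f_w, f_h, width, height = 1000.0, 1050.0, 1920, 1080

# Conventional pinhole intrinsics: focal lengths on the diagonal and the
# principal point at the image centre. Only the first row appears in the
# hunk above; the remaining rows are assumed.
intrinsics: npt.NDArray[np.float32] = np.array(
    (
        (f_w, 0.0, width / 2),
        (0.0, f_h, height / 2),
        (0.0, 0.0, 1.0),
    ),
    dtype=np.float32,
)
```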
@@ -306,93 +311,95 @@ def run_annotate( |
306 | 311 | ) # type: ignore[call-overload] |
307 | 312 |
|
308 | 313 | console.print("Done") |
309 | | - console.print("Annotating points... ", end="") |
| 314 | + if len(points) > 0: |
| 315 | + console.print("Annotating points... ", end="") |
310 | 316 |
|
311 | | - # Top left image center point for reference |
312 | | - origin_x, origin_y, _, _ = cv.detail.resultRoi( |
313 | | - corners, |
314 | | - [(i.shape[1], i.shape[0]) for i in images_warped] |
315 | | - ) |
316 | | - tl_x, tl_y, tl_hz, tl_v = centers[0] |
317 | | - tl_x -= origin_x |
318 | | - tl_y -= origin_y |
| 317 | + # Top left image center point for reference |
| 318 | + origin_x, origin_y, _, _ = cv.detail.resultRoi( |
| 319 | + corners, |
| 320 | + [(i.shape[1], i.shape[0]) for i in images_warped] |
| 321 | + ) |
| 322 | + tl_x, tl_y, tl_hz, tl_v = centers[0] |
| 323 | + tl_x -= origin_x |
| 324 | + tl_y -= origin_y |
319 | 325 |
|
320 | | - if scale is None: |
321 | | - scale = 1000 |
| 326 | + if scale is None: |
| 327 | + scale = 1000 |
322 | 328 |
|
323 | | - full_360 = round(scale * np.pi * 2) |
| 329 | + full_360 = round(scale * np.pi * 2) |
324 | 330 |
|
325 | | - if camera_offset is None: |
326 | | - camera_offset = mean_coordinate(cam_offsets) |
| 331 | + if camera_offset is None: |
| 332 | + camera_offset = mean_coordinate(cam_offsets) |
327 | 333 |
|
328 | | - for pt, coord, label in points: |
329 | | - # To calculate the approximate "telescope" rotation, a preliminary |
330 | | - # polar position is needed. Then the camera offset is rotated with the |
331 | | - # preliminary angles. |
332 | | - prelim_hz, prelim_v, _ = (coord - center).to_polar() |
333 | | - offset_rot = ( |
334 | | - rot_z(float(prelim_hz)) @ rot_x(np.pi / 2 - float(prelim_v)) |
335 | | - ) |
336 | | - pt_hz, pt_v, _ = ( |
337 | | - coord |
338 | | - - (center + apply_rotation(camera_offset, offset_rot)) |
339 | | - ).to_polar() |
340 | | - |
341 | | - pt_hz_f = float(pt_hz - tl_hz) |
342 | | - pt_v_f = float(pt_v - tl_v) |
343 | | - pt_x = round(tl_x + pt_hz_f * scale) % full_360 |
344 | | - pt_y = round(tl_y + pt_v_f * scale) % full_360 |
345 | | - |
346 | | - cv.drawMarker( |
347 | | - result, |
348 | | - (pt_x, pt_y), |
349 | | - color, |
350 | | - marker, |
351 | | - markersize, |
352 | | - thickness |
353 | | - ) |
| 334 | + for pt, coord, label in points: |
| 335 | + # To calculate the approximate "telescope" rotation, a preliminary |
| 336 | + # polar position is needed. Then the camera offset is rotated with |
| 337 | + # the preliminary angles. |
| 338 | + prelim_hz, prelim_v, _ = (coord - center).to_polar() |
| 339 | + offset_rot = ( |
| 340 | + rot_z(float(prelim_hz)) @ rot_x(np.pi / 2 - float(prelim_v)) |
| 341 | + ) |
| 342 | + pt_hz, pt_v, _ = ( |
| 343 | + coord |
| 344 | + - (center + apply_rotation(camera_offset, offset_rot)) |
| 345 | + ).to_polar() |
| 346 | + |
| 347 | + pt_hz_f = float(pt_hz - tl_hz) |
| 348 | + pt_v_f = float(pt_v - tl_v) |
| 349 | + pt_x = round(tl_x + pt_hz_f * scale) % full_360 |
| 350 | + pt_y = round(tl_y + pt_v_f * scale) % full_360 |
| 351 | + |
| 352 | + cv.drawMarker( |
| 353 | + result, |
| 354 | + (pt_x, pt_y), |
| 355 | + color, |
| 356 | + marker, |
| 357 | + markersize, |
| 358 | + thickness |
| 359 | + ) |
354 | 360 |
|
355 | | - cv.putText( |
356 | | - result, |
357 | | - pt, |
358 | | - text_pos( |
| 361 | + cv.putText( |
| 362 | + result, |
359 | 363 | pt, |
360 | | - (pt_x, pt_y), |
361 | | - offset, |
| 364 | + text_pos( |
| 365 | + pt, |
| 366 | + (pt_x, pt_y), |
| 367 | + offset, |
| 368 | + font, |
| 369 | + fontscale, |
| 370 | + thickness, |
| 371 | + justify |
| 372 | + ), |
362 | 373 | font, |
363 | 374 | fontscale, |
| 375 | + color, |
364 | 376 | thickness, |
365 | | - justify |
366 | | - ), |
367 | | - font, |
368 | | - fontscale, |
369 | | - color, |
370 | | - thickness, |
371 | | - bottomLeftOrigin=False |
372 | | - ) |
373 | | - if label == "": |
374 | | - continue |
| 377 | + bottomLeftOrigin=False |
| 378 | + ) |
| 379 | + if label == "": |
| 380 | + continue |
375 | 381 |
|
376 | | - cv.putText( |
377 | | - result, |
378 | | - label, |
379 | | - text_pos( |
| 382 | + cv.putText( |
| 383 | + result, |
380 | 384 | label, |
381 | | - (pt_x, pt_y), |
382 | | - label_offset, |
| 385 | + text_pos( |
| 386 | + label, |
| 387 | + (pt_x, pt_y), |
| 388 | + label_offset, |
| 389 | + label_font, |
| 390 | + label_fontscale, |
| 391 | + label_thickness, |
| 392 | + label_justify |
| 393 | + ), |
383 | 394 | label_font, |
384 | 395 | label_fontscale, |
| 396 | + label_color, |
385 | 397 | label_thickness, |
386 | | - label_justify |
387 | | - ), |
388 | | - label_font, |
389 | | - label_fontscale, |
390 | | - label_color, |
391 | | - label_thickness, |
392 | | - bottomLeftOrigin=False |
393 | | - ) |
| 398 | + bottomLeftOrigin=False |
| 399 | + ) |
| 400 | + |
| 401 | + console.print("Done") |
394 | 402 |
|
395 | | - console.print("Done") |
396 | 403 | console.print("Saving final image... ", end="") |
397 | 404 | # For some reason the blending function returns the image as int16 instead of
398 | 405 | # uint8, and it might contain negative values. These need to be clipped, |
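For reference, a minimal sketch of the angle-to-pixel mapping used inside the annotation loop above, assuming `scale` is pixels per radian on the spherical warp and the panorama wraps after `full_360` pixels (all values below are hypothetical):

```python
import numpy as np

scale = 1000.0                        # pixels per radian (assumed meaning)
full_360 = round(scale * 2 * np.pi)   # pixel width of one full turn

# Hypothetical reference: pixel x and azimuth of the first image centre,
# plus the azimuth of the point being annotated.
tl_x, tl_hz = 120, np.deg2rad(-30.0)
pt_hz = np.deg2rad(350.0)

# Same mapping as pt_x in the loop: offset from the reference angle, scaled
# to pixels, then wrapped back into the panorama width.
pt_x = round(tl_x + float(pt_hz - tl_hz) * scale) % full_360
print(pt_x)  # 469: the 380° offset wraps back into [0, full_360)
```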
|