|
9 | 9 | import numpy as np |
10 | 10 | import cv2 |
11 | 11 | import time |
12 | | -from iq_helper import find_roi_location, get_roi_from_frame |
| 12 | +from iq_helper import find_roi_location, get_roi_from_frame, WIDTH, HEIGHT |
13 | 13 |
|
# Test tuning knobs
NUM_FRAMES = 100              # number of frames sampled per configuration
DEPTH_TOLERANCE = 0.05        # acceptable deviation from expected depth, in meters
FRAMES_PASS_THRESHOLD = 0.8   # fraction of frames that must pass for each point
DEBUG_MODE = False            # when True, shows IR / ROI debug windows

# Expected distances of the lab targets, in meters
DISTANCE_FROM_CUBE = 0.53
DISTANCE_FROM_BACKGROUND = 0.67

dev, ctx = test.find_first_device_or_exit()
def run_test(resolution, fps):
    """Run the basic depth image-quality check for one depth configuration.

    resolution -- (width, height) tuple for the depth stream
    fps        -- requested depth frame rate

    Samples NUM_FRAMES frames, reads the depth at known points inside the
    ROI (located via the lab's ArUco markers) and checks each point is within
    DEPTH_TOLERANCE of its expected distance in at least FRAMES_PASS_THRESHOLD
    of the frames. Unsupported configurations are skipped with a log message.
    """
    test.start("Basic Depth Image Quality Test", f"{resolution[0]}x{resolution[1]} @ {fps}fps")

    pipeline_started = False
    try:
        pipeline = rs.pipeline(ctx)
        cfg = rs.config()
        cfg.enable_stream(rs.stream.depth, resolution[0], resolution[1], rs.format.z16, fps)
        cfg.enable_stream(rs.stream.infrared, 1, 640, 480, rs.format.y8, 30)  # needed for finding the ArUco markers
        if not cfg.can_resolve(pipeline):
            log.i(f"Configuration {resolution[0]}x{resolution[1]} @ {fps}fps is not supported by the device")
            return
        profile = pipeline.start(cfg)
        pipeline_started = True  # remembered so `finally` only stops a pipeline that actually started
        time.sleep(2)  # let auto-exposure settle before sampling

        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()  # raw z16 units -> meters

        # find region of interest (page) and get the transformation matrix
        find_roi_location(pipeline, (4, 5, 6, 7), DEBUG_MODE)  # markers in the lab are 4,5,6,7

        # Known (row, col) positions inside the ROI and expected depth values (in meters)
        # Using temporary values until setup in lab is completed
        depth_points = {
            "cube": ((HEIGHT // 2, WIDTH // 2), DISTANCE_FROM_CUBE),  # cube expected to be at the center of page
            "background": ((HEIGHT // 2, int(WIDTH * 0.1)), DISTANCE_FROM_BACKGROUND)  # left edge, background
        }
        depth_passes = {name: 0 for name in depth_points}
        for i in range(NUM_FRAMES):
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            infrared_frame = frames.get_infrared_frame()
            if not depth_frame:
                continue

            depth_image = get_roi_from_frame(depth_frame)

            for point_name, ((row, col), expected_depth) in depth_points.items():
                # numpy images are indexed [row, col]; the previous code unpacked the
                # point as (x, y) and indexed [y, x], swapping the coordinates for any
                # off-center point (the "background" sample landed in the wrong place).
                raw_depth = depth_image[row, col]
                depth_value = raw_depth * depth_scale  # Convert to meters

                if abs(depth_value - expected_depth) <= DEPTH_TOLERANCE:
                    depth_passes[point_name] += 1
                else:
                    log.d(f"Frame {i} - {point_name} at ({row},{col}): {depth_value:.3f}m ≠ {expected_depth:.3f}m")

            if DEBUG_MODE:
                # display IR image along with transformed view of IR, get_roi_from_frame(infrared_frame)
                infrared_np = np.asanyarray(infrared_frame.get_data())
                ir_h, ir_w = infrared_np.shape  # numpy shape is (rows, cols) == (height, width)
                dbg_resized = cv2.resize(get_roi_from_frame(infrared_frame), (ir_w, ir_h))  # cv2 dsize is (width, height)

                dbg = np.hstack([infrared_np, dbg_resized])
                cv2.imshow("Depth IQ - IR | Depth", dbg)
                cv2.waitKey(1)

        # wait for close
        if DEBUG_MODE:
            cv2.waitKey(0)

        # Check that each point passed the threshold
        min_passes = int(NUM_FRAMES * FRAMES_PASS_THRESHOLD)
        for point_name, count in depth_passes.items():
            log.i(f"{point_name.title()} passed in {count}/{NUM_FRAMES} frames")
            test.check(count >= min_passes)

    except Exception as e:
        test.fail()
        raise e
    finally:
        cv2.destroyAllWindows()
        if pipeline_started:
            # Calling stop() on a pipeline that never started (unsupported
            # configuration, or start() raised) itself raises and would mask
            # the real outcome — only stop what we started.
            pipeline.stop()
        test.finish()
| 95 | + |
| 96 | + |
log.d("context:", test.context)

# The default configuration is always checked.
configurations = [((1280, 720), 30)]

# on nightly we check additional arbitrary configurations
if "nightly" in test.context:
    configurations += [(res, fps)
                       for res in ((640, 480), (848, 480))
                       for fps in (15, 30, 60)]
    configurations += [((1280, 720), fps) for fps in (5, 10, 15)]

for resolution, fps in configurations:
    run_test(resolution, fps)

test.print_results_and_exit()
0 commit comments