from rspy import log, test
import numpy as np
import cv2
- import time
+ from iq_helper import find_roi_location, get_roi_from_frame, WIDTH, HEIGHT

NUM_FRAMES = 100 # Number of frames to check
COLOR_TOLERANCE = 60 # Acceptable per-channel deviation in RGB values
FRAMES_PASS_THRESHOLD = 0.8 # Percentage of frames that needs to pass
DEBUG_MODE = False

- # A4 size in pixels at 96 DPI
- A4_WIDTH = 794
- A4_HEIGHT = 1123
-
# expected colors (insertion order -> mapped row-major to 3x3 grid)
expected_colors = {
    "red": (132, 60, 60),

# we are given a 3x3 grid, we split it using 2 vertical and 2 horizontal separators
# we also calculate the center of each grid cell for sampling from it for the test
- xs = [1.5 * A4_WIDTH / 6.0, A4_WIDTH / 2.0, 4.5 * A4_WIDTH / 6.0]
- ys = [1.5 * A4_HEIGHT / 6.0, A4_HEIGHT / 2.0, 4.5 * A4_HEIGHT / 6.0]
+ xs = [1.5 * WIDTH / 6.0, WIDTH / 2.0, 4.5 * WIDTH / 6.0]
+ ys = [1.5 * HEIGHT / 6.0, HEIGHT / 2.0, 4.5 * HEIGHT / 6.0]
centers = [(x, y) for y in ys for x in xs]

dev, ctx = test.find_first_device_or_exit()

def is_color_close(actual, expected, tolerance):
    return all(abs(int(a) - int(e)) <= tolerance for a, e in zip(actual, expected))

- def compute_homography(pts):
-     """
-     Given 4 points (the detected ArUco marker centers), find the 3×3 matrix that stretches/rotates
-     the four ArUco points so they become the corners of an A4 page (used to "flatten" the page in an image)
-     """
-     pts_sorted = sorted(pts, key=lambda p: (p[1], p[0]))
-     top_left, top_right = sorted(pts_sorted[:2], key=lambda p: p[0])
-     bottom_left, bottom_right = sorted(pts_sorted[2:], key=lambda p: p[0])
-
-     src = np.array([top_left, top_right, bottom_right, bottom_left], dtype=np.float32)
-     dst = np.array([[0, 0], [A4_WIDTH - 1, 0], [A4_WIDTH - 1, A4_HEIGHT - 1], [0, A4_HEIGHT - 1]], dtype=np.float32)
-     M = cv2.getPerspectiveTransform(src, dst)
-     return M # we later use M to get our roi
-
-
def draw_debug(frame_bgr, a4_page_bgr):
    """
    Simple debug view:
    - left: camera frame
    - right: focused view on the A4 page with grid and color names
    """
-     vertical_lines = [A4_WIDTH / 3.0, 2.0 * A4_WIDTH / 3.0]
-     horizontal_lines = [A4_HEIGHT / 3.0, 2.0 * A4_HEIGHT / 3.0]
+     vertical_lines = [WIDTH / 3.0, 2.0 * WIDTH / 3.0]
+     horizontal_lines = [HEIGHT / 3.0, 2.0 * HEIGHT / 3.0]
    H, W = a4_page_bgr.shape[:2]

    # draw grid on a4 page image
@@ -93,63 +74,6 @@ def draw_debug(frame_bgr, a4_page_bgr):
    return np.hstack([left, right])


- def detect_a4_page(img, dict_type=cv2.aruco.DICT_4X4_1000, required_ids=(0, 1, 2, 3)):
-     """
-     Detect ArUco markers and return center of each one
-     Returns None if not all required markers are found
-     """
-     # init aruco detector
-     aruco = cv2.aruco
-     dictionary = aruco.getPredefinedDictionary(dict_type)
-     try:
-         # new API (OpenCV >= 4.7)
-         parameters = aruco.DetectorParameters()
-         detector = aruco.ArucoDetector(dictionary, parameters)
-         corners, ids, _ = detector.detectMarkers(img)
-     except AttributeError:
-         # legacy API (OpenCV <= 4.6) - used on some of our machines
-         parameters = aruco.DetectorParameters_create()
-         corners, ids, _ = aruco.detectMarkers(img, dictionary, parameters=parameters)
-
-     if ids is None or not all(rid in ids for rid in required_ids):
-         return None
-
-     id_to_corner = dict(zip(ids.flatten(), corners)) # map id to corners
-     values = [id_to_corner[rid][0].mean(axis=0) for rid in required_ids] # for each required id, get center of marker coords
-
-     return np.array(values, dtype=np.float32)
-
-
- def find_roi_location(pipeline):
-     """
-     Returns a matrix that transforms from frame to region of interest
-     This matrix will later be used with cv2.warpPerspective()
-     """
-     # stream until page found
-     page_pts = None
-     start_time = time.time()
-     while page_pts is None and time.time() - start_time < 5:
-         frames = pipeline.wait_for_frames()
-         color_frame = frames.get_color_frame()
-         img_bgr = np.asanyarray(color_frame.get_data())
-
-         if DEBUG_MODE:
-             cv2.imshow("PageDetect - waiting for page", img_bgr)
-             cv2.waitKey(1)
-
-         page_pts = detect_a4_page(img_bgr)
-
-     if page_pts is None:
-         log.e("Failed to detect page within timeout")
-         test.fail()
-         raise Exception("Page not found")
-
-     # page found - use it to calculate transformation matrix from frame to region of interest
-     M = compute_homography(page_pts)
-     cv2.destroyAllWindows()
-     return M, page_pts
-
-
def is_cfg_supported(resolution, fps):
    color_sensor = dev.first_color_sensor()
    for p in color_sensor.get_stream_profiles():
@@ -170,35 +94,32 @@ def run_test(resolution, fps):
    for i in range(30): # skip initial frames
        pipeline.wait_for_frames()
    try:
-
        # find region of interest (page) and get the transformation matrix
-         # page_pts is only used for debug display
-         M, page_pts = find_roi_location(pipeline)
+         find_roi_location(pipeline, (0, 1, 2, 3), DEBUG_MODE) # markers in the lab are 0,1,2,3

        # sampling loop
        for i in range(NUM_FRAMES):
            frames = pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            img_bgr = np.asanyarray(color_frame.get_data())

-             # use M to get the region of interest - our colored grid printed in the lab
-             a4_bgr = cv2.warpPerspective(img_bgr, M, (A4_WIDTH, A4_HEIGHT))
+             color_frame_roi = get_roi_from_frame(color_frame)

            # sample each grid center and compare to expected color by row-major insertion order
            for idx, (x, y) in enumerate(centers):
                color = color_names[idx] if idx < len(color_names) else str(idx)
                expected_rgb = expected_colors[color]
                x = int(round(x))
                y = int(round(y))
-                 b, g, r = (int(v) for v in a4_bgr[y, x]) # stream is BGR, convert to RGB
+                 b, g, r = (int(v) for v in color_frame_roi[y, x]) # stream is BGR, convert to RGB
                pixel = (r, g, b)
                if is_color_close(pixel, expected_rgb, COLOR_TOLERANCE):
                    color_match_count[color] += 1
                else:
                    log.d(f"Frame {i} - {color} at ({x},{y}) sampled: {pixel} too far from expected {expected_rgb}")

            if DEBUG_MODE:
-                 dbg = draw_debug(img_bgr, a4_bgr)
+                 dbg = draw_debug(img_bgr, color_frame_roi)
                cv2.imshow("PageDetect - camera | A4", dbg)
                cv2.waitKey(1)

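For reference, here is a minimal sketch of what the new iq_helper module presumably provides, pieced together from the helpers removed above and the call sites in this diff. Only the imported names (find_roi_location, get_roi_from_frame, WIDTH, HEIGHT) and their call signatures come from the diff itself; the private helper names, the module-level homography cache, the default timeout, and the WIDTH/HEIGHT values are assumptions, not the actual implementation.

```python
# iq_helper.py - hypothetical sketch reconstructed from the helpers this PR removes.
# Only the public names and call signatures are taken from the diff; everything else is assumed.
import time
import numpy as np
import cv2
from rspy import log, test

# A4 size in pixels at 96 DPI (same values the test used before the refactor)
WIDTH = 794
HEIGHT = 1123

_M = None  # frame-to-page homography, cached by find_roi_location()


def _detect_markers(img, required_ids, dict_type=cv2.aruco.DICT_4X4_1000):
    """Return the center of each required ArUco marker, or None if any is missing"""
    aruco = cv2.aruco
    dictionary = aruco.getPredefinedDictionary(dict_type)
    try:
        # new API (OpenCV >= 4.7)
        detector = aruco.ArucoDetector(dictionary, aruco.DetectorParameters())
        corners, ids, _ = detector.detectMarkers(img)
    except AttributeError:
        # legacy API (OpenCV <= 4.6)
        corners, ids, _ = aruco.detectMarkers(img, dictionary, parameters=aruco.DetectorParameters_create())
    if ids is None or not all(rid in ids for rid in required_ids):
        return None
    id_to_corner = dict(zip(ids.flatten(), corners))
    return np.array([id_to_corner[rid][0].mean(axis=0) for rid in required_ids], dtype=np.float32)


def _compute_homography(pts):
    """Map the 4 marker centers to the corners of a WIDTH x HEIGHT page"""
    pts_sorted = sorted(pts, key=lambda p: (p[1], p[0]))
    top_left, top_right = sorted(pts_sorted[:2], key=lambda p: p[0])
    bottom_left, bottom_right = sorted(pts_sorted[2:], key=lambda p: p[0])
    src = np.array([top_left, top_right, bottom_right, bottom_left], dtype=np.float32)
    dst = np.array([[0, 0], [WIDTH - 1, 0], [WIDTH - 1, HEIGHT - 1], [0, HEIGHT - 1]], dtype=np.float32)
    return cv2.getPerspectiveTransform(src, dst)


def find_roi_location(pipeline, required_ids, debug=False, timeout=5):
    """Stream until the page is found, then cache the frame-to-ROI homography"""
    global _M
    page_pts = None
    start_time = time.time()
    while page_pts is None and time.time() - start_time < timeout:
        frames = pipeline.wait_for_frames()
        img_bgr = np.asanyarray(frames.get_color_frame().get_data())
        if debug:
            cv2.imshow("PageDetect - waiting for page", img_bgr)
            cv2.waitKey(1)
        page_pts = _detect_markers(img_bgr, required_ids)
    if page_pts is None:
        log.e("Failed to detect page within timeout")
        test.fail()
        raise Exception("Page not found")
    _M = _compute_homography(page_pts)
    cv2.destroyAllWindows()


def get_roi_from_frame(frame):
    """Warp a color frame into the flattened WIDTH x HEIGHT page view (requires find_roi_location first)"""
    img_bgr = np.asanyarray(frame.get_data())
    return cv2.warpPerspective(img_bgr, _M, (WIDTH, HEIGHT))
```

Under this reading, the refactor keeps the homography inside iq_helper, so the test no longer needs to carry M, page_pts, or the A4 constants around; it only asks for the ROI per frame.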