Skip to content

Commit 3aea75a

Browse files
committed
Merge branch 'release_v2.18.0.0' into main
2 parents 07e7d03 + 8d9a1c6 commit 3aea75a

40 files changed

+1079
-110
lines changed

CMakeLists.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,7 @@ pybind11_add_module(${TARGET_NAME}
100100
src/pipeline/AssetManagerBindings.cpp
101101
src/openvino/OpenVINOBindings.cpp
102102
src/log/LogBindings.cpp
103+
src/VersionBindings.cpp
103104

104105
src/pipeline/node/NodeBindings.cpp
105106

@@ -124,6 +125,7 @@ pybind11_add_module(${TARGET_NAME}
124125
src/pipeline/node/FeatureTrackerBindings.cpp
125126
src/pipeline/node/AprilTagBindings.cpp
126127
src/pipeline/node/DetectionParserBindings.cpp
128+
src/pipeline/node/WarpBindings.cpp
127129

128130
src/pipeline/datatype/ADatatypeBindings.cpp
129131
src/pipeline/datatype/AprilTagConfigBindings.cpp

ci/Dockerfile

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,18 @@
11
FROM python:3.9-bullseye
22

3-
RUN apt-get update && apt-get install -y wget build-essential cmake pkg-config libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libgtk2.0-dev libgtk-3-dev libatlas-base-dev gfortran git
3+
RUN apt-get update && apt-get install -y wget build-essential cmake pkg-config libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libgtk2.0-dev libgtk-3-dev libatlas-base-dev gfortran git libopencv-dev
44

55
ADD ci/docker_dependencies.sh .
66
RUN ./docker_dependencies.sh
77

88
RUN pip install -U pip && pip install --extra-index-url https://www.piwheels.org/simple/ --prefer-binary opencv-python
99

10+
# Copy over the files
1011
COPY . /depthai-python
12+
13+
# Install C++ library
14+
RUN cmake -S /depthai-python/depthai-core -B /build -D CMAKE_BUILD_TYPE=Release -D BUILD_SHARED_LIBS=ON -D CMAKE_INSTALL_PREFIX=/usr/local
15+
RUN cmake --build /build --parallel 4 --config Release --target install
16+
17+
# Install Python library
1118
RUN cd /depthai-python && python3 -m pip install .

depthai-core

Submodule depthai-core updated 48 files

examples/ColorCamera/rgb_camera_control.py

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,8 @@
3232
For the 'Select control: ...' options, use these keys to modify the value:
3333
'-' or '_' to decrease
3434
'+' or '=' to increase
35+
36+
'/' to toggle showing camera settings: exposure, ISO, lens position, color temperature
3537
"""
3638

3739
import depthai as dai
@@ -117,6 +119,7 @@ def clamp(num, v0, v1):
117119
luma_denoise = 0
118120
chroma_denoise = 0
119121
control = 'none'
122+
show = False
120123

121124
awb_mode = cycle([item for name, item in vars(dai.CameraControl.AutoWhiteBalanceMode).items() if name.isupper()])
122125
anti_banding_mode = cycle([item for name, item in vars(dai.CameraControl.AntiBandingMode).items() if name.isupper()])
@@ -129,6 +132,13 @@ def clamp(num, v0, v1):
129132

130133
ispFrames = ispQueue.tryGetAll()
131134
for ispFrame in ispFrames:
135+
if show:
136+
txt = f"[{ispFrame.getSequenceNum()}] "
137+
txt += f"Exposure: {ispFrame.getExposureTime().total_seconds()*1000:.3f} ms, "
138+
txt += f"ISO: {ispFrame.getSensitivity()}, "
139+
txt += f"Lens position: {ispFrame.getLensPosition()}, "
140+
txt += f"Color temp: {ispFrame.getColorTemperature()} K"
141+
print(txt)
132142
cv2.imshow('isp', ispFrame.getCvFrame())
133143

134144
# Send new cfg to camera
@@ -150,6 +160,9 @@ def clamp(num, v0, v1):
150160
key = cv2.waitKey(1)
151161
if key == ord('q'):
152162
break
163+
elif key == ord('/'):
164+
show = not show
165+
if not show: print("Printing camera settings: OFF")
153166
elif key == ord('c'):
154167
ctrl = dai.CameraControl()
155168
ctrl.setCaptureStill(True)

examples/ColorCamera/rgb_preview.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,9 @@
2626
print('Connected cameras: ', device.getConnectedCameras())
2727
# Print out usb speed
2828
print('Usb speed: ', device.getUsbSpeed().name)
29+
# Bootloader version
30+
if device.getBootloaderVersion() is not None:
31+
print('Bootloader version: ', device.getBootloaderVersion())
2932

3033
# Output queue will be used to get the rgb frames from the output defined above
3134
qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)

examples/ColorCamera/rgb_undistort.py

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
import cv2
2+
import depthai as dai
3+
import numpy as np
4+
5+
camRes = dai.ColorCameraProperties.SensorResolution.THE_1080_P
6+
camSocket = dai.CameraBoardSocket.RGB
7+
ispScale = (1,2)
8+
9+
def getMesh(calibData, ispSize):
10+
M1 = np.array(calibData.getCameraIntrinsics(camSocket, ispSize[0], ispSize[1]))
11+
d1 = np.array(calibData.getDistortionCoefficients(camSocket))
12+
R1 = np.identity(3)
13+
mapX, mapY = cv2.initUndistortRectifyMap(M1, d1, R1, M1, ispSize, cv2.CV_32FC1)
14+
15+
meshCellSize = 16
16+
mesh0 = []
17+
# Creates subsampled mesh which will be loaded on to device to undistort the image
18+
for y in range(mapX.shape[0] + 1): # iterating over height of the image
19+
if y % meshCellSize == 0:
20+
rowLeft = []
21+
for x in range(mapX.shape[1]): # iterating over width of the image
22+
if x % meshCellSize == 0:
23+
if y == mapX.shape[0] and x == mapX.shape[1]:
24+
rowLeft.append(mapX[y - 1, x - 1])
25+
rowLeft.append(mapY[y - 1, x - 1])
26+
elif y == mapX.shape[0]:
27+
rowLeft.append(mapX[y - 1, x])
28+
rowLeft.append(mapY[y - 1, x])
29+
elif x == mapX.shape[1]:
30+
rowLeft.append(mapX[y, x - 1])
31+
rowLeft.append(mapY[y, x - 1])
32+
else:
33+
rowLeft.append(mapX[y, x])
34+
rowLeft.append(mapY[y, x])
35+
if (mapX.shape[1] % meshCellSize) % 2 != 0:
36+
rowLeft.append(0)
37+
rowLeft.append(0)
38+
39+
mesh0.append(rowLeft)
40+
41+
mesh0 = np.array(mesh0)
42+
meshWidth = mesh0.shape[1] // 2
43+
meshHeight = mesh0.shape[0]
44+
mesh0.resize(meshWidth * meshHeight, 2)
45+
46+
mesh = list(map(tuple, mesh0))
47+
48+
return mesh, meshWidth, meshHeight
49+
50+
def create_pipeline(calibData):
51+
pipeline = dai.Pipeline()
52+
53+
cam = pipeline.create(dai.node.ColorCamera)
54+
cam.setIspScale(ispScale)
55+
cam.setBoardSocket(camSocket)
56+
cam.setResolution(camRes)
57+
58+
manip = pipeline.create(dai.node.ImageManip)
59+
mesh, meshWidth, meshHeight = getMesh(calibData, cam.getIspSize())
60+
manip.setWarpMesh(mesh, meshWidth, meshHeight)
61+
manip.setMaxOutputFrameSize(cam.getIspWidth() * cam.getIspHeight() * 3 // 2)
62+
cam.isp.link(manip.inputImage)
63+
64+
cam_xout = pipeline.create(dai.node.XLinkOut)
65+
cam_xout.setStreamName("Undistorted")
66+
manip.out.link(cam_xout.input)
67+
68+
dist_xout = pipeline.create(dai.node.XLinkOut)
69+
dist_xout.setStreamName("Distorted")
70+
cam.isp.link(dist_xout.input)
71+
72+
return pipeline
73+
74+
with dai.Device() as device:
75+
76+
calibData = device.readCalibration()
77+
pipeline = create_pipeline(calibData)
78+
device.startPipeline(pipeline)
79+
80+
queues = [device.getOutputQueue(name, 4, False) for name in ['Undistorted', 'Distorted']]
81+
82+
while True:
83+
for q in queues:
84+
frame = q.get().getCvFrame()
85+
cv2.imshow(q.getName(), frame)
86+
87+
if cv2.waitKey(1) == ord('q'):
88+
break

examples/IMU/imu_firmware_update.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,8 @@ def timeDeltaToMilliS(delta) -> float:
5555
acceleroValues = imuPacket.acceleroMeter
5656
gyroValues = imuPacket.gyroscope
5757

58-
acceleroTs = acceleroValues.timestamp.get()
59-
gyroTs = gyroValues.timestamp.get()
58+
acceleroTs = acceleroValues.getTimestampDevice()
59+
gyroTs = gyroValues.getTimestampDevice()
6060
if baseTs is None:
6161
baseTs = acceleroTs if acceleroTs < gyroTs else gyroTs
6262
acceleroTs = timeDeltaToMilliS(acceleroTs - baseTs)

examples/IMU/imu_gyroscope_accelerometer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,8 @@ def timeDeltaToMilliS(delta) -> float:
4646
acceleroValues = imuPacket.acceleroMeter
4747
gyroValues = imuPacket.gyroscope
4848

49-
acceleroTs = acceleroValues.timestamp.get()
50-
gyroTs = gyroValues.timestamp.get()
49+
acceleroTs = acceleroValues.getTimestampDevice()
50+
gyroTs = gyroValues.getTimestampDevice()
5151
if baseTs is None:
5252
baseTs = acceleroTs if acceleroTs < gyroTs else gyroTs
5353
acceleroTs = timeDeltaToMilliS(acceleroTs - baseTs)

examples/IMU/imu_rotation_vector.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ def timeDeltaToMilliS(delta) -> float:
4343
for imuPacket in imuPackets:
4444
rVvalues = imuPacket.rotationVector
4545

46-
rvTs = rVvalues.timestamp.get()
46+
rvTs = rVvalues.getTimestampDevice()
4747
if baseTs is None:
4848
baseTs = rvTs
4949
rvTs = rvTs - baseTs

examples/ImageManip/image_manip_warp_mesh.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3
1313

1414
# Warp preview frame 1
15-
manip1 = pipeline.create(dai.node.ImageManip)
15+
manip1 = pipeline.create(dai.node.Warp)
1616
# Create a custom warp mesh
1717
tl = dai.Point2f(20, 20)
1818
tr = dai.Point2f(460, 20)
Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
1+
#!/usr/bin/env python3
2+
3+
import cv2
4+
import depthai as dai
5+
6+
if 1: # PoE config
7+
fps = 30
8+
res = dai.MonoCameraProperties.SensorResolution.THE_400_P
9+
poolSize = 24 # default 3, increased to prevent desync
10+
else: # USB
11+
fps = 30
12+
res = dai.MonoCameraProperties.SensorResolution.THE_720_P
13+
poolSize = 8 # default 3, increased to prevent desync
14+
15+
# Create pipeline
16+
pipeline = dai.Pipeline()
17+
18+
# Define sources and outputs
19+
monoL = pipeline.create(dai.node.MonoCamera)
20+
monoR = pipeline.create(dai.node.MonoCamera)
21+
22+
monoL.setBoardSocket(dai.CameraBoardSocket.LEFT)
23+
monoL.setResolution(res)
24+
monoL.setFps(fps)
25+
monoL.setNumFramesPool(poolSize)
26+
monoR.setBoardSocket(dai.CameraBoardSocket.RIGHT)
27+
monoR.setResolution(res)
28+
monoR.setFps(fps)
29+
monoR.setNumFramesPool(poolSize)
30+
31+
xoutDotL = pipeline.create(dai.node.XLinkOut)
32+
xoutDotR = pipeline.create(dai.node.XLinkOut)
33+
xoutFloodL = pipeline.create(dai.node.XLinkOut)
34+
xoutFloodR = pipeline.create(dai.node.XLinkOut)
35+
36+
xoutDotL.setStreamName('dot-left')
37+
xoutDotR.setStreamName('dot-right')
38+
xoutFloodL.setStreamName('flood-left')
39+
xoutFloodR.setStreamName('flood-right')
40+
streams = ['dot-left', 'dot-right', 'flood-left', 'flood-right']
41+
42+
# Script node for frame routing and IR dot/flood alternate
43+
script = pipeline.create(dai.node.Script)
44+
script.setProcessor(dai.ProcessorType.LEON_CSS)
45+
script.setScript("""
46+
dotBright = 500 # Note: recommended to not exceed 765, for max duty cycle
47+
floodBright = 200
48+
LOGGING = False # Set `True` for latency/timings debugging
49+
50+
node.warn(f'IR drivers detected: {str(Device.getIrDrivers())}')
51+
52+
flagDot = False
53+
while True:
54+
# Wait first for a frame event, received at MIPI start-of-frame
55+
event = node.io['event'].get()
56+
if LOGGING: tEvent = Clock.now()
57+
58+
# Immediately reconfigure the IR driver.
59+
# Note the logic is inverted, as it applies for next frame
60+
Device.setIrLaserDotProjectorBrightness(0 if flagDot else dotBright)
61+
Device.setIrFloodLightBrightness(floodBright if flagDot else 0)
62+
if LOGGING: tIrSet = Clock.now()
63+
64+
# Wait for the actual frames (after MIPI capture and ISP proc is done)
65+
frameL = node.io['frameL'].get()
66+
if LOGGING: tLeft = Clock.now()
67+
frameR = node.io['frameR'].get()
68+
if LOGGING: tRight = Clock.now()
69+
70+
if LOGGING:
71+
latIR = (tIrSet - tEvent ).total_seconds() * 1000
72+
latEv = (tEvent - event.getTimestamp() ).total_seconds() * 1000
73+
latProcL = (tLeft - event.getTimestamp() ).total_seconds() * 1000
74+
diffRecvRL = (tRight - tLeft ).total_seconds() * 1000
75+
node.warn(f'T[ms] latEv:{latEv:5.3f} latIR:{latIR:5.3f} latProcL:{latProcL:6.3f} '
76+
+ f' diffRecvRL:{diffRecvRL:5.3f}')
77+
78+
# Sync checks
79+
diffSeq = frameL.getSequenceNum() - event.getSequenceNum()
80+
diffTsEv = (frameL.getTimestamp() - event.getTimestamp()).total_seconds() * 1000
81+
diffTsRL = (frameR.getTimestamp() - frameL.getTimestamp()).total_seconds() * 1000
82+
if diffSeq or diffTsEv or (abs(diffTsRL) > 0.8):
83+
node.error(f'frame/event desync! Fr-Ev: {diffSeq} frames,'
84+
+ f' {diffTsEv:.3f} ms; R-L: {diffTsRL:.3f} ms')
85+
86+
# Route the frames to their respective outputs
87+
node.io['dotL' if flagDot else 'floodL'].send(frameL)
88+
node.io['dotR' if flagDot else 'floodR'].send(frameR)
89+
90+
flagDot = not flagDot
91+
""")
92+
93+
# Linking
94+
monoL.frameEvent.link(script.inputs['event'])
95+
monoL.out.link(script.inputs['frameL'])
96+
monoR.out.link(script.inputs['frameR'])
97+
98+
script.outputs['dotL'].link(xoutDotL.input)
99+
script.outputs['dotR'].link(xoutDotR.input)
100+
script.outputs['floodL'].link(xoutFloodL.input)
101+
script.outputs['floodR'].link(xoutFloodR.input)
102+
103+
# Connect to device and start pipeline
104+
with dai.Device(pipeline) as device:
105+
queues = [device.getOutputQueue(name=s, maxSize=4, blocking=False) for s in streams]
106+
107+
while True:
108+
for q in queues:
109+
pkt = q.tryGet()
110+
if pkt is not None:
111+
name = q.getName()
112+
frame = pkt.getCvFrame()
113+
cv2.imshow(name, frame)
114+
115+
if cv2.waitKey(5) == ord('q'):
116+
break

examples/SpatialDetection/spatial_calculator_multi_roi.py

100644100755
File mode changed.

0 commit comments

Comments
 (0)