
Commit 7b57b28

Merge pull request #937 from luxonis/release_v2.24.0.0
Release v2.24.0.0
2 parents f5c5796 + bbf4048 commit 7b57b28

26 files changed: 814 additions & 40 deletions

CMakeLists.txt

Lines changed: 6 additions & 2 deletions
@@ -129,6 +129,8 @@ pybind11_add_module(${TARGET_NAME}
     src/pipeline/node/WarpBindings.cpp
     src/pipeline/node/UVCBindings.cpp
     src/pipeline/node/ToFBindings.cpp
+    src/pipeline/node/SyncBindings.cpp
+    src/pipeline/node/MessageDemuxBindings.cpp

     src/pipeline/datatype/ADatatypeBindings.cpp
     src/pipeline/datatype/AprilTagConfigBindings.cpp
@@ -141,7 +143,9 @@ pybind11_add_module(${TARGET_NAME}
     src/pipeline/datatype/ImageManipConfigBindings.cpp
     src/pipeline/datatype/ImgDetectionsBindings.cpp
     src/pipeline/datatype/ImgFrameBindings.cpp
+    src/pipeline/datatype/EncodedFrameBindings.cpp
     src/pipeline/datatype/IMUDataBindings.cpp
+    src/pipeline/datatype/MessageGroupBindings.cpp
     src/pipeline/datatype/NNDataBindings.cpp
     src/pipeline/datatype/SpatialImgDetectionsBindings.cpp
     src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp
@@ -173,8 +177,8 @@ endif()

 # Add stubs (pyi) generation step after building bindings
 execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" "from mypy import api" RESULT_VARIABLE error OUTPUT_QUIET ERROR_QUIET)
-if(error)
-    message(WARNING "Mypy not available - stubs won't be generated or checked")
+if(error OR CMAKE_CROSSCOMPILING)
+    message(WARNING "Mypy not available or cross compiling - stubs won't be generated or checked")
 else()
     get_target_property(bindings_directory ${TARGET_NAME} LIBRARY_OUTPUT_DIRECTORY)
     if(NOT bindings_directory)
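
The node binding files and datatype binding files added above are what expose the new Sync and MessageDemux nodes, plus the MessageGroup and EncodedFrame message types, to Python. A minimal sanity check for an installed build of this release (a sketch, nothing more):

import depthai as dai

# Nodes registered by SyncBindings.cpp and MessageDemuxBindings.cpp
assert hasattr(dai.node, "Sync")
assert hasattr(dai.node, "MessageDemux")

# Datatypes registered by MessageGroupBindings.cpp and EncodedFrameBindings.cpp
assert hasattr(dai, "MessageGroup")
assert hasattr(dai, "EncodedFrame")

print("depthai", dai.__version__, "exposes the new bindings")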

depthai-core

Submodule depthai-core updated 43 files

docs/source/conf.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@
 subprocess.check_call(['make', 'install'], cwd=tmpdir+'/libusb-1.0.24')
 env['PATH'] = tmpdir+'/libusb/include:'+tmpdir+'/libusb/lib'+':'+env['PATH']

-# Not needed anymore, part of pip install that carries the binary itself also
+# Not needed since libclang usage in pip requirements (brings its own library)
 # # libclang
 # subprocess.check_call(['wget', 'https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/misc/libclang-11_manylinux2014_x86_64.tar.xz'], cwd=tmpdir)
 # subprocess.check_call(['mkdir', '-p', 'libclang'], cwd=tmpdir)

examples/MonoCamera/mono_preview_alternate_pro.py

Lines changed: 4 additions & 4 deletions
@@ -43,8 +43,8 @@
 script = pipeline.create(dai.node.Script)
 script.setProcessor(dai.ProcessorType.LEON_CSS)
 script.setScript("""
-    dotBright = 500 # Note: recommended to not exceed 765, for max duty cycle
-    floodBright = 200
+    dotBright = 0.8
+    floodBright = 0.1
     LOGGING = False # Set `True` for latency/timings debugging

     node.warn(f'IR drivers detected: {str(Device.getIrDrivers())}')
@@ -57,8 +57,8 @@

     # Immediately reconfigure the IR driver.
     # Note the logic is inverted, as it applies for next frame
-    Device.setIrLaserDotProjectorBrightness(0 if flagDot else dotBright)
-    Device.setIrFloodLightBrightness(floodBright if flagDot else 0)
+    Device.setIrLaserDotProjectorIntensity(0 if flagDot else dotBright)
+    Device.setIrFloodLightIntensity(floodBright if flagDot else 0)
     if LOGGING: tIrSet = Clock.now()

     # Wait for the actual frames (after MIPI capture and ISP proc is done)
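
The switch from the Brightness setters to the Intensity setters also applies to the host-side Device API in this release: the new calls take a normalized 0..1 value instead of a current in mA. A minimal host-side sketch, assuming a connected OAK device with IR emitters (the 0.8 and 0.1 values are only illustrative):

import depthai as dai

with dai.Device() as device:
    # Normalized 0..1 intensity instead of the old mA-based brightness
    device.setIrLaserDotProjectorIntensity(0.8)
    device.setIrFloodLightIntensity(0.1)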

examples/StereoDepth/stereo_depth_from_host.py

Lines changed: 13 additions & 4 deletions
@@ -17,6 +17,9 @@
 parser.add_argument("-e", "--evaluate", help="Evaluate the disparity calculation.", default=None)
 parser.add_argument("-dumpdispcost", "--dumpdisparitycostvalues", action="store_true", help="Dumps the disparity cost values for each disparity range. 96 byte for each pixel.")
 parser.add_argument("--download", action="store_true", help="Downloads the 2014 Middlebury dataset.")
+parser.add_argument("--calibration", help="Path to calibration file", default=None)
+parser.add_argument("--rectify", action="store_true", help="Enable rectified streams")
+parser.add_argument("--swapLR", action="store_true", help="Swap left and right cameras.")
 args = parser.parse_args()

 if args.evaluate is not None and args.dataset is not None:
@@ -603,8 +606,12 @@ def __init__(self, config):
 stereo.setRuntimeModeSwitch(True)

 # Linking
-monoLeft.out.link(stereo.left)
-monoRight.out.link(stereo.right)
+if(args.swapLR):
+    monoLeft.out.link(stereo.right)
+    monoRight.out.link(stereo.left)
+else:
+    monoLeft.out.link(stereo.left)
+    monoRight.out.link(stereo.right)
 xinStereoDepthConfig.out.link(stereo.inputConfig)
 stereo.syncedLeft.link(xoutLeft.input)
 stereo.syncedRight.link(xoutRight.input)
@@ -630,9 +637,11 @@ def __init__(self, config):
 StereoConfigHandler.registerWindow("Stereo control panel")

 # stereo.setPostProcessingHardwareResources(3, 3)
-
+if(args.calibration):
+    calibrationHandler = dai.CalibrationHandler(args.calibration)
+    pipeline.setCalibrationData(calibrationHandler)
 stereo.setInputResolution(width, height)
-stereo.setRectification(False)
+stereo.setRectification(args.rectify)
 baseline = 75
 fov = 71.86
 focal = width / (2 * math.tan(fov / 2 / 180 * math.pi))
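
The new --calibration flag is loaded through dai.CalibrationHandler, which reads a calibration JSON file. A short sketch for producing such a file from a device, assuming one is connected (the calib.json path is just an example):

import depthai as dai

# Export the device EEPROM calibration so it can be passed back to the
# example via: python3 stereo_depth_from_host.py --calibration calib.json
with dai.Device() as device:
    calibHandler = device.readCalibration()
    calibHandler.eepromToJsonFile("calib.json")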

examples/Sync/demux_message_group.py

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
import depthai as dai
import time
from datetime import timedelta

pipeline = dai.Pipeline()

script1 = pipeline.create(dai.node.Script)
script1.setScript("""
from time import sleep

while True:
    sleep(1)
    b = Buffer(512)
    b.setData(bytes(4 * [i for i in range(0, 128)]))
    b.setTimestamp(Clock.now())
    node.io['out'].send(b)
""")

script2 = pipeline.create(dai.node.Script)
script2.setScript("""
from time import sleep

while True:
    sleep(0.3)
    b = Buffer(512)
    b.setData(bytes(4 * [i for i in range(128, 256)]))
    b.setTimestamp(Clock.now())
    node.io['out'].send(b)
""")

sync = pipeline.create(dai.node.Sync)
sync.setSyncThresholdMs(timedelta(milliseconds=100))

demux = pipeline.create(dai.node.MessageDemux)

xout1 = pipeline.create(dai.node.XLinkOut)
xout1.setStreamName("xout1")
xout2 = pipeline.create(dai.node.XLinkOut)
xout2.setStreamName("xout2")

script1.outputs["out"].link(sync.inputs["s1"])
script2.outputs["out"].link(sync.inputs["s2"])
sync.out.link(demux.input)
demux.outputs["s1"].link(xout1.input)
demux.outputs["s2"].link(xout2.input)

with dai.Device(pipeline) as device:
    print("Start")
    q1 = device.getOutputQueue("xout1", maxSize=10, blocking=True)
    q2 = device.getOutputQueue("xout2", maxSize=10, blocking=True)
    while True:
        bufS1 = q1.get()
        bufS2 = q2.get()
        print(f"Buffer 1 timestamp: {bufS1.getTimestamp()}")
        print(f"Buffer 2 timestamp: {bufS2.getTimestamp()}")
        print("----------")
        time.sleep(0.2)

examples/Sync/depth_video_synced.py

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
import depthai as dai
import numpy as np
import cv2
from datetime import timedelta

pipeline = dai.Pipeline()

monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
color = pipeline.create(dai.node.ColorCamera)
stereo = pipeline.create(dai.node.StereoDepth)
sync = pipeline.create(dai.node.Sync)

xoutGrp = pipeline.create(dai.node.XLinkOut)

xoutGrp.setStreamName("xout")

monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setCamera("right")

stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_ACCURACY)

color.setCamera("color")

sync.setSyncThreshold(timedelta(milliseconds=50))

monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)

stereo.disparity.link(sync.inputs["disparity"])
color.video.link(sync.inputs["video"])

sync.out.link(xoutGrp.input)

disparityMultiplier = 255.0 / stereo.initialConfig.getMaxDisparity()
with dai.Device(pipeline) as device:
    queue = device.getOutputQueue("xout", 10, False)
    while True:
        msgGrp = queue.get()
        for name, msg in msgGrp:
            frame = msg.getCvFrame()
            if name == "disparity":
                frame = (frame * disparityMultiplier).astype(np.uint8)
                frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET)
            cv2.imshow(name, frame)
        if cv2.waitKey(1) == ord("q"):
            break

examples/Sync/imu_video_synced.py

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
import depthai as dai
import numpy as np
import cv2
from datetime import timedelta

device = dai.Device()

imuType = device.getConnectedIMU()
imuFirmwareVersion = device.getIMUFirmwareVersion()
print(f"IMU type: {imuType}, firmware version: {imuFirmwareVersion}")

if imuType != "BNO086":
    print("Rotation vector output is supported only by BNO086!")
    exit(0)

pipeline = dai.Pipeline()

color = pipeline.create(dai.node.ColorCamera)
imu = pipeline.create(dai.node.IMU)
sync = pipeline.create(dai.node.Sync)
xoutImu = pipeline.create(dai.node.XLinkOut)
xoutImu.setStreamName("imu")

xoutGrp = pipeline.create(dai.node.XLinkOut)
xoutGrp.setStreamName("xout")

color.setCamera("color")

imu.enableIMUSensor(dai.IMUSensor.ROTATION_VECTOR, 120)
imu.setBatchReportThreshold(1)
imu.setMaxBatchReports(10)

sync.setSyncThreshold(timedelta(milliseconds=10))
sync.setSyncAttempts(-1)

color.video.link(sync.inputs["video"])
imu.out.link(sync.inputs["imu"])

sync.out.link(xoutGrp.input)


with device:
    device.startPipeline(pipeline)
    groupQueue = device.getOutputQueue("xout", 3, True)
    while True:
        groupMessage = groupQueue.get()
        imuMessage = groupMessage["imu"]
        colorMessage = groupMessage["video"]
        print()
        print("Device timestamp imu: " + str(imuMessage.getTimestampDevice()))
        print("Device timestamp video:" + str(colorMessage.getTimestampDevice()))
        latestRotationVector = imuMessage.packets[-1].rotationVector
        imuF = "{:.4f}"
        print(f"Quaternion: i: {imuF.format(latestRotationVector.i)} j: {imuF.format(latestRotationVector.j)} "
              f"k: {imuF.format(latestRotationVector.k)} real: {imuF.format(latestRotationVector.real)}")
        print()
        cv2.imshow("video", colorMessage.getCvFrame())
        if cv2.waitKey(1) == ord("q"):
            break

examples/Sync/sync_scripts.py

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
import depthai as dai
import time
from datetime import timedelta

pipeline = dai.Pipeline()

script1 = pipeline.create(dai.node.Script)
script1.setScript("""
from time import sleep

while True:
    sleep(1)
    b = Buffer(512)
    b.setData(bytes(4 * [i for i in range(0, 128)]))
    b.setTimestamp(Clock.now())
    node.io['out'].send(b)
""")

script2 = pipeline.create(dai.node.Script)
script2.setScript("""
from time import sleep

while True:
    sleep(0.3)
    b = Buffer(512)
    b.setData(bytes(4 * [i for i in range(128, 256)]))
    b.setTimestamp(Clock.now())
    node.io['out'].send(b)
""")

sync = pipeline.create(dai.node.Sync)
sync.setSyncThreshold(timedelta(milliseconds=100))

xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("xout")

sync.out.link(xout.input)

script1.outputs["out"].link(sync.inputs["s1"])
script2.outputs["out"].link(sync.inputs["s2"])

# script1.outputs["out"].link(xout.input)

with dai.Device(pipeline) as device:
    print("Start")
    q = device.getOutputQueue("xout", maxSize=10, blocking=True)
    while True:
        grp = q.get()
        for name, msg in grp:
            print(f"Received {name} with timestamp {msg.getTimestamp()}")
        print(f"Time interval between messages: {grp.getIntervalNs() / 1e6}ms")
        print("----------")
        time.sleep(0.2)
Lines changed: 69 additions & 0 deletions
@@ -0,0 +1,69 @@
#!/usr/bin/env python3

import depthai as dai

def frametype2str(ft):
    if ft == dai.EncodedFrame.FrameType.I:
        return "I"
    elif ft == dai.EncodedFrame.FrameType.P:
        return "P"
    elif ft == dai.EncodedFrame.FrameType.B:
        return "B"

def compress(ls):
    curr = ls[0]
    count = 1
    res = []
    for i in range(1, len(ls)):
        if ls[i] == curr:
            count += 1
        else:
            res.append((count, curr))
            curr = ls[i]
            count = 1
    res.append((count, curr))
    return res


# Create pipeline
pipeline = dai.Pipeline()

# Define sources and output
camRgb = pipeline.create(dai.node.ColorCamera)
videoEnc = pipeline.create(dai.node.VideoEncoder)
xout = pipeline.create(dai.node.XLinkOut)

xout.setStreamName('h265')

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K)
videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN)

# Linking
camRgb.video.link(videoEnc.input)
videoEnc.out.link(xout.input)

frametypes = []
# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queue will be used to get the encoded data from the output defined above
    q = device.getOutputQueue(name="h265", maxSize=30, blocking=True)

    # The .h265 file is a raw stream file (not playable yet)
    with open('video.h265', 'wb') as videoFile:
        print("Press Ctrl+C to stop encoding...")
        try:
            while True:
                h265Packet = q.get()  # Blocking call, will wait until a new data has arrived
                frametypes.append(frametype2str(h265Packet.getFrameType()))
                h265Packet.getData().tofile(videoFile)  # Appends the packet data to the opened file
        except KeyboardInterrupt:
            # Keyboard interrupt (Ctrl + C) detected
            pass

    print("To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:")
    print("ffmpeg -framerate 30 -i video.h265 -c copy video.mp4")

print(",".join([f"{c}{f}" for c, f in compress(frametypes)]))
