Skip to content

Commit 844490e

Browse files
committed
Merge branch 'release_2.17.4.0' into main
2 parents ee39813 + 2b35be1 commit 844490e

17 files changed

+359
-32
lines changed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ endif()
3535

3636
# Pybindings project
3737
set(TARGET_NAME depthai)
38-
project(depthai VERSION "1") # revision of bindings [depthai-core].[rev]
38+
project(depthai VERSION "0") # revision of bindings [depthai-core].[rev]
3939

4040
# Set default build type depending on context
4141
set(default_build_type "Release")
Lines changed: 42 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
Spatial Calculator Multi-ROI
2+
============================
3+
4+
This example shows how one can use multiple ROIs with a single :ref:`SpatialLocationCalculator` node. A similar logic could be used as a simple depth line
5+
scanning camera for mobile robots.
6+
7+
.. rubric:: Similar samples:
8+
9+
- :ref:`Spatial location calculator`
10+
11+
Demo
12+
####
13+
14+
.. image:: https://user-images.githubusercontent.com/18037362/190861621-b57fd1e3-5a3d-4d79-b1a7-d17a0b78c63e.gif
15+
16+
Setup
17+
#####
18+
19+
.. include:: /includes/install_from_pypi.rst
20+
21+
Source code
22+
###########
23+
24+
.. tabs::
25+
26+
.. tab:: Python
27+
28+
Also `available on GitHub <https://github.com/luxonis/depthai-python/blob/main/examples/SpatialDetection/spatial_calculator_multi_roi.py>`__
29+
30+
.. literalinclude:: ../../../../examples/SpatialDetection/spatial_calculator_multi_roi.py
31+
:language: python
32+
:linenos:
33+
34+
.. tab:: C++
35+
36+
Also `available on GitHub <https://github.com/luxonis/depthai-core/blob/main/examples/SpatialDetection/spatial_calculator_multi_roi.cpp>`__
37+
38+
.. literalinclude:: ../../../../depthai-core/examples/SpatialDetection/spatial_calculator_multi_roi.cpp
39+
:language: cpp
40+
:linenos:
41+
42+
.. include:: /includes/footer-short.rst
Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
Device information
2+
==================
3+
4+
This example shows how you can query device information.
5+
6+
The first part of the code queries all available devices without actually booting any device. For each found device, it prints the following information:
7+
8+
- **Device name**: Either IP, in case of OAK PoE cameras, or USB path in case of OAK USB cameras
9+
- **MxId**: Unique Mx (chip) identification code
10+
- **State**: State of the device. Note that OAK PoE cameras have bootloader flashed which initializes the network stack
11+
12+
Afterwards, the example boots into the first found device and prints available camera sensors, and reads calibration and eeprom data which stores product and
13+
board names.
14+
15+
Demo
16+
####
17+
18+
.. code-block::
19+
20+
Searching for all available devices...
21+
22+
Found device '1.3', MxId: '18443010D116631200', State: 'UNBOOTED'
23+
Found device '192.168.33.201', MxId: '184430102163DB0F00', State: 'BOOTLOADER'
24+
Found device '192.168.33.192', MxId: '1844301011F4C51200', State: 'BOOTLOADER'
25+
26+
Booting the first available camera (1.3)...
27+
Available camera sensors: {<CameraBoardSocket.RIGHT: 2>: 'OV9282', <CameraBoardSocket.RGB: 0>: 'IMX378', <CameraBoardSocket.LEFT: 1>: 'OV9282'}
28+
Product name: OAK-D Pro AF, board name DM9098
29+
30+
31+
Setup
32+
#####
33+
34+
.. include:: /includes/install_from_pypi.rst
35+
36+
Source code
37+
###########
38+
39+
.. tabs::
40+
41+
.. tab:: Python
42+
43+
Also `available on GitHub <https://github.com/luxonis/depthai-python/blob/main/examples/host_side/device_information.py>`__
44+
45+
.. literalinclude:: ../../../../examples/host_side/device_information.py
46+
:language: python
47+
:linenos:
48+
49+
.. tab:: C++
50+
51+
Also `available on GitHub <https://github.com/luxonis/depthai-core/blob/main/examples/host_side/device_information.cpp>`__
52+
53+
.. literalinclude:: ../../../../depthai-core/examples/host_side/device_information.cpp
54+
:language: cpp
55+
:linenos:
56+
57+
.. include:: /includes/footer-short.rst

examples/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,6 +163,8 @@ add_python_example(spatial_object_tracker ObjectTracker/spatial_object_tracker.p
163163

164164
## Script
165165
add_python_example(script_camera_control Script/script_camera_control.py)
166+
add_python_example(script_json_communication Script/script_json_communication.py)
167+
add_python_example(script_get_device_info Script/script_get_device_info.py)
166168

167169
## SpatialDetection
168170
add_python_example(spatial_tiny_yolo_v3 SpatialDetection/spatial_tiny_yolo.py yolo3)
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
#!/usr/bin/env python3
2+
import depthai as dai
3+
import json
4+
5+
# Start defining a pipeline
6+
pipeline = dai.Pipeline()
7+
8+
# Script node
9+
script = pipeline.create(dai.node.Script)
10+
script.setScript("""
11+
import json
12+
data = json.dumps({
13+
'deviceId': __device_id__,
14+
'fwVersion': __version__
15+
}).encode('utf-8')
16+
17+
b = Buffer(len(data))
18+
b.setData(data)
19+
node.io['info'].send(b)
20+
""")
21+
22+
xout = pipeline.create(dai.node.XLinkOut)
23+
xout.setStreamName('info')
24+
script.outputs['info'].link(xout.input)
25+
26+
# Connect to device with pipeline
27+
with dai.Device(pipeline) as device:
28+
msg = device.getOutputQueue("info").get() # Wait for the "end" msg
29+
data = json.loads(msg.getData().tobytes().decode('utf-8'))
30+
print(json.dumps(data, indent=4))

examples/Script/script_get_ip.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#!/usr/bin/env python3
22
import depthai as dai
33

4-
# MAKE SURE U HAVE LATEST BOOTLOADER KTNXBAI
54
# Start defining a pipeline
65
pipeline = dai.Pipeline()
76

@@ -23,7 +22,7 @@ def get_ip_address(ifname):
2322
2423
ip = get_ip_address('re0') # '192.168.0.110'
2524
node.warn(f'IP of the device: {ip}')
26-
node.io['end'].send(Buffer())
25+
node.io['end'].send(Buffer(32))
2726
""")
2827

2928
xout = pipeline.create(dai.node.XLinkOut)

examples/Script/script_http_client.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
data1 = r1.read()
2020
node.warn(f'Public IP: {data1}')
2121
22-
node.io['end'].send(Buffer())
22+
node.io['end'].send(Buffer(32))
2323
""")
2424

2525
xout = pipeline.create(dai.node.XLinkOut)
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
#!/usr/bin/env python3
2+
3+
import cv2
4+
import depthai as dai
5+
import math
6+
7+
# Create pipeline
8+
pipeline = dai.Pipeline()
9+
10+
# Define sources and outputs
11+
monoLeft = pipeline.create(dai.node.MonoCamera)
12+
monoRight = pipeline.create(dai.node.MonoCamera)
13+
stereo = pipeline.create(dai.node.StereoDepth)
14+
spatialLocationCalculator = pipeline.create(dai.node.SpatialLocationCalculator)
15+
16+
xoutDepth = pipeline.create(dai.node.XLinkOut)
17+
xoutSpatialData = pipeline.create(dai.node.XLinkOut)
18+
xinSpatialCalcConfig = pipeline.create(dai.node.XLinkIn)
19+
20+
xoutDepth.setStreamName("depth")
21+
xoutSpatialData.setStreamName("spatialData")
22+
xinSpatialCalcConfig.setStreamName("spatialCalcConfig")
23+
24+
# Properties
25+
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
26+
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
27+
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
28+
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
29+
30+
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
31+
stereo.setLeftRightCheck(True)
32+
stereo.setExtendedDisparity(True)
33+
spatialLocationCalculator.inputConfig.setWaitForMessage(False)
34+
35+
# Create 10 ROIs
36+
for i in range(10):
37+
config = dai.SpatialLocationCalculatorConfigData()
38+
config.depthThresholds.lowerThreshold = 200
39+
config.depthThresholds.upperThreshold = 10000
40+
config.roi = dai.Rect(dai.Point2f(i*0.1, 0.45), dai.Point2f((i+1)*0.1, 0.55))
41+
spatialLocationCalculator.initialConfig.addROI(config)
42+
43+
# Linking
44+
monoLeft.out.link(stereo.left)
45+
monoRight.out.link(stereo.right)
46+
47+
spatialLocationCalculator.passthroughDepth.link(xoutDepth.input)
48+
stereo.depth.link(spatialLocationCalculator.inputDepth)
49+
50+
spatialLocationCalculator.out.link(xoutSpatialData.input)
51+
xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)
52+
53+
# Connect to device and start pipeline
54+
with dai.Device(pipeline) as device:
55+
device.setIrLaserDotProjectorBrightness(1000)
56+
57+
# Output queue will be used to get the depth frames from the outputs defined above
58+
depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
59+
spatialCalcQueue = device.getOutputQueue(name="spatialData", maxSize=4, blocking=False)
60+
color = (0,200,40)
61+
fontType = cv2.FONT_HERSHEY_TRIPLEX
62+
63+
while True:
64+
inDepth = depthQueue.get() # Blocking call, will wait until a new data has arrived
65+
66+
depthFrame = inDepth.getFrame() # depthFrame values are in millimeters
67+
68+
depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
69+
depthFrameColor = cv2.equalizeHist(depthFrameColor)
70+
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
71+
72+
spatialData = spatialCalcQueue.get().getSpatialLocations()
73+
for depthData in spatialData:
74+
roi = depthData.config.roi
75+
roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0])
76+
77+
xmin = int(roi.topLeft().x)
78+
ymin = int(roi.topLeft().y)
79+
xmax = int(roi.bottomRight().x)
80+
ymax = int(roi.bottomRight().y)
81+
82+
coords = depthData.spatialCoordinates
83+
distance = math.sqrt(coords.x ** 2 + coords.y ** 2 + coords.z ** 2)
84+
85+
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, thickness=2)
86+
cv2.putText(depthFrameColor, "{:.1f}m".format(distance/1000), (xmin + 10, ymin + 20), fontType, 0.6, color)
87+
# Show the frame
88+
cv2.imshow("depth", depthFrameColor)
89+
90+
if cv2.waitKey(1) == ord('q'):
91+
break

examples/calibration/calibration_dump.py

100644100755
File mode changed.

0 commit comments

Comments
 (0)