Skip to content

Commit b38a7c0

Browse files
committed
Merge remote-tracking branch 'origin/release_v2.23.0.0' into main
2 parents 12f1a79 + 9591231 commit b38a7c0

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

45 files changed

+534
-84
lines changed

.github/workflows/main.yml

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,6 @@ jobs:
4949
run: |
5050
python -m pip install --upgrade pip
5151
sudo apt install libusb-1.0-0-dev
52-
python -m pip install clang==14.0 --force-reinstall
5352
python -m pip install -r docs/requirements_mkdoc.txt
5453
- name: Configure project
5554
run: cmake -S . -B build -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp"
@@ -154,8 +153,13 @@ jobs:
154153
run: echo "BUILD_COMMIT_HASH=${{github.sha}}" >> $GITHUB_ENV
155154
- name: Building wheel
156155
run: python3 -m pip wheel . -w ./wheelhouse/ --verbose
157-
- name: Auditing wheel
158-
run: for whl in wheelhouse/*.whl; do auditwheel repair "$whl" --plat linux_armv7l -w wheelhouse/audited/; done
156+
- name: Auditing wheels and adding armv6l tag (Running on RPi, binaries compiled as armv6l)
157+
run: |
158+
python3 -m pip install -U wheel auditwheel
159+
for whl in wheelhouse/*.whl; do auditwheel repair "$whl" --plat linux_armv7l -w wheelhouse/preaudited/; done
160+
for whl in wheelhouse/preaudited/*.whl; do python3 -m wheel tags --platform-tag +linux_armv6l "$whl"; done
161+
mkdir -p wheelhouse/audited/
162+
for whl in wheelhouse/preaudited/*linux_armv6l*.whl; do cp "$whl" wheelhouse/audited/$(basename $whl); done
159163
- name: Archive wheel artifacts
160164
uses: actions/upload-artifact@v3
161165
with:
@@ -560,13 +564,13 @@ jobs:
560564
uses: codex-/return-dispatch@v1
561565
id: return_dispatch
562566
with:
563-
token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} # Note this is NOT GITHUB_TOKEN but a PAT
567+
token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }} # Note this is NOT GITHUB_TOKEN but a PAT
564568
ref: main # or refs/heads/target_branch
565569
repo: depthai-core-hil-tests
566570
owner: luxonis
567571
workflow: regression_test.yml
568-
workflow_inputs: '{"commit": "${{ github.ref }}", "sha": "${{ github.sha }}", "parent_url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}'
569-
workflow_timeout_seconds: 120 # Default: 300
572+
workflow_inputs: '{"commit": "${{ github.ref }}", "parent_url": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}'
573+
workflow_timeout_seconds: 300 # was 120 Default: 300
570574

571575
- name: Release
572576
run: echo "https://github.com/luxonis/depthai-core-hil-tests/actions/runs/${{steps.return_dispatch.outputs.run_id}}" >> $GITHUB_STEP_SUMMARY

CMakeLists.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -162,8 +162,9 @@ if(WIN32)
162162
set(depthai_dll_libraries "$<TARGET_RUNTIME_DLLS:${TARGET_NAME}>")
163163
endif()
164164
add_custom_command(TARGET ${TARGET_NAME} POST_BUILD COMMAND
165-
${CMAKE_COMMAND} -E copy ${depthai_dll_libraries} $<TARGET_FILE_DIR:${TARGET_NAME}>
165+
"$<$<BOOL:${depthai_dll_libraries}>:${CMAKE_COMMAND};-E;copy_if_different;${depthai_dll_libraries};$<TARGET_FILE_DIR:${TARGET_NAME}>>"
166166
COMMAND_EXPAND_LISTS
167+
VERBATIM
167168
)
168169

169170
# Disable "d" postfix, so python can import the library as is

cmake/pybind11-mkdoc.cmake

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ function(pybind11_mkdoc_setup_internal target output_path mkdoc_headers enforce)
2828

2929
# Execute module pybind11_mkdoc to check if present
3030
message(STATUS "Checking for pybind11_mkdoc")
31-
execute_process(COMMAND ${PYTHON_EXECUTABLE} -m ${PYBIND11_MKDOC_MODULE_NAME} RESULT_VARIABLE error OUTPUT_QUIET ERROR_QUIET)
31+
execute_process(COMMAND ${PYTHON_EXECUTABLE} -m ${PYBIND11_MKDOC_MODULE_NAME} --help RESULT_VARIABLE error OUTPUT_QUIET ERROR_QUIET)
3232
if(error)
3333
set(message "Checking for pybind11_mkdoc - not found, docstrings not available")
3434
if(NOT enforce)

depthai-core

Submodule depthai-core updated 55 files

docs/requirements_mkdoc.txt

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
1-
git+https://github.com/luxonis/pybind11_mkdoc.git@59746f8d1645c9f00ebfb534186334d0154b5bd6
1+
git+https://github.com/luxonis/pybind11_mkdoc.git@da6c64251a0ebbc3ffc007477a0b9c9f20cac165
2+
libclang==16.0.6
Lines changed: 149 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,149 @@
1+
Camera
2+
======
3+
4+
Camera node is a source of :ref:`image frames <ImgFrame>`. You can control it at runtime with the :code:`InputControl` and :code:`InputConfig`.
5+
It aims to unify the :ref:`ColorCamera` and :ref:`MonoCamera` into one node.
6+
7+
Compared to :ref:`ColorCamera` node, Camera node:
8+
9+
- Supports **cam.setSize()**, which replaces both ``cam.setResolution()`` and ``cam.setIspScale()``. Camera node will automatically find resolution that fits best, and apply correct scaling to achieve user-selected size
10+
- Supports **cam.setCalibrationAlpha()**, example here: :ref:`Undistort camera stream`
11+
- Supports **cam.loadMeshData()** and **cam.setMeshStep()**, which can be used for custom image warping (undistortion, perspective correction, etc.)
12+
13+
Besides points above, compared to :ref:`MonoCamera` node, Camera node:
14+
15+
- Doesn't have ``out`` output, as it has the same outputs as :ref:`ColorCamera` (``raw``, ``isp``, ``still``, ``preview``, ``video``). This means that ``preview`` will output 3 planes of the same grayscale frame (3x overhead), and ``isp`` / ``video`` / ``still`` will output luma (useful grayscale information) + chroma (all values are 128), which will result in 1.5x bandwidth overhead
16+
17+
How to place it
18+
###############
19+
20+
.. tabs::
21+
22+
.. code-tab:: py
23+
24+
pipeline = dai.Pipeline()
25+
cam = pipeline.create(dai.node.Camera)
26+
27+
.. code-tab:: c++
28+
29+
dai::Pipeline pipeline;
30+
auto cam = pipeline.create<dai::node::Camera>();
31+
32+
33+
Inputs and Outputs
34+
##################
35+
36+
.. code-block::
37+
38+
Camera node
39+
┌──────────────────────────────┐
40+
│ ┌─────────────┐ │
41+
│ │ Image │ raw │ raw
42+
│ │ Sensor │---┬--------├────────►
43+
│ └────▲────────┘ | │
44+
│ │ ┌--------┘ │
45+
│ ┌─┴───▼─┐ │ isp
46+
inputControl │ │ │-------┬-------├────────►
47+
──────────────►│------│ ISP │ ┌─────▼────┐ │ video
48+
│ │ │ | |--├────────►
49+
│ └───────┘ │ Image │ │ still
50+
inputConfig │ │ Post- │--├────────►
51+
──────────────►│----------------|Processing│ │ preview
52+
│ │ │--├────────►
53+
│ └──────────┘ │
54+
└──────────────────────────────┘
55+
56+
**Message types**
57+
58+
- :code:`inputConfig` - :ref:`ImageManipConfig`
59+
- :code:`inputControl` - :ref:`CameraControl`
60+
- :code:`raw` - :ref:`ImgFrame` - RAW10 bayer data. Demo code for unpacking `here <https://github.com/luxonis/depthai-experiments/blob/3f1b2b2/gen2-color-isp-raw/main.py#L13-L32>`__
61+
- :code:`isp` - :ref:`ImgFrame` - YUV420 planar (same as YU12/IYUV/I420)
62+
- :code:`still` - :ref:`ImgFrame` - NV12, suitable for bigger size frames. The image gets created when a capture event is sent to the Camera, so it's like taking a photo
63+
- :code:`preview` - :ref:`ImgFrame` - RGB (or BGR planar/interleaved if configured), mostly suited for small size previews and to feed the image into :ref:`NeuralNetwork`
64+
- :code:`video` - :ref:`ImgFrame` - NV12, suitable for bigger size frames
65+
66+
**ISP** (image signal processor) is used for bayer transformation, demosaicing, noise reduction, and other image enhancements.
67+
It interacts with the 3A algorithms: **auto-focus**, **auto-exposure**, and **auto-white-balance**, which are handling image sensor
68+
adjustments such as exposure time, sensitivity (ISO), and lens position (if the camera module has a motorized lens) at runtime.
69+
Click `here <https://en.wikipedia.org/wiki/Image_processor>`__ for more information.
70+
71+
**Image Post-Processing** converts YUV420 planar frames from the **ISP** into :code:`video`/:code:`preview`/:code:`still` frames.
72+
73+
``still`` (when a capture is triggered) and ``isp`` work at the max camera resolution, while ``video`` and ``preview`` are
74+
limited to max 4K (3840 x 2160) resolution, which is cropped from ``isp``.
75+
For IMX378 (12MP), the **post-processing** works like this:
76+
77+
.. code-block::
78+
79+
┌─────┐ Cropping to ┌─────────┐ Downscaling ┌──────────┐
80+
│ ISP ├────────────────►│ video ├───────────────►│ preview │
81+
└─────┘ max 3840x2160 └─────────┘ and cropping └──────────┘
82+
83+
.. image:: /_static/images/tutorials/isp.jpg
84+
85+
The image above is the ``isp`` output from the Camera (12MP resolution from IMX378). If you aren't downscaling ISP,
86+
the ``video`` output is cropped to 4k (max 3840x2160 due to the limitation of the ``video`` output) as represented by
87+
the blue rectangle. The yellow rectangle represents a cropped ``preview`` output when the preview size is set to a 1:1 aspect
88+
ratio (e.g. when using a 300x300 preview size for the MobileNet-SSD NN model) because the ``preview`` output is derived from
89+
the ``video`` output.
90+
91+
Usage
92+
#####
93+
94+
.. tabs::
95+
96+
.. code-tab:: py
97+
98+
pipeline = dai.Pipeline()
99+
cam = pipeline.create(dai.node.Camera)
100+
cam.setPreviewSize(300, 300)
101+
cam.setBoardSocket(dai.CameraBoardSocket.CAM_A)
102+
# Instead of setting the resolution, user can specify size, which will set
103+
# sensor resolution to best fit, and also apply scaling
104+
cam.setSize(1280, 720)
105+
106+
.. code-tab:: c++
107+
108+
dai::Pipeline pipeline;
109+
auto cam = pipeline.create<dai::node::Camera>();
110+
cam->setPreviewSize(300, 300);
111+
cam->setBoardSocket(dai::CameraBoardSocket::CAM_A);
112+
// Instead of setting the resolution, user can specify size, which will set
113+
// sensor resolution to best fit, and also apply scaling
114+
cam->setSize(1280, 720);
115+
116+
Limitations
117+
###########
118+
119+
Here are known camera limitations for the `RVC2 <https://docs.luxonis.com/projects/hardware/en/latest/pages/rvc/rvc2.html#rvc2>`__:
120+
121+
- **ISP can process about 600 MP/s**, and about **500 MP/s** when the pipeline is also running NNs and video encoder in parallel
122+
- **3A algorithms** can process about **200..250 FPS overall** (for all camera streams). This is a current limitation of our implementation, and we have plans for a workaround to run 3A algorithms on every Xth frame, no ETA yet
123+
124+
Examples of functionality
125+
#########################
126+
127+
- :ref:`Undistort camera stream`
128+
129+
Reference
130+
#########
131+
132+
.. tabs::
133+
134+
.. tab:: Python
135+
136+
.. autoclass:: depthai.node.Camera
137+
:members:
138+
:inherited-members:
139+
:noindex:
140+
141+
.. tab:: C++
142+
143+
.. doxygenclass:: dai::node::Camera
144+
:project: depthai-core
145+
:members:
146+
:private-members:
147+
:undoc-members:
148+
149+
.. include:: ../../includes/footer-short.rst

docs/source/components/nodes/warp.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ Usage
6565
# Warp engines to be used (0,1,2)
6666
warp.setHwIds([1])
6767
# Warp interpolation mode, choose between BILINEAR, BICUBIC, BYPASS
68-
warp.setInterpolation(dai.node.Warp.Properties.Interpolation.BYPASS)
68+
warp.setInterpolation(dai.Interpolation.NEAREST_NEIGHBOR)
6969

7070
.. code-tab:: c++
7171

Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
Undistort camera stream
2+
=======================
3+
4+
This example shows how you can use the :ref:`Camera` node to undistort a wide FOV camera stream. The :ref:`Camera` node will automatically undistort the ``still``, ``video`` and ``preview`` streams, while the ``isp`` stream will be left as is.
5+
6+
Demo
7+
####
8+
9+
.. figure:: https://github.com/luxonis/depthai-python/assets/18037362/936b9ad7-179b-42a5-a6cb-25efdbdf73d9
10+
11+
Left: Camera.isp output. Right: Camera.video (undistorted) output
12+
13+
Setup
14+
#####
15+
16+
.. include:: /includes/install_from_pypi.rst
17+
18+
Source code
19+
###########
20+
21+
.. tabs::
22+
23+
.. tab:: Python
24+
25+
Also `available on GitHub <https://github.com/luxonis/depthai-python/blob/main/examples/Camera/camera_undistort.py>`__
26+
27+
.. literalinclude:: ../../../../examples/Camera/camera_undistort.py
28+
:language: python
29+
:linenos:
30+
31+
.. tab:: C++
32+
33+
Work in progress.
34+
35+
36+
..
37+
Also `available on GitHub <https://github.com/luxonis/depthai-core/blob/main/examples/Camera/camera_undistort.cpp>`__
38+
39+
.. literalinclude:: ../../../../depthai-core/examples/Camera/camera_undistort.cpp
40+
:language: cpp
41+
:linenos:
42+
43+
.. include:: /includes/footer-short.rst

docs/source/tutorials/code_samples.rst

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ Code Samples
77

88
../samples/bootloader/*
99
../samples/calibration/*
10+
../samples/Camera/*
1011
../samples/ColorCamera/*
1112
../samples/crash_report/*
1213
../samples/EdgeDetector/*
@@ -46,6 +47,11 @@ are presented with code.
4647
- :ref:`Calibration Reader` - Reads calibration data stored on device over XLink
4748
- :ref:`Calibration Load` - Loads and uses calibration data of version 6 (gen2 calibration data) in a pipeline
4849

50+
51+
.. rubric:: Camera
52+
53+
- :ref:`Undistort camera stream` - Showcases how Camera node undistorts camera streams
54+
4955
.. rubric:: ColorCamera
5056

5157
- :ref:`Auto Exposure on ROI` - Demonstrates how to use auto exposure based on the selected ROI

examples/Camera/camera_undistort.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
import depthai as dai
2+
import cv2
3+
4+
pipeline = dai.Pipeline()
5+
6+
# Define sources and outputs
7+
camRgb: dai.node.Camera = pipeline.create(dai.node.Camera)
8+
9+
#Properties
10+
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
11+
camRgb.setSize((1280, 800))
12+
13+
# Linking
14+
videoOut = pipeline.create(dai.node.XLinkOut)
15+
videoOut.setStreamName("video")
16+
camRgb.video.link(videoOut.input)
17+
18+
ispOut = pipeline.create(dai.node.XLinkOut)
19+
ispOut.setStreamName("isp")
20+
camRgb.isp.link(ispOut.input)
21+
22+
with dai.Device(pipeline) as device:
23+
video = device.getOutputQueue(name="video", maxSize=1, blocking=False)
24+
isp = device.getOutputQueue(name="isp", maxSize=1, blocking=False)
25+
26+
while True:
27+
if video.has():
28+
cv2.imshow("video", video.get().getCvFrame())
29+
if isp.has():
30+
cv2.imshow("isp", isp.get().getCvFrame())
31+
if cv2.waitKey(1) == ord('q'):
32+
break

examples/ColorCamera/rgb_preview.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
if device.getBootloaderVersion() is not None:
3131
print('Bootloader version:', device.getBootloaderVersion())
3232
# Device name
33-
print('Device name:', device.getDeviceName())
33+
print('Device name:', device.getDeviceName(), ' Product name:', device.getProductName())
3434

3535
# Output queue will be used to get the rgb frames from the output defined above
3636
qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)

0 commit comments

Comments
 (0)