
Commit 4d85750

zrezke, alex-luxonis, SzabolcsGergely, jakaskerl, and Matevz Morato authored
Release 2.25.1 (#1005)
* FW: fix fsync for OAK-D-SR
* Add tilt projection support
* Fix externally managed environment (#1001)
  - Added a custom error message on top of "Externally managed environment" to let users know how to solve it
  - Updated the workflow to always create a separate environment where depthai will be installed, to avoid the above error
* Install NN blobs from blobconverter (#999)
* Added thermal_nn to install_requirements
* Added the thermal model to artifactory
  Co-authored-by: zrezke <[email protected]>
* Fixed two bugs / unexpected behaviours (#1002)
* Added bindings for the API
* Updated core
* FW: fixes for IMX378 and IMX582: fix concurrent run, fix scaling with the IMX378 THE_1352X1012 resolution, change the Camera node's best-sensor-config selection to prioritize matching aspect ratio
* Remove the RH notification
* FW: fix the default fsync GPIO state for OAK-FFC-4P R7 (FSIN_4LANE GPIO42 = input, pull-down); other depthai-core updates
* Develop sync with main (#1004)
* Docs/release 25 (#997): initial docs update for v2.25; added pcl example docs; added pointcloud control example; updated pointcloud example
* Added encoded_frame docs (#998)
* Docs update (#1000): updated sync_node.rst; added USB 3.2 Gen2 enable docs
* Core v2.25.1
* Fixed FFC3P boot issue
* Update core
* Bump core to tagged version

---------

Co-authored-by: alex-luxonis <[email protected]>
Co-authored-by: SzabolcsGergely <[email protected]>
Co-authored-by: jakaskerl <[email protected]>
Co-authored-by: Matevz Morato <[email protected]>
1 parent e738ead commit 4d85750

File tree: 7 files changed (+200, -26 lines)


.github/workflows/main.yml

Lines changed: 0 additions & 12 deletions
@@ -531,18 +531,6 @@ jobs:
       ARTIFACTORY_USER: ${{ secrets.ARTIFACTORY_USER }}
       ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }}

-  notify_robothub:
-    if: startsWith(github.ref, 'refs/tags/v')
-    needs: [release]
-    runs-on: ubuntu-latest
-    steps:
-      - name: Repository Dispatch
-        uses: peter-evans/repository-dispatch@v2
-        with:
-          token: ${{ secrets.REPO_ACCESS_TOKEN }}
-          repository: luxonis/robothub-apps
-          event-type: depthai-python-release
-          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

   # notify_hil_workflow_linux_x86_64:
   #   needs: [build-linux-x86_64]

.github/workflows/test-install-dependencies.yml

Lines changed: 7 additions & 3 deletions
@@ -36,8 +36,8 @@
       if: endsWith(matrix.container_image, 'rolling') == true
       run: |
         sudo apt-get install -y python3-venv
-        python3 -m venv venv
-        . venv/bin/activate
+        python3 -m venv .env
+        . .env/bin/activate
         pip install --upgrade pip
         python3 examples/install_requirements.py
       shell: bash
@@ -56,9 +56,13 @@
       run: |
         sed '/udevadm control --reload-rules && sudo udevadm trigger/d' docs/source/_static/install_dependencies.sh > tmp_script.sh
         bash tmp_script.sh
-    - name: Install example requirements
+    - name: Create a virtual environment
       run: |
+        python3 -m venv .env
+        . .env/bin/activate
+        pip install --upgrade pip
         python3 examples/install_requirements.py
+      shell: bash
   test_windows:
     runs-on: windows-latest
     steps:
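
The step above now provisions a throwaway virtual environment before running the example installer, which sidesteps the "externally managed environment" error on newer distributions. For reference, a minimal sketch of the same steps using only the standard library (the .env directory name mirrors the workflow; the POSIX interpreter path is an assumption, on Windows it would be .env\Scripts\python.exe):

    # Sketch: create a virtual environment and run the example installer inside it.
    import subprocess
    import venv

    venv.create(".env", with_pip=True)   # equivalent of: python3 -m venv .env
    py = ".env/bin/python"               # assumption: POSIX layout; .env\Scripts\python.exe on Windows
    subprocess.check_call([py, "-m", "pip", "install", "--upgrade", "pip"])
    subprocess.check_call([py, "examples/install_requirements.py"])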
Lines changed: 82 additions & 0 deletions
@@ -0,0 +1,82 @@
+import depthai as dai
+import cv2
+from pathlib import Path
+import numpy as np
+import sys
+
+
+nnPath = str((Path(__file__).parent / Path('../models/yolov6n_thermal_people_256x192_openvino_2022.1_6shave.blob')).resolve().absolute())
+if len(sys.argv) > 1:
+    nnPath = sys.argv[1]
+
+if not Path(nnPath).exists():
+    import sys
+    raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"')
+
+labels = ["person"]
+
+device = dai.Device()
+
+pipeline = dai.Pipeline()
+nnet = pipeline.create(dai.node.YoloDetectionNetwork)
+nnet.setBlobPath(nnPath)
+nnet.setConfidenceThreshold(0.5)
+nnet.setNumClasses(1)
+nnet.setCoordinateSize(4)
+nnet.setIouThreshold(0.4)
+
+thermalCam = pipeline.create(dai.node.Camera)
+thermalCam.setBoardSocket(dai.CameraBoardSocket.CAM_E)
+thermalCam.setPreviewSize(256, 192)
+
+thermalCam.raw.link(nnet.input)
+
+rawOut = pipeline.createXLinkOut()
+rawOut.setStreamName("preview")
+thermalCam.preview.link(rawOut.input)
+
+xoutNn = pipeline.createXLinkOut()
+xoutNn.setStreamName("nn")
+nnet.out.link(xoutNn.input)
+
+xoutPass = pipeline.createXLinkOut()
+xoutPass.setStreamName("pass")
+nnet.passthrough.link(xoutPass.input)
+
+device.startPipeline(pipeline)
+
+qNn = device.getOutputQueue(name="nn", maxSize=2, blocking=False)
+qPass = device.getOutputQueue(name="pass", maxSize=2, blocking=False)
+qPreview = device.getOutputQueue(name="preview", maxSize=2, blocking=False)
+
+cv2.namedWindow("nnet", cv2.WINDOW_NORMAL)
+cv2.namedWindow("raw", cv2.WINDOW_NORMAL)
+cv2.resizeWindow("nnet", 640, 480)
+cv2.resizeWindow("raw", 640, 480)
+
+while True:
+    inNn = qNn.get()
+    inPass = qPass.tryGet()
+    inPreview = qPreview.get()
+    if inNn and inPass:
+        frame = inPass.getCvFrame().astype(np.float32)
+        min_, max_ = frame.min(), frame.max()
+        colormappedFrame = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
+        colormappedFrame = cv2.applyColorMap(colormappedFrame, cv2.COLORMAP_MAGMA)
+
+        detections = inNn.detections
+        for detection in detections:
+            xmin = max(0.0, detection.xmin)
+            ymin = max(0.0, detection.ymin)
+            xmax = min(1.0, detection.xmax)
+            ymax = min(1.0, detection.ymax)
+            pt1 = int(xmin * 256), int(ymin * 192)
+            pt2 = int(xmax * 256), int(ymax * 192)
+            cv2.rectangle(colormappedFrame, pt1, pt2, (0, 255, 0))
+            cv2.putText(colormappedFrame, labels[detection.label], pt1, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
+        cv2.imshow("nnet", colormappedFrame)
+    if inPreview:
+        cv2.imshow("raw", inPreview.getCvFrame())
+
+    if cv2.waitKey(1) == ord("q"):
+        break

examples/install_requirements.py

Lines changed: 76 additions & 10 deletions
@@ -4,6 +4,9 @@
 import argparse
 import re
 import platform
+from subprocess import CalledProcessError, DEVNULL
+import textwrap
+

 convert_default = "empty"
 parser = argparse.ArgumentParser()
@@ -46,7 +49,7 @@ def hasWhitespace(string):
 # try to import opencv, numpy in a subprocess, since it might fail with illegal instruction
 # if it was previously installed w/ pip without setting OPENBLAS_CORE_TYPE=ARMV8 env variable
 try:
-    subprocess.check_call([sys.executable, "-c", "import numpy, cv2;"])
+    subprocess.check_call([sys.executable, "-c", "import numpy, cv2;"], stderr=DEVNULL)
     requireOpenCv = False
 except subprocess.CalledProcessError as ex:
     requireOpenCv = True
@@ -94,10 +97,38 @@ def hasWhitespace(string):

 # Update pip
 pip_update_cmd = [*pip_install, "pip"]
+
 if args.dry_run:
     prettyPrint(pip_update_cmd)
 else:
-    subprocess.check_call(pip_update_cmd)
+    try:
+        subprocess.check_call(pip_update_cmd)
+    except CalledProcessError as e:
+        print(f"\n\n\033[31m\033[1m[Warning]\033[0m An error occurred while trying to update pip: {e}\n")
+        print("This likely stems from the fact that you're not using a Python virtual environment.")
+        venv_creation_instructions = textwrap.dedent(f"""\
+            \033[94m\033[1m
+            Here's how you can create and activate a virtual environment:
+
+            1. Create a virtual environment:
+               - For Linux or MacOS, use: python3 -m venv {parent_dir}/.env
+               - For Windows, use: python -m venv {parent_dir}/.env
+
+            2. Activate the virtual environment:
+               - For Linux or MacOS, use: source {parent_dir}/.env/bin/activate
+               - For Windows, use: {parent_dir}/.env/Scripts/activate
+
+            Once activated, you'll be working within the virtual environment. You can then attempt to re-run this script.
+            To exit the virtual environment when you're done, simply use the command: deactivate
+
+            For more detailed instructions, please refer to the official Python documentation on virtual environments: https://docs.python.org/3/tutorial/venv.html
+            \033[0m
+            """)
+
+        print(textwrap.indent(venv_creation_instructions, ' '))
+        exit(0)
+
+
 # Install python dependencies
 python_dependencies_cmd = [*pip_package_install, *DEPENDENCIES]
 if args.dry_run:
@@ -161,16 +192,51 @@ def hasWhitespace(string):
     subprocess.check_call(downloader_cmd)

 if args.convert != convert_default:
-    nn_models_shaves = {
-        "mobilenet-ssd": [5, 6, 8],
-        "person-detection-retail-0013": [7],
-        "yolo-v4-tiny-tf": [6],
-        "yolo-v3-tiny-tf": [6],
+
+    nn_model_configs = {
+        "mobilenet-ssd": {
+            "shaves": [5, 6, 8],
+            "compile_params": ["-ip U8"],
+            "zoo_type": "intel",
+            "default_ov_version": "2021.4"
+        },
+        "person-detection-retail-0013": {
+            "shaves": [7],
+            "compile_params": ["-ip U8"],
+            "zoo_type": "intel",
+            "default_ov_version": "2021.4"
+        },
+        "yolo-v4-tiny-tf": {
+            "shaves": [6],
+            "compile_params": ["-ip U8"],
+            "zoo_type": "intel",
+            "default_ov_version": "2021.4"
+        },
+        "yolo-v3-tiny-tf": {
+            "shaves": [6],
+            "compile_params": ["-ip U8"],
+            "zoo_type": "intel",
+            "default_ov_version": "2021.4"
+        },
+        "yolov6n_thermal_people_256x192": {
+            "shaves": [6],
+            "compile_params": ["-ip FP16"],
+            "zoo_type": "depthai",
+            "default_ov_version": "2022.1"
+        },
     }
+
     blobconverter_cmds = [
-        [sys.executable, "-m", "blobconverter", "-zn", nn_name, "-sh", str(nn_shave), "-o", f"{examples_dir}/models", *(["-v", args.convert] if args.convert is not None else [])]
-        for nn_name in nn_models_shaves
-        for nn_shave in nn_models_shaves[nn_name]
+        [sys.executable,
+         "-m", "blobconverter",
+         "-zn", nn_name,
+         "-sh", str(nn_shave),
+         "-o", f"{examples_dir}/models",
+         "-zt", nn_model_configs[nn_name]["zoo_type"],
+         *(["--compile-params", " ".join(nn_model_configs[nn_name]["compile_params"])] if nn_model_configs[nn_name]["compile_params"] else []),
+         *(["-v", args.convert] if args.convert != convert_default else ["-v", nn_model_configs[nn_name]["default_ov_version"]])]
+        for nn_name in nn_model_configs
+        for nn_shave in nn_model_configs[nn_name]["shaves"]
     ]
     install_blobconverter_cmd = [*pip_package_install, "blobconverter"]
     for cmd in [install_blobconverter_cmd] + blobconverter_cmds:
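
The reworked nn_model_configs table now carries the zoo type, compile parameters and a per-model default OpenVINO version. As an illustration, this is roughly what one element of blobconverter_cmds looks like for the new thermal model (a sketch: the output path stands in for f"{examples_dir}/models", and the -v value assumes the default_ov_version branch of the conditional is taken):

    import sys

    # One element of blobconverter_cmds for "yolov6n_thermal_people_256x192" at 6 shaves.
    cmd = [sys.executable, "-m", "blobconverter",
           "-zn", "yolov6n_thermal_people_256x192",
           "-sh", "6",
           "-o", "examples/models",         # f"{examples_dir}/models" in the script
           "-zt", "depthai",                # pulled from the depthai zoo rather than the Intel one
           "--compile-params", "-ip FP16",  # thermal frames are compiled for FP16 input
           "-v", "2022.1"]                  # default_ov_version for this model
    print(" ".join(cmd))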
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+# MIT License
+
+# Copyright (c) 2021 Luxonis Holding Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+description: >-
+  thermal-yolo
+task_type: object_attributes
+files:
+  - name: yolov6n_thermal_people_256x192_openvino_2022.1_6shave.blob
+    size: 9311960
+    sha256: fb75828e7014ad92170fe54bb3a3253b8be076005bf651ac30eb0841f63a3b86
+    source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/yolov6n_thermal_people_256x192_openvino_2022.1_6shave.blob
+
+framework: dldt
+license: https://raw.githubusercontent.com/luxonis/depthai-model-zoo/main/LICENSE
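
The size and sha256 fields in this metadata describe the blob served from the artifacts URL above, so a downloader can verify the file after fetching it. A minimal check, assuming the blob has already been downloaded into examples/models:

    import hashlib
    from pathlib import Path

    # Verify the downloaded blob against the size and sha256 recorded in the model metadata.
    blob = Path("examples/models/yolov6n_thermal_people_256x192_openvino_2022.1_6shave.blob")
    data = blob.read_bytes()
    assert len(data) == 9311960, "unexpected file size"
    expected = "fb75828e7014ad92170fe54bb3a3253b8be076005bf651ac30eb0841f63a3b86"
    assert hashlib.sha256(data).hexdigest() == expected, "checksum mismatch"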

src/DeviceBootloaderBindings.cpp

Lines changed: 1 addition & 0 deletions
@@ -161,6 +161,7 @@ void DeviceBootloaderBindings::bind(pybind11::module& m, void* pCallstack){

         //.def("flashCustom", &DeviceBootloader::flashCustom, py::arg("memory"), py::arg("offset"), py::arg("progressCallback"), py::arg("data"), DOC(dai, DeviceBootloader, flashCustom))
         .def("getVersion", [](DeviceBootloader& db) { py::gil_scoped_release release; return db.getVersion(); }, DOC(dai, DeviceBootloader, getVersion))
+        .def("getFlashedVersion", [](DeviceBootloader& db) { py::gil_scoped_release release; return db.getFlashedVersion(); }, DOC(dai, DeviceBootloader, getFlashedVersion))

         .def("isEmbeddedVersion", &DeviceBootloader::isEmbeddedVersion, DOC(dai, DeviceBootloader, isEmbeddedVersion))
         .def("getType", &DeviceBootloader::getType, DOC(dai, DeviceBootloader, getType))
