diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/.env b/manufacturing-ai-suite/industrial-edge-insights-vision/.env index ff75e51da1..8526904cdc 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/.env +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/.env @@ -4,7 +4,7 @@ HOST_IP= REST_SERVER_PORT=8080 # DL Streamer Pipeline Server -DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 RTSP_CAMERA_IP= # MinIO service & client diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pallet-defect-detection b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pallet-defect-detection index ff75e51da1..8526904cdc 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pallet-defect-detection +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pallet-defect-detection @@ -4,7 +4,7 @@ HOST_IP= REST_SERVER_PORT=8080 # DL Streamer Pipeline Server -DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 RTSP_CAMERA_IP= # MinIO service & client diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pcb-anomaly-detection b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pcb-anomaly-detection index 5ab5da0eee..2337c52a05 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pcb-anomaly-detection +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_pcb-anomaly-detection @@ -4,7 +4,7 @@ HOST_IP= REST_SERVER_PORT=8080 # DL Streamer Pipeline Server -DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 RTSP_CAMERA_IP= # MinIO service & client diff --git 
a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_weld-porosity b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_weld-porosity index e5aa84b3e0..62a7943c80 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_weld-porosity +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_weld-porosity @@ -4,7 +4,7 @@ HOST_IP= REST_SERVER_PORT=8080 # DL Streamer Pipeline Server -DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 RTSP_CAMERA_IP= # MinIO service & client diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_worker-safety-gear-detection b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_worker-safety-gear-detection index 31da364cae..c65c3267a1 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/.env_worker-safety-gear-detection +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/.env_worker-safety-gear-detection @@ -4,7 +4,7 @@ HOST_IP= REST_SERVER_PORT=8080 # DL Streamer Pipeline Server -DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 RTSP_CAMERA_IP= # MinIO service & client diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/setup.sh b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/setup.sh index 561c564bfb..81ba5d38de 100755 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/setup.sh +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/pallet-defect-detection/setup.sh @@ -3,7 +3,7 @@ # Download artifacts for a specific sample application # by calling respective app's setup.sh script SCRIPT_DIR=$(dirname $(readlink -f "$0")) 
-MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/pallet_defect_detection.zip" +MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/pallet_defect_detection.zip" VIDEO_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/c13b8dbf23d514c2667d39b66615bd1400cb889d/videos/warehouse.avi" err() { diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json index 5354510639..ff84648e83 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/configs/pipeline-server-config.json @@ -5,7 +5,7 @@ "name": "worker_safety_gear_detection_mlops", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "multifilesrc loop=TRUE location=/home/pipeline-server/resources/videos/Safety_Full_Hat_and_Vest.avi name=source ! h264parse ! decodebin3 ! gvadetect name=detection ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "multifilesrc loop=TRUE location=/home/pipeline-server/resources/videos/Safety_Full_Hat_and_Vest.avi name=source ! h264parse ! decodebin3 ! gvadetect name=detection threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -23,7 +23,7 @@ "name": "worker_safety_gear_detection_mqtt", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 ! 
gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -41,7 +41,7 @@ "name": "worker_safety_gear_detection_s3write", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -64,7 +64,7 @@ "name": "worker_safety_gear_detection_opcua", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! 
appsink name=destination", "parameters": { "type": "object", "properties": { @@ -86,7 +86,7 @@ "name": "worker_safety_gear_detection_gpu", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=GPU model-instance-id=instgpu0 inference-region=full-frame inference-interval=1 batch-size=8 nireq=2 ie-config=\"GPU_THROUGHPUT_STREAMS=2\" threshold=0.7 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=GPU model-instance-id=instgpu0 inference-region=full-frame inference-interval=1 batch-size=8 nireq=2 ie-config=\"GPU_THROUGHPUT_STREAMS=2\" threshold=0.4 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -104,7 +104,7 @@ "name": "worker_safety_gear_detection_npu", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=NPU model-instance-id=instnpu0 inference-region=full-frame inference-interval=1 batch-size=1 nireq=4 threshold=0.7 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=NPU model-instance-id=instnpu0 inference-region=full-frame inference-interval=1 batch-size=1 nireq=4 threshold=0.4 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -122,7 +122,7 @@ "name": "worker_safety_gear_detection", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=CPU model-instance-id=inst0 name=detection ! queue ! 
gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=CPU model-instance-id=inst0 threshold=0.4 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", "parameters": { "type": "object", "properties": { diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/setup.sh b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/setup.sh index a572ed64ff..3bd86851ce 100755 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/setup.sh +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/apps/worker-safety-gear-detection/setup.sh @@ -3,7 +3,7 @@ # Download artifacts for a specific sample application # by calling respective app's setup.sh script SCRIPT_DIR=$(dirname $(readlink -f "$0")) -MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/worker-safety-gear-detection.zip" +MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/worker-safety-gear-detection.zip" VIDEO_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/edd25f37c324a9ef73df1642354b2ba5fa7b7df5/videos/Safety_Full_Hat_and_Vest.avi" err() { diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-multiple-instances-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-multiple-instances-with-helm.md index 3ad584a7c1..3b51fd2e21 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-multiple-instances-with-helm.md +++ 
b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-multiple-instances-with-helm.md @@ -58,6 +58,7 @@ MTX_WEBRTCICESERVERS2_0_USERNAME= # WebRTC credentials e.g. intel1234 MTX_WEBRTCICESERVERS2_0_PASSWORD= ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values_.yaml` file to enable access to host hardware devices. 4. Install pre-requisites for all instances @@ -806,7 +807,7 @@ Applications can take advantage of S3 publish feature from DL Streamer Pipeline >NOTE- For sake of simplicity, we assume that the new model has already been downloaded by Model Download microservice. The following curl command is only a simulation that just downloads the model. In production, however, they will be downloaded by the Model Download service. ```sh - export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/pallet_defect_detection.zip' + export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/pallet_defect_detection.zip' curl -L "$MODEL_URL" -o "$(basename $MODEL_URL)" diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-with-helm.md index 80fa00f5ec..b3ae0b9400 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-with-helm.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/get-started/deploy-with-helm.md @@ -23,6 +23,7 @@ ```sh cp helm/values_pallet-defect-detection.yaml helm/values.yaml ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values.yaml` 
file to enable access to host hardware devices. 3. Optional: Pull the helm chart and replace the existing helm folder with it @@ -32,13 +33,13 @@ - Download helm chart with the following command ```bash - helm pull oci://registry-1.docker.io/intel/pallet-defect-detection-reference-implementation --version 2.6.0-rc1 + helm pull oci://registry-1.docker.io/intel/pallet-defect-detection-reference-implementation --version 2.6.0-rc2 ``` - Unzip the package using the following command ```bash - tar -xvf pallet-defect-detection-reference-implementation-2.6.0-rc1.tgz + tar -xvf pallet-defect-detection-reference-implementation-2.6.0-rc2.tgz ``` - Replace the helm directory @@ -408,7 +409,7 @@ Applications can take advantage of S3 publish feature from DL Streamer Pipeline >NOTE- For sake of simplicity, we assume that the new model has already been downloaded by Model Download microservice. The following curl command is only a simulation that just downloads the model. In production, however, they will be downloaded by the Model Download service. 
```sh - export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/pallet_defect_detection.zip' + export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/pallet_defect_detection.zip' curl -L "$MODEL_URL" -o "$(basename $MODEL_URL)" diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides.md index c300fe1869..30e6f1f653 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides.md @@ -6,6 +6,7 @@ This section collects guides for Pallet Defect Detection sample application. - [Manage pipelines](./how-to-guides/manage-pipelines.md) - [Run multiple AI pipelines](./how-to-guides/run-multiple-ai-pipelines.md) - [Use GPU For Inference](./how-to-guides/use-gpu-for-inference.md) +- [Use NPU For Inference](./how-to-guides/use-npu-for-inference.md) - [Use Your AI Model and Video](./how-to-guides/use-your-ai-model-and-video.md) - [Change the Input Video Source](./how-to-guides/change-input-video-source.md) - [Scale Video Resolution](./how-to-guides/scale-video-resolution.md) @@ -27,6 +28,7 @@ This section collects guides for Pallet Defect Detection sample application. 
./how-to-guides/manage-pipelines ./how-to-guides/run-multiple-ai-pipelines ./how-to-guides/use-gpu-for-inference +./how-to-guides/use-npu-for-inference ./how-to-guides/use-your-ai-model-and-video ./how-to-guides/change-input-video-source ./how-to-guides/scale-video-resolution diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-balluff-sdk.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-balluff-sdk.md index fbccd9ed3d..eecb0919f5 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-balluff-sdk.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-balluff-sdk.md @@ -24,7 +24,7 @@ cd edge-ai-libraries/microservices/dlstreamer-pipeline-server Create a Docker file named `BalluffDockerfile` inside your `dlstreamer-pipeline-server` directory with the following content. 
```dockerfile -FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 USER root diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-pylon-sdk.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-pylon-sdk.md index 3588c044f6..f676f57a0b 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-pylon-sdk.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/integrate-pylon-sdk.md @@ -22,7 +22,7 @@ cd edge-ai-libraries/microservices/dlstreamer-pipeline-server Create a Docker file named `BaslerDockerfile` inside your `dlstreamer-pipeline-server` directory with the following content. ```dockerfile -FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 USER root diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/use-npu-for-inference.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/use-npu-for-inference.md new file mode 100644 index 0000000000..591e76ea97 --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pallet-defect-detection/how-to-guides/use-npu-for-inference.md @@ -0,0 +1,98 @@ +# How to use NPU for inference + +## Pre-requisites + +To take full advantage of hardware acceleration, pipelines can be designed so that different stages—such as decoding and inference—are executed on the most suitable hardware devices. 
+ +Low-power accelerators like a Neural Processing Unit (NPU) can offload neural network computation from the CPU or GPU, enabling more efficient resource utilization and improved overall system performance. + +DLStreamer and the DLStreamer Pipeline Server support inference on NPU devices, allowing applications built on these frameworks to leverage NPU acceleration for improved efficiency and performance. + +Before running inference on an NPU, ensure that: +- The host system includes a supported NPU device +- The required NPU drivers are installed and properly configured + +For detailed setup instructions, refer to the [documentation](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/dev_guide/advanced_install/advanced_install_guide_prerequisites.html#optional-prerequisite-2-install-intel-npu-drivers). + + For containerized applications, the following additional changes are required. + +### Provide NPU access to the container + +This can be done by making the following changes to the docker compose file. + +```yaml +services: + dlstreamer-pipeline-server: + group_add: + # render group ID for ubuntu 22.04 host OS + - "110" + # render group ID for ubuntu 24.04 host OS + - "992" + devices: + # you can add specific devices in case you don't want to provide access to all like below. + - "/dev:/dev" +``` +The changes above add the container user to the `render` group and provide access to the NPU devices. + +### Hardware specific encoder/decoders + +Unlike the changes done for the container above, the following requires a modification to the media pipeline itself. + +GStreamer has a variety of hardware-specific encoder and decoder elements, such as Intel specific VA-API elements, that you can benefit from by adding them into your media pipeline. Examples of such elements are `vah264dec`, `vah264enc`, `vajpegdec`, `vajpegenc`, etc.
+ +Additionally, one can also enforce zero-copy of buffers using GStreamer caps (capabilities) to the pipeline by adding `video/x-raw(memory:VAMemory)` for Intel NPUs. + +Read DL Streamer [docs](https://dlstreamer.github.io/dev_guide/gpu_device_selection.html) for more details. + +### NPU specific element properties + +DL Streamer inference elements also provide properties such as `device=NPU` and `pre-process-backend=va` which should be used in pipelines with NPU memory. The `va` pre-process backend performs mapping to the system memory and uses the VA pre-processor. Read DL Streamer [docs](https://dlstreamer.github.io/dev_guide/model_preparation.html#model-pre-and-post-processing) for more. + +## Tutorial on how to use NPU specific pipelines + +> Note - This sample application already provides a default `docker-compose.yml` file that includes the necessary NPU access to the containers. + +The pipeline `pallet_defect_detection_npu` in `pipeline-server-config.json` contains NPU specific elements and uses NPU backend for inferencing. Follow the steps below to run the pipeline. + +### Steps + +1. Ensure that the sample application is up and running. If not, follow the steps [here](../get-started.md#set-up-the-application) to set up the application and then bring the services up + + >If you're running multiple instances of the app, start the services using `./run.sh up` instead. + + ```sh + docker compose up -d + ``` +2. Start the pipeline. + ```sh + ./sample_start.sh -p pallet_defect_detection_npu + ``` + + This will start the pipeline. The inference stream can be viewed on WebRTC, in a browser, at the following url: + + >If you're running multiple instances of the app, ensure to provide `NGINX_HTTPS_PORT` number in the url for the app instance i.e.
replace `<HOST_IP>` with `<HOST_IP>:<NGINX_HTTPS_PORT>`: + + ```bash + https://<HOST_IP>/mediamtx/pdd/ + ``` + +## Deploying with Helm + +### Intel GPU K8S Extension + +If you're deploying an NPU-based pipeline (example: with VA elements like `vapostproc`, `vah264dec` etc., and/or with `device=NPU` in `gvadetect` in `dlstreamer_pipeline_server_config.json`) with Intel GPU k8s Extension, ensure to set the below details in the file `helm/values.yaml` appropriately in order to utilize the underlying NPU. + +```yaml +gpu: + enabled: true + type: "gpu.intel.com/i915" + count: 1 +``` + +### Without Intel GPU K8S Extension + +If you're deploying an NPU-based pipeline (example: with VA elements like `vapostproc`, `vah264dec` etc., and/or with `device=NPU` in `gvadetect` in `dlstreamer_pipeline_server_config.json`) without Intel GPU k8s Extension, ensure to set the below details in the file `helm/values.yaml` appropriately in order to utilize the underlying NPU. + +```yaml +privileged_access_required: true +``` diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-multiple-instances-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-multiple-instances-with-helm.md index 8c4d3f4184..94e3d0ad72 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-multiple-instances-with-helm.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-multiple-instances-with-helm.md @@ -58,6 +58,7 @@ MTX_WEBRTCICESERVERS2_0_USERNAME= # WebRTC credentials e.g. intel1234 MTX_WEBRTCICESERVERS2_0_PASSWORD= ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values_<app-name>.yaml` file to enable access to host hardware devices. 4.
Install pre-requisites for all instances diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-with-helm.md index 9daab1e06b..fb989d1354 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-with-helm.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/pcb-anomaly-detection/get-started/deploy-with-helm.md @@ -23,6 +23,7 @@ ```sh cp helm/values_pcb-anomaly-detection.yaml helm/values.yaml ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values.yaml` file to enable access to host hardware devices. 3. Optional: Pull the helm chart and replace the existing helm folder with it @@ -30,10 +31,10 @@ - Download helm chart with the following command - `helm pull oci://registry-1.docker.io/intel/pcb-anomaly-detection --version 1.2.0-rc1` + `helm pull oci://registry-1.docker.io/intel/pcb-anomaly-detection --version 1.2.0-rc2` - unzip the package using the following command - `tar -xvf pcb-anomaly-detection-1.2.0-rc1.tgz` + `tar -xvf pcb-anomaly-detection-1.2.0-rc2.tgz` - Replace the helm directory `rm -rf helm && mv pcb-anomaly-detection helm` diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-multiple-instances-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-multiple-instances-with-helm.md index ba1732aedd..61d4b010fd 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-multiple-instances-with-helm.md +++ 
b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-multiple-instances-with-helm.md @@ -58,6 +58,7 @@ MTX_WEBRTCICESERVERS2_0_USERNAME= # WebRTC credentials e.g. intel1234 MTX_WEBRTCICESERVERS2_0_PASSWORD= ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values_.yaml` file to enable access to host hardware devices. 4. Install pre-requisites for all instances diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-with-helm.md index 698465ee71..cc2353c721 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-with-helm.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/weld-porosity/get-started/deploy-with-helm.md @@ -23,16 +23,17 @@ ```sh cp helm/values_weld-porosity.yaml helm/values.yaml ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values.yaml` file to enable access to host hardware devices. 3. 
Optional: Pull the helm chart and replace the existing helm folder with it - Note: The helm chart should be downloaded when you are not using the helm chart provided in `edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-vision/helm` - Download helm chart with the following command - `helm pull oci://registry-1.docker.io/intel/weld-porosity-sample-application --version 1.4.0-rc1` + `helm pull oci://registry-1.docker.io/intel/weld-porosity-sample-application --version 1.4.0-rc2` - unzip the package using the following command - `tar -xvf weld-porosity-sample-application-1.4.0-rc1.tgz` + `tar -xvf weld-porosity-sample-application-1.4.0-rc2.tgz` - Replace the helm directory `rm -rf helm && mv weld-porosity-sample-application helm` diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-multiple-instances-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-multiple-instances-with-helm.md index 913d970941..d352447086 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-multiple-instances-with-helm.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-multiple-instances-with-helm.md @@ -58,6 +58,7 @@ MTX_WEBRTCICESERVERS2_0_USERNAME= # WebRTC credentials e.g. intel1234 MTX_WEBRTCICESERVERS2_0_PASSWORD= ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values_.yaml` file to enable access to host hardware devices. 4. Install pre-requisites for all instances @@ -820,7 +821,7 @@ Applications can take advantage of S3 publish feature from DL Streamer Pipeline >NOTE- For sake of simplicity, we assume that the new model has already been downloaded by Model Download microservice. 
The following curl command is only a simulation that just downloads the model. In production, however, they will be downloaded by the Model Download service. ```sh - export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/worker-safety-gear-detection.zip' + export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/worker-safety-gear-detection.zip' curl -L "$MODEL_URL" -o "$(basename $MODEL_URL)" diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-with-helm.md index 8e02bce451..623708a75c 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-with-helm.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/get-started/deploy-with-helm.md @@ -21,15 +21,17 @@ ```sh cp helm/values_worker-safety-gear-detection.yaml helm/values.yaml ``` + > **Note:** For GPU/NPU based pipelines, set `privileged_access_required: true` in the `helm/values.yaml` file to enable access to host hardware devices. + 3. 
Optional: Pull the helm chart and replace the existing helm folder with it - Note: The helm chart should be downloaded when you are not using the helm chart provided in `edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-vision/helm` - Download helm chart with the following command - `helm pull oci://registry-1.docker.io/intel/worker-safety-gear-detection --version 1.2.0-rc1` + `helm pull oci://registry-1.docker.io/intel/worker-safety-gear-detection --version 1.2.0-rc2` - unzip the package using the following command - `tar -xvf worker-safety-gear-detection-1.2.0-rc1.tgz` + `tar -xvf worker-safety-gear-detection-1.2.0-rc2.tgz` - Replace the helm directory `rm -rf helm && mv worker-safety-gear-detection helm` @@ -392,7 +394,7 @@ Applications can take advantage of S3 publish feature from DL Streamer Pipeline >NOTE- For sake of simplicity, we assume that the new model has already been downloaded by Model Download microservice. The following curl command is only a simulation that just downloads the model. In production, however, they will be downloaded by the Model Download service. 
```sh - export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/worker-safety-gear-detection.zip' + export MODEL_URL='https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/worker-safety-gear-detection.zip' curl -L "$MODEL_URL" -o "$(basename $MODEL_URL)" diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides.md index ae46490d3b..32e0333351 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides.md @@ -6,6 +6,7 @@ This section collects guides for Worker Safety Gear Detection sample application - [Manage pipelines](./how-to-guides/manage-pipelines.md) - [Run multiple AI pipelines](./how-to-guides/run-multiple-ai-pipelines.md) - [Use GPU For Inference](./how-to-guides/use-gpu-for-inference.md) +- [Use NPU For Inference](./how-to-guides/use-npu-for-inference.md) - [Use Your AI Model and Video](./how-to-guides/use-your-ai-model-and-video.md) - [Change the Input Video Source](./how-to-guides/change-input-video-source.md) - [Scale Video Resolution](./how-to-guides/scale-video-resolution.md) @@ -27,6 +28,7 @@ This section collects guides for Worker Safety Gear Detection sample application ./how-to-guides/manage-pipelines ./how-to-guides/run-multiple-ai-pipelines ./how-to-guides/use-gpu-for-inference +./how-to-guides/use-npu-for-inference ./how-to-guides/use-your-ai-model-and-video ./how-to-guides/change-input-video-source ./how-to-guides/scale-video-resolution diff --git 
a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-balluff-sdk.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-balluff-sdk.md index 2216adc6df..cf9a97e467 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-balluff-sdk.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-balluff-sdk.md @@ -29,7 +29,7 @@ cd edge-ai-libraries/microservices/dlstreamer-pipeline-server Create a Docker file named `BalluffDockerfile` inside your `dlstreamer-pipeline-server` directory with the following content. ```dockerfile -FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 USER root diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-pylon-sdk.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-pylon-sdk.md index ca428918a7..7c47e10557 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-pylon-sdk.md +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/integrate-pylon-sdk.md @@ -27,7 +27,7 @@ cd edge-ai-libraries/microservices/dlstreamer-pipeline-server Create a Docker file named `BaslerDockerfile` inside your `dlstreamer-pipeline-server` directory with the following content. 
```dockerfile -FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +FROM intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 USER root diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/use-npu-for-inference.md b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/use-npu-for-inference.md new file mode 100644 index 0000000000..59962d714c --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/docs/user-guide/worker-safety-gear-detection/how-to-guides/use-npu-for-inference.md @@ -0,0 +1,98 @@ +# How to use NPU for inference + +## Pre-requisites + +To take full advantage of hardware acceleration, pipelines can be designed so that different stages—such as decoding and inference—are executed on the most suitable hardware devices. + +Low-power accelerators like a Neural Processing Unit (NPU) can offload neural network computation from the CPU or GPU, enabling more efficient resource utilization and improved overall system performance. + +DLStreamer and the DLStreamer Pipeline Server support inference on NPU devices, allowing applications built on these frameworks to leverage NPU acceleration for improved efficiency and performance. + +Before running inference on an NPU, ensure that: +- The host system includes a supported NPU device +- The required NPU drivers are installed and properly configured + +For detailed setup instructions, refer to the [documentation](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/dev_guide/advanced_install/advanced_install_guide_prerequisites.html#optional-prerequisite-2-install-intel-npu-drivers). + + For containerized application, following additional changes are required. + +### Provide NPU access to the container + +This can be done by making the following changes to the docker compose file. 
+ +```yaml +services: + dlstreamer-pipeline-server: + group_add: + # render group ID for ubuntu 22.04 host OS + - "110" + # render group ID for ubuntu 24.04 host OS + - "992" + devices: + # you can add specific devices in case you don't want to provide access to all like below. + - "/dev:/dev" +``` +The changes above adds the container user to the `render` group and provides access to the NPU devices. + +### Hardware specific encoder/decoders + +Unlike the changes done for the container above, the following requires a modification to the media pipeline itself. + +Gstreamer has a variety of hardware specific encoders and decoders elements such as Intel specific VA-API elements that you can benefit from by adding them into your media pipeline. Examples of such elements are `vah264dec`, `vah264enc`, `vajpegdec`, `vajpegdec`, etc. + +Additionally, one can also enforce zero-copy of buffers using GStreamer caps (capabilities) to the pipeline by adding `video/x-raw(memory: VAMemory)` for Intel NPUs. + +Read DL Streamer [docs](https://dlstreamer.github.io/dev_guide/gpu_device_selection.html) for more details. + +### NPU specific element properties + +DL Streamer inference elements also provides property such as `device=NPU` and `pre-process-backend=va` which should be used in pipelines with NPU memory. It performs mapping to the system memory and uses VA pre-processor. Read DL Streamer [docs](https://dlstreamer.github.io/dev_guide/model_preparation.html#model-pre-and-post-processing) for more. + +## Tutorial on how to use NPU specific pipelines + +> Note - This sample application already provides a default `docker-compose.yml` file that includes the necessary NPU access to the containers. + +The pipeline `worker_safety_gear_detection_npu` in `pipeline-server-config.json` contains NPU specific elements and uses NPU backend for inferencing. Follow the steps below to run the pipeline. + +### Steps + +1. Ensure that the sample application is up and running. 
If not, follow the steps [here](../get-started.md#set-up-the-application) to setup the application and then bring the services up + + >If you're running multiple instances of app, start the services using `./run.sh up` instead. + + ```sh + docker compose up -d + ``` +2. Start the pipeline. + ```sh + ./sample_start.sh -p worker_safety_gear_detection_npu + ``` + + This will start the pipeline. The inference stream can be viewed on WebRTC, in a browser, at the following url: + + >If you're running multiple instances of app, ensure to provide `NGINX_HTTPS_PORT` number in the url for the app instance i.e. replace with : + + ```bash + https:///mediamtx/worker_safety/ + ``` + +## Deploying with Helm + +### Intel GPU K8S Extension + +If you're deploying a NPU based pipeline (example: with VA elements like `vapostproc`, `vah264dec` etc., and/or with `device=NPU` in `gvadetect` in `dlstreamer_pipeline_server_config.json`) with Intel GPU k8s Extension, ensure to set the below details in the file `helm/values.yaml` appropriately in order to utilize the underlying NPU. + +```sh +gpu: + enabled: true + type: "gpu.intel.com/i915" + count: 1 +``` + +### Without Intel GPU K8S Extension + +If you're deploying a NPU based pipeline (example: with VA elements like `vapostproc`, `vah264dec` etc., and/or with `device=NPU` in `gvadetect` in `dlstreamer_pipeline_server_config.json`) without Intel GPU k8s Extension, ensure to set the below details in the file `helm/values.yaml` appropriately in order to utilize the underlying NPU. 
+ +```sh +privileged_access_required: true +``` diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pallet-defect-detection.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pallet-defect-detection.yaml index a163a9a6f9..6073d733e2 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pallet-defect-detection.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pallet-defect-detection.yaml @@ -17,10 +17,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 2.6.0-rc1 +version: 2.6.0-rc2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "2.6.0-rc1" \ No newline at end of file +appVersion: "2.6.0-rc2" \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pcb-anomaly-detection.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pcb-anomaly-detection.yaml index 5aee623440..7fa3d0fc4c 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pcb-anomaly-detection.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-pcb-anomaly-detection.yaml @@ -17,10 +17,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.2.0-rc1 +version: 1.2.0-rc2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.2.0-rc1" \ No newline at end of file +appVersion: "1.2.0-rc2" \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-weld-porosity.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-weld-porosity.yaml index c90d14a1ee..70ceb4df79 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-weld-porosity.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-weld-porosity.yaml @@ -17,10 +17,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.4.0-rc1 +version: 1.4.0-rc2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "1.4.0-rc1" \ No newline at end of file +appVersion: "1.4.0-rc2" \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-worker-safety-gear-detection.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-worker-safety-gear-detection.yaml index 91d0cf7115..8b27d6b327 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-worker-safety-gear-detection.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart-worker-safety-gear-detection.yaml @@ -17,10 +17,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.2.0-rc1 +version: 1.2.0-rc2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "1.2.0-rc1" \ No newline at end of file +appVersion: "1.2.0-rc2" \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart.yaml index a163a9a6f9..6073d733e2 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/Chart.yaml @@ -17,10 +17,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. 
# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 2.6.0-rc1 +version: 2.6.0-rc2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "2.6.0-rc1" \ No newline at end of file +appVersion: "2.6.0-rc2" \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/pallet-defect-detection/setup.sh b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/pallet-defect-detection/setup.sh index c8599133a7..b18dec7739 100755 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/pallet-defect-detection/setup.sh +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/pallet-defect-detection/setup.sh @@ -3,7 +3,7 @@ # Download artifacts for a specific sample application # by calling respective app's setup.sh script SCRIPT_DIR=$(dirname $(readlink -f "$0")) -MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/pallet_defect_detection.zip" +MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/pallet_defect_detection.zip" VIDEO_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/c13b8dbf23d514c2667d39b66615bd1400cb889d/videos/warehouse.avi" err() { diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/pipeline-server-config.json b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/pipeline-server-config.json index 5354510639..ff84648e83 100644 --- 
a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/pipeline-server-config.json +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/pipeline-server-config.json @@ -5,7 +5,7 @@ "name": "worker_safety_gear_detection_mlops", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "multifilesrc loop=TRUE location=/home/pipeline-server/resources/videos/Safety_Full_Hat_and_Vest.avi name=source ! h264parse ! decodebin3 ! gvadetect name=detection ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "multifilesrc loop=TRUE location=/home/pipeline-server/resources/videos/Safety_Full_Hat_and_Vest.avi name=source ! h264parse ! decodebin3 ! gvadetect name=detection threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -23,7 +23,7 @@ "name": "worker_safety_gear_detection_mqtt", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -41,7 +41,7 @@ "name": "worker_safety_gear_detection_s3write", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 ! 
gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -64,7 +64,7 @@ "name": "worker_safety_gear_detection_opcua", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect name=detection model-instance-id=inst0 threshold=0.4 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark displ-cfg=font-scale=1.0,draw-txt-bg=false ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -86,7 +86,7 @@ "name": "worker_safety_gear_detection_gpu", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=GPU model-instance-id=instgpu0 inference-region=full-frame inference-interval=1 batch-size=8 nireq=2 ie-config=\"GPU_THROUGHPUT_STREAMS=2\" threshold=0.7 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=GPU model-instance-id=instgpu0 inference-region=full-frame inference-interval=1 batch-size=8 nireq=2 ie-config=\"GPU_THROUGHPUT_STREAMS=2\" threshold=0.4 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! 
appsink name=destination", "parameters": { "type": "object", "properties": { @@ -104,7 +104,7 @@ "name": "worker_safety_gear_detection_npu", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=NPU model-instance-id=instnpu0 inference-region=full-frame inference-interval=1 batch-size=1 nireq=4 threshold=0.7 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=NPU model-instance-id=instnpu0 inference-region=full-frame inference-interval=1 batch-size=1 nireq=4 threshold=0.4 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", "parameters": { "type": "object", "properties": { @@ -122,7 +122,7 @@ "name": "worker_safety_gear_detection", "source": "gstreamer", "queue_maxsize": 50, - "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=CPU model-instance-id=inst0 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! appsink name=destination", + "pipeline": "{auto_source} name=source ! decodebin3 ! gvadetect device=CPU model-instance-id=inst0 threshold=0.4 name=detection ! queue ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvafpscounter ! 
appsink name=destination", "parameters": { "type": "object", "properties": { diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/setup.sh b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/setup.sh index a404d96043..8b60e4ed3a 100755 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/setup.sh +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/apps/worker-safety-gear-detection/setup.sh @@ -3,7 +3,7 @@ # Download artifacts for a specific sample application # by calling respective app's setup.sh script SCRIPT_DIR=$(dirname $(readlink -f "$0")) -MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/1d40dfe1791d44e8cf6e8472c28c034e40fa508d/models/INT8/worker-safety-gear-detection.zip" +MODEL_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/06bb0d621cb14a1791672552a538beddddcc4066/models/INT8/worker-safety-gear-detection.zip" VIDEO_URL="https://github.com/open-edge-platform/edge-ai-resources/raw/edd25f37c324a9ef73df1642354b2ba5fa7b7df5/videos/Safety_Full_Hat_and_Vest.avi" err() { diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values.yaml index b115060230..2fb7f586a1 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values.yaml @@ -42,7 +42,7 @@ webrtcturnserver: password: images: minio: minio/minio:RELEASE.2020-12-12T08-39-07Z - dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 + dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 nginx: nginx:1.27-alpine mqtt_broker: eclipse-mosquitto:latest config: diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pallet-defect-detection.yaml 
b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pallet-defect-detection.yaml index 480456e789..897a1b0ea6 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pallet-defect-detection.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pallet-defect-detection.yaml @@ -42,7 +42,7 @@ webrtcturnserver: password: images: minio: minio/minio:RELEASE.2020-12-12T08-39-07Z - dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 + dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 nginx: nginx:1.27-alpine mqtt_broker: eclipse-mosquitto:latest config: diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pcb-anomaly-detection.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pcb-anomaly-detection.yaml index 3cb578a1c1..cec9d32f6c 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pcb-anomaly-detection.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_pcb-anomaly-detection.yaml @@ -42,7 +42,7 @@ webrtcturnserver: password: images: minio: minio/minio:RELEASE.2020-12-12T08-39-07Z - dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 + dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 nginx: nginx:1.27-alpine mqtt_broker: eclipse-mosquitto:latest config: diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_weld-porosity.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_weld-porosity.yaml index 71f5138be8..39d8e78fa8 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_weld-porosity.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_weld-porosity.yaml @@ -42,7 +42,7 @@ webrtcturnserver: password: images: minio: minio/minio:RELEASE.2020-12-12T08-39-07Z - 
dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 + dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 nginx: nginx:1.27-alpine mqtt_broker: eclipse-mosquitto:latest config: diff --git a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_worker-safety-gear-detection.yaml b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_worker-safety-gear-detection.yaml index 879e8e159b..1816838e55 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_worker-safety-gear-detection.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-vision/helm/values_worker-safety-gear-detection.yaml @@ -42,7 +42,7 @@ webrtcturnserver: password: images: minio: minio/minio:RELEASE.2020-12-12T08-39-07Z - dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 + dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 nginx: nginx:1.27-alpine mqtt_broker: eclipse-mosquitto:latest config: diff --git a/metro-ai-suite/image-based-video-search/.env b/metro-ai-suite/image-based-video-search/.env index 9d6a80470a..ee8054516b 100644 --- a/metro-ai-suite/image-based-video-search/.env +++ b/metro-ai-suite/image-based-video-search/.env @@ -1,4 +1,4 @@ # General DOCKER_REGISTRY= # DL Streamer Pipeline Server -DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 \ No newline at end of file +DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 \ No newline at end of file diff --git a/metro-ai-suite/image-based-video-search/chart/Chart.yaml b/metro-ai-suite/image-based-video-search/chart/Chart.yaml index a2f0372231..fd803121fb 100644 --- a/metro-ai-suite/image-based-video-search/chart/Chart.yaml +++ b/metro-ai-suite/image-based-video-search/chart/Chart.yaml @@ -10,9 +10,9 @@ name: image-based-video-search apiVersion: v2 # Version of the chart (required) -version: 
1.2.0-rc1 +version: 1.2.0-rc2 # Version of the application (required). # This should be the main application version. -appVersion: 1.2.0-rc1 +appVersion: 1.2.0-rc2 description: A Helm chart for ibvs diff --git a/metro-ai-suite/image-based-video-search/chart/templates/dlstreamer-pipeline-server/deployment.yaml b/metro-ai-suite/image-based-video-search/chart/templates/dlstreamer-pipeline-server/deployment.yaml index f4db10a987..63ec3f0641 100644 --- a/metro-ai-suite/image-based-video-search/chart/templates/dlstreamer-pipeline-server/deployment.yaml +++ b/metro-ai-suite/image-based-video-search/chart/templates/dlstreamer-pipeline-server/deployment.yaml @@ -58,7 +58,7 @@ spec: - name: '{{ include "image_based_video_search.fullname" . }}-dlstreamer-pipeline-server-models' mountPath: /output - name: model-downloader - image: intel/dlstreamer:2026.0.0-ubuntu24-rc1 + image: intel/dlstreamer:2026.0.0-ubuntu24-rc2 securityContext: allowPrivilegeEscalation: false runAsNonRoot: true diff --git a/metro-ai-suite/image-based-video-search/chart/values.yaml b/metro-ai-suite/image-based-video-search/chart/values.yaml index 5ae933b74a..51fe18bf3b 100644 --- a/metro-ai-suite/image-based-video-search/chart/values.yaml +++ b/metro-ai-suite/image-based-video-search/chart/values.yaml @@ -89,7 +89,7 @@ dlstreamerpipelineserver: # key: dlstreamerpipelineserver.repository.image image: docker.io/intel/dlstreamer-pipeline-server # key: dlstreamerpipelineserver.repository.tag - tag: 2026.0.0-ubuntu24-rc1 + tag: 2026.0.0-ubuntu24-rc2 # key: dlstreamerpipelineserver.replicas replicas: 1 # key: dlstreamerpipelineserver.nodeSelector @@ -263,7 +263,7 @@ streaming: # key: streaming.repository.image image: docker.io/intel/streaming-pipeline # key: streaming.repository.tag - tag: v1.2.0-rc1 + tag: v1.2.0-rc2 # key: streaming.replicas replicas: 1 # key: streaming.nodeSelector @@ -306,7 +306,7 @@ app: # key: app.repository.image image: docker.io/intel/image-based-video-search # key: app.repository.tag - 
tag: v1.2.0-rc1 + tag: v1.2.0-rc2 # key: app.replicas replicas: 1 # key: app.nodeSelector @@ -363,7 +363,7 @@ featurematching: # key: featurematching.repository.image image: docker.io/intel/feature-matching # key: featurematching.repository.tag - tag: v1.2.0-rc1 + tag: v1.2.0-rc2 # key: featurematching.replicas replicas: 1 # key: featurematching.nodeSelector diff --git a/metro-ai-suite/image-based-video-search/compose.yml b/metro-ai-suite/image-based-video-search/compose.yml index 17ec763839..bc6717258f 100644 --- a/metro-ai-suite/image-based-video-search/compose.yml +++ b/metro-ai-suite/image-based-video-search/compose.yml @@ -46,7 +46,7 @@ services: condition: service_healthy feature-matching: - image: ${DOCKER_REGISTRY}intel/feature-matching:v1.2.0-rc1 + image: ${DOCKER_REGISTRY}intel/feature-matching:v1.2.0-rc2 container_name: ibvs-featurematching build: context: src/feature-matching @@ -81,7 +81,7 @@ services: start_period: 10s restart: on-failure:5 streaming-pipeline: - image: ${DOCKER_REGISTRY}intel/streaming-pipeline:v1.2.0-rc1 + image: ${DOCKER_REGISTRY}intel/streaming-pipeline:v1.2.0-rc2 build: context: src/streaming-pipeline dockerfile: Dockerfile @@ -202,7 +202,7 @@ services: condition: service_started restart: on-failure:5 app: - image: ${DOCKER_REGISTRY}intel/image-based-video-search:v1.2.0-rc1 + image: ${DOCKER_REGISTRY}intel/image-based-video-search:v1.2.0-rc2 container_name: ibvs-app build: context: src/app diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md b/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md index 6a2b37e083..ee501a8f0c 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md @@ -65,7 +65,7 @@ By following this guide, you will learn how to: docker run --rm --user=root \ -e http_proxy -e https_proxy -e no_proxy \ -v "$MODELS_PATH:/output" \ - 
intel/dlstreamer:2026.0.0-ubuntu24-rc1 bash -c "$(cat < **Note:** Users can also create apps tailored to their use case using models supported by DLStreamer. +Check [the list of supported models](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/supported_models.html) for the latest information. + ## Software Requirements **Required Software**: diff --git a/metro-ai-suite/image-based-video-search/src/streaming-pipeline/Dockerfile b/metro-ai-suite/image-based-video-search/src/streaming-pipeline/Dockerfile index abd36000af..a0208b848e 100644 --- a/metro-ai-suite/image-based-video-search/src/streaming-pipeline/Dockerfile +++ b/metro-ai-suite/image-based-video-search/src/streaming-pipeline/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/intel/dlstreamer:2026.0.0-ubuntu24-rc1 +FROM docker.io/intel/dlstreamer:2026.0.0-ubuntu24-rc2 USER root # Pull Debian security fixes (addresses libpng/libpq/linux-libc-dev CVEs when available in node security) diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/.env b/metro-ai-suite/metro-vision-ai-app-recipe/.env index 90533b4aca..ef42b14319 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/.env +++ b/metro-ai-suite/metro-vision-ai-app-recipe/.env @@ -10,7 +10,7 @@ HOST_IP=0.0.0.0 # DL Streamer Pipeline Server -DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc1 +DLSTREAMER_PIPELINE_SERVER_IMAGE=intel/dlstreamer-pipeline-server:2026.0.0-ubuntu24-rc2 # This variable identifies the name of the application to be used. # It should match the name of the application directory in the metro-ai-suite. 
diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/deploy-with-helm.md b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/deploy-with-helm.md index dd65c881f1..6939578ec1 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/deploy-with-helm.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/deploy-with-helm.md @@ -51,10 +51,10 @@ Optional: Pull the helm chart and replace the existing helm-chart folder with it cd loitering-detection #Download helm chart with the following command -helm pull oci://registry-1.docker.io/intel/loitering-detection --version 1.4.0-rc1 +helm pull oci://registry-1.docker.io/intel/loitering-detection --version 1.4.0-rc2 #unzip the package using the following command -tar -xvf loitering-detection-1.4.0-rc1.tgz +tar -xvf loitering-detection-1.4.0-rc2.tgz #Replace the helm directory rm -rf helm-chart && mv loitering-detection helm-chart diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/system-requirements.md b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/system-requirements.md index 043e9f44c0..4c0c72bec9 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/system-requirements.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/docs/user-guide/get-started/system-requirements.md @@ -20,6 +20,17 @@ and run the application efficiently. 
| **Disk Space** | 128 GB SSD | 256 GB SSD | | **GPU/Accelerator** | Integrated GPU | Integrated/Discrete GPU | +### Validated Platforms +The pallet defect detection model for this sample app has been tested to work on the following platforms/XPU(s) + +| Product / Family | CPU | iGPU | NPU | dGPU | +|----------------------|-----------|------------|-----------|----------| +| Intel® Core™ Ultra Processors (Series 3, 2, 1), Intel® Core™ Processors Series 2, Intel® Core™ Processors (14th/13th/12th Gen) | ✓ | ✓ | ✓ | Intel(R) Arc(TM) A770, B580 | +| 4th Gen Intel® Xeon® Scalable Processors | ✓ | | | Intel(R) Arc(TM) A770, B580 | + +> **Note:** Users can also create apps tailored to their use case using models supported by DLStreamer. +Check [the list of supported models](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/supported_models.html) for the latest information. + ## Software Requirements **Required Software**: diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/Chart.yaml b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/Chart.yaml index 72c082b7a7..e9d6d39bec 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/Chart.yaml +++ b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -appVersion: 1.4.0-rc1 +appVersion: 1.4.0-rc2 description: A Helm chart for Loitering Detection Sample Application name: loitering-detection type: application -version: 1.4.0-rc1 +version: 1.4.0-rc2 diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/templates/dlstreamer-pipeline-server.yaml b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/templates/dlstreamer-pipeline-server.yaml index bdc77ceb48..e6f7109398 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/templates/dlstreamer-pipeline-server.yaml +++ 
b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/templates/dlstreamer-pipeline-server.yaml @@ -71,7 +71,7 @@ spec: - name: videos-volume mountPath: /tmp/videos - name: model-downloader - image: intel/dlstreamer:2026.0.0-ubuntu24-rc1 + image: intel/dlstreamer:2026.0.0-ubuntu24-rc2 securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/values.yaml b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/values.yaml index ead4d58bdf..3873afca7d 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/values.yaml +++ b/metro-ai-suite/metro-vision-ai-app-recipe/loitering-detection/helm-chart/values.yaml @@ -16,7 +16,7 @@ mediamtx: imageTag: 1.11.3 dlstreamer_pipeline_server: image: intel/dlstreamer-pipeline-server - imageTag: 2026.0.0-ubuntu24-rc1 + imageTag: 2026.0.0-ubuntu24-rc2 grafana: image: grafana/grafana imageTag: 11.5.4 diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/chart/Chart.yaml b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/chart/Chart.yaml index 953527374e..44b2489dfe 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/chart/Chart.yaml +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/chart/Chart.yaml @@ -4,5 +4,5 @@ description: A Helm chart for Smart Intersection application type: application -version: 1.18.0-rc1 -appVersion: "1.18.0-rc1" \ No newline at end of file +version: 1.18.0-rc2 +appVersion: "1.18.0-rc2" \ No newline at end of file diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started/deploy-with-helm.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started/deploy-with-helm.md index 7241063b1d..b7764c1b69 100644 --- 
a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started/deploy-with-helm.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started/deploy-with-helm.md @@ -47,10 +47,10 @@ cd edge-ai-suites/metro-ai-suite/metro-vision-ai-app-recipe/ cd smart-intersection # Download helm chart with the following command -helm pull oci://registry-1.docker.io/intel/smart-intersection --version 1.18.0-rc1 +helm pull oci://registry-1.docker.io/intel/smart-intersection --version 1.18.0-rc2 # unzip the package using the following command -tar -xvf smart-intersection-1.18.0-rc1.tgz +tar -xvf smart-intersection-1.18.0-rc2.tgz # Replace the helm directory rm -rf chart && mv smart-intersection chart diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/deploy-with-helm.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/deploy-with-helm.md index f92b564d4a..a960b4d76e 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/deploy-with-helm.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/deploy-with-helm.md @@ -45,10 +45,10 @@ Optional: Pull the Helm chart and replace the existing `helm-chart` folder with cd smart-parking #Download helm chart with the following command -helm pull oci://registry-1.docker.io/intel/smart-parking --version 1.4.0-rc1 +helm pull oci://registry-1.docker.io/intel/smart-parking --version 1.4.0-rc2 #unzip the package using the following command -tar -xvf smart-parking-1.4.0-rc1.tgz +tar -xvf smart-parking-1.4.0-rc2.tgz #Replace the helm directory rm -rf helm-chart && mv smart-parking helm-chart diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/system-requirements.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/system-requirements.md index 
96a6a4b428..aa1dd412b8 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/system-requirements.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/user-guide/get-started/system-requirements.md @@ -19,6 +19,17 @@ This section provides detailed hardware, software, and platform requirements to | **Disk Space** | 128 GB SSD | 256 GB SSD | | **GPU/Accelerator** | Integrated GPU | Integrated/Discrete GPU | +### Validated Platforms +The pallet defect detection model for this sample app has been tested to work on the following platforms/XPU(s) + +| Product / Family | CPU | iGPU | NPU | dGPU | +|----------------------|-----------|------------|-----------|----------| +| Intel® Core™ Ultra Processors (Series 3, 2, 1), Intel® Core™ Processors Series 2, Intel® Core™ Processors (14th/13th/12th Gen) | ✓ | ✓ | ✓ | Intel(R) Arc(TM) A770, B580 | +| 4th Gen Intel® Xeon® Scalable Processors | ✓ | | | Intel(R) Arc(TM) A770, B580 | + +> **Note:** Users can also create apps tailored to their use case using models supported by DLStreamer. +Check [the list of supported models](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/supported_models.html) for the latest information. 
+ ## Software Requirements **Required Software**: diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/Chart.yaml b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/Chart.yaml index 5278e721c0..ebca01e041 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/Chart.yaml +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -appVersion: 1.4.0-rc1 +appVersion: 1.4.0-rc2 description: A Helm chart for Smart Parking Sample Application name: smart-parking type: application -version: 1.4.0-rc1 +version: 1.4.0-rc2 diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/templates/dlstreamer-pipeline-server.yaml b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/templates/dlstreamer-pipeline-server.yaml index cadf35bca3..97b43d01de 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/templates/dlstreamer-pipeline-server.yaml +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/templates/dlstreamer-pipeline-server.yaml @@ -71,7 +71,7 @@ spec: - name: videos-volume mountPath: /tmp/videos - name: model-downloader - image: intel/dlstreamer:2026.0.0-ubuntu24-rc1 + image: intel/dlstreamer:2026.0.0-ubuntu24-rc2 securityContext: allowPrivilegeEscalation: false runAsNonRoot: true diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/values.yaml b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/values.yaml index bd0517bf94..4be19e0af2 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/values.yaml +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/helm-chart/values.yaml @@ -24,7 +24,7 @@ mediamtx: imageTag: 1.11.3 dlstreamer_pipeline_server: image: intel/dlstreamer-pipeline-server - imageTag: 2026.0.0-ubuntu24-rc1 + imageTag: 2026.0.0-ubuntu24-rc2 grafana: image: grafana/grafana 
imageTag: 11.5.4 diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/install.sh b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/install.sh index 6e3accf05c..b168f95273 100755 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/install.sh +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/install.sh @@ -3,7 +3,7 @@ docker run --rm --user=root \ -e http_proxy -e https_proxy -e no_proxy \ -v "$(dirname "$(readlink -f "$0")"):/opt/project" \ - intel/dlstreamer:2026.0.0-ubuntu24-rc1 bash -c "$(cat <