diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1142805a20..1088d135a1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -20,17 +20,6 @@ README.md @xwu2intel /education-ai-suite/smart-classroom/docs/ @avinash-palleti @unarayan @qianlongding @yunowo @BaoHuiling @xiaojingrich @open-edge-platform/open-edge-platform-docs-write /education-ai-suite/smart-classroom/README.md @avinash-palleti @unarayan @qianlongding @yunowo @BaoHuiling @xiaojingrich @open-edge-platform/open-edge-platform-docs-write -# ============================================================================ -# Health And Life Sciences AI Suite -# ============================================================================ - -# @xwu2intel added to keep the top ownership at the early development stage and the three-owner rule - remove once more owners are assigned -/health-and-life-sciences-ai-suite/ @sachinkaushik @avinash-palleti @xwu2intel -/health-and-life-sciences-ai-suite/README.md @sachinkaushik @avinash-palleti @xwu2intel @open-edge-platform/open-edge-platform-docs-write - -/health-and-life-sciences-ai-suite/**/docs/ @sachinkaushik @avinash-palleti @xwu2intel @open-edge-platform/open-edge-platform-docs-write -/health-and-life-sciences-ai-suite/**/README.md @sachinkaushik @avinash-palleti @xwu2intel @open-edge-platform/open-edge-platform-docs-write - # ============================================================================ # Manufacturing-AI Suite # ============================================================================ @@ -70,13 +59,8 @@ README.md @xwu2intel /metro-ai-suite/interactive-digital-avatar/docs/ @senhui2intel @Junyu-B @wzq112358 @myqi @xwu2intel @open-edge-platform/open-edge-platform-docs-write /metro-ai-suite/interactive-digital-avatar/README.md @senhui2intel @Junyu-B @wzq112358 @myqi @xwu2intel @open-edge-platform/open-edge-platform-docs-write -/metro-ai-suite/live-video-analysis/ @bharagha @yogeshmpandey @hteeyeoh 
-/metro-ai-suite/live-video-analysis/**/docs/ @bharagha @yogeshmpandey @hteeyeoh @open-edge-platform/open-edge-platform-docs-write -/metro-ai-suite/live-video-analysis/**/README.md @bharagha @yogeshmpandey @hteeyeoh @open-edge-platform/open-edge-platform-docs-write - # /metro-ai-suite/metro-sdk-manager/ no codeowners, the top codeowners apply -# inside vision ai recipe /metro-ai-suite/metro-vision-ai-app-recipe/docs/ @rrajore @vagheshp @xwu2intel @ajagadi1 @ArokiEdgard @open-edge-platform/open-edge-platform-docs-write /metro-ai-suite/metro-vision-ai-app-recipe/README.md @rrajore @vagheshp @xwu2intel @ajagadi1 @ArokiEdgard @open-edge-platform/open-edge-platform-docs-write @@ -91,11 +75,6 @@ README.md @xwu2intel /metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/ @rrajore @vagheshp @xwu2intel @ajagadi1 @tjanczak /metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/docs/ @rrajore @vagheshp @xwu2intel @ajagadi1 @tjanczak @open-edge-platform/open-edge-platform-docs-write /metro-ai-suite/metro-vision-ai-app-recipe/smart-parking/README.md @rrajore @vagheshp @xwu2intel @ajagadi1 @tjanczak @open-edge-platform/open-edge-platform-docs-write -# this may need to be confirmed: -/metro-ai-suite/metro-vision-ai-app-recipe/smart-tolling/ @rrajore @vagheshp @xwu2intel @ajagadi1 -/metro-ai-suite/metro-vision-ai-app-recipe/smart-tolling/docs/ @rrajore @vagheshp @xwu2intel @ajagadi1 @open-edge-platform/open-edge-platform-docs-write -/metro-ai-suite/metro-vision-ai-app-recipe/smart-tolling/README.md @rrajore @vagheshp @xwu2intel @ajagadi1 @open-edge-platform/open-edge-platform-docs-write - /metro-ai-suite/sensor-fusion-for-traffic-management/ @lijiunderstand @chaofanchen-intel @thegreatchaos @xwu2intel /metro-ai-suite/sensor-fusion-for-traffic-management/docs/ @lijiunderstand @chaofanchen-intel @thegreatchaos @xwu2intel @open-edge-platform/open-edge-platform-docs-write diff --git a/education-ai-suite/smart-classroom/ui/src/i18n/zh.json 
b/education-ai-suite/smart-classroom/ui/src/i18n/zh.json index d2a6971206..21bfe66199 100644 --- a/education-ai-suite/smart-classroom/ui/src/i18n/zh.json +++ b/education-ai-suite/smart-classroom/ui/src/i18n/zh.json @@ -16,7 +16,7 @@ "uploading": "正在上传转录…", "generatingSummary": "正在生成摘要", "generatingMindmap": "正在生成思维导图", - "streamingSummary": "正在传输摘要", + "streamingSummary": "正在传输摘要…", "loadingTranscript": "正在加载转录", "analyzingAudio": "正在分析音频", "summaryReady": "摘要准备就绪", @@ -162,8 +162,8 @@ }, "classStatistics": { "studentCount": "学生人数", - "standCount": "累计站立次数", - "raiseUpCount": "累计举手次数", + "standCount": "汇总站立次数", + "raiseUpCount": "汇总举手次数", "standReIdData": "站立 ReID 数据", "studentId": "学生 ID", "count": "次数" diff --git a/manufacturing-ai-suite/hmi-augmented-worker/README.md b/manufacturing-ai-suite/hmi-augmented-worker/README.md index f5d2f88909..cb6e4d5cac 100644 --- a/manufacturing-ai-suite/hmi-augmented-worker/README.md +++ b/manufacturing-ai-suite/hmi-augmented-worker/README.md @@ -16,7 +16,7 @@ The `HMI Augmented Worker` sample application show cases how RAG pipelines can b - [System Requirements](./docs/user-guide/get-started/system-requirements.md): Requirements include hardware and software to deploy the sample application. - **Advanced** - - [Build From Source](./docs/user-guide/get-started/build-from-source.md): Guide to build the file watcher service on Windows® OS and how it can be interfaced with RAG pipeline that executes on the Ubuntu or Edge Microvisor Toolkit side. + - [Build From Source](./docs/user-guide/how-to-build-from-source.md): Guide to build the file watcher service on Windows® OS and how it can be interfaced with RAG pipeline that executes on the Ubuntu or Edge Microvisor Toolkit side. - **Release Notes** - [Release Notes](./docs/user-guide/release-notes.md): Notes on the latest releases, updates, improvements, and bug fixes. 
diff --git a/manufacturing-ai-suite/hmi-augmented-worker/docs/toc.rst b/manufacturing-ai-suite/hmi-augmented-worker/docs/toc.rst new file mode 100644 index 0000000000..a8fb32670f --- /dev/null +++ b/manufacturing-ai-suite/hmi-augmented-worker/docs/toc.rst @@ -0,0 +1,3 @@ +.. toctree:: + + user-guide/index diff --git a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/get-started.md b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/get-started.md index 451978116d..e008e836ff 100644 --- a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/get-started.md +++ b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/get-started.md @@ -60,7 +60,7 @@ The File Watcher service will need to be built and deployed. There is no pre-bui provided currently. It runs in the Windows® VM and is responsible for watching the configured folder for new contents. The new contents are added to the RAG context database which is then used by the RAG pipeline when responding to user queries. Refer to -[Build File Watcher Service from Source](./get-started/build-from-source.md#build-file-watcher-service-from-source) to compile the file watcher service executable +[Build File Watcher Service from Source](./how-to-build-from-source.md#build-file-watcher-service-from-source) to compile the file watcher service executable binary from source. In addition to the File Watcher service, the WebUI interface to access the RAG functionality @@ -88,7 +88,7 @@ To use the application effectively, make sure that all the steps mentioned in th The File Watcher service must be compiled from source and deployed on the Windows® VM. It monitors a configured folder for new files and updates the RAG context database accordingly. - Refer to [Build File Watcher Service from Source](./get-started/build-from-source.md) for build + Refer to [Build File Watcher Service from Source](./how-to-build-from-source.md) for build instructions. 
Start the service on the Windows® VM after deployment. - Access the WebUI @@ -110,7 +110,7 @@ To use the application effectively, make sure that all the steps mentioned in th ## Advanced Setup -- [How to Build from Source and Deploy](./get-started/build-from-source.md): Guide to build the +- [How to Build from Source and Deploy](./how-to-build-from-source.md): Guide to build the sample application services from source and docker compose deployment ## Other Documentation @@ -125,8 +125,7 @@ To use the application effectively, make sure that all the steps mentioned in th :::{toctree} :hidden: -./get-started/system-requirements -./get-started/build-from-source +get-started/system-requirements ::: hide_directive--> diff --git a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/get-started/build-from-source.md b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/how-to-build-from-source.md similarity index 92% rename from manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/get-started/build-from-source.md rename to manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/how-to-build-from-source.md index c92fdb1af9..8b7b363196 100644 --- a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/get-started/build-from-source.md +++ b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/how-to-build-from-source.md @@ -1,6 +1,6 @@ -# Build from Source +# How to Build from Source -This document provides step-by-step instructions for building the `ChatQnA Core` sample application and File Watcher service from source. Refer to the [prerequisites section](../get-started.md/#prerequisites) in the guide to install the appropriate software dependencies. +This document provides step-by-step instructions for building the `ChatQnA Core` sample application and File Watcher service from source. Refer to the [prerequisites section](./get-started.md/#prerequisites) in the guide to install the appropriate software dependencies. 
## Build ChatQnA Core from Source @@ -10,6 +10,7 @@ For detailed instructions on building from source, visit the [Build from Source For docker compose deployment instructions, visit the [Running Application Container Guide](https://github.com/open-edge-platform/edge-ai-libraries/blob/main/sample-applications/chat-question-and-answer-core/docs/user-guide/build-from-source.md#running-the-application-container). + ## Build File Watcher Service from Source In the Windows® environment, the File Watcher Service works with the HMI application to continuously monitor file system activities such as creation, modification, and deletion. When it detects any changes, it sends the relevant file data over the network to the `ChatQnA Core` service for ingestion and contextual processing, supporting Retrieval-Augmented Generation (RAG) workflows. @@ -18,14 +19,13 @@ In the Windows® environment, the File Watcher Service works with the HMI applic - **Python Installer**: Visit the [official Python website](https://www.python.org/downloads/windows/). Select the latest version available under the "Python Releases for Windows" section. -- **Git \[OPTIONAL]**: Visit the [official GIT website](https://git-scm.com/download/win) to download the executable +- **Git [OPTIONAL]**: Visit the [official GIT website](https://git-scm.com/download/win) to download the executable ### Build File Watcher Service in Windows To build the File Watcher executable binary, follow these steps: 1. Clone and download the source code by either using Git clone or downloading the source code as a ZIP file directly from the [repository](https://github.com/open-edge-platform/edge-ai-suites). 
- ```bash git clone https://github.com/open-edge-platform/edge-ai-suites.git edge-ai-suites ``` @@ -51,7 +51,7 @@ To build the File Watcher executable binary, follow these steps: Once it's activated, the environment name appears in parentheses as follows: - ```text + ``` (venv_name) C:\Users\YourName\project> ``` @@ -73,23 +73,17 @@ To build the File Watcher executable binary, follow these steps: # Replace and to your network proxy and port number pip install -r requirements.txt --no-cache-dir --proxy : ``` - On Windows®, typically, proxy information can be fetched using the command, - ```sh netsh winhttp show proxy ``` - This will output one of the following two outputs: - - ```text + ```sh Current WinHTTP proxy settings: Direct access (no proxy server). ``` - or - - ```text + ```sh Current WinHTTP proxy settings: Proxy Server: : Bypass List: @@ -97,7 +91,7 @@ To build the File Watcher executable binary, follow these steps: 6. Set up Environment Variables using `.bat`. - To configure the file watcher service, you need to set up the environment variables using the [`set_env_vars.bat`](https://github.com/open-edge-platform/edge-ai-suites/blob/main/manufacturing-ai-suite/hmi-augmented-worker/file_watcher/set_env_vars.bat) file provided. Follow the steps below to ensure proper configuration: + To configure the file watcher service, you need to set up the environment variables using the [`set_env_vars.bat`](../../file_watcher/set_env_vars.bat) file provided. Follow the steps below to ensure proper configuration: - Open and edit the values for the variables with your corresponding setup. 
diff --git a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/index.md b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/index.md index 734d42667e..268ab5b932 100644 --- a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/index.md +++ b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/index.md @@ -108,6 +108,7 @@ You can also check the [system requirements](./get-started/system-requirements.m :hidden: get-started +how-to-build-from-source release-notes ::: diff --git a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/release-notes.md b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/release-notes.md index d50cc52091..b7151d3176 100644 --- a/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/release-notes.md +++ b/manufacturing-ai-suite/hmi-augmented-worker/docs/user-guide/release-notes.md @@ -1,7 +1,7 @@ # Release Notes -## Current Release +## Current Release **Version**: RC1 \ **Release Date**: 14 July 2025 diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/.gitignore b/manufacturing-ai-suite/industrial-edge-insights-multimodal/.gitignore index 18c6d51864..2a33658d2e 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/.gitignore +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/.gitignore @@ -5,5 +5,5 @@ helm/*.yaml helm/*.conf helm/*.sh helm/*.csv -helm/*.template + .venv/ diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/Makefile b/manufacturing-ai-suite/industrial-edge-insights-multimodal/Makefile index 91a3a6b688..1b0f267f97 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/Makefile +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/Makefile @@ -201,8 +201,6 @@ gen_helm_charts: @cp -f configs/nginx/nginx.conf helm/nginx.conf @cp -f configs/dlstreamer-pipeline-server/config.json helm/dlstreamer-pipeline-server.json @sed -i 's/"auto_start": true,/"auto_start": false,/' 
helm/dlstreamer-pipeline-server.json - @cp -f configs/seaweedfs-s3/seaweedfs_s3_config.json.template helm/seaweedfs_s3_config.json.template - @cp -f configs/seaweedfs-s3/s3-init-buckets.sh helm/s3-init-buckets.sh @sed -i "s/version: .*/version: ${version}/" "helm/Chart.yaml" @sed -i "s/appVersion: .*/appVersion: \"${version}\"/" "helm/Chart.yaml" @sed -i "s/weekly_build_date: .*/weekly_build_date: \"$(WEEKLY_BUILD_DATE)\"/" "helm/values.yaml" diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/README.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/README.md index af723309d9..d8217c0816 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/README.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/README.md @@ -17,9 +17,10 @@ Refer to the [How it works](./docs/user-guide/weld-defect-detection/index.md#how ## Learn More -- [How to build from source and deploy](./docs/user-guide/get-started/build-from-source.md): Guide to build from source and docker compose deployment. -- [How to deploy with Helm](./docs/user-guide/get-started/deploy-with-helm.md): Guide for deploying with Helm. -- [How to configure MQTT alerts](./docs/user-guide/how-to-guides/how-to-configure-alerts.md): Guide for configuring the MQTT alerts for the sample app. -- [How to update configuration](./docs/user-guide/how-to-guides/how-to-update-config.md): Guide for updating the configuration. -- [Troubleshooting](./docs/user-guide/troubleshooting.md): Troubleshooting information. -- [Release Notes](./docs/user-guide/release-notes.md): Information on the latest updates, improvements, and bug fixes. + - [How to build from source and deploy](./docs/user-guide/how-to-guides/how-to-build-from-source.md): Guide to build from source and docker compose deployment. + - [How to deploy with Helm](./docs/user-guide/how-to-guides/how-to-deploy-with-helm.md): Guide for deploying with Helm. 
+ - [How to configure MQTT alerts](./docs/user-guide/how-to-guides/how-to-configure-alerts.md): Guide for configuring the MQTT alerts for the sample app. + - [How to update configuration](./docs/user-guide/how-to-guides/how-to-update-config.md): Guide for updating the configuration. + - [Troubleshooting](./docs/user-guide/troubleshooting.md): Troubleshooting information. +- **Release Notes** + - [Release Notes](./docs/user-guide/release-notes.md): Information on the latest updates, improvements, and bug fixes. diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/configs/nginx/nginx.conf b/manufacturing-ai-suite/industrial-edge-insights-multimodal/configs/nginx/nginx.conf index 365699162b..a0f4a8f048 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/configs/nginx/nginx.conf +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/configs/nginx/nginx.conf @@ -57,35 +57,6 @@ http { rewrite ^/ts-api/(.*)$ /$1 break; } - # SeaweedFS filer server - location /image-store { - proxy_pass http://seaweedfs-filer:8888/; - sub_filter '/seaweedfsstatic/' '/image-store/seaweedfsstatic/'; - sub_filter_once off; - sub_filter_types text/html text/css text/javascript application/javascript; - - # Rewrite href and action attributes for links and forms - sub_filter 'href="/' 'href="/image-store/'; - sub_filter 'action="/' 'action="/image-store/'; - # Rewrite JavaScript URLs and fetch calls - sub_filter "fetch('/" "fetch('/image-store/"; - sub_filter 'fetch("/' 'fetch("/image-store/'; - sub_filter "location.href = '/" "location.href = '/image-store/"; - sub_filter 'location.href = "/' 'location.href = "/image-store/'; - sub_filter "window.location = '/" "window.location = '/image-store/"; - sub_filter 'window.location = "/' 'window.location = "/image-store/'; - - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_set_header X-Real-IP 
$remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_buffering off; - rewrite ^/image-store/(.*)$ /$1 break; - } - # DL Streamer Pipeline Server location /dsps-api/ { proxy_pass http://dlstreamer-pipeline-server:8080/; diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docker-compose.yml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docker-compose.yml index 437402d533..b081649f9d 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docker-compose.yml +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docker-compose.yml @@ -242,8 +242,8 @@ services: command: > /bin/sh -c "/usr/local/bin/nginx-cert-gen.sh && exec nginx -g 'daemon off;'" environment: - no_proxy: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,coturn,mediamtx,${no_proxy},${HOST_IP},seaweedfs-filer" - NO_PROXY: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,coturn,mediamtx,${no_proxy},${HOST_IP},seaweedfs-filer" + no_proxy: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,coturn,mediamtx,${no_proxy},${HOST_IP}" + NO_PROXY: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,coturn,mediamtx,${no_proxy},${HOST_IP}" MEDIAMTX_SERVER: mediamtx WHIP_SERVER_PORT: ${WHIP_SERVER_PORT} ports: @@ -264,7 +264,6 @@ services: - ia-mqtt-broker - mediamtx - coturn - - seaweedfs-filer ia-fusion-analytics: build: context: $PWD/fusion-analytics @@ -418,10 +417,6 @@ services: image: ${SEAWEEDFS_IMAGE} hostname: seaweedfs-master container_name: seaweedfs-master - read_only: true - user: "${TIMESERIES_UID}:${TIMESERIES_UID}" - security_opt: - - no-new-privileges networks: - timeseries_network volumes: @@ -435,10 +430,6 @@ services: image: ${SEAWEEDFS_IMAGE} hostname: seaweedfs-volume container_name: seaweedfs-volume - read_only: true - user: "${TIMESERIES_UID}:${TIMESERIES_UID}" - security_opt: - - no-new-privileges 
networks: - timeseries_network volumes: @@ -454,10 +445,8 @@ services: image: ${SEAWEEDFS_IMAGE} hostname: seaweedfs-filer container_name: seaweedfs-filer - read_only: true - user: "${TIMESERIES_UID}:${TIMESERIES_UID}" - security_opt: - - no-new-privileges + ports: + - "8887:8888" # Filer HTTP port networks: - timeseries_network volumes: @@ -481,10 +470,6 @@ services: image: ${SEAWEEDFS_IMAGE} hostname: seaweedfs-s3 container_name: seaweedfs-s3 - read_only: true - user: "${TIMESERIES_UID}:${TIMESERIES_UID}" - security_opt: - - no-new-privileges networks: - timeseries_network environment: @@ -513,7 +498,6 @@ services: volumes: - ./configs/seaweedfs-s3/seaweedfs_s3_config.json.template:/etc/seaweedfs/s3_config.json.template:ro - ./configs/seaweedfs-s3/s3-init-buckets.sh:/s3-init-buckets.sh:ro - - "vol_temp_seaweed_s3:/tmp" coturn: image: coturn/coturn:4.7.0 @@ -567,28 +551,13 @@ volumes: device: tmpfs seaweed_master_data: driver: local - driver_opts: - type: tmpfs - device: tmpfs seaweed_volume_data: driver: local - driver_opts: - type: tmpfs - device: tmpfs seaweed_filer_data: driver: local - driver_opts: - type: tmpfs - device: tmpfs vol_temp_seaweed_filer: name: "seaweed_filer_vol" driver: local driver_opts: type: tmpfs device: tmpfs - vol_temp_seaweed_s3: - name: "seaweed_s3_vol" - driver: local - driver_opts: - type: tmpfs - device: tmpfs diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started.md index db65f57753..d589a1ff0a 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started.md @@ -106,54 +106,52 @@ cd edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-multimodal 1. Get into the InfluxDB* container. 
- > **Note:** Use `kubectl exec -it -n -- /bin/bash` for the helm deployment - > where for \ replace with namespace name where the application was deployed and - > for \ replace with InfluxDB pod name. + > **Note:** Use `kubectl exec -it -n -- /bin/bash` for the helm deployment + > where for replace with namespace name where the application was deployed and + > for replace with InfluxDB pod name. - ```bash - docker exec -it ia-influxdb bash - ``` + ``` bash + docker exec -it ia-influxdb bash + ``` 2. Run following commands to see the data in InfluxDB*. - > **NOTE:** - > Please ignore the error message `There was an error writing history file: open /.influx_history: read-only file system` happening in the InfluxDB shell. - > This does not affect any functionality while working with the InfluxDB commands - - ``` bash - # For below command, the INFLUXDB_USERNAME and INFLUXDB_PASSWORD needs to be fetched from `.env` file - influx -username -password - use datain # database access - show measurements - # Run below query to check and output measurement processed - # by Time Series Analytics microservice - select * from "weld-sensor-anomaly-data" - - # Run below query to check and output measurement processed - # by DL Streamer pipeline server microservice - select * from "vision-weld-classification-results" - ``` + > **NOTE:** + > Please ignore the error message `There was an error writing history file: open /.influx_history: read-only file system` happening in the InfluxDB shell. 
+ > This does not affect any functionality while working with the InfluxDB commands + + ``` bash + # For below command, the INFLUXDB_USERNAME and INFLUXDB_PASSWORD needs to be fetched from `.env` file + influx -username -password + use datain # database access + show measurements + # Run below query to check and output measurement processed + # by Time Series Analytics microservice + select * from "weld-sensor-anomaly-data" + + # Run below query to check and output measurement processed + # by DL Streamer pipeline server microservice + select * from "vision-weld-classification-results" + ``` 3. Check the output in Grafana. - - Use link `https://:3000` to launch Grafana from browser (preferably, chrome browser) - - > **Note:** Use link `https://:30001` to launch Grafana from browser (preferably Chrome browser) for the Helm deployment + - Use link `https://:3000` to launch Grafana from browser (preferably, chrome browser) - - Login to the Grafana with values set for `VISUALIZER_GRAFANA_USER` and `VISUALIZER_GRAFANA_PASSWORD` - in `.env` file and select **Multimodal Weld Defect Detection Dashboard**. + - Login to the Grafana with values set for `VISUALIZER_GRAFANA_USER` and `VISUALIZER_GRAFANA_PASSWORD` + in `.env` file and select **Multimodal Weld Defect Detection Dashboard**. - ![Grafana login](./_assets/login_wt.png) + ![Grafana login](./_assets/login_wt.png) - - After login, click on Dashboard - ![Menu view](./_assets/dashboard.png) + - After login, click on Dashboard + ![Menu view](./_assets/dashboard.png) - - Select the `Multimodal Weld Defect Detection Dashboard`. - ![Multimodal Weld Defect Detection Dashboard](./_assets/grafana_dashboard_selection.png) + - Select the `Multimodal Weld Defect Detection Dashboard`. + ![Multimodal Weld Defect Detection Dashboard](./_assets/grafana_dashboard_selection.png) - - One will see the below output. + - One will see the below output. 
- ![Anomaly prediction for weld data](./_assets/anomaly_prediction.png) + ![Anomaly prediction for weld data](./_assets/anomaly_prediction.png) ## Bring down the sample app @@ -174,8 +172,8 @@ docker logs -f | grep -i error ## Advanced setup -- [How to build from source and deploy](./get-started/build-from-source.md): Guide to build from source and docker compose deployment -- [How to deploy with Helm](./get-started/deploy-with-helm.md): Guide for deploying with Helm. +- [How to build from source and deploy](./how-to-guides/how-to-build-from-source.md): Guide to build from source and docker compose deployment +- [How to deploy with Helm](./how-to-guides/how-to-deploy-with-helm.md): Guide for deploying with Helm. - [How to configure MQTT alerts](./how-to-guides/how-to-configure-alerts.md): Guide for configuring the MQTT alerts in the Time Series Analytics microservice - [How to update config](./how-to-guides/how-to-update-config.md): Guide updating configuration of Time Series Analytics Microservice. @@ -183,9 +181,7 @@ docker logs -f | grep -i error :::{toctree} :hidden: -./get-started/system-requirements -./get-started/build-from-source -./get-started/deploy-with-helm +get-started/system-requirements.md ::: hide_directive--> diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started/build-from-source.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started/build-from-source.md deleted file mode 100644 index d796ac4eff..0000000000 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started/build-from-source.md +++ /dev/null @@ -1,49 +0,0 @@ -# Build from source - -This guide provides step-by-step instructions for building the `Time Series Analytics` -microservice and `industrial-edge-insights-multimodal` Sample Application from source. 
-Follow the [prerequisites](../get-started.md#configure-docker) and ensure you understand the -[data flow explanation](../weld-defect-detection/index.md#data-flow-explanation) -before proceeding with the following steps. - -## Steps to Build from Source - -1. **Clone the source and build the `Time Series Analytics` microservice**. - - ```bash - git clone https://github.com/open-edge-platform/edge-ai-libraries.git - cd edge-ai-libraries/microservices/time-series-analytics/docker - - # build - docker compose build - ``` - - > **Note:** - > To include copyleft licensed sources when building the Docker image, use the below command: - > - > ```bash - > docker compose build --build-arg COPYLEFT_SOURCES=true - > ``` - -2. **Clone the source and build the sample app**. - - ```bash - git clone https://github.com/open-edge-platform/edge-ai-suites.git - cd edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-multimodal - - # build - make build # builds only data simulator and fusion analytics docker images - ``` - - > **Note:** - > To include copyleft licensed sources when building the Docker images, use the below command: - > - > ```bash - > make build_copyleft_sources - > ``` - -3. **Deploy with Docker compose and verify**. - - Follow the remaining steps/sections starting from - - [docker compose deployment](../get-started.md#deploy-with-docker-compose) diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides.md index 5c93d37ae8..6b4e21c191 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides.md @@ -2,6 +2,8 @@ This section collects guides for Time Series Analytics microservice and associated multimodal sample apps. 
+- [Build from source](./how-to-guides/how-to-build-from-source.md) +- [Deploy with Helm](./how-to-guides/how-to-deploy-with-helm.md) - [Configure Alerts](./how-to-guides/how-to-configure-alerts.md) - [Update Configuration](./how-to-guides/how-to-update-config.md) - [Access S3 Stored Images](./how-to-guides/how-to-access-s3-stored-images.md) @@ -10,9 +12,11 @@ This section collects guides for Time Series Analytics microservice and associat :::{toctree} :hidden: +Build from source <./how-to-guides/how-to-build-from-source.md> +Deploy with Helm <./how-to-guides/how-to-deploy-with-helm.md> Configure Alerts <./how-to-guides/how-to-configure-alerts.md> Update Configuration <./how-to-guides/how-to-update-config.md> Access S3 Stored Images <./how-to-guides/how-to-access-s3-stored-images.md> ::: -hide_directive--> +hide_directive--> \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-access-s3-stored-images.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-access-s3-stored-images.md index 9abdceb655..80d6c6fc65 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-access-s3-stored-images.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-access-s3-stored-images.md @@ -17,8 +17,8 @@ The DL Streamer Pipeline Server generates vision metadata for each processed fra ```bash docker exec -it ia-influxdb bash ``` - - > **NOTE:** + + > **NOTE:** > Use `kubectl exec -it -n -- /bin/bash` for the helm deployment > where for replace with namespace name where the application was deployed and > for replace with InfluxDB pod name. 
@@ -41,11 +41,9 @@ The DL Streamer Pipeline Server generates vision metadata for each processed fra Access the SeaweedFS Filer interface in your web browser: -```text -https://:3000/image-store/buckets/dlstreamer-pipeline-results/weld-defect-classification/ ``` - -> **Note:** Use link `https://:30001/image-store/buckets/dlstreamer-pipeline-results/weld-defect-classification/` to access the SeaweedFS Filer interface for the Helm deployment. +http://:8887/buckets/dlstreamer-pipeline-results/weld-defect-classification/ +``` Images are organized by their `img_handle` identifier. Browse the directory to locate specific images, then click to view the image. @@ -54,7 +52,6 @@ Images are organized by their `img_handle` identifier. Browse the directory to l Follow these steps to correlate detection events in InfluxDB with stored images: 1. Query InfluxDB to retrieve vision metadata: - ```sql SELECT * FROM "vision-weld-classification-results" ``` @@ -62,11 +59,8 @@ Follow these steps to correlate detection events in InfluxDB with stored images: 2. Note the `img_handle` from the query results (e.g., `X7TINNVPNX`). 3. Navigate to the Filer interface: - - ```text + ``` http://:8887/buckets/dlstreamer-pipeline-results/weld-defect-classification/ ``` 4. Locate and open the file matching the `img_handle` (e.g., `X7TINNVPNX.jpg`). - -> **Note:** All data stored in SeaweedFS and InfluxDB is non-persistent and will be lost on container/pod restart. 
diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-build-from-source.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-build-from-source.md new file mode 100644 index 0000000000..b57a4c99bd --- /dev/null +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-build-from-source.md @@ -0,0 +1,49 @@ +# Build from source + +This guide provides step-by-step instructions for building the `Time Series Analytics` +microservice and `industrial-edge-insights-multimodal` Sample Application from source. +Follow the [prerequisites](../get-started.md#configure-docker) and ensure you understand the +[data flow explanation](../weld-defect-detection/index.md#data-flow-explanation) +before proceeding with the following steps. + +## Steps to Build from Source + +1. **Clone the source and build the `Time Series Analytics` microservice**. + + ```bash + git clone https://github.com/open-edge-platform/edge-ai-libraries.git + cd edge-ai-libraries/microservices/time-series-analytics/docker + + # build + docker compose build + ``` + + > **Note:** + > To include copyleft licensed sources when building the Docker image, use the below command: + > + > ```bash + > docker compose build --build-arg COPYLEFT_SOURCES=true + > ``` + +2. **Clone the source and build the sample app**. + + ```bash + git clone https://github.com/open-edge-platform/edge-ai-suites.git + cd edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-multimodal + + # build + make build # builds only data simulator and fusion analytics docker images + ``` + + > **Note:** + > To include copyleft licensed sources when building the Docker images, use the below command: + > + > ```bash + > make build_copyleft_sources + > ``` + +3. **Deploy with Docker compose and verify**. 
+ + Follow the remaining steps/sections starting from + + [docker compose deployment](../get-started.md#deploy-with-docker-compose) diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-configure-alerts.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-configure-alerts.md index 2898a561ef..aab6c44cf3 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-configure-alerts.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-configure-alerts.md @@ -1,4 +1,4 @@ -# Configure Alerts +# Configure Alerts in Time Series Analytics Microservice This section provides instructions for setting up alerts in **Time Series Analytics Microservice**. @@ -86,7 +86,6 @@ kubectl get pods -n multimodal-sample-app | grep mqtt-broker ``` - Use the pod name from the output of the above command to subscribe to all topics: - ```bash kubectl exec -it -n multimodal-sample-app -- mosquitto_sub -h localhost -v -t '#' -p 1883 ``` diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started/deploy-with-helm.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-deploy-with-helm.md similarity index 90% rename from manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started/deploy-with-helm.md rename to manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-deploy-with-helm.md index 1d001987eb..a9b8504fba 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/get-started/deploy-with-helm.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-deploy-with-helm.md @@ -67,8 +67,6 @@ You can either generate or download the Helm charts. 
MTX_WEBRTCICESERVERS2_0_USERNAME: MTX_WEBRTCICESERVERS2_0_PASSWORD: HOST_IP: # IP address of server where DL Streamer Pipeline Server is running - S3_STORAGE_USERNAME: - S3_STORAGE_PASSWORD: ``` ## Step 3: Install Helm charts @@ -122,16 +120,16 @@ this sample application in Kubernetes environment: 1. The following udf package is placed in the repository under `edge-ai-suites/manufacturing-ai-suite/industrial-edge-insights-multimodal/configs/time-series-analytics-microservice`. - ```text - - time-series-analytics-microservice/ - - models/ - - weld_anomaly_detector.cb - - tick_scripts/ - - weld_anomaly_detector.tick - - udfs/ - - requirements.txt - - weld_anomaly_detector.py - ``` + > + > - time-series-analytics-microservice/ + > - models/ + > - weld_anomaly_detector.cb + > - tick_scripts/ + > - weld_anomaly_detector.tick + > - udfs/ + > - requirements.txt + > - weld_anomaly_detector.py + > 2. Copy your new UDF package to the `time-series-analytics-microservice` pod: @@ -150,6 +148,7 @@ this sample application in Kubernetes environment: ## Step 5: Activate the Pipeline and UDF Deployment Package + **DL Streamer Pipeline Server** You use a Client URL (cURL) command to start the pipeline. 
Start this pipeline with the @@ -162,16 +161,10 @@ curl -k https://localhost:30001/dsps-api/pipelines/user_defined_pipelines/weld_d "type": "mqtt", "topic": "vision_weld_defect_classification" }, - "frame": [{ - "type": "webrtc", - "peer-id": "samplestream" - }, - { - "type": "s3_write", - "bucket": "dlstreamer-pipeline-results", - "folder_prefix": "weld-defect-classification", - "block": false - }] + "frame": { + "type": "webrtc", + "peer-id": "samplestream" + } }, "parameters": { "classification-properties": { @@ -209,7 +202,7 @@ kubectl get all -n multimodal-sample-app # It may take a few minutes for all app ## Configure Alerts in Time Series Analytics Microservice -To configure alerts in Time Series Analytics Microservice, follow [the steps](../how-to-guides/how-to-configure-alerts.md#helm-deployment). +To configure alerts in Time Series Analytics Microservice, follow [the steps](./how-to-configure-alerts.md#helm-deployment). ## Troubleshooting diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-update-config.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-update-config.md index 541f7215c7..1a91bd449d 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-update-config.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/how-to-guides/how-to-update-config.md @@ -1,4 +1,4 @@ -# Update Configuration +# Update config in Time Series Analytics Microservice The Time Series Analytics Microservice provides an interactive Swagger UI at `https://:3000/ts-api/docs`. 
diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/release-notes/dec-2025.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/release-notes/dec-2025.md index 2a68b4ff32..781f641274 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/release-notes/dec-2025.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/release-notes/dec-2025.md @@ -11,9 +11,9 @@ showcasing a Vision and Time Series defect detection use case by detecting anoma ### Features -- Introduced a comprehensive sample application combining vision (video inspection) and + - Introduced a comprehensive sample application combining vision (video inspection) and time-series sensor analytics for industrial weld defect detection. -- Fusion analytics module integrates results from both modalities for improved anomaly detection. -- Weld data simulator generates synchronized video streams (RTSP) and time-series data (MQTT). + - Fusion analytics module integrates results from both modalities for improved anomaly detection. + - Weld data simulator generates synchronized video streams (RTSP) and time-series data (MQTT). -More details at [user guide](../../user-guide/index.md). +More details at [user-guide](../../user-guide/index.md). diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/troubleshooting.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/troubleshooting.md index 2dcbbf0544..90a88afa5c 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/troubleshooting.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/troubleshooting.md @@ -1,4 +1,4 @@ -# Troubleshooting +# Troubleshoot Guide This article contains troubleshooting steps for known issues. 
If you encounter any problems with the application not addressed here, check the [GitHub Issues](https://github.com/open-edge-platform/edge-ai-suites/issues) diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/weld-defect-detection/index.md b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/weld-defect-detection/index.md index 84bd0ae004..aa8693cda9 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/weld-defect-detection/index.md +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/docs/user-guide/weld-defect-detection/index.md @@ -32,6 +32,8 @@ The Weld Data Simulator uses sets of time synchronized .avi and .csv files from It ingests the .avi files as RTSP streams via the **mediamtx** server. This enables real-time video ingestion, simulating camera feeds for weld defect detection. Similarly, it ingests the .csv files as data points into **Telegraf** using the **MQTT** protocol. +--- + #### 2. **Analytics Modules** ##### 2.1 **DL Streamer Pipeline Server** @@ -69,6 +71,8 @@ defect classification model, publishes the frame metadata results over MQTT and | `frame.type` | The protocol type for streaming video frames. | `"webrtc"` | | `frame.peer-id` | Unique identifier for the WebRTC peer connection. | `"samplestream"` | +--- + ##### 2.2 **Time Series Analytics Microservice** **Time Series Analytics Microservice** uses **Kapacitor** - a real-time data processing engine that enables users to analyze time series data. It reads the weld sensor data points point by point coming from **Telegraf**, runs the ML CatBoost model to identify the anomalies, writes the results into configured measurement/table in **InfluxDB** and publishes anomalous data over MQTT. Also, publishes all the processed weld sensor data points over MQTT. @@ -90,6 +94,8 @@ The `udfs` section specifies the details of the UDFs used in the task. 
> **Note:** The maximum allowed size for `config.json` is 5 KB. +--- + **Alerts Configuration**: The `alerts` section defines the settings for alerting mechanisms, such as MQTT protocol. @@ -104,6 +110,7 @@ The `mqtt` section specifies the MQTT broker details for sending alerts. | `mqtt_broker_port` | The port number of the MQTT broker. | `1883` | | `name` | The name of the MQTT broker configuration. | `"my_mqtt_broker"` | + ###### **`udfs/`** Contains the python script to process the incoming data. @@ -112,6 +119,7 @@ detect anomalous weld data points using sensor data. **Note**: Please note, CatBoost models don't run on Intel GPUs. + ###### **`tick_scripts/`** The TICKScript `weld_anomaly_detector.tick` determines processing of the input data coming in. @@ -123,6 +131,8 @@ By default, it is configured to publish the alerts to **MQTT**. The `weld_anomaly_detector.cb` is a model built using the CatBoostClassifier Algo of CatBoost ML library. +--- + ##### 2.3 **Fusion Analytics** **Fusion Analytics** subscribes to the MQTT topics coming out of `DL Streamer Pipeline Server` and `Time Series Analytics Microservice`, applies `AND`/`OR` logic to determine the anomalies during weld process, publishes the results over MQTT and writes the results as a measurement/table in **InfluxDB** @@ -135,6 +145,8 @@ library. **Grafana** provides an intuitive user interface for visualizing time series data stored in **InfluxDB** and also rendering the output of `DL Streamer Pipeline Server` coming as WebRTC stream. Additionally, it visualizes the fusion analytics results stored in **InfluxDB**. +--- + ## Next Steps Refer to the detailed instructions in [Get Started](../get-started.md). 
diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/dlstreamer-pipeline-server.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/dlstreamer-pipeline-server.yaml index 61ae4e9fd6..463a5adbff 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/dlstreamer-pipeline-server.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/dlstreamer-pipeline-server.yaml @@ -111,21 +111,21 @@ spec: - name: REST_SERVER_PORT value: "8080" - name: no_proxy - value: localhost,127.0.0.1,.intel.com,{{ $.Values.env.RTSP_CAMERA_IP }},{{ $.Values.env.HOST_IP }},otel-collector,mediamtx,ia-mqtt-broker,{{ $.Values.config.seaweedfs_s3.name }} + value: localhost,127.0.0.1,.intel.com,{{ $.Values.env.RTSP_CAMERA_IP }},{{ $.Values.env.HOST_IP }},otel-collector,mediamtx,ia-mqtt-broker - name: NO_PROXY - value: localhost,127.0.0.1,.intel.com,{{ $.Values.env.RTSP_CAMERA_IP }},{{ $.Values.env.HOST_IP }},otel-collector,mediamtx,ia-mqtt-broker,{{ $.Values.config.seaweedfs_s3.name }} + value: localhost,127.0.0.1,.intel.com,{{ $.Values.env.RTSP_CAMERA_IP }},{{ $.Values.env.HOST_IP }},otel-collector,mediamtx,ia-mqtt-broker - name: http_proxy value: {{ $.Values.env.HTTP_PROXY }} - name: https_proxy value: {{ $.Values.env.HTTPS_PROXY }} - name: S3_STORAGE_HOST - value: "{{ $.Values.config.seaweedfs_s3.name }}" + value: "{{ $.Values.env.HOST_IP }}" - name: S3_STORAGE_PORT - value: "8333" + value: "{{ $.Values.env.S3_STORAGE_PORT }}" - name: S3_STORAGE_USER - value: "{{ $.Values.env.S3_STORAGE_USERNAME }}" + value: "{{ $.Values.env.MINIO_ACCESS_KEY }}" - name: S3_STORAGE_PASS - value: "{{ $.Values.env.S3_STORAGE_PASSWORD }}" + value: "{{ $.Values.env.MINIO_SECRET_KEY }}" - name: ENABLE_OPEN_TELEMETRY value: "{{ $.Values.env.ENABLE_OPEN_TELEMETRY }}" - name: OTEL_COLLECTOR_HOST diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/nginx.yaml 
b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/nginx.yaml index eb71605d67..9e04a5c7bd 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/nginx.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/nginx.yaml @@ -57,9 +57,9 @@ spec: command: ["/bin/bash", "-c", "/usr/local/bin/nginx-cert-gen.sh && exec nginx -g 'daemon off;'"] env: - name: no_proxy - value: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,{{ .Values.env.timeseries_no_proxy }},coturn,mediamtx,{{ .Values.env.HOST_IP }},dlstreamer-pipeline-server,seaweedfs-filer" + value: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,{{ .Values.env.timeseries_no_proxy }},coturn,mediamtx,{{ .Values.env.HOST_IP }},dlstreamer-pipeline-server" - name: NO_PROXY - value: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,{{ .Values.env.timeseries_no_proxy }},coturn,mediamtx,{{ .Values.env.HOST_IP }},dlstreamer-pipeline-server,seaweedfs-filer" + value: "ia-grafana,ia-time-series-analytics-microservice,ia-mqtt-broker,{{ .Values.env.timeseries_no_proxy }},coturn,mediamtx,{{ .Values.env.HOST_IP }},dlstreamer-pipeline-server" - name: HOST_IP value: "{{ .Values.env.HOST_IP }}" - name: MEDIAMTX_SERVER diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/provision-configmap.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/provision-configmap.yaml index bc71dd7b24..f2e3f8427a 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/provision-configmap.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/provision-configmap.yaml @@ -110,22 +110,4 @@ metadata: data: config.json: |- {{ .Files.Get "dlstreamer-pipeline-server.json" | indent 4 }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.namespace }} - name: seaweedfs-s3-init -data: - 
s3-init-buckets.sh: |- -{{ .Files.Get "s3-init-buckets.sh" | indent 4 }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.namespace }} - name: seaweedfs-s3-config-template -data: - seaweedfs_s3_config.json.template: |- -{{ .Files.Get "seaweedfs_s3_config.json.template" | indent 4 }} --- \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-filer.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-filer.yaml deleted file mode 100644 index 3250bd83cd..0000000000 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-filer.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# -# Apache v2 license -# Copyright (C) 2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.config.seaweedfs_filer.name }} - namespace: {{ .Values.namespace }} -spec: - type: ClusterIP - ports: - - name: filer-http-port - port: 8888 - - name: filer-grpc-port - port: 18888 - selector: - app: seaweedfs-filer ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: seaweedfs-filer - name: deployment-seaweedfs-filer - namespace: {{ .Values.namespace }} - -spec: - selector: - matchLabels: - app: seaweedfs-filer - template: - metadata: - labels: - app: seaweedfs-filer - spec: - securityContext: - fsGroup: {{ .Values.env.TIMESERIES_UID | int }} - runAsUser: {{ .Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ .Values.env.TIMESERIES_UID | int }} - containers: - - name: seaweedfs-filer - image: {{ .Values.images.seaweedfs_image }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - runAsUser: {{ $.Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ $.Values.env.TIMESERIES_UID | int }} - runAsNonRoot: true - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - 
volumeMounts: - - name: vol-temp-seaweed-filer - mountPath: /tmp - - name: vol-seaweed-filer-data - mountPath: /data - args: ["filer", "-master={{ .Values.config.seaweedfs_master.name }}:9333", "-ip.bind=0.0.0.0"] - env: - - name: NO_PROXY - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - - name: no_proxy - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - volumes: - - name: vol-temp-seaweed-filer - emptyDir: {} - - name: vol-seaweed-filer-data - emptyDir: {} \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-master.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-master.yaml deleted file mode 100644 index 0ae8f13cee..0000000000 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-master.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# -# Apache v2 license -# Copyright (C) 2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.config.seaweedfs_master.name }} - namespace: {{ .Values.namespace }} -spec: - type: ClusterIP - selector: - app: seaweedfs-master - ports: - - port: 9333 - name: master-server-port - - port: 19333 - name: master-grpc-port ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: seaweedfs-master - name: deployment-seaweedfs-master - namespace: {{ .Values.namespace }} - -spec: - selector: - matchLabels: - app: seaweedfs-master - template: - metadata: - labels: - app: seaweedfs-master - spec: - securityContext: - fsGroup: {{ .Values.env.TIMESERIES_UID | int }} - runAsUser: {{ 
.Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ .Values.env.TIMESERIES_UID | int }} - containers: - - name: seaweedfs-master - image: {{ .Values.images.seaweedfs_image }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - runAsUser: {{ $.Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ $.Values.env.TIMESERIES_UID | int }} - runAsNonRoot: true - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - name: vol-seaweed-master-data - mountPath: /data - args: ["master", "-ip={{ .Values.config.seaweedfs_master.name }}", "-ip.bind=0.0.0.0"] - env: - - name: NO_PROXY - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - - name: no_proxy - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - volumes: - - name: vol-seaweed-master-data - emptyDir: {} \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-s3.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-s3.yaml deleted file mode 100644 index c33def0ecd..0000000000 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-s3.yaml +++ /dev/null @@ -1,87 +0,0 @@ -# -# Apache v2 license -# Copyright (C) 2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.config.seaweedfs_s3.name }} - namespace: {{ .Values.namespace }} -spec: - type: ClusterIP - selector: - app: seaweedfs-s3 - ports: - - port: 8333 - name: s3-server-port ---- -apiVersion: apps/v1 -kind: Deployment -metadata: 
- labels: - app: seaweedfs-s3 - name: deployment-seaweedfs-s3 - namespace: {{ .Values.namespace }} - -spec: - selector: - matchLabels: - app: seaweedfs-s3 - template: - metadata: - labels: - app: seaweedfs-s3 - spec: - securityContext: - fsGroup: {{ .Values.env.TIMESERIES_UID | int }} - runAsUser: {{ .Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ .Values.env.TIMESERIES_UID | int }} - containers: - - name: seaweedfs-s3 - image: {{ .Values.images.seaweedfs_image }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - runAsUser: {{ $.Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ $.Values.env.TIMESERIES_UID | int }} - runAsNonRoot: true - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - name: seaweedfs-s3-config-template - mountPath: /etc/seaweedfs/s3_config.json.template - subPath: seaweedfs_s3_config.json.template - - name: seaweedfs-s3-init - mountPath: /usr/local/bin/s3-init-buckets.sh - subPath: s3-init-buckets.sh - - name: tmp-volume - mountPath: /tmp - command: ["/usr/local/bin/s3-init-buckets.sh"] - args: ["s3", "-filer={{ .Values.config.seaweedfs_filer.name }}:8888", "-ip.bind=0.0.0.0", "-config=/tmp/s3_config.json"] - env: - - name: NO_PROXY - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - - name: no_proxy - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - - name: DEFAULT_S3_BUCKETS - value: dlstreamer-pipeline-results - - name: S3_STORAGE_USER - value: {{ .Values.env.S3_STORAGE_USERNAME }} - - name: S3_STORAGE_PASS - value: {{ .Values.env.S3_STORAGE_PASSWORD }} - volumes: - - name: 
seaweedfs-s3-config-template - configMap: - name: seaweedfs-s3-config-template - - name: seaweedfs-s3-init - configMap: - name: seaweedfs-s3-init - defaultMode: 0755 - - name: tmp-volume - emptyDir: {} \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-volume.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-volume.yaml deleted file mode 100644 index e0ff3e2c20..0000000000 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/seaweedfs-volume.yaml +++ /dev/null @@ -1,69 +0,0 @@ -# -# Apache v2 license -# Copyright (C) 2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.config.seaweedfs_volume.name }} - namespace: {{ .Values.namespace }} -spec: - type: ClusterIP - selector: - app: seaweedfs-volume - ports: - - port: 8080 - name: volume-server-port - - port: 18080 - name: volume-grpc-port ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: seaweedfs-volume - name: deployment-seaweedfs-volume - namespace: {{ .Values.namespace }} - -spec: - selector: - matchLabels: - app: seaweedfs-volume - template: - metadata: - labels: - app: seaweedfs-volume - spec: - securityContext: - fsGroup: {{ .Values.env.TIMESERIES_UID | int }} - runAsUser: {{ .Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ .Values.env.TIMESERIES_UID | int }} - containers: - - name: seaweedfs-volume - image: {{ .Values.images.seaweedfs_image }} - imagePullPolicy: {{ .Values.imagePullPolicy }} - securityContext: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - runAsUser: {{ $.Values.env.TIMESERIES_UID | int }} - runAsGroup: {{ $.Values.env.TIMESERIES_UID | int }} - runAsNonRoot: true - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - volumeMounts: - - name: vol-seaweed-volume-data - mountPath: /data - args: ["volume", "-mserver={{ 
.Values.config.seaweedfs_master.name }}:9333", "-ip.bind=0.0.0.0", "-port=8080"] - env: - - name: NO_PROXY - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - - name: no_proxy - value: "localhost,127.0.0.1,{{ .Values.config.seaweedfs_master.name }},{{ .Values.config.seaweedfs_volume.name }},{{ .Values.config.seaweedfs_filer.name }},{{ .Values.config.seaweedfs_s3.name }},172.18.0.0/16" - volumes: - - name: vol-seaweed-volume-data - emptyDir: {} \ No newline at end of file diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/time-series-analytics-microservice.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/time-series-analytics-microservice.yaml index bfb766c811..0a0ba86328 100755 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/time-series-analytics-microservice.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/templates/time-series-analytics-microservice.yaml @@ -10,7 +10,7 @@ metadata: name: {{ .Values.config.time_series_analytics_microservice.name }} namespace: {{ .Values.namespace }} spec: - type: ClusterIP + type: NodePort ports: - port: {{ .Values.config.time_series_analytics_microservice.kapacitor_port }} name: kapacitor-port diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.schema.json b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.schema.json index b22c7971e5..0c836952f0 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.schema.json +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.schema.json @@ -52,20 +52,6 @@ "type": "string", "pattern": "^(?:(?:25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])$", "description": 
"Must be a valid IPv4 address (e.g., 192.168.1.1)." - }, - "S3_STORAGE_USERNAME": { - "type": "string", - "pattern": "^[A-Za-z]{5,}$", - "description": "At least 5 characters - only alphabets allowed" - }, - "S3_STORAGE_PASSWORD": { - "type": "string", - "pattern": "^[A-Za-z0-9]{10,}$", - "allOf": [ - { "pattern": ".*[0-9].*" }, - { "pattern": ".*[A-Za-z].*" } - ], - "description": "At least 10 alphanumeric, at least one digit." } } } diff --git a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.yaml b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.yaml index c530a9590d..610caca36a 100644 --- a/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.yaml +++ b/manufacturing-ai-suite/industrial-edge-insights-multimodal/helm/values.yaml @@ -73,10 +73,6 @@ env: MTX_WEBRTCICESERVERS2_0_PASSWORD: COTURN_UDP_PORT: 3478 - # SeaweedFS S3 Storage related config - S3_STORAGE_USERNAME: - S3_STORAGE_PASSWORD: - # Fusion Analytics config FUSION_MODE: OR TOLERANCE_NS: "50e6" @@ -96,7 +92,6 @@ images: weld_data_simulator_image: intel/ia-weld-data-simulator dlstreamer_pipeline_server: intel/dlstreamer-pipeline-server:2025.2.0-ubuntu24 fusion_analytics_image: intel/ia-multimodal-fusion-analytics - seaweedfs_image: chrislusf/seaweedfs:4.07 privileged_access_required: false config: influx_db_server: @@ -138,14 +133,7 @@ config: int: rest_api_port: "8080" rtsp_output_port: "8554" - seaweedfs_filer: - name: seaweedfs-filer - seaweedfs_master: - name: seaweedfs-master - seaweedfs_volume: - name: seaweedfs-volume - seaweedfs_s3: - name: seaweedfs-s3 + coturn: name: coturn int: diff --git a/metro-ai-suite/README.md b/metro-ai-suite/README.md index 7f1ed1768d..eecb6ace01 100644 --- a/metro-ai-suite/README.md +++ b/metro-ai-suite/README.md @@ -24,7 +24,6 @@ The Suite also provides a collection of visual analytics sample applications, us | [Image Search by 
Text](https://edgesoftwarecatalog.intel.com/details/?microserviceType=recipeµserviceNameForUrl=metro-ai-suite-image-search-by-text)| A reference implementation using multi-modal large language models to perform image search with text query. | [Link](https://edgesoftwarecatalog.intel.com/details/?microserviceType=recipeµserviceNameForUrl=metro-ai-suite-image-search-by-text) | |[Image Based Video Search](image-based-video-search) | Performs near real-time analysis and image-based search to detect and retrieve objects of interest in large video datasets. | [Link](https://docs.openedgeplatform.intel.com/dev/edge-ai-suites/image-based-video-search/index.html) | |[Visual Search Question and Answering](visual-search-question-and-answering) | A unified application that integrates a multi-modal search engine for image search with text query with a visual question and answering assistant. | [Link](https://docs.openedgeplatform.intel.com/dev/edge-ai-suites/visual-search-question-and-answering/index.html) | -|[Deterministic Threat Detection](deterministic-threat-detection) | A sample application that showcases Time-Sensitive Networking (TSN) to enable deterministic, low-latency transmission of AI-processed video and sensor data alongside best-effort traffic on a shared network. | [Link](https://github.com/open-edge-platform/edge-ai-suites/blob/main/metro-ai-suite/deterministic-threat-detection/docs/user-guide/get-started.md) | See the respective sample applications to learn more about using them in your application development as well as customizing them to meet your use case needs. 
diff --git a/metro-ai-suite/deterministic-threat-detection/README.md b/metro-ai-suite/deterministic-threat-detection/README.md deleted file mode 100644 index 0a671fbb07..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Deterministic Threat Detection with Time-Sensitive Networking (TSN) - -This project demonstrates a Time-Sensitive Networking (TSN) sample application for deterministic, low-latency delivery of AI-processed video and sensor data in a shared network with other traffic. - -## Overview - -This sample application showcases how TSN can be used to protect latency-sensitive AI and sensor workloads in industrial and edge AI deployments. It demonstrates: - -- Multi-camera video acquisition over Ethernet -- Precise time synchronization using **IEEE 802.1AS (gPTP)** -- End-to-end latency measurement using PTP timestamps -- AI inference on synchronized video frames -- MQTT-based data aggregation and visualization -- The impact of network congestion from best-effort background traffic -- Traffic protection using **IEEE 802.1Qbv (Time-Aware Shaper)** - -## Use Case - -The use case involves multiple RTSP cameras streaming video to edge compute nodes for AI inference. Simultaneously, a sensor data producer generates telemetry data. Both inference results and sensor data are published over MQTT. - -An aggregation node measures the end-to-end latency. By injecting background traffic and then enabling TSN features, the demonstration shows how TSN provides consistent and deterministic latency for critical data streams. - -## Getting Started - -For detailed instructions on how to set up the environment and run the demonstration, please refer to the user guide in the `docs/user-guide` directory. Start with [get-started.md](./docs/user-guide/get-started.md). 
diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/TSN-Network-Topology.svg b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/TSN-Network-Topology.svg deleted file mode 100644 index 29c8c98eec..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/TSN-Network-Topology.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - -
Machine 1
Camera 1 RTSP Capture + AI Inference
Machine 2
Camera 2 RTSP Capture + AI Inference
AXIS
RTSP Camera 2
MOXA
TSN Switch
AXIS
RTSP Camera 1
Machine 3
Sensor Data Producer (MQTT)
Machine 4
MQTT Aggregator + Visualization
Machine 5
Traffic Injector
\ No newline at end of file diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-time-aware-shaper-port-setting.png b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-time-aware-shaper-port-setting.png deleted file mode 100644 index 4ac315ed23..0000000000 Binary files a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-time-aware-shaper-port-setting.png and /dev/null differ diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-time-aware-shaper.png b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-time-aware-shaper.png deleted file mode 100644 index 4b2ee7c1cf..0000000000 Binary files a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-time-aware-shaper.png and /dev/null differ diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-vlan-configuration.png b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-vlan-configuration.png deleted file mode 100644 index 66cb66bb2d..0000000000 Binary files a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-vlan-configuration.png and /dev/null differ diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-vlan-port-configuration.png b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-vlan-port-configuration.png deleted file mode 100644 index 6105bfce96..0000000000 Binary files a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-vlan-port-configuration.png and /dev/null differ diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-webui.png b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-webui.png deleted file mode 100644 index a9fe52569c..0000000000 Binary files 
a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/moxa-webui.png and /dev/null differ diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/mqtt-data-aggregator-with-traffic.png b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/mqtt-data-aggregator-with-traffic.png deleted file mode 100644 index 2e94db28bf..0000000000 Binary files a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/mqtt-data-aggregator-with-traffic.png and /dev/null differ diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/mqtt-data-aggregator.png b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/mqtt-data-aggregator.png deleted file mode 100644 index 8d49d01524..0000000000 Binary files a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/_images/mqtt-data-aggregator.png and /dev/null differ diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/get-started.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/get-started.md deleted file mode 100644 index 1bae95fd06..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/get-started.md +++ /dev/null @@ -1,137 +0,0 @@ -# Getting Started - -This guide provides a streamlined path to setting up and running the Deterministic Threat Detection demonstration. It covers the essential prerequisites and the main steps to see the system in action. - -# Use Case - -This use case demonstrates how Time-Sensitive Networking (TSN) enables deterministic and reliable delivery of AI-processed video and sensor data in a shared Ethernet network carrying mixed traffic. - -Multiple Ethernet-connected RTSP cameras stream video to edge compute nodes where each frame is timestamped using a PTP-synchronized system clock and processed through an AI inference pipeline. In parallel, a simulated sensor data producer generates time-stamped telemetry data. 
Both video inference results and sensor data are published over MQTT to a centralized aggregation node. - -The aggregation node subscribes to all MQTT topics and measures end-to-end latency by comparing the frame or sensor generation timestamp with the message reception time. Since all devices share a common time reference through IEEE 802.1AS (gPTP), the measured latency accurately reflects network and processing delays. - -To evaluate the impact of network congestion, best-effort background traffic is intentionally injected using iperf. Without TSN traffic shaping, this background traffic interferes with critical video and sensor data, resulting in increased latency and jitter. - -The experiment then enables VLAN-based traffic separation and IEEE 802.1Qbv (Time-Aware Shaper) on a TSN-capable switch to prioritize critical traffic. With TSN enabled, the system demonstrates consistent and deterministic latency for video and sensor data, even in the presence of heavy background traffic. - -This use case validates how TSN can be used to protect latency-sensitive AI and sensor workloads in industrial and edge AI deployments. - ---- - -## Hardware Details - -- **AXIS RTSP Cameras**: Cameras that support RTSP streaming. -- **MOXA TSN Switch**: A switch that supports IEEE 802.1AS (PTP) and IEEE 802.1Qbv (Time-Aware Shaper). -- **Arrow Lake Machines**: Linux-based systems equipped with Intel i226 TSN-capable network cards. 
- ---- - -## Network Topology - -The experimental setup consists of: - -- **2 × [AXIS RTSP Camera P3265-LVE](https://www.axis.com/products/axis-p3265-lve)** -- **1 × [Moxa Managed Switch TSN-G5000 Series](https://www.moxa.com/getmedia/a0db0ef9-2741-4bad-91c6-1ec1827aca64/moxa-tsn-g5000-series-web-console-manual-v2.3.pdf)** -- **5 × Arrow Lake Linux Machines with `Intel i226` TSN network cards** - - ![TSN Network Topology](./_images/TSN-Network-Topology.svg) - -### Logical Roles - -| Machine | Role | -|------|------| -| Machine 1 | Camera 1 RTSP Capture + AI Inference | -| Machine 2 | Camera 2 RTSP Capture + AI Inference | -| Machine 3 | Sensor Data Producer (MQTT) | -| Machine 4 | MQTT Aggregator + Visualization | -| Machine 5 | Traffic Injector (`iperf`) | - -All machines are connected to the MOXA switch and synchronized using PTP. - ---- - -## Steps to Test the Use Case - -1. **Configure PTP on all machines**: Synchronize the system clocks of all machines to a common time reference using Precision Time Protocol (PTP). This is essential for accurate latency measurement. - - ```bash - sudo apt-get update - sudo apt-get install -y linuxptp git - git clone https://git.code.sf.net/p/linuxptp/code linuxptp - cd linuxptp - # Terminal 1: Run ptp4l to synchronize the PTP clock - sudo ptp4l -i enp1s0 -f configs/gPTP.cfg --step_threshold=1 -m -s - # Terminal 2: Run phc2sys to synchronize the system clock to the PTP clock - sudo phc2sys -s enp1s0 -c CLOCK_REALTIME --step_threshold=1 --transportSpecific=1 -w -m - ``` - Note: Make sure to replace `enp1s0` with the actual network interface name associated with the `i226` network card. - - For detailed instructions on configuring PTP, refer to the [PTP Configuration Guide](./how-to-configure-ptp.md). - -2. **Create VLAN on all machines**: Set up Virtual LANs (VLANs) to segregate network traffic, isolating critical data from best-effort traffic. 
- - Configure the VLAN on the MOXA as mentioned in the [MOXA VLAN Configuration Guide](./how-to-configure-vlan-on-moxa-switch.md) to assign vlan id on TSN switch. - - On the Arrow Lake machines, create VLAN interfaces corresponding to the VLAN IDs configured on the MOXA switch. - ```bash - sudo ip link add link enp1s0 name enp1s0.1 type vlan id 1 - sudo ip link set enp1s0.1 type vlan egress-qos-map 0:1 - sudo ifconfig enp1s0.1 192.168.127.31 up - - sudo ip link add link enp1s0 name enp1s0.3 type vlan id 3 - sudo ip link set enp1s0.3 type vlan egress-qos-map 0:3 - sudo ifconfig enp1s0.3 192.168.3.31 up - - sudo ip link add link enp1s0 name enp1s0.5 type vlan id 5 - sudo ip link set enp1s0.5 type vlan egress-qos-map 0:5 - sudo ifconfig enp1s0.5 192.168.5.31 up - ``` - Note: Make sure to replace `enp1s0` with the actual network interface name associated with the `i226` network card. - - For detailed instructions on creating VLANs on HOST machines, refer to the [HOST VLAN Configuration Guide](./how-to-create-vlan-on-all-machines.md). - -3. **Run RTSP Camera Capture and AI Inference**: Start the video pipeline on Machines 1 and 2. This involves capturing the RTSP stream, timestamping frames using the PTP-synchronized clock, and running AI inference on the video and publish the results over MQTT. - - For detailed instructions on running RTSP camera capture and AI inference, refer to the [RTSP Camera and AI Inference Guide](./how-to-run-rtsp-camera-and-ai-inference.md). - -4. **Run Sensor Data Producer**: On Machine 3, start the Python script that simulates a sensor generating and publishing timestamped data over MQTT. - - For detailed instructions on running the sensor data producer, refer to the [Sensor Data Producer Guide](./how-to-run-sensor-data-producer.md). - -5. **Run MQTT Aggregator and Visualization**: On Machine 4, launch the application that subscribes to the MQTT topics, calculates end-to-end latency, and displays it on a live dashboard. 
- - MQTT Data Aggregator - - For detailed instructions on running the MQTT aggregator and visualization, refer to the [MQTT Aggregator and Visualization Guide](./how-to-run-mqtt-aggregator-and-visualization.md). - -6. **Run Traffic Injector**: On Machine 5, use `iperf3` to generate high-volume background traffic to simulate network congestion. - - MQTT Data Aggregator With Traffic - For detailed instructions on running the traffic injector, refer to the [Traffic Injector Guide](./how-to-run-traffic-injector.md). - -7. **Enable TSN Traffic Shaping**: Configure the Time-Aware Shaper (IEEE 802.1Qbv) on the MOXA switch to prioritize the critical traffic from cameras and sensors, protecting it from the background traffic. - - MOXA Time Aware Shaper - - For detailed instructions on enabling TSN traffic shaping, refer to the [TSN Traffic Shaping Guide](./how-to-enable-tsn-traffic-shaping.md). - -8. **Analyze Results and Visualize Latency**: Observe the latency graphs on the MQTT Aggregator dashboard. With TSN enabled, the latency for critical traffic should remain low and deterministic, even with the `iperf` traffic running. - - diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-moxa-switch.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-moxa-switch.md deleted file mode 100644 index c5e87bd6f5..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-moxa-switch.md +++ /dev/null @@ -1,22 +0,0 @@ -# Configuring the MOXA TSN Switch - -## Overview - -The `MOXA TSN-G5000 Series` is a family of industrial Ethernet switches designed for Time-Sensitive Networking (TSN) applications. These switches combine traditional managed switch capabilities with advanced TSN features defined in IEEE 802.1 standards, making them ideal for real-time automation, industrial vision systems, and edge AI workloads where deterministic delivery of critical data is essential. 
- -In this sample application, the TSN-G5000 switch serves as the central network fabric connecting multiple RTSP cameras, edge processing nodes, sensor producers, and a traffic generator. It enables both traffic isolation and scheduled forwarding to guarantee consistent performance under mixed traffic conditions. - -## Accessing the Web Interface - -To configure the switch, you will need to access its web-based management interface. - -> **Note** -> On reset, the MOXA TSN switch will have the default IP address `192.168.127.253`. Make sure your computer is in the same subnet to access the web interface. The default username is `admin` and the default password is `moxa`. - -![MOXA Web UI](./_images/moxa-webui.png) - -## Further Reading - -For more detailed information, refer to the official user manual: - -- [Moxa Managed Switch TSN-G5000 Series User Manual](https://www.moxa.com/getmedia/a0db0ef9-2741-4bad-91c6-1ec1827aca64/moxa-tsn-g5000-series-web-console-manual-v2.3.pdf) diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-ptp.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-ptp.md deleted file mode 100644 index 7d4a046619..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-ptp.md +++ /dev/null @@ -1,44 +0,0 @@ -# PTP Time Synchronization (IEEE 802.1AS) - -## What is PTP? - -Precision Time Protocol (PTP) provides sub-microsecond time synchronization across Ethernet devices, enabling accurate latency measurements. - -## Install PTP Tools - -```bash -sudo apt-get update -sudo apt-get install -y linuxptp git -git clone https://git.code.sf.net/p/linuxptp/code linuxptp -cd linuxptp -``` - -## PTP Commands - -The TSN switch is configured to act as the PTP Grandmaster clock. On each Arrow Lake machine, execute the following command to synchronize the system clock using PTP. 
- -Note: Make sure to replace `enp1s0` with the actual network interface name associated with the `i226` network card. - -1. **Start the PTP daemon (`ptp4l`)** - - Start the `ptp4l` daemon on each machine, specifying the network interface (`enp1s0`) related to the i226 network on that machine and the gPTP configuration file. - - ```bash - sudo ptp4l -i enp1s0 -f configs/gPTP.cfg --step_threshold=1 -m -s - ``` - -2. **Synchronize the System Clock (`phc2sys`)** - - Synchronize the system clock with the PTP hardware clock (PHC). - - ```bash - sudo phc2sys -s enp1s0 -c CLOCK_REALTIME --step_threshold=1 --transportSpecific=1 -w -m - ``` - -3. **Verify Synchronization** - - Check the `phc2sys` output to ensure the offset is within acceptable limits (e.g., less than 50ns). The output should look similar to this: - - ``` - phc2sys[1234.567]: CLOCK_REALTIME phc offset 12345 s0 freq +0 delay 1234 - ``` diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-vlan-on-moxa-switch.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-vlan-on-moxa-switch.md deleted file mode 100644 index 8de13aafad..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-configure-vlan-on-moxa-switch.md +++ /dev/null @@ -1,19 +0,0 @@ -# MOXA Default Configuration - -Following the guide [how-to-configure-moxa-switch.md](./how-to-configure-moxa-switch.md), set the MOXA switch to its default configuration before proceeding with VLAN configuration. - -## MOXA Switch VLAN Configuration - -Identify the switch port for each connected machine. You will then configure each port to handle tagged VLAN traffic for the specific VLAN IDs the connected machine requires. - -For example if machine 1 is connected to port 2 of the MOXA switch, and machine 1 can handle the traffic on the VLAN ID 1, 3, and 5, then set the port 2 as tagged VLAN for VLAN ID 1, 3, and 5. -1. 
Access the MOXA switch web interface by entering the switch's IP address(`192.168.127.253`) in a web browser. -2. Log in with your credentials. -3. Navigate to the "`VLAN` > `IEEE 802.1Q`" section in the menu. -4. Create VLANs with IDs 1, 3, and 5 if they do not already exist. -![MOXA VLAN Creation](./_images/moxa-vlan-configuration.png) -5. Assign the appropriate ports to the VLANs as tagged ports by clicking edit (🖉) on the specific port. -![MOXA VLAN Port Configuration](./_images/moxa-vlan-port-configuration.png) -6. Apply the configuration and reboot the switch if necessary. -7. Verify the VLAN configuration by checking the port settings and ensuring that the correct VLANs are assigned. -Refer to the MOXA switch user manual for detailed instructions specific to your switch model. diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-create-vlan-on-all-machines.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-create-vlan-on-all-machines.md deleted file mode 100644 index 85008b53ab..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-create-vlan-on-all-machines.md +++ /dev/null @@ -1,23 +0,0 @@ -# Create VLAN on All Machines - -To create VLAN interfaces on all Arrow Lake machines, execute the following commands on each machine. This example creates VLANs with IDs 1, 3, and 5, which correspond to the VLAN configuration on the MOXA TSN switch. - -Note: Make sure to replace `enp1s0` with the actual network interface name associated with the `i226` network card. 
- -```bash -sudo ip link add link enp1s0 name enp1s0.1 type vlan id 1 -sudo ip link set enp1s0.1 type vlan egress-qos-map 0:1 -sudo ifconfig enp1s0.1 192.168.127.31 up - -sudo ip link add link enp1s0 name enp1s0.3 type vlan id 3 -sudo ip link set enp1s0.3 type vlan egress-qos-map 0:3 -sudo ifconfig enp1s0.3 192.168.3.31 up - -sudo ip link add link enp1s0 name enp1s0.5 type vlan id 5 -sudo ip link set enp1s0.5 type vlan egress-qos-map 0:5 -sudo ifconfig enp1s0.5 192.168.5.31 up -``` - -## Instruction to assign vlan id on TSN switch - -If you are using MOXA TSN switch, please follow the document [how-to-configure-vlan-on-moxa-switch.md](./how-to-configure-vlan-on-moxa-switch.md) to assign vlan id on TSN switch. diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-enable-tsn-traffic-shaping.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-enable-tsn-traffic-shaping.md deleted file mode 100644 index 6e72562432..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-enable-tsn-traffic-shaping.md +++ /dev/null @@ -1,29 +0,0 @@ -# How to Enable TSN Traffic Shaping (IEEE 802.1Qbv) - -Time-Sensitive Networking (TSN) traffic shaping, specifically the Time-Aware Shaper defined in IEEE 802.1Qbv, is a critical feature for achieving determinism in industrial networks. It allows you to create protected time slots on the network, ensuring that high-priority, time-critical data is not delayed by lower-priority traffic. - -This guide provides a general overview of how to configure the Time-Aware Shaper on the MOXA TSN-G5000 series switch. - -## Key Concepts - -* **Gate Control List (GCL)**: This is the core of the Time-Aware Shaper. The GCL is a schedule that defines which traffic queues are open or closed at specific points in time for a given port. 
-* **Time-Triggered Gates**: By opening and closing gates for different traffic classes according to a repeating cycle, you can guarantee that high-priority frames (like those from a camera or critical sensor) have exclusive access to the network medium. -* **Cycle Time**: The GCL operates on a repeating cycle, synchronized across all TSN-enabled devices on the network via gPTP (IEEE 802.1AS). - -## Configuration Steps (General Guide) - -Following are steps to configure the Time-Aware Shaper on a MOXA TSN switch. The exact steps may vary based on the switch model and firmware version, so refer to the official documentation for detailed instructions. - -1. **Access the Switch Web Interface**: Log in to the MOXA TSN switch web interface using the default IP address, username, and password. -2. **Navigate to TSN Configuration**: Layer 2 Switching > Time-Aware Shaper > Click edit for the port to which you want to configure - -![MOXA Time Aware Shaper](./_images/moxa-time-aware-shaper.png) - -3. **Enable Time-Aware Shaper**: Click add icon to add multiple Gate Control Lists (GCLs) as needed. - -Currently, we have three critical streams that need to be protected from interference by best-effort traffic. Each stream is operating at a rate of 30 frames per second (fps), meaning a new frame is generated approximately every 33ms. To accommodate all three streams, we will create a Gate Control List (GCL) with a cycle time of 100ms. Since the sensor data doesn't require the full 33ms for each frame, we estimate that approximately 90ms will be needed to handle all the critical traffic, leaving 10ms available for best-effort traffic. - -![MOXA Time Aware Port Shaper](./_images/moxa-time-aware-shaper-port-setting.png) - -> **Important Note** -> The configuration of the Gate Control List is highly specific to the application's requirements, including the number of streams, data packet sizes, and desired latency. The values used above are for illustrative purposes only. 
For precise instructions and advanced settings, please consult the official [Moxa TSN-G5000 Series User Manual](https://www.moxa.com/getmedia/a0db0ef9-2741-4bad-91c6-1ec1827aca64/moxa-tsn-g5000-series-web-console-manual-v2.3.pdf). diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-mqtt-aggregator-and-visualization.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-mqtt-aggregator-and-visualization.md deleted file mode 100644 index fa68ed45e8..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-mqtt-aggregator-and-visualization.md +++ /dev/null @@ -1,63 +0,0 @@ -# How to Run the MQTT Aggregator and Visualization - -This guide explains how to run the `mqtt_data_aggregator.py` script. This script subscribes to multiple MQTT topics, calculates the end-to-end latency of the messages, and visualizes the results in a real-time web-based dashboard. - -## Overview - -The `mqtt_data_aggregator.py` script is a powerful tool for monitoring the performance of your TSN setup. It is a Dash application that: -- Connects to multiple MQTT brokers simultaneously. -- Subscribes to specified topics to receive data from cameras and sensors. -- Calculates the latency by comparing the message reception time with the PTP timestamp in the payload. -- Plots the latency for each topic on a live graph. -- Provides a web interface to view the visualization. - -## Prerequisites - -Before running the script, ensure you have Python 3 and the required libraries installed. - -```bash -pip install dash paho-mqtt plotly -``` - -## Running the Script - -Navigate to the `deterministic-threat-detection/mqtt_data_aggregator` directory and run the script. You will need to provide the correct broker IP addresses for each topic. 
- -```bash -cd deterministic-threat-detection/mqtt_data_aggregator -python3 mqtt_data_aggregator.py --topic-brokers "tsn_demo/camera1/inference:" "tsn_demo/camera2/inference:" "sample/sensor/data:" -``` - -Replace ``, ``, and `` with the IP addresses of the machines running the respective MQTT brokers (or the machines generating the data). - -## Viewing the Dashboard - -Once the script is running, open a web browser and navigate to the following address: - -`http://:8050` - -![MQTT Data Aggregator](./_images/mqtt-data-aggregator.png) - - -Replace `` with the IP address of the machine where you are running the `mqtt_data_aggregator.py` script. You will see a real-time plot of the end-to-end latency for each data stream. - -## Command-Line Arguments - -The script can be customized with the following command-line arguments: - -| Argument | Description | Default Value | -|---|---|---| -| `--topic-brokers` | A list of topic and broker IP address pairs in the format `'topic:broker_ip'`. | `tsn_demo/camera1/inference:localhost`, `tsn_demo/camera2/inference:localhost`, `sample/sensor/data:localhost` | -| `--port` | The port number for the MQTT brokers. | `1883` | -| `--window-seconds`| The time window in seconds to display on the plot. | `2` | -| `--y-min` | The minimum value for the Y-axis. | `0` | -| `--y-max` | The default maximum value for the Y-axis. The plot will auto-scale if latency exceeds this. | `5` | -| `--dash-port` | The port for the Dash web interface. 
| `8050` | - -### Example - -To run the aggregator with a 5-second plot window and a default Y-axis max of 10 seconds, you would use: - -```bash -python3 mqtt_data_aggregator.py --topic-brokers "tsn_demo/camera1/inference:192.168.1.101" "tsn_demo/camera2/inference:192.168.1.102" "sample/sensor/data:192.168.1.103" --window-seconds 5 --y-max 10 -``` diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-rtsp-camera-and-ai-inference.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-rtsp-camera-and-ai-inference.md deleted file mode 100644 index cce706ee4a..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-rtsp-camera-and-ai-inference.md +++ /dev/null @@ -1,86 +0,0 @@ -# Run RTSP Camera Capture and AI Inference - -This guide explains how to set up and run the RTSP camera capture and AI inference pipeline. - -## Download AI Model and Resources - -First, download the necessary AI models and supporting libraries. - -```bash -git clone https://github.com/open-edge-platform/edge-ai-libraries.git -cd edge-ai-libraries/microservices/dlstreamer-pipeline-server -wget -c https://github.com/open-edge-platform/edge-ai-resources/raw/a7c9522f5f936c47de8922046db7d7add13f93a0/models/INT8/pallet_defect_detection.zip -unzip -q pallet_defect_detection.zip -d models/ -cd docker -``` - -## Configure Environment - -Before running the services, you need to configure the environment variables. - -### Update .env file -Set the `MQTT_PORT` in the `.env` file. If you are behind a proxy, configure the proxy settings as well. - -``` -MQTT_PORT=1883 -# http_proxy=... -# https_proxy=... -``` - -### Copy Configuration Files -Copy the `ptp_frame_timestamp.py` and `config.json` from the `deterministic-threat-detection` module to the current docker directory. - -```bash -cp edge-ai-suites/metro-ai-suite/deterministic-threat-detection/rtsp_camera_stream/ptp_frame_timestamp.py . 
-cp edge-ai-suites/metro-ai-suite/deterministic-threat-detection/rtsp_camera_stream/config.json . -``` - -Note: Make sure to update the RTSP camera ``, ``, and `` in the `config.json` file before proceeding. Also, add the RTSP camera IP to the no_proxy environment variable if you are behind a proxy. - -## Update Docker Compose File - -Comment the existing resources folder mapping and add volume mappings to the `docker-compose.yml` file to make the custom script and configuration available to the `dlstreamer-pipeline-server` container. - -```yaml -services: - dlstreamer-pipeline-server: - # ... existing configuration ... - volumes: - # - "../resources:/home/pipeline-server/resources/" - - "../models:/home/pipeline-server/resources/" - - "./ptp_frame_timestamp.py:/home/pipeline-server/ptp_frame_timestamp.py" - - "./config.json:/home/pipeline-server/config.json" - # ... other volumes ... -``` - -## Run the Services - -Start the services using Docker Compose. - -```bash -docker compose up -d -``` - -## Start the RTSP Camera Pipeline - -Finally, start the pipeline by sending a POST request to the pipeline server. - -```bash -curl -k http://localhost:8080/pipelines/user_defined_pipelines/rtsp_camera_pipeline -X POST -H 'Content-Type: application/json' -d '{ - "destination": { - "metadata": { - "type": "mqtt", - "publish_frame": true, - "topic": "tsn_demo/camera/inference" - } - }, - "parameters": { - "detection-properties": { - "model": "/home/pipeline-server/resources/deployment/Detection/model/model.xml", - "device": "CPU" - } - } -}' -``` - -Note: Update the topic name if you are running the pipeline on multiple machines. 
\ No newline at end of file diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-sensor-data-producer.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-sensor-data-producer.md deleted file mode 100644 index 33783eefa5..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-sensor-data-producer.md +++ /dev/null @@ -1,57 +0,0 @@ -# How to Run the Sensor Data Producer - -This guide explains how to run the `sensor_data_producer.py` script to generate and publish simulated sensor data to an MQTT broker. - -## Overview - -The `sensor_data_producer.py` script is a Python application that simulates a sensor device. It generates timestamped data packets at a configurable rate and publishes them to a specified MQTT topic. This is useful for testing and demonstrating data pipelines in the TSN sample application. - -## Prerequisites - -Before running the script, ensure you have Python 3 and the `paho-mqtt` library installed. - -```bash -pip install paho-mqtt -``` -## MQTT Broker Setup - -If you don't have an MQTT broker set up, you can quickly run one using Docker. The following command will start an Eclipse Mosquitto MQTT broker on your machine: - -```bash -cd deterministic-threat-detection/sensor_data_producer -docker run -d \ - --name mqtt-broker \ - --network host \ - -v "$(pwd)/configs/mosquitto.conf:/mosquitto/config/mosquitto.conf:ro" \ - eclipse-mosquitto -``` - -## Running the Script - -Navigate to the `deterministic-threat-detection` directory and run the script from there. You need to provide the IP address of the machine where the MQTT aggregator is running or assume `localhost` if --broker is not specified. - -```bash -cd deterministic-threat-detection/sensor_data_producer -python3 sensor_data_producer.py --broker -``` - -Replace `` with the actual IP address of your MQTT broker. 
- -## Command-Line Arguments - -The script supports several command-line arguments to customize its behavior: - -| Argument | Description | Default Value | -|---|---|---| -| `--broker` | The IP address or hostname of the MQTT broker. | `localhost` | -| `--port` | The port number for the MQTT broker. | `1883` | -| `--topic` | The MQTT topic to publish the sensor data to. | `sample/sensor/data` | -| `--rate` | The rate in Hertz (Hz) at which to publish data. | `30` | - -### Example - -To publish data to a broker at `192.168.1.100` on topic `factory/sensors` at a rate of 50 Hz, you would use the following command: - -```bash -python3 sensor_data_producer.py --broker 192.168.1.100 --topic factory/sensors --rate 50 -``` diff --git a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-traffic-injector.md b/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-traffic-injector.md deleted file mode 100644 index aa4203d4ba..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/docs/user-guide/how-to-run-traffic-injector.md +++ /dev/null @@ -1,45 +0,0 @@ -# How to Run the Traffic Injector - -This guide explains how to use `iperf3` to inject best-effort background traffic into the network. This allows you to observe the impact of network congestion on your time-sensitive traffic and validate the effectiveness of TSN traffic shaping. - -## Overview - -`iperf3` is a modern, widely used network testing tool that can create data streams to measure network performance. In this use case, we use `iperf3` to generate background traffic that competes for network resources with the critical video and sensor data streams. - -The setup involves two machines: -- An **`iperf3` server** that listens for incoming traffic. -- An **`iperf3` client** that sends data to the server. - -## Prerequisites - -Ensure `iperf3` is installed on both the client and server machines. 
- -```bash -sudo apt-get update -sudo apt-get install -y iperf3 -``` - -## Running the Traffic Injector - -Follow these steps to start the traffic injection. - -### 1. Start the `iperf3` Server - -On the machine that will receive the traffic (e.g., Machine 4, the MQTT Aggregator), run the following command to start the `iperf3` server in the background. - -```bash -iperf3 -s & -``` - -The server will now be listening for connections on the default port (5201). - -### 2. Start the `iperf3` Client - -On the machine designated as the traffic injector (Machine 5), run the following command to start sending UDP traffic to the `iperf3` server. - -```bash -iperf3 -c -u -b 200M -t 3600 -``` - -Replace `` with the IP address of the machine running the `iperf3` server. - diff --git a/metro-ai-suite/deterministic-threat-detection/mqtt_data_aggregator/mqtt_data_aggregator.py b/metro-ai-suite/deterministic-threat-detection/mqtt_data_aggregator/mqtt_data_aggregator.py deleted file mode 100644 index e7d2c12348..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/mqtt_data_aggregator/mqtt_data_aggregator.py +++ /dev/null @@ -1,292 +0,0 @@ -""" -MQTT Data Aggregator and Real-Time Latency Visualizer. - -This script connects to multiple MQTT brokers, subscribes to specified topics, -and calculates the end-to-end latency of messages based on PTP timestamps. -It then visualizes these latencies in a real-time web-based dashboard using Dash. - -The script is designed to monitor the performance of a Time-Sensitive Networking (TSN) -setup by displaying how network conditions affect data delivery for different -streams (e.g., from cameras and sensors). -""" - -# -# Copyright (C) 2026 Intel Corporation. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import argparse -import json -import logging -import os -import signal -import time -from collections import deque -from threading import Lock - -import dash -import paho.mqtt.client as mqtt -import plotly.graph_objs as go -from dash import dcc, html -from dash.dependencies import Input, Output - -# --- Default Configuration --- -DEFAULT_BROKER_PORT = 1883 -DEFAULT_TOPICS_BROKERS = [ - "tsn_demo/camera1/inference:localhost", - "tsn_demo/camera2/inference:localhost", - "sample/sensor/data:localhost" -] -DEFAULT_WINDOW_SECONDS = 2 -DEFAULT_Y_AXIS_MIN = 0 -DEFAULT_Y_AXIS_MAX = 5 - -# --- Logging Setup --- -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') - -# --- Global Data Structures --- -data_lock = Lock() -data_store = {} -active_clients = [] -last_update_times = {} - - -# --- Argument Parsing --- -def parse_arguments(): - """Parse command-line arguments for the aggregator and visualizer.""" - parser = argparse.ArgumentParser( - description="Multi-Broker MQTT Data Aggregator and Visualizer") - parser.add_argument("--topic-brokers", nargs='+', default=DEFAULT_TOPICS_BROKERS, - help="List of 'topic:broker_ip_address' pairs (e.g., 'topic1:broker1_ip' 'topic2:broker2_ip')") - parser.add_argument("--port", type=int, default=DEFAULT_BROKER_PORT, help="Default MQTT broker port") - parser.add_argument("--window-seconds", type=int, default=DEFAULT_WINDOW_SECONDS, help="Time window in seconds to display on the plot") - parser.add_argument("--y-min", type=float, default=DEFAULT_Y_AXIS_MIN, help="Y-axis minimum value") - parser.add_argument("--y-max", type=float, default=DEFAULT_Y_AXIS_MAX, help="Y-axis maximum value") - parser.add_argument("--dash-port", type=int, default=8050, help="Port for the Dash web interface") - return parser.parse_args() - - -# --- MQTT Callbacks --- -def on_connect_factory(broker_address, topic): - """ - Factory function to create an on_connect callback for a 
specific broker and topic. - - This allows each MQTT client to have a unique callback that logs its - connection status and subscribes to its designated topic. - - Args: - broker_address (str): The address of the MQTT broker. - topic (str): The topic to subscribe to upon connection. - - Returns: - function: The on_connect callback function. - """ - def on_connect(client, userdata, flags, rc, properties=None): - """Callback executed when the client connects to the MQTT broker.""" - if rc == 0: - logging.info(f"Connected to MQTT Broker at {broker_address} for topic {topic}") - client.subscribe(topic, qos=1) - logging.info(f"Subscribed to topic: {topic}") - else: - logging.error(f"Failed to connect to {broker_address}, return code {rc}") - return on_connect - - -def on_disconnect_factory(broker_address): - """ - Factory function to create an on_disconnect callback. - - Args: - broker_address (str): The address of the MQTT broker for logging. - - Returns: - function: The on_disconnect callback function. - """ - def on_disconnect(client, userdata, rc, properties=None): - """Callback executed when the client disconnects from the MQTT broker.""" - logging.warning( - f"Disconnected from MQTT Broker at {broker_address} (code: {rc}).") - return on_disconnect - - -def on_message(client, userdata, message): - """ - Callback executed when a message is received from an MQTT broker. - - It calculates the latency from the timestamp in the message payload - and stores it for visualization. 
- """ - try: - current_time = time.time() - payload = json.loads(message.payload) - topic = message.topic - - if "sensor" in topic: - timestamp = payload.get('timestamp') - else: - timestamp = payload.get('metadata', {}).get('ptp_timestamp') - - if timestamp is None: - logging.warning(f"Timestamp not found in message on topic {topic}") - return - - latency = (current_time - timestamp) - - with data_lock: - if topic in data_store: - data_store[topic]["timestamps"].append(current_time) - data_store[topic]["latencies"].append(latency) - last_update_times[topic] = current_time - - except (json.JSONDecodeError, KeyError) as e: - logging.error(f"Error processing message on topic {message.topic}: {e}") - - -# --- Dash App --- -def create_dash_app(args): - """ - Create and configure the Dash application for real-time visualization. - - Args: - args: The parsed command-line arguments. - - Returns: - dash.Dash: The configured Dash application instance. - """ - app = dash.Dash(__name__) - app.layout = html.Div([ - html.H1("Real-Time TSN Latency Monitor"), - dcc.Graph(id='latency-plot'), - dcc.Interval(id='interval-update', interval=1000, n_intervals=0) - ]) - - @app.callback(Output('latency-plot', 'figure'), Input('interval-update', 'n_intervals')) - def update_plot(n): - """ - Callback to update the latency plot at regular intervals. - - This function is triggered by the dcc.Interval component. It prunes old - data, redraws the plot with the latest latency information, and adjusts - the y-axis dynamically to fit the data. - - Args: - n (int): The number of intervals that have passed (not used). - - Returns: - dict: A dictionary representing the updated Plotly figure. 
- """ - traces = [] - current_time = time.time() - start_time = current_time - args.window_seconds - max_latency = 0 - - with data_lock: - for topic, data in data_store.items(): - # Prune data older than the time window - while data["timestamps"] and data["timestamps"][0] < start_time: - data["timestamps"].popleft() - data["latencies"].popleft() - - # Find the max latency in the current window for this topic - for lat in data["latencies"]: - if lat is not None and lat > max_latency: - max_latency = lat - - # Create a consistent set of timestamps for plotting - plot_timestamps = [start_time] + list(data["timestamps"]) + [current_time] - plot_latencies = [None] + list(data["latencies"]) + [None] - - traces.append(go.Scatter( - x=plot_timestamps, - y=plot_latencies, - mode='lines+markers', - name=topic, - connectgaps=False # This is important to show breaks - )) - - # Determine the y-axis range - y_axis_range = [args.y_min, args.y_max] - if max_latency > args.y_max: - y_axis_range[1] = max_latency * 1.1 # Add 10% buffer - - return { - 'data': traces, - 'layout': go.Layout( - xaxis={'title': 'Time', 'range': [start_time, current_time]}, - yaxis={'title': 'End-to-End Latency (seconds)', 'range': y_axis_range}, - legend={'x': 0.01, 'y': 0.99, 'bgcolor': 'rgba(255,255,255,0.8)', 'bordercolor': 'black', 'borderwidth': 1}, - margin={'l': 60, 'r': 40, 't': 40, 'b': 40} - ) - } - return app - - -# --- Main Execution --- -def main(args): - """ - Main function to set up MQTT clients and run the Dash application. - - Initializes data storage, configures and connects MQTT clients for each - specified topic-broker pair, and starts the web server for the - Dash visualization dashboard. - - Args: - args: The parsed command-line arguments. 
- """ - # Initialize data storage - for item in args.topic_brokers: - topic, broker = item.split(':') - if topic not in data_store: - data_store[topic] = { - "timestamps": deque(), - "latencies": deque() - } - last_update_times[topic] = 0 - - # Setup and connect MQTT clients - for item in args.topic_brokers: - topic, broker_address = item.split(':') - client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2) - client.on_connect = on_connect_factory(broker_address, topic) - client.on_disconnect = on_disconnect_factory(broker_address) - client.on_message = on_message - client.reconnect_delay_set(min_delay=1, max_delay=120) - - try: - client.connect_async(broker_address, args.port, 60) - client.loop_start() - active_clients.append(client) - logging.info(f"Initiating connection to {broker_address} for topic {topic}") - except (ConnectionRefusedError, OSError) as e: - logging.error(f"Fatal: Could not connect to {broker_address}. {e}") - - # Create and run Dash app - app = create_dash_app(args) - app.run(debug=False, host='0.0.0.0', port=args.dash_port, use_reloader=False) - - # Cleanup on exit - for client in active_clients: - client.loop_stop() - client.disconnect() - logging.info("Application finished.") - - -def signal_handler(sig, frame): - """ - Handle termination signals (SIGINT, SIGTERM) for graceful shutdown. - - Args: - sig: The signal number. - frame: The current stack frame. - """ - logging.info("Termination signal received. 
Shutting down...") - os.kill(os.getpid(), signal.SIGTERM) - - -if __name__ == '__main__': - args = parse_arguments() - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - main(args) - diff --git a/metro-ai-suite/deterministic-threat-detection/rtsp_camera_pipeline/config.json b/metro-ai-suite/deterministic-threat-detection/rtsp_camera_pipeline/config.json deleted file mode 100644 index 5ecdc30adf..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/rtsp_camera_pipeline/config.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "config": { - "pipelines": [ - { - "name": "rtsp_camera_pipeline", - "source": "gstreamer", - "queue_maxsize": 50, - "pipeline": "rtspsrc location=rtsp://:@ latency=100 ! rtph264depay ! gvapython class=PTPFrameTimeStamp function=process module=/home/pipeline-server/ptp_frame_timestamp.py name=ntp ! h264parse ! decodebin ! videoscale ! video/x-raw, width=1920, height=1080 ! videoconvert ! gvadetect name=detection model-instance-id=inst0 ! gvametaconvert add-empty-results=true name=metaconvert ! queue ! gvawatermark ! queue ! gvafpscounter ! queue ! jpegenc ! appsink name=destination", - "parameters": { - "type": "object", - "properties": { - "detection-properties": { - "element": { - "name": "detection", - "format": "element-properties" - } - } - } - }, - "auto_start": false - } - ] - } -} \ No newline at end of file diff --git a/metro-ai-suite/deterministic-threat-detection/rtsp_camera_pipeline/ptp_frame_timestamp.py b/metro-ai-suite/deterministic-threat-detection/rtsp_camera_pipeline/ptp_frame_timestamp.py deleted file mode 100644 index 5ffc94f155..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/rtsp_camera_pipeline/ptp_frame_timestamp.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -Custom DL Streamer pipeline element for adding PTP-synchronized timestamps. - -This script defines a class, `PTPFrameTimeStamp`, which can be used as a custom -processing block within a DL Streamer pipeline. 
Its primary function is to -capture the current system time, which is assumed to be synchronized via PTP, -and inject it as a JSON message into the metadata of each video frame that -passes through it. -""" - -# -# Copyright (C) 2026 Intel Corporation. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import json -from time import ctime, sleep -import ntplib -import requests -from datetime import datetime - -class PTPFrameTimeStamp: - """ - A custom pipeline element to add a PTP-synchronized timestamp to a frame. - - This class is designed to be instantiated by a pipeline framework. The `process` - method is called for each frame, where it adds a 'ptp_timestamp' field to the - frame's metadata. - """ - def __init__(self): - """Initializes the PTPFrameTimeStamp instance.""" - pass - - def _get_timestamp(self): - """ - Get the current PTP-synchronized timestamp. - - This method returns the current time as a Unix timestamp (seconds since epoch). - It relies on the underlying system clock being accurately synchronized by a - PTP service like ptp4l and phc2sys. - - Returns: - float: The current Unix timestamp. - """ - return datetime.now().timestamp() - - def process(self, frame): - """ - Process a frame and add a timestamp to its metadata. - - This is the main entry point called by the pipeline for each frame. - It retrieves the current timestamp and adds it to the frame's metadata - as a JSON string. - - Args: - frame: The frame object from the pipeline. - - Returns: - bool: True to indicate successful processing. 
- """ - frame.add_message(json.dumps({'ptp_timestamp': self._get_timestamp()})) - return True diff --git a/metro-ai-suite/deterministic-threat-detection/sensor_data_producer/configs/mosquitto.conf b/metro-ai-suite/deterministic-threat-detection/sensor_data_producer/configs/mosquitto.conf deleted file mode 100644 index cb72906edc..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/sensor_data_producer/configs/mosquitto.conf +++ /dev/null @@ -1,3 +0,0 @@ -allow_anonymous true -listener 1883 - diff --git a/metro-ai-suite/deterministic-threat-detection/sensor_data_producer/sensor_data_producer.py b/metro-ai-suite/deterministic-threat-detection/sensor_data_producer/sensor_data_producer.py deleted file mode 100644 index e41422b135..0000000000 --- a/metro-ai-suite/deterministic-threat-detection/sensor_data_producer/sensor_data_producer.py +++ /dev/null @@ -1,170 +0,0 @@ -""" -Simulated Sensor Data Producer for MQTT. - -This script simulates a sensor device that generates and publishes data to an MQTT -broker at a specified rate. The data is sent as a JSON payload containing a -timestamp, a sample number, and a simulated sensor value. - -It is intended for use in testing and demonstrating data pipelines, particularly -in scenarios like the Time-Sensitive Networking (TSN) use case where a stream of -time-stamped data is required. -""" - -# -# Copyright (C) 2026 Intel Corporation. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - -import argparse -import json -import logging -import os -import signal -import time -import paho.mqtt.client as mqtt - -# Default Configuration -DEFAULT_BROKER = "localhost" -DEFAULT_PORT = 1883 -DEFAULT_TOPIC = "sample/sensor/data" -DEFAULT_RATE_HZ = 30 - -# Logging Setup -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') - -# Global State -running = True - -def on_connect(client, userdata, flags, rc, properties=None): - """Callback executed when the client successfully connects to the MQTT broker.""" - if rc == 0: - logging.info("Connected to MQTT Broker!") - else: - logging.error(f"Failed to connect, return code {rc}\n") - -def on_disconnect(client, userdata, rc, properties=None): - """Callback executed when the client disconnects from the MQTT broker.""" - logging.warning( - f"Disconnected from MQTT Broker with result code {rc}. Reconnecting...") - - -def parse_arguments(): - """ - Parse command-line arguments for the data producer. - - Allows configuration of the MQTT broker address, port, topic, and the - publishing rate. Values can also be set via environment variables. - - Returns: - argparse.Namespace: An object containing the parsed command-line arguments. 
- """ - parser = argparse.ArgumentParser(description="MQTT Sensor Data Producer") - parser.add_argument("--broker", type=str, default=os.getenv("MQTT_BROKER", DEFAULT_BROKER), - help=f"MQTT broker address (default: {DEFAULT_BROKER})") - parser.add_argument("--port", type=int, default=int(os.getenv("MQTT_PORT", DEFAULT_PORT)), - help=f"MQTT broker port (default: {DEFAULT_PORT})") - parser.add_argument("--topic", type=str, default=os.getenv("MQTT_TOPIC", DEFAULT_TOPIC), - help=f"MQTT topic to publish to (default: {DEFAULT_TOPIC})") - parser.add_argument("--rate", type=float, default=float(os.getenv("PUBLISH_RATE_HZ", DEFAULT_RATE_HZ)), - help=f"Publishing rate in Hz (default: {DEFAULT_RATE_HZ})") - return parser.parse_args() - -def signal_handler(sig, frame): - """ - Handle termination signals (SIGINT, SIGTERM) for graceful shutdown. - - Sets a global flag to signal the main loop to exit cleanly. - - Args: - sig: The signal number. - frame: The current stack frame. - """ - global running - logging.info("Termination signal received. Shutting down...") - running = False - -def run_producer(args): - """ - Main function to connect to MQTT and run the data production loop. - - This function initializes the MQTT client, connects to the broker, and enters - a loop to publish simulated sensor data at a precise rate determined by the - '--rate' argument. It handles connection errors and ensures a clean shutdown. - - Args: - args: An object containing the parsed command-line arguments. 
- """ - global running - interval = 1.0 / args.rate - - # Initialize Client - client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2) - client.on_connect = on_connect - client.on_disconnect = on_disconnect - - # Enable automatic reconnect - client.reconnect_delay_set(min_delay=1, max_delay=120) - - try: - client.connect(args.broker, args.port, 60) - except (ConnectionRefusedError, OSError) as e: - logging.error(f"Connection to MQTT broker failed: {e}") - return - - client.loop_start() - - logging.info(f"Publishing JSON to topic '{args.topic}' at {args.rate} Hz...") - - count = 0 - next_time = time.perf_counter() - - while running: - try: - # 1. Create a structured data dictionary - data = { - "id": "sensor_01", - "timestamp": time.time(), - "sample_no": count, - "value": 22.5 + (count % 10) * 0.1 # Example dynamic data - } - - # 2. Convert dictionary to JSON string - json_payload = json.dumps(data) - - # 3. Publish - result = client.publish(args.topic, payload=json_payload, qos=0) - if result.rc != mqtt.MQTT_ERR_SUCCESS: - logging.warning(f"Failed to publish message: {mqtt.error_string(result.rc)}") - - - count += 1 - - # 4. 
Precise Timing Loop - next_time += interval - sleep_time = next_time - time.perf_counter() - if sleep_time > 0: - time.sleep(sleep_time) - elif sleep_time < -0.1: # Reset timing if we're too far behind - next_time = time.perf_counter() + interval - - - except Exception as e: - logging.error(f"An error occurred in the main loop: {e}") - time.sleep(1) # Avoid rapid-fire errors - - logging.info("Exiting main loop.") - client.loop_stop() - client.disconnect() - logging.info("MQTT client disconnected.") - -if __name__ == "__main__": - # Register signal handlers for graceful shutdown - signal.signal(signal.SIGINT, signal_handler) - signal.signal(signal.SIGTERM, signal_handler) - - # Parse arguments and run the producer - args = parse_arguments() - run_producer(args) - logging.info("Application finished.") - diff --git a/metro-ai-suite/image-based-video-search/README.md b/metro-ai-suite/image-based-video-search/README.md index 5f82df5266..8be74e92f8 100644 --- a/metro-ai-suite/image-based-video-search/README.md +++ b/metro-ai-suite/image-based-video-search/README.md @@ -12,7 +12,7 @@ You can use this foundation to build solutions for diverse use cases, including ## How it Works The application workflow has three stages: inputs, processing, and outputs. -![Diagram illustrating the components and interactions within the Image-Based Video Search system, including inputs, processing, and outputs.](./docs/user-guide/_assets/architecture.svg) +![Diagram illustrating the components and interactions within the Image-Based Video Search system, including inputs, processing, and outputs.](docs/user-guide/_images/architecture.svg) ### Inputs @@ -33,16 +33,14 @@ The application includes a demonstration video for testing. 
The video loops cont - Matched search results, including metadata, timestamps, confidence scores, and frames -![Screenshot of the Image-Based Video Search sample application interface displaying search input and matched results](./docs/user-guide/_assets/imagesearch2.png) +![Screenshot of the Image-Based Video Search sample application interface displaying search input and matched results](docs/user-guide/_images/imagesearch2.png) ### Learn More - -- [Get Started](./docs/user-guide/get-started.md) -- [System Requirements](./docs/user-guide/get-started/system-requirements.md) -- [How It Works](./docs/user-guide/how-it-works.md) -- [Deploy with Helm](./docs/user-guide/get-started/deploy-with-helm.md) -- [Deploy with Edge Orchestrator](./docs/user-guide/get-started/deploy-with-edge-orchestrator.md) -- [Release Notes](./docs/user-guide/release-notes.md) +- [System Requirements](docs/user-guide/system-requirements.md) +- [Get Started](docs/user-guide/get-started.md) +- [Architecture Overview](docs/user-guide/overview-architecture.md) +- [How to Deploy with Helm](docs/user-guide/how-to-deploy-helm.md) +- [Release Notes](docs/user-guide/release-notes.md) ## Important Notice diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/Overview.md b/metro-ai-suite/image-based-video-search/docs/user-guide/Overview.md new file mode 100644 index 0000000000..627c6df176 --- /dev/null +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/Overview.md @@ -0,0 +1,70 @@ +# Image-Based Video Search (IBVS) Sample Application + + + +Performs near real-time analysis and image-based search to detect and retrieve +objects of interest in large video datasets. + +## Overview + +The **Image-Based Video Search** sample application lets users search live or +recorded camera feeds by providing an image and view matching objects with +location, timestamp, and confidence score details. 
+ +This sample provides a working example of how to combine edge AI microservices +for video ingestion, object detection, feature extraction, and vector-based +search. + +You can use this foundation to build solutions for diverse use cases, including +city infrastructure monitoring and security applications, helping operators +quickly locate objects of interest across large video datasets. + +## How it Works + +The application workflow has three stages: inputs, processing, and outputs. + +![Diagram illustrating the components and interactions within the Image-Based Video Search system, including inputs, processing, and outputs.](_images/architecture.svg) + +### Inputs + +- Video files or live camera streams (simulated or real time) +- User-provided images or images captured from video for search + +The application includes a demonstration video for testing. The video loops +continuously and appears in the UI as soon as the application starts. + +### Processing + +- **Nginx reverse proxy server**: All interactions with user happens via Nginx server. It protects IBVS app by handling SSL/TLS encryption, filtering and validating requests and making the app directly inaccessible from external access. +- **Video analysis with Deep Learning Streamer Pipeline Server and MediaMTX**: + Select **Analyze Stream** to start the DL Streamer Pipeline Server pipeline. + The Pipeline Server processes video through **MediaMTX**, which simulates + remote video cameras and publishes live streams. The Pipeline Server extracts + frames and detects objects in each frame, publishing predictions through + **MQTT**. +- **Feature extraction with Feature Matching**: DL Streamer Pipeline Server + sends metadata and images through MQTT to the Feature Matching microservice. + Feature Matching generates feature vectors. If predictions exceed the + threshold, the system stores vector embeddings in MilvusDB and saves frames in + the Docker file system. 
+- **Storage and retrieval in MilvusDB**: MilvusDB stores feature vectors. You + can review them in MilvusUI. +- **Video search with ImageIngestor**: To search, first analyze the stream by + selecting **Analyze Stream**. Then upload an image or capture an object from + the video using **Upload Image** or **Capture Frame**. You can adjust the + frame to capture a specific object. The system ingests images via + ImageIngestor, processes them with DL Streamer Pipeline Server, and matches + them against stored feature vectors in MilvusDB. + +### Outputs + +- Matched search results, including metadata, timestamps, confidence scores, and + frames + +![Screenshot of the Image-Based Video Search sample application interface displaying search input and matched results](_images/imagesearch2.png) + +### Learn More +- [System Requirements](system-requirements.md) +- [Get Started](get-started.md) +- [DL Streamer Pipeline Server](https://docs.openedgeplatform.intel.com/dev/edge-ai-libraries/dlstreamer-pipeline-server/index.html) +- [Release Notes](release-notes.md) \ No newline at end of file diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/_assets/application.gif b/metro-ai-suite/image-based-video-search/docs/user-guide/_images/application.gif similarity index 100% rename from metro-ai-suite/image-based-video-search/docs/user-guide/_assets/application.gif rename to metro-ai-suite/image-based-video-search/docs/user-guide/_images/application.gif diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/_assets/architecture.svg b/metro-ai-suite/image-based-video-search/docs/user-guide/_images/architecture.svg similarity index 100% rename from metro-ai-suite/image-based-video-search/docs/user-guide/_assets/architecture.svg rename to metro-ai-suite/image-based-video-search/docs/user-guide/_images/architecture.svg diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/_assets/deployment.png 
b/metro-ai-suite/image-based-video-search/docs/user-guide/_images/deployment.png similarity index 100% rename from metro-ai-suite/image-based-video-search/docs/user-guide/_assets/deployment.png rename to metro-ai-suite/image-based-video-search/docs/user-guide/_images/deployment.png diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/_assets/ibvs-dp.png b/metro-ai-suite/image-based-video-search/docs/user-guide/_images/ibvs-dp.png similarity index 100% rename from metro-ai-suite/image-based-video-search/docs/user-guide/_assets/ibvs-dp.png rename to metro-ai-suite/image-based-video-search/docs/user-guide/_images/ibvs-dp.png diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/_assets/imagesearch1.png b/metro-ai-suite/image-based-video-search/docs/user-guide/_images/imagesearch1.png similarity index 100% rename from metro-ai-suite/image-based-video-search/docs/user-guide/_assets/imagesearch1.png rename to metro-ai-suite/image-based-video-search/docs/user-guide/_images/imagesearch1.png diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/_assets/imagesearch2.png b/metro-ai-suite/image-based-video-search/docs/user-guide/_images/imagesearch2.png similarity index 100% rename from metro-ai-suite/image-based-video-search/docs/user-guide/_assets/imagesearch2.png rename to metro-ai-suite/image-based-video-search/docs/user-guide/_images/imagesearch2.png diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md b/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md index e3997c87f7..cb1bcbe500 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/get-started.md @@ -1,19 +1,26 @@ # Get Started + The **Image-Based Video Search** is a sample application that demonstrates how developers can leverage edge AI technologies to solve real-world challenges. 
It enables efficient processing and searching of video data to identify objects of interest, providing actionable insights in real-time. This application showcases searching through video data generated by cameras around a city to find the closest match to the user-provided image. + By following this guide, you will learn how to: - **Set up the sample application**: Use Docker Compose to quickly deploy the application in your environment. - **Run a predefined pipeline**: Execute a sample pipeline to see real-time traffic monitoring and object detection in action. - **Modify application parameters**: Customize settings like input sources and detection thresholds to adapt the application to your specific requirements. -## Prerequisites -- Verify that your system meets the [minimum requirements](./get-started/system-requirements.md). +## Prerequisites +- Verify that your system meets the [minimum requirements](./system-requirements.md). - Install Docker: [Installation Guide](https://docs.docker.com/get-docker/). + ## Set up and First Use 1. **Clone the Repository and update `.env` file**: @@ -22,17 +29,20 @@ By following this guide, you will learn how to: git clone https://github.com/open-edge-platform/edge-ai-suites.git cd edge-ai-suites/metro-ai-suite/image-based-video-search ``` - - > **Note:** The below step is required for deployment with certain pre-release images + + > Note: The below step is required for deployment with certain pre-release images - Update `DOCKER_REGISTRY` variable in `.env` file present at `edge-ai-suites/metro-ai-suite/image-based-video-search/`. The recommended setting to use pre-release images is: `DOCKER_REGISTRY=docker.io/` Please remember to include `/` at the end. + 2. 
**Build from Source (Optional)**: - Run the below command to build the images from source ```bash docker compose build ``` - > **Note:** You can skip this optional step since `docker compose up -d` that is run later in this document automatically pulls the required images. + > Note: You can skip this optional step since `docker compose up -d` that is run later in this document automatically pulls the required images. 3. **Download the Models**: - Download the models @@ -123,8 +133,9 @@ By following this guide, you will learn how to: - App UI: `https:///` - Search UI: `https:///docs` - MilvusDB UI: `https:///ibvs-milvus-ui` - - Stream UI: You can access https stream at `https:///stream` and RTSP stream at `rtsp://:8554/stream`. - > **Note:** Replace `` with your host IP address + - Stream UI: You can access https stream at `https:///stream` and RTSP stream at `rtsp://:8554/stream`. + > Note: Replace `` with your host IP address + 7. **Run the Application**: @@ -134,7 +145,7 @@ By following this guide, you will learn how to: - **Expected Results**: - Matched search results, including metadata, timestamps, distance to show the confidence rate of the prediction, and frames that include detected objects (e.g., vehicles, pedestrians, bikes). - | ![image1](./_assets/imagesearch1.png) | ![image2](./_assets/imagesearch2.png) | + | ![image1](./_images/imagesearch1.png) | ![image2](./_images/imagesearch2.png) | |--------------------------------|--------------------------------| 8. **Stop the Application**: @@ -289,18 +300,30 @@ By following this guide, you will learn how to: docker compose logs ``` -## Supporting Resources -- [Troubleshooting](./troubleshooting.md) -- [Docker Compose Documentation](https://docs.docker.com/compose/) +## Troubleshooting + +1. **Containers Not Starting**: + - Check the Docker logs for errors: + ```bash + docker compose logs + ``` +2. **Port Conflicts**: + - Update the `ports` section in the Compose file to resolve conflicts. + +3. 
**ibvs-milvusdb container is unhealthy**: + - Currently, milvusdb does not work with proxy servers. Make sure that the proxies `http_proxy`, `https_proxy` and `no_proxy` are set to empty string in `compose.yml` file + +4. **Empty search results after clicking on `Search Object`**: + - Make sure the models are able to detect the objects in the stream correctly + - Make sure you have analysed the stream first to capture the video frames into milvus database + - Make sure you are using the right frame to search the object + - Increase the 'To' timestamp in the search results to accommodate the latest results - + \ No newline at end of file diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started/deploy-with-helm.md b/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-deploy-helm.md similarity index 77% rename from metro-ai-suite/image-based-video-search/docs/user-guide/get-started/deploy-with-helm.md rename to metro-ai-suite/image-based-video-search/docs/user-guide/how-to-deploy-helm.md index 8fb8100fb6..56764d3e68 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started/deploy-with-helm.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-deploy-helm.md @@ -23,7 +23,7 @@ Before You Begin, ensure the following: ## Pull the helm chart (Optional) -> **Note:** The helm chart should be downloaded when you are not using the helm chart provided in `edge-ai-suites/metro-ai-suite/image-based-video-search/chart` +- Note: The helm chart should be downloaded when you are not using the helm chart provided in `edge-ai-suites/metro-ai-suite/image-based-video-search/chart` - Download helm chart with the following command @@ -48,7 +48,7 @@ Before You Begin, ensure the following: - Run below command in the terminal ```bash # Install the Image-Based Video Search chart in the ibvs namespace - helm install ibvs . --create-namespace -n ibvs + helm install ibvs . 
--create-namespace -n ibvs ``` Some containers in the deployment requires network access. If you are in a proxy @@ -65,7 +65,7 @@ Before You Begin, ensure the following: 3. **Open IBVS UI**: - Now frontend should be accessible at `https://:30443/`. - > **Note:** To access the above url remotely, replace the `` with your system IP address. + > Note: To access the above url remotely, replace the `` with your system IP address. 4. **Stop the application**: - The app can be uninstalled using the following command: @@ -73,8 +73,33 @@ Before You Begin, ensure the following: helm uninstall -n ibvs ibvs ``` +## Troubleshooting + +1. **Helm Chart Not Found**: + + - Check if the Helm repository was added: + + ```bash + helm repo list + ``` + +1. **Pods Not Running**: + + - Review pod logs: + + ```bash + kubectl logs {{pod-name}} -n {{namespace}} + ``` + +1. **Service Unreachable**: + + - Confirm the service configuration: + + ```bash + kubectl get svc -n {{namespace}} + ``` + ## Supporting Resources -- [Troubleshooting Helm Deployments](../troubleshooting.md#troubleshooting-helm-deployments) - [Kubernetes Documentation](https://kubernetes.io/docs/home/) - [Helm Documentation](https://helm.sh/docs/) diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started/deploy-with-edge-orchestrator.md b/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-deploy-with-edge-orchestrator.md similarity index 74% rename from metro-ai-suite/image-based-video-search/docs/user-guide/get-started/deploy-with-edge-orchestrator.md rename to metro-ai-suite/image-based-video-search/docs/user-guide/how-to-deploy-with-edge-orchestrator.md index 6239416348..16363926cb 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started/deploy-with-edge-orchestrator.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-deploy-with-edge-orchestrator.md @@ -2,15 +2,15 @@ Edge Orchestrator, part of Intel’s Edge Software, simplifies edge 
application deployment and management, making it easier to deploy edge solutions at scale. Edge Orchestrator provides: -- **Secure Infrastructure Management**: Offers secure and efficient remote onboarding and management of your edge node fleet across sites and geographies. Zero-trust security configuration reduces the time required to secure your edge applications. +* **Secure Infrastructure Management**: Offers secure and efficient remote onboarding and management of your edge node fleet across sites and geographies. Zero-trust security configuration reduces the time required to secure your edge applications. -- **Deployment Orchestration and Automation**: Lets you roll out and update applications and configure infrastructure nodes across your network from a single pane of glass. Edge Orchestrator provides automated cluster orchestration and dynamic application deployment. +* **Deployment Orchestration and Automation**: Lets you roll out and update applications and configure infrastructure nodes across your network from a single pane of glass. Edge Orchestrator provides automated cluster orchestration and dynamic application deployment. -- **Automated Deployment**: Automates the remote installation and updating of applications at scale. +* **Automated Deployment**: Automates the remote installation and updating of applications at scale. -- **Deep Telemetry**: Gives you policy-based life cycle management and centralized visibility into your distributed edge infrastructure and deployments. +* **Deep Telemetry**: Gives you policy-based life cycle management and centralized visibility into your distributed edge infrastructure and deployments. -- **Flexible Configuration**: From organizing your physical infrastructure to managing the permutations of executing applications in a variety of runtime environments, Edge Orchestrator gives you the flexibility to define the policies, criteria, and hierarchies that make the most sense for your specific business needs. 
+* **Flexible Configuration**: From organizing your physical infrastructure to managing the permutations of executing applications in a variety of runtime environments, Edge Orchestrator gives you the flexibility to define the policies, criteria, and hierarchies that make the most sense for your specific business needs. To deploy the **Image-Based Video Search** application with Edge Orchestrator, follow the steps described in this document. @@ -19,7 +19,7 @@ To deploy the **Image-Based Video Search** application with Edge Orchestrator, f ### Prerequisites 1. Access to the web interface of the Edge Orchestrator with one or more [Edge Nodes Onboarded](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/set_up_edge_infra/edge_node_onboard/index.html) to the Edge Orchestrator. -2. Clusters with a [privilege template](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/advanced_functionality/set_up_a_cluster_template.html) have been created on the needed Edge Nodes following the procedures described in [Create Cluster](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/set_up_edge_infra/clusters/create_clusters.html#create-cluster). +1. Clusters with a [privilege template](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/advanced_functionality/set_up_a_cluster_template.html) have been created on the needed Edge Nodes following the procedures described in [Create Cluster](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/set_up_edge_infra/clusters/create_clusters.html#create-cluster). ### Making the Deployment Package Available @@ -30,13 +30,13 @@ To deploy the **Image-Based Video Search** application with Edge Orchestrator, f cd edge-ai-suites/metro-ai-suite/image-based-video-search ``` -2. 
From the web browser, open the URL of the Edge Orchestrator and import the Deployment Package present in the folder **deployment-package** following the steps described in [Import Deployment Package](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/package_software/import_deployment.html). +1. From the web browser, open the URL of the Edge Orchestrator and import the Deployment Package present in the folder **deployment-package** following the steps described in [Import Deployment Package](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/package_software/import_deployment.html). -3. Once the deployment package has been imported into Edge Orchestrator, you can see it in the list of Web UI as shown here. +1. Once the deployment package has been imported into Edge Orchestrator, you can see it in the list of Web UI as shown here. -![Image](../_assets/ibvs-dp.png) +**![Image](./_images/ibvs-dp.png)** -See [Deployment Packages](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/package_software/deploy_packages.html#view-deployment-packages) for more information on deployment packages. +See [Deployment Packages]() for more information on deployment packages. ### Deploy the Application onto the Edge Nodes @@ -44,23 +44,23 @@ To set up a deployment: 1. Click the **Deployments** tab on the top menu to view the Deployments page. On the Deployments page, you can view the list of deployments that have been created. The status indicator shows a quick view of the status of the deployment, which depends on many factors. -2. Select the **Deployments** tab and click the **Setup a Deployment** button. The Setup a Deployment page appears. +1. Select the **Deployments** tab and click the **Setup a Deployment** button. The Setup a Deployment page appears. -3. On the Setup a Deployment page, select the **ibvs** package for the deployment from the list, and click **Next**. The Select a Profile step appears. +1. 
On the Setup a Deployment page, select the **ibvs** package for the deployment from the list, and click **Next**. The Select a Profile step appears. -4. In the Select a Profile step, select the deployment profile, and click **Next**. The Override Profile Values page appears. +1. In the Select a Profile step, select the deployment profile, and click **Next**. The Override Profile Values page appears. -5. The Override Profile Values page shows the deployment profile values that are available for overriding. Provide the necessary overriding values, then click **Next** to proceed to the Select Deployment Type step. +1. The Override Profile Values page shows the deployment profile values that are available for overriding. Provide the necessary overriding values, then click **Next** to proceed to the Select Deployment Type step. -6. On the Select Deployment Type page, select the type of deployment, and click **Next**: +1. On the Select Deployment Type page, select the type of deployment, and click **Next**: 1. If you select **Automatic** as the deployment type, enter the deployment name and metadata in key-value format to select the target cluster. - 2. If you select **Manual** as the deployment type, enter the deployment name and select the clusters from the list of clusters. + 1. If you select **Manual** as the deployment type, enter the deployment name and select the clusters from the list of clusters. -7. Click **Next** to view the Review page. +1. Click **Next** to view the Review page. -8. Verify if the deployment details are correct and click **Deploy**. +1. Verify if the deployment details are correct and click **Deploy**. After a few minutes, the deployment will start and will take about 5 minutes to complete. @@ -70,7 +70,7 @@ The **Image-Based Video Search** Sample Application is fully deployed when the a You can view the deployment status on the Deployments page. 
-> **Note:** If the deployment fails for any reason, the deployment status will display the “Error” or “Down” status. +> Note: If the deployment fails for any reason, the deployment status will display the “Error” or “Down” status. For more information on setting up a deployment, see [Set up a Deployment](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/package_software/setup_deploy.html#set-up-a-deployment). @@ -78,6 +78,6 @@ For more information on setting up a deployment, see [Set up a Deployment](https 1. Download the kubeconfig of the cluster of the Edge Node on which the Application has been deployed. Refer [Kubeconfig Download](https://docs.openedgeplatform.intel.com/edge-manage-docs/dev/user_guide/set_up_edge_infra/clusters/accessing_clusters.html). -1. Follow the steps described in the **Image-Based Video Search** [Documentation](./deploy-with-helm.md) on usage of the application. +1. Follow the steps described in the **Image-Based Video Search** [Documentation](how-to-deploy-helm.md) on usage of the application. - > **Note:** Skip the install the Helm chart step. + > Note: Skip the install the helm chart step \ No newline at end of file diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-use-gpu-for-inference.md b/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-use-gpu-for-inference.md index a9ef3f172f..cfcc76c4ea 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-use-gpu-for-inference.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/how-to-use-gpu-for-inference.md @@ -3,13 +3,11 @@ ## Docker deployment ### Prerequisites -Follow steps 1 and 2 mentioned in [Get Started](./get-started.md#set-up-and-first-use) guide -if not already done. +Follow steps 1 and 2 mentioned in [Get-started](./get-started.md#set-up-and-first-use) guide if not already done. 
### Volume mount GPU config -Comment out CPU and NPU config and uncomment the GPU config present in [compose.yml](https://github.com/open-edge-platform/edge-ai-suites/blob/main/metro-ai-suite/image-based-video-search/compose.yml) -file under `volumes` section as shown below: +Comment out CPU and NPU config and uncomment the GPU config present in [compose](../../compose.yml) file under `volumes` section as shown below ```sh volumes: @@ -20,17 +18,15 @@ file under `volumes` section as shown below: ### Start and run the application -After the above changes to docker compose file, follow from step 3 as mentioned in the -[Get Started](./get-started.md#set-up-and-first-use) guide. +After the above changes to docker compose file, follow from step 3 as mentioned in [Get-started](./get-started.md#set-up-and-first-use) guide. ## Helm deployment ### Prerequisites -Follow step 1 mentioned in this [document](./get-started/deploy-with-helm.md#steps-to-deploy) if not already done. +Follow step 1 mentioned in this [document](./how-to-deploy-helm.md#steps-to-deploy) if not already done. ### Update values.yaml -In `values.yaml` file, change value of `pipeline` config present under -`dlstreamerpipelineserver` section as shown below: +In [values.yaml](../../chart/values.yaml) file, change value of `pipeline` config present under `dlstreamerpipelineserver` section as shown below - ```sh dlstreamerpipelineserver: @@ -50,5 +46,4 @@ dlstreamerpipelineserver: ### Start the application -After above changes to `values.yaml` file, follow from step 2 as mentioned in the -[Helm Deployment Guide](./get-started/deploy-with-helm.md#steps-to-deploy). +After above changes to `values.yaml` file, follow from step 2 as mentioned in this [document](./how-to-deploy-helm.md#steps-to-deploy). 
\ No newline at end of file diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/index.md b/metro-ai-suite/image-based-video-search/docs/user-guide/index.md index e31a0e3a99..9865561e9d 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/index.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/index.md @@ -1,4 +1,4 @@ -# Image-Based Video Search +# Image-Based Video Search Sample Application -Image-based Video Search sample application performs near real-time analysis and image-based search to detect and retrieve +Performs near real-time analysis and image-based search to detect and retrieve objects of interest in large video datasets. ## Overview @@ -30,12 +30,11 @@ quickly locate objects of interest across large video datasets. ## How it Works -The application workflow consists of three stages: inputs, processing, and outputs. +The application workflow has three stages: inputs, processing, and outputs. -![architectural diagram](./_assets/architecture.svg) -*Figure 1: Detailed Architecture of the Image-Based Video Search Application.* +![architectural diagram](./_images/architecture.svg) -### Inputs +## Inputs - Video files or live camera streams (simulated or real time) - User-provided images or images captured from video for search @@ -43,14 +42,14 @@ The application workflow consists of three stages: inputs, processing, and outpu The application includes a demonstration video for testing. The video loops continuously and appears in the UI as soon as the application starts. -### Processing +## Processing -- **Nginx reverse proxy server**: All interactions with user happens via Nginx server. It protects the IBVS app by handling SSL/TLS encryption, filtering and validating requests, offering centralized access control and making the app directly inaccessible from external access. +- **Nginx reverse proxy server**: All interactions with user happens via Nginx server. 
It protects IBVS app by handling SSL/TLS encryption, filtering and validating requests and making the app directly inaccessible from external access. - **Video analysis with Deep Learning Streamer Pipeline Server and MediaMTX**: Select **Analyze Stream** to start the DL Streamer Pipeline Server pipeline. The Pipeline Server processes video through **MediaMTX**, which simulates remote video cameras and publishes live streams. The Pipeline Server extracts - frames from RTSP streams and detects objects in each frame, publishing predictions through + frames and detects objects in each frame, publishing predictions through **MQTT**. - **Feature extraction with Feature Matching**: DL Streamer Pipeline Server sends metadata and images through MQTT to the Feature Matching microservice. @@ -66,29 +65,26 @@ continuously and appears in the UI as soon as the application starts. ImageIngestor, processes them with DL Streamer Pipeline Server, and matches them against stored feature vectors in MilvusDB. 
-### Outputs +## Outputs - Matched search results, including metadata, timestamps, confidence scores, and frames -![application interface screenshot](./_assets/imagesearch2.png) +![application interface screenshot](./_images/imagesearch2.png) *Screenshot of the Image-Based Video Search sample application interface displaying search input and matched results* -## Learn More -- [Get Started](./get-started.md) -- [System Requirements](./get-started/system-requirements.md) -- [Release Notes](./release-notes.md) -- [DL Streamer Pipeline Server](https://docs.openedgeplatform.intel.com/dev/edge-ai-libraries/dlstreamer-pipeline-server/index.html) - diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/how-it-works.md b/metro-ai-suite/image-based-video-search/docs/user-guide/overview-architecture.md similarity index 90% rename from metro-ai-suite/image-based-video-search/docs/user-guide/how-it-works.md rename to metro-ai-suite/image-based-video-search/docs/user-guide/overview-architecture.md index 271576b4f2..57e4595b1a 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/how-it-works.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/overview-architecture.md @@ -1,4 +1,4 @@ -# How It Works +# Architecture Overview The Image-Based Video Search Application is a modular reference implementation designed to help developers create vision-based search systems utilizing Vision @@ -18,8 +18,8 @@ The Image-Based Video Search Application is designed to: ### Technical Architecture Diagram -![Technical Architecture Diagram](./_assets/architecture.svg) -*Figure 1: Detailed Architecture of the Image-Based Video Search Application.* +![Technical Architecture Diagram](_images/architecture.svg) *Figure 1: Detailed +Architecture of the Image-Based Video Search Application.* ### Data Flow @@ -30,7 +30,7 @@ The Image-Based Video Search Application is designed to: - **User-provided images for search**: Allows users to upload images or for searching 
within the video streams. -2. **Video Analysis Pipeline**: +1. **Video Analysis Pipeline**: - **Deep Learning Streamer Pipeline Server**: - Consumes RTSP streams for object detection and feature extraction. @@ -40,20 +40,17 @@ The Image-Based Video Search Application is designed to: (Vector DB). - Stores frames for display in search results. -3. **Video Search Pipeline**: +1. **Video Search Pipeline**: - **Feature Matching Microservice**: - Queries DL Streamer Pipeline Server for feature vectors of user-provided images. - Searches MilvusDB to find matching frames. -4. **Output Generation**: +1. **Output Generation**: - Displays matched frames and metadata in the web application. -![application interface screenshot](./_assets/imagesearch2.png) -*Screenshot of the Image-Based Video Search sample application interface displaying search input and matched results* - ## Key Components and Their Roles 1. **Nginx reverse proxy server** diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/release-notes.md b/metro-ai-suite/image-based-video-search/docs/user-guide/release-notes.md index 1d8acf859a..da8a8b9ec7 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/release-notes.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/release-notes.md @@ -1,8 +1,33 @@ -# Release Notes + + +# Release Notes Details about the changes, improvements, and known issues in this release of the application. @@ -15,7 +40,7 @@ application. ## Version 1.0.1 -**Release Date**: 2025-08-20 +**Release Date**: [2025-08-20] - DL Streamer version upgraded to 2025.1.2 - DL Streamer Pipeline Server version upgraded to 3.1.0 @@ -23,7 +48,18 @@ application. ## Version 1.0.0 -**Release Date**: 2025-03-31 +**Release Date**: [2025-03-31] + - **MediaMTX**: This third-party microservice simulates remote video cameras placed in various geographical locations. 
The service can replay videos from diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/support.md b/metro-ai-suite/image-based-video-search/docs/user-guide/support.md new file mode 100644 index 0000000000..8258d64cef --- /dev/null +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/support.md @@ -0,0 +1,49 @@ +# Get Help + +This page provides troubleshooting steps, FAQs, and resources to help you +resolve common issues. + +## Troubleshooting Common Issues + +### 1. Containers Not Starting + +- **Issue**: The application containers fail to start. + +- **Solution**: + + ```bash + docker compose logs + ``` + + Check the logs for errors and resolve dependency issues. + +### 2. Port Conflicts + +- **Issue**: Port conflicts with other running applications. + +- **Solution**: Update the ports section in the Docker Compose file. + +### 3. ibvs-milvusdb container is unhealthy + +- **Issue**: ibvs-milvusdb container fails to start + +- **Solution**: + + Currently, milvusdb does not work with proxy servers. Make sure that the proxies `http_proxy`, `https_proxy` and `no_proxy` are set to empty string in `compose.yml` file + +### 4. 
Empty search results after clicking on `Search Object` + +- **Issue**: Search results are empty after clicking on `Search Object` button + +- **Solution**: + + - Make sure the models are able to detect the objects in the stream correctly + - Make sure you have analysed the stream first to capture the video frames into milvus database + - Make sure you are using the right frame to search the object + - Increase the 'To' timestamp in the search results to accommodate the latest results + + diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started/system-requirements.md b/metro-ai-suite/image-based-video-search/docs/user-guide/system-requirements.md similarity index 97% rename from metro-ai-suite/image-based-video-search/docs/user-guide/get-started/system-requirements.md rename to metro-ai-suite/image-based-video-search/docs/user-guide/system-requirements.md index 46d5d47e4b..e799aa1d4b 100644 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/get-started/system-requirements.md +++ b/metro-ai-suite/image-based-video-search/docs/user-guide/system-requirements.md @@ -13,6 +13,7 @@ help you set up and run the application efficiently. **Hardware Platforms** - 12th Generation Intel® Core™ processor or above with Intel® HD Graphics or , or Intel® Xeon® processor + ## Minimum Requirements | **Component** | **Minimum Requirement** | **Recommended** | |---------------------|---------------------------|--------------------------| @@ -37,4 +38,4 @@ help you set up and run the application efficiently. ## Validation - Ensure all dependencies are installed and configured before proceeding to - [Get Started](../get-started.md). + [Get Started](./get-started.md). 
diff --git a/metro-ai-suite/image-based-video-search/docs/user-guide/troubleshooting.md b/metro-ai-suite/image-based-video-search/docs/user-guide/troubleshooting.md deleted file mode 100644 index f99d48b73a..0000000000 --- a/metro-ai-suite/image-based-video-search/docs/user-guide/troubleshooting.md +++ /dev/null @@ -1,80 +0,0 @@ -# Troubleshooting - -This page provides troubleshooting steps, FAQs, and resources to help you -resolve common issues. If you encounter any problems with the application not addressed here, -check the [GitHub Issues](https://github.com/open-edge-platform/edge-ai-suites/issues) board. -Feel free to file new tickets there (after learning about the guidelines for [Contributing](https://github.com/open-edge-platform/edge-ai-suites/blob/main/CONTRIBUTING.md)). - -## Troubleshooting Common Issues - -1. **Containers Not Starting** - -- **Issue**: The application containers fail to start. - -- **Solution**: - - ```bash - docker compose logs - ``` - - Check the logs for errors and resolve dependency issues. - -2. **Port Conflicts** - -- **Issue**: Port conflicts with other running applications. - -- **Solution**: Update the ports section in the Docker Compose file. - -3. **`ibvs-milvusdb` container is unhealthy** - -- **Issue**: `ibvs-milvusdb` container fails to start. - -- **Solution**: - - Currently, milvusdb does not work with proxy servers. Make sure that the proxies `http_proxy`, `https_proxy` and `no_proxy` are set to empty string in `compose.yml` file. - -4. **Empty search results after clicking on `Search Object`** - -- **Issue**: Search results are empty after clicking on `Search Object` button. 
- -- **Solution**: - - - Make sure the models are able to detect the objects in the stream correctly - - Make sure you have analysed the stream first to capture the video frames into milvus database - - Make sure you are using the right frame to search the object - - Increase the 'To' timestamp in the search results to accommodate the latest results - -5. **Failure to launch `ibvs-app`, `ibvs-featurematching` or `ibvs-streaming` containers** - -- **Issue**: One of the above containers fails to come up. - -- **Solution**: - - Try building the image locally as mentioned in Step 2 of - [Set up and First Use](./get-started.md#set-up-and-first-use) before bringing up the containers. - -## Troubleshooting Helm Deployments - -1. **Helm Chart Not Found** - - - Check if the Helm repository was added: - - ```bash - helm repo list - ``` - -2. **Pods Not Running**: - - - Review pod logs: - - ```bash - kubectl logs {{pod-name}} -n {{namespace}} - ``` - -3. **Service Unreachable**: - - - Confirm the service configuration: - - ```bash - kubectl get svc -n {{namespace}} - ``` diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/README.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/README.md index b21646b2a7..9d1d39794d 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/README.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/README.md @@ -48,10 +48,10 @@ For more details, see [Overview](./docs/user-guide/index.md) ## Learn More -- [How to Deploy with Helm](./docs/user-guide/get-started/deploy-with-helm.md): How to deploy the application using Helm on a Kubernetes cluster. +- [How to Deploy with Helm](./docs/user-guide/how-to-deploy-helm.md): How to deploy the application using Helm on a Kubernetes cluster. - [Application Security Enablements](./docs/user-guide/application-security-enablement.md): Learn how to enable the various security features. 
- [Support and Troubleshooting](./docs/user-guide/troubleshooting.md): Find solutions to common issues and troubleshooting steps. ## License -The application is licensed under the [LIMITED EDGE SOFTWARE DISTRIBUTION LICENSE AGREEMENT](./LICENSE.txt). +The application is licensed under the [LIMITED EDGE SOFTWARE DISTRIBUTION LICENSE AGREEMENT](./LICENSE.txt). \ No newline at end of file diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started.md index 77b3e6d321..f8166e0167 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started.md @@ -153,7 +153,7 @@ instead of `localhost` for external access: Choose one of the following methods to deploy the Smart Intersection Sample Application: -- **[Deploy Using Helm](./get-started/deploy-with-helm.md)**: Use Helm to deploy the application to a Kubernetes cluster for scalable and production-ready deployments. +- **[Deploy Using Helm](./how-to-deploy-helm.md)**: Use Helm to deploy the application to a Kubernetes cluster for scalable and production-ready deployments. 
## Resources @@ -165,8 +165,7 @@ Choose one of the following methods to deploy the Smart Intersection Sample Appl :::{toctree} :hidden: -get-started/system-requirements.md -get-started/deploy-with-helm.md +get-started/system-requirements ::: hide_directive--> \ No newline at end of file diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/how-it-works.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/how-it-works.md index 41950bb0db..eb8fd4df61 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/how-it-works.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/how-it-works.md @@ -28,6 +28,6 @@ This section provides a high-level view of how the application integrates with a - [Get Started](./get-started.md): Follow step-by-step instructions to set up the application. - [System Requirements](./get-started/system-requirements.md): Check the hardware and software requirements for deploying the application. -- [How to Deploy with Helm](./get-started/deploy-with-helm.md): How to deploy the application using Helm on a Kubernetes cluster. -- [Application Security Enablements](./application-security-enablement.md): Learn how to enable the various security features. +- [How to Deploy with Helm](./how-to-deploy-helm.md): How to deploy the application using Helm on a Kubernetes cluster. +- [Application Security Enablements](./docs/user-guide/application-security-enablement.md): Learn how to enable the various security features. - [Support and Troubleshooting](./troubleshooting.md): Find solutions to common issues and troubleshooting steps. 
diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started/deploy-with-helm.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/how-to-deploy-helm.md similarity index 95% rename from metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started/deploy-with-helm.md rename to metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/how-to-deploy-helm.md index bd4bebd3bc..42801f5309 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/get-started/deploy-with-helm.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/how-to-deploy-helm.md @@ -17,7 +17,7 @@ Before You Begin, ensure the following: - **Kubernetes Cluster**: Ensure you have a properly installed and configured Kubernetes cluster. -- **System Requirements**: Verify that your system meets the [minimum requirements](./system-requirements.md). +- **System Requirements**: Verify that your system meets the [minimum requirements](./get-started/system-requirements.md). - **Tools Installed**: Install the required tools: - Kubernetes CLI (kubectl) - Helm 3 or later @@ -229,8 +229,8 @@ kubectl delete storageclass hostpath local-storage standard ## Next Steps -- **[Get Started](../get-started.md)**: Ensure you have completed the initial setup steps before proceeding. -- **[Troubleshooting Helm Deployments](../troubleshooting.md#troubleshooting-helm-deployments)**: Consolidated troubleshooting steps for resolving issues during Helm deployments. +- **[Get Started](./get-started.md)**: Ensure you have completed the initial setup steps before proceeding. +- **[Troubleshooting Helm Deployments](./troubleshooting.md#troubleshooting-helm-deployments)**: Consolidated troubleshooting steps for resolving issues during Helm deployments. 
## Supporting Resources diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/index.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/index.md index 66f2d360cf..89c68f8d98 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/index.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/index.md @@ -42,6 +42,7 @@ This guide is designed to help developers understand the architecture, setup, an :hidden: get-started +how-to-deploy-helm how-it-works how-to-use-gpu-for-inference application-security-enablement diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/security-features/enable_trusted_compute.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/security-features/enable_trusted_compute.md index 93ab27fa95..8f72b36849 100644 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/security-features/enable_trusted_compute.md +++ b/metro-ai-suite/metro-vision-ai-app-recipe/smart-intersection/docs/user-guide/security-features/enable_trusted_compute.md @@ -93,7 +93,7 @@ deployment YAML file. 
Follow the steps mentioned in the official documentation to run the Helm chart: -[Steps to Deploy the Helm Chart](../get-started/deploy-with-helm.md#steps-to-deploy) +[Steps to Deploy the Helm Chart](../how-to-deploy-helm.md) ### Step 6: Verify DL Streamer Launch diff --git a/metro-ai-suite/metro-vision-ai-app-recipe/smart-tolling/docs/user-guide/index.md b/metro-ai-suite/metro-vision-ai-app-recipe/smart-tolling/docs/user-guide/index.md deleted file mode 100644 index ffb7f1927e..0000000000 --- a/metro-ai-suite/metro-vision-ai-app-recipe/smart-tolling/docs/user-guide/index.md +++ /dev/null @@ -1,3 +0,0 @@ -# Smart Tolling - -Placeholder article \ No newline at end of file diff --git a/metro-ai-suite/sensor-fusion-for-traffic-management/deployments/how_to_generate_kitti_format_dataset/requirements.txt b/metro-ai-suite/sensor-fusion-for-traffic-management/deployments/how_to_generate_kitti_format_dataset/requirements.txt index 08ee4ea11a..0f94c9a348 100644 --- a/metro-ai-suite/sensor-fusion-for-traffic-management/deployments/how_to_generate_kitti_format_dataset/requirements.txt +++ b/metro-ai-suite/sensor-fusion-for-traffic-management/deployments/how_to_generate_kitti_format_dataset/requirements.txt @@ -75,6 +75,7 @@ mayavi==4.8.3 mistune==3.1.3 narwhals==1.48.1 nbclient==0.10.2 +nbconvert==7.16.6 nbformat==5.10.4 nest-asyncio==1.6.0 notebook==7.4.4 diff --git a/metro-ai-suite/smart-route-planning-agent/.gitignore b/metro-ai-suite/smart-route-planning-agent/.gitignore index f0b14f5a9b..396b7a76a6 100644 --- a/metro-ai-suite/smart-route-planning-agent/.gitignore +++ b/metro-ai-suite/smart-route-planning-agent/.gitignore @@ -89,4 +89,4 @@ share/python-wheels/ *.egg-info/ .installed.cfg *.egg -MANIFEST +MANIFEST \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/.pre-commit-config.yaml b/metro-ai-suite/smart-route-planning-agent/.pre-commit-config.yaml deleted file mode 100644 index b07cbe9524..0000000000 --- 
a/metro-ai-suite/smart-route-planning-agent/.pre-commit-config.yaml +++ /dev/null @@ -1,42 +0,0 @@ -files: ^metro-ai-suite/smart-route-planning-agent/ -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v6.0.0 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-yaml - - id: check-json - - id: check-toml - - id: debug-statements - - id: check-merge-conflict - - id: name-tests-test - exclude: ^tests/fixtures/ - args: [ --pytest-test-first ] - - id: requirements-txt-fixer - - id: detect-private-key - - id: no-commit-to-branch - - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.19.1 - hooks: - - id: mypy - additional_dependencies: [types-pyyaml, types-requests] - args: [ --ignore-missing-imports ] - exclude: ^metro-ai-suite/smart-route-planning-agent/testing/resources/ - - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.14.14 - hooks: - - id: ruff-check - types_or: [ python, pyi ] - args: [ --fix, --exit-non-zero-on-fix, --ignore-noqa ] - - id: ruff-format - types_or: [ python, pyi ] - - repo: https://github.com/koalaman/shellcheck-precommit - rev: v0.11.0 - hooks: - - id: shellcheck - - repo: https://github.com/astral-sh/uv-pre-commit - rev: 0.9.28 - hooks: - - id: uv-sync - args: ["--locked", "--all-packages"] diff --git a/metro-ai-suite/smart-route-planning-agent/docs/_static/redirect/index.html b/metro-ai-suite/smart-route-planning-agent/docs/_static/redirect/index.html new file mode 100644 index 0000000000..bab7560769 --- /dev/null +++ b/metro-ai-suite/smart-route-planning-agent/docs/_static/redirect/index.html @@ -0,0 +1 @@ + diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/Overview.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/Overview.md deleted file mode 100644 index f41662d3af..0000000000 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/Overview.md +++ /dev/null @@ -1,16 +0,0 @@ -# Smart Route Planning Agent - -| **STATUS** | Work in 
Progress | -|------------| ------------------| - -This application uses AI Agent to analyze a route between given source and destination. It communicates with other agents to fetch live analysis reports for traffic intersections found along all feasible routes between the source and destination. Subsequently, the agent finds an optimum route in real-time which is likely to be free from any possible incidents (like congestion, weather, roadblocks, accidents etc.). - -

- Architecture -

- -## Supporting Resources - -- [Get Started Guide](get-started.md) -- [API Reference](api-reference.md) -- [System Requirements](system-requirements.md) diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/_images/ITS_architecture.png b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/_assets/ITS_architecture.png similarity index 100% rename from metro-ai-suite/smart-route-planning-agent/docs/user-guide/_images/ITS_architecture.png rename to metro-ai-suite/smart-route-planning-agent/docs/user-guide/_assets/ITS_architecture.png diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/how-to-build-from-source.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/build-from-source.md similarity index 72% rename from metro-ai-suite/smart-route-planning-agent/docs/user-guide/how-to-build-from-source.md rename to metro-ai-suite/smart-route-planning-agent/docs/user-guide/build-from-source.md index 82c61097d4..be2d6b8cb0 100644 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/how-to-build-from-source.md +++ b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/build-from-source.md @@ -1,4 +1,4 @@ -# How to Build from Source +# Build from Source | **STATUS** | Work in Progress | |------------| ------------------| diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/environment-variables.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/environment-variables.md index 329223fc9f..9adfc06829 100644 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/environment-variables.md +++ b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/environment-variables.md @@ -1,6 +1,6 @@ # Environment Variables -This document provides comprehensive information about all environment variables used by the Smart Traffic Intelligence Agent. +This section explains the environment variables used by the Smart Route Planning Agent. 
| **STATUS** | Work in Progress | |------------| ------------------| diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/get-started.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/get-started.md index 533fe6a974..4654aacd78 100644 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/get-started.md +++ b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/get-started.md @@ -3,33 +3,31 @@ | **STATUS** | Work in Progress | |------------| ------------------| -This application uses AI Agent to analyze a route between given source and destination. It communicates with other agents to fetch live analysis reports for traffic intersections found along all feasible routes between the source and destination. Subsequently, the agent finds an optimum route in real-time which is likely to be free from any possible incidents (like congestion, weather, roadblocks, accidents etc.). - ## Prerequisites Before you begin, ensure the following: -- **System Requirements**: Verify that your system meets the [minimum requirements](./system-requirements.md). -- **Docker Installed**: Install Docker. For installation instructions, see [Get Docker](https://docs.docker.com/get-docker/). +- **System requirements**: Verify that your system meets the [minimum requirements](./system-requirements.md). + +- **Docker platform**: Install Docker platform. For installation instructions, see [Get Docker](https://docs.docker.com/get-docker/). -This guide assumes basic familiarity with Docker commands and terminal usage. If you are new to Docker, see [Docker Documentation](https://docs.docker.com/) for an introduction. +- You are familiar with Docker commands and using the terminal. If you are new to Docker platform, see [Docker Documentation](https://docs.docker.com/) for an introduction. 
## Quick Start with Setup Script | **STATUS** | Work in Progress | |------------| ------------------| +Intel recommends using the unified setup script `setup.sh` that configures, builds, deploys, and manages the Smart Route Planning Agent. -The Smart Route Planning Agent includes a unified setup script (`setup.sh`) that combines both setup and orchestration functionality. It handles environment configuration, building, deployment, and ongoing service management. This is the **recommended approach** for getting started and managing the services. - -### 1. Clone the Repository +1. Clone the repository: ```bash git clone https://github.com/open-edge-platform/edge-ai-suites.git cd edge-ai-suites/metro-ai-suite/smart-route-planning-agent ``` -### 2. Run the Complete Setup +2. Run the complete setup: The setup script provides several options. For a complete setup (recommended for first-time users): @@ -37,9 +35,9 @@ The setup script provides several options. For a complete setup (recommended for source setup.sh --setup ``` -### 3. Alternative Setup Options +3. Run alternative setup options -For more granular control, the setup script provides individual commands: +For a more granular control, run these commands: ```bash @@ -53,16 +51,15 @@ source setup.sh --stop source setup.sh --restart ``` +## Manual Setup for Advanced Users -## Manual Setup (Advanced Users) - -For advanced users who need more control over the configuration, you can manually set up the stack using Docker Compose. +For advanced users who need more control over the configuration, you can set up the stack manually using Docker Compose tool. ### Manual Environment Configuration -If you prefer to manually configure environment variables instead of using the setup script, see the [Environment Variables Guide](./environment-variables.md) for complete details. 
+If you prefer to configure environment variables manually instead of using the setup script, see the [Environment Variables Guide](./environment-variables.md) for details. -### Manual Docker Compose Deployment +### Manual Docker Compose Tool Deployment | **STATUS** | Work in Progress | |------------| ------------------| diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/index.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/index.md new file mode 100644 index 0000000000..c5bd3a1760 --- /dev/null +++ b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/index.md @@ -0,0 +1,30 @@ +# Smart Route Planning Agent + +| **STATUS** | Work in Progress | +|------------| ------------------| + +The Smart Route Planning Agent analyzes the route between the given source and destination, and communicates with other agents to fetch live analysis reports for traffic intersections found along feasible routes between the source and destination. Subsequently, the agent finds an optimum route in real-time, which is likely free from any possible incidents such as congestion, weather, roadblocks, and accidents. + +

+ Smart Route Planning Agent Architecture +

+ +## Learn More + +- [Get Started Guide](get-started.md) +- [API Reference](api-reference.md) +- [System Requirements](system-requirements.md) + + \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/index.rst b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/index.rst deleted file mode 100644 index 943bfaa425..0000000000 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. toctree:: - :hidden: - - Overview - -.. toctree:: - - system-requirements - get-started - -.. toctree:: - :caption: How to - - how-to-build-from-source - -.. toctree:: - :caption: References - - environment-variables - api-reference - -.. toctree:: - - release-notes diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/release-notes.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/release-notes.md index b6d8212bbf..c935328b30 100644 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/release-notes.md +++ b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/release-notes.md @@ -7,7 +7,7 @@ ### Features - **Real-time Traffic Analysis**: Comprehensive directional traffic density monitoring with MQTT integration -- **VLM Integration**: Vision Language Model powered traffic scene analysis with sustained traffic detection +- **VLM Integration**: Vision Language Model (VLM)-powered traffic scene analysis with sustained traffic detection - **Sliding Window Analysis**: 15-second sliding window with 3-second sustained threshold for accurate traffic state detection - **Camera Image Management**: Intelligent camera image retention and coordination between API and VLM services - **RESTful API**: Complete HTTP API for traffic summaries, intersection monitoring, and VLM analysis retrieval @@ -23,7 +23,7 @@ ### Technical Specifications -- **Supported Languages**: Python 3.10+ +- **Supported Languages**: Python programming version 3.10 or higher - 
**Architecture**: Microservice with Docker containerization -- **Dependencies**: FastAPI, MQTT client, aiohttp, structlog -- **External Integrations**: MQTT brokers, VLM OpenVINO serving, camera image streams +- **Dependencies**: FastAPI, MQTT client, aiohttp, and structlog +- **External Integrations**: MQTT brokers, VLM OpenVINO serving, and camera image streams diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/system-requirements.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/system-requirements.md index 8d209ac026..26b8aea1ba 100644 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/system-requirements.md +++ b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/system-requirements.md @@ -1,36 +1,34 @@ # System Requirements -This page provides detailed hardware, software, and platform requirements to help you set up and run the microservice efficiently. +This section shows the hardware, software, and platform requirements to help you set up and run Smart Route Planning Agent efficiently. -## Supported Platforms +The agent currently supports CPU- and GPU-based runs, and runs in the context of video summary pipeline. Hence, the system requirements is as per the documentation in the sample application. -This microservice currently supports CPU and GPU based runs. This microservice is intended to run in the context of video summary pipeline. Hence, supported platform, OS configuration etc. is as per the documentation in the sample application. The documentation here, hence, does not provide separate requirements. +## Supported Operating Systems -**Operating Systems** +- As per the sample application documentation. -- As per sample application documentation. +## Hardware Requirements -**Hardware Platforms** +- As per the sample application documentation. -- As per sample application documentation. - -## Minimum Requirements - -- As per sample application documentation. 
 ## Software Requirements -**Required Software**: +- Docker engine version 24.0 +- Python programming language version 3.10 +- Docker Compose tool: [Installation Guide](https://docs.docker.com/compose/install/). + +## Other Requirements -- Docker 24.0 -- Python 3.10 -- Docker Compose: [Installation Guide](https://docs.docker.com/compose/install/). +- As per the sample application documentation. ## Validation - Ensure all required software are installed and configured before proceeding to [Get Started](./get-started.md). -## Supporting Resources +## Learn More -- [Overview](Overview.md) +- [Overview](index.md) - [API Reference](api-reference.md) diff --git a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/traffic-data-analysis-workflow.md b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/traffic-data-analysis-workflow.md index 0aff954617..033433969f 100644 --- a/metro-ai-suite/smart-route-planning-agent/docs/user-guide/traffic-data-analysis-workflow.md +++ b/metro-ai-suite/smart-route-planning-agent/docs/user-guide/traffic-data-analysis-workflow.md @@ -1,10 +1,6 @@ # Traffic Data Analysis Workflow -## Overview - -The Scene Intelligence microservice employs a sophisticated multi-stage traffic analysis pipeline that combines real-time directional traffic monitoring with AI-powered Vision Language Model (VLM) analysis. This document explains the complete workflow, configuration parameters, and data retention policies. - -## Table of Contents +The Smart Route Planning Agent employs a multi-stage traffic analysis pipeline that combines real-time directional traffic monitoring with AI-powered Vision Language Model (VLM) analysis. This section shows the complete workflow, configuration parameters, and data retention policies. 
- [Traffic Analysis Pipeline](#traffic-analysis-pipeline) - [VLM Integration and Triggers](#vlm-integration-and-triggers) @@ -55,9 +51,9 @@ The VLM system analyzes high-density traffic situations using a multi-gated appr ## VLM Integration and Triggers -### Trigger Conditions (ALL must be true) +The following are the trigger conditions, in which all must be true: -#### 1. **Threshold Gate**: Traffic Density Exceeds Threshold +1. **Threshold Gate**: traffic density exceeds the threshold: ```python # Configurable via environment or config file @@ -77,7 +73,7 @@ directional_densities = { - **Environment**: `HIGH_DENSITY_THRESHOLD=5.0` - **Config File**: `config/vlm_config.json` → `traffic_analysis.high_density_threshold` -#### 2. **State Change Gate**: Normal → High Traffic Transition +2. **State Change Gate**: normal to high-traffic transition: ```python # Only triggers on state transitions, not continuous high traffic @@ -86,9 +82,9 @@ if previous_state == NORMAL and current_state == HIGH: # Continue to next gate ``` -**Purpose**: Prevents continuous VLM calls during sustained high traffic periods. +**Purpose**: Prevents continuous VLM calls during sustained high-traffic periods. -#### 3. **Persistence Gate**: Sustained High Traffic Duration +3. **Persistence Gate**: sustained high-traffic duration: ```python # Traffic must remain high for minimum duration @@ -104,7 +100,7 @@ if time_in_high_state >= minimum_duration: - **Environment**: `MINIMUM_DURATION_FOR_CONSISTENTLY_HIGH_TRAFFIC_SECONDS=30` - **Config File**: `traffic_analysis.minimum_duration_for_consistently_high_traffic_seconds` -#### 4. **Cooldown Gate**: Time Since Last Analysis +4. **Cooldown Gate**: time since the last analysis: ```python # Prevent frequent VLM calls for same intersection @@ -120,7 +116,7 @@ if time_since_last >= cooldown_minutes * 60: - **Environment**: `VLM_COOLDOWN_MINUTES=1` - **Config File**: `traffic_analysis.vlm_cooldown_minutes` -#### 5. 
**Concurrency Gate**: No Pending Analysis +5. **Concurrency Gate**: no pending analysis: ```python # Prevent duplicate requests for same intersection @@ -159,7 +155,7 @@ Release Semaphore Slot ### Implementation Details -**Purpose**: Solve timing issues where traffic changes before VLM completes, making analysis invisible to users. +**Purpose**: Solve timing issues where traffic changes before the VLM completes, making analysis invisible to users. #### 1. **Sliding Window Configuration** @@ -185,10 +181,10 @@ traffic_window: List[TrafficWindow] = [] **Process**: -1. Every traffic update adds entry to window +1. Every traffic update adds an entry to the window 2. Remove entries older than 15 seconds 3. Analyze window for sustained periods ≥ 3 seconds -4. Trigger VLM with traffic context from sustained period +4. Trigger VLM with traffic context from a sustained period #### 3. **Sustained Traffic Detection** @@ -245,8 +241,8 @@ Scene Intelligence Service VLM Microservice (4 Workers) **Benefits**: -- Up to 4 intersections analyzed simultaneously -- Each intersection has independent state tracking +- Up to 4 intersections are analyzed simultaneously +- Each intersection has an independent state tracking - Optimal VLM microservice utilization - No blocking between intersections @@ -341,9 +337,9 @@ OV_CONFIG={"PERFORMANCE_HINT": "LATENCY"} # OpenVINO optimization **Features**: -- Environment variable substitution using `${VAR_NAME}` syntax -- Fallback values when environment variables not set -- JSON validation on service startup +- Enable environment variable substitution using the `${VAR_NAME}` syntax +- Enable fallback values when environment variables are not set +- Enable JSON validation on service startup ### Hardcoded Values (Priority: Low) @@ -375,9 +371,9 @@ max_age_hours = 2 # Default cleanup threshold #### Storage Duration -- **In-Memory**: Stored indefinitely until service restart +- **In-Memory**: Stored indefinitely until the service restarts - **API 
Visibility**: 20 minutes after analysis completion -- **Cleanup**: Manual via `clear_old_analyses()` method (2-hour default, never called automatically) +- **Cleanup**: Manual via the `clear_old_analyses()` method (2-hour default, never called automatically) #### Overwrite Behavior @@ -412,9 +408,9 @@ class VLMAnalysisResult: #### Storage Logic 1. **VLM Analysis**: Images stored with analysis results -2. **Same Retention**: Images persist as long as VLM analysis exists +2. **Same Retention**: Images persist as long as the VLM analysis exists 3. **API Priority**: Serve VLM-stored images first, then fresh images -4. **Overwrite**: Images replaced when new VLM analysis generated +4. **Overwrite**: Images are replaced when a new VLM analysis is generated #### Storage Format @@ -450,15 +446,15 @@ class IntersectionTrafficState: #### Persistence -- **Lifetime**: Exists until service restart -- **Reset**: Only when service restarted or intersection removed +- **Lifetime**: Exists until the service restarts +- **Reset**: Only when the service restarted or the intersection removed - **Updates**: Real-time via MQTT traffic data ## API Integration ### Response Enhancement -All traffic endpoints include VLM analysis when available: +All traffic endpoints will include a VLM analysis when there is enough information for the VLM: ```json { @@ -503,7 +499,7 @@ All traffic endpoints include VLM analysis when available: ## Troubleshooting -### VLM Analysis Not Triggering +### VLM Analysis is Not Triggered Check each gate condition: @@ -570,13 +566,13 @@ docker logs scene-intelligence | grep "VLM analysis completed" ## Summary -The Scene Intelligence traffic analysis system provides: +The Smart Route Planning Agent provides: - **Real-time directional traffic monitoring** with configurable thresholds -- **AI-powered traffic analysis** using Vision Language Models for high-density situations +- **AI-powered traffic analysis** using VLMs for high-density situations - **Windowed 
analysis** to solve timing issues and provide traffic context -- **Concurrent processing** supporting multiple simultaneous VLM analyses -- **Flexible configuration** via environment variables and config files +- **Concurrent processing** supports multiple simultaneous VLM analyses +- **Flexible configuration** via environment variables and configuration files - **Image retention** tied to VLM analysis lifecycle - **Comprehensive API integration** with enhanced traffic context diff --git a/metro-ai-suite/smart-route-planning-agent/setup.sh b/metro-ai-suite/smart-route-planning-agent/setup.sh index 4ebb886702..5665faac04 100755 --- a/metro-ai-suite/smart-route-planning-agent/setup.sh +++ b/metro-ai-suite/smart-route-planning-agent/setup.sh @@ -11,7 +11,9 @@ BLUE='\033[0;34m' NC='\033[0m' # No Color # Setting variables for directories used as volume mounts -DOCKER_DIR="src" +SOURCE="src" +SECRETS_DIR="${SOURCE}/secrets" +DOCKER_DIR="docker" COMPOSE_MAIN="${DOCKER_DIR}/compose.yaml" # Function to show help @@ -21,17 +23,17 @@ show_help() { echo -e "-----------------------------------------------------------------" echo "" echo -e "${BLUE}Available Commands:${NC}" - echo -e " ${GREEN}--setup${NC} Build and start the Smart-Route-Planning-Agent container" - echo -e " ${GREEN}--build${NC} Build the Smart-Route-Planning-Agent Docker container" - echo -e " ${GREEN}--run${NC} Start the Smart-Route-Planning-Agent container" - echo -e " ${GREEN}--stop${NC} Stop the running container" - echo -e " ${GREEN}--restart${NC} Restart the Smart-Route-Planning-Agent container" - echo -e " ${GREEN}--help${NC} Show this help message" + echo -e " ${GREEN}setup${NC} Build and start the Smart-Route-Planning-Agent container" + echo -e " ${GREEN}build${NC} Build the Smart-Route-Planning-Agent Docker container" + echo -e " ${GREEN}up${NC} Start the Smart-Route-Planning-Agent container" + echo -e " ${GREEN}down${NC} Stop the running container" + echo -e " ${GREEN}restart${NC} Restart the 
Smart-Route-Planning-Agent container" + echo -e " ${GREEN}help${NC} Show this help message" echo "" echo -e "${BLUE}Quick Start:${NC}" - echo -e " ${YELLOW}source setup.sh --setup${NC} # Build and start the container" - echo -e " ${YELLOW}source setup.sh --build${NC} # Build the container" - echo -e " ${YELLOW}source setup.sh --run${NC} # Start the container" + echo -e " ${YELLOW}source setup.sh setup${NC} # Build and start the container" + echo -e " ${YELLOW}source setup.sh build${NC} # Build the container" + echo -e " ${YELLOW}source setup.sh up${NC} # Start the container" echo -e "-----------------------------------------------------------------" } @@ -41,15 +43,15 @@ check_docker_compose() { echo -e "${RED}Error: Docker is not installed or not in PATH${NC}" return 1 fi - + if ! docker compose version &> /dev/null; then echo -e "${RED}Error: Docker Compose is not available${NC}" return 1 fi } -# Handle --help and argument validation -if [ "$#" -eq 0 ] || [ "$1" = "--help" ]; then +# Handle help and argument validation +if [ "$#" -eq 0 ] || [ "$1" = "help" ]; then show_help if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then exit 0; else return 0; fi fi @@ -57,32 +59,40 @@ fi # Check for valid arguments if [ "$#" -gt 1 ]; then echo -e "${RED}ERROR: Too many arguments provided.${NC}" - echo -e "${YELLOW}Use '--help' for usage information${NC}" + echo -e "${YELLOW}Use 'help' for usage information${NC}" if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then exit 1; else return 1; fi fi +# Export all environment variables # Base configuration -HOST_IP=$(ip route get 1 2>/dev/null | awk '{print $7}') # Fetch the host IP - +export HOST_IP=$(ip route get 1 2>/dev/null | awk '{print $7}') # Fetch the host IP # Fallback to localhost if HOST_IP is empty if [[ -z "$HOST_IP" ]]; then - HOST_IP="127.0.0.1" + export HOST_IP="127.0.0.1" echo -e "${YELLOW}Warning: Could not detect host IP, using fallback: ${HOST_IP}${NC}" fi - -export HOST_IP # Add HOST_IP to no_proxy only if not already present 
[[ $no_proxy != *"${HOST_IP}"* ]] && export no_proxy="${no_proxy},${HOST_IP}" - export TAG=${TAG:-latest} +export REGISTRY_URL=${REGISTRY_URL:-intel} +export PROJECT_NAME=${PROJECT_NAME:-} + # Construct registry path properly to avoid double slashes -if [[ -n "$REGISTRY" ]]; then - export REGISTRY="${REGISTRY%/}/" +if [[ -n "$REGISTRY_URL" && -n "$PROJECT_NAME" ]]; then + # Both are set, combine with single slash + export REGISTRY="${REGISTRY_URL%/}/${PROJECT_NAME%/}/" +elif [[ -n "$REGISTRY_URL" ]]; then + # Only registry URL is set + export REGISTRY="${REGISTRY_URL%/}/" +elif [[ -n "$PROJECT_NAME" ]]; then + # Only project name is set + export REGISTRY="${PROJECT_NAME%/}/" +else + # Neither is set, use empty registry + export REGISTRY="" fi -PROJECT_NAME="routeplanner" - echo -e "${GREEN}Using registry: ${YELLOW}$REGISTRY ${NC}" # Traffic Analysis Configuration @@ -90,8 +100,15 @@ export TRAFFIC_BUFFER_DURATION=${TRAFFIC_BUFFER_DURATION:-60} export LOG_LEVEL=${LOG_LEVEL:-INFO} export DATA_RETENTION_HOURS=${DATA_RETENTION_HOURS:-24} +# Health Check Configuration +export HEALTH_CHECK_INTERVAL=${HEALTH_CHECK_INTERVAL:-30s} +export HEALTH_CHECK_TIMEOUT=${HEALTH_CHECK_TIMEOUT:-10s} +export HEALTH_CHECK_RETRIES=${HEALTH_CHECK_RETRIES:-3} +export HEALTH_CHECK_START_PERIOD=${HEALTH_CHECK_START_PERIOD:-10s} + # AI Route Planner Configuration export AI_ROUTE_PLANNER_PORT=${AI_ROUTE_PLANNER_PORT:-7864} +export AI_ROUTE_PLANNER_DIR=${AI_ROUTE_PLANNER_DIR:-ai-route-planner} echo -e "${GREEN}Environment variables set:${NC}" echo -e " HOST_IP: ${YELLOW}$HOST_IP${NC}" @@ -101,11 +118,12 @@ echo -e " REGISTRY: ${YELLOW}$REGISTRY${NC}" # Function to build Docker images build_images() { echo -e "${BLUE}==> Building Smart-Route-Planning-Agent Docker container...${NC}" - - if docker compose -f "$COMPOSE_MAIN" -p "$PROJECT_NAME" build; then - echo -e "${GREEN}Docker container built successfully!${NC}" + + docker compose -f $COMPOSE_MAIN build + if [ $? 
-eq 0 ]; then + echo -e "${GREEN}Docker container built successfully${NC}" else - echo -e "${RED}Failed to build Docker container!${NC}" + echo -e "${RED}Failed to build Docker container${NC}" return 1 fi } @@ -113,55 +131,60 @@ build_images() { # Function to start the service start_service() { echo -e "${BLUE}==> Starting Smart-Route-Planning-Agent container...${NC}" - - if docker compose -f "$COMPOSE_MAIN" -p "$PROJECT_NAME" up -d; then + + docker compose -f $COMPOSE_MAIN up -d + + if [ $? -eq 0 ]; then echo -e "${GREEN}Smart-Route-Planning-Agent container started successfully!${NC}" echo -e "${BLUE}AI Route Planner UI: ${YELLOW}http://${HOST_IP}:${AI_ROUTE_PLANNER_PORT}${NC}" echo "" echo -e "${BLUE}To follow logs in real-time, run:${NC}" echo -e "${YELLOW}docker compose -f docker/compose.yaml logs -f${NC}" else - echo -e "${RED}Failed to start Smart-Route-Planning-Agent container!${NC}" + echo -e "${RED}Failed to start Smart-Route-Planning-Agent container${NC}" return 1 fi } # Check Docker Compose availability -if ! check_docker_compose; then +check_docker_compose +if [ $? -ne 0 ]; then if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then exit 1; else return 1; fi fi # Main logic based on command case "$1" in - "--setup") + "setup") echo -e "${BLUE}==> Running full setup (build and start)...${NC}" build_images - if build_images; then + if [ $? -eq 0 ]; then start_service else echo -e "${RED}Setup failed during build step${NC}" if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then exit 1; else return 1; fi fi ;; - "--build") + "build") build_images ;; - "--run") + "up") start_service ;; - "--stop") + "down") echo -e "${YELLOW}Stopping Smart-Route-Planning-Agent container...${NC}" - if docker compose -f "$COMPOSE_MAIN" -p "$PROJECT_NAME" down; then + docker compose -f $COMPOSE_MAIN down + if [ $? 
-eq 0 ]; then echo -e "${GREEN}Smart-Route-Planning-Agent container stopped successfully.${NC}" else echo -e "${RED}Failed to stop container${NC}" if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then exit 1; else return 1; fi fi ;; - "--restart") + "restart") echo -e "${BLUE}==> Restarting Smart-Route-Planning-Agent container...${NC}" - if docker compose -f "$COMPOSE_MAIN" -p "$PROJECT_NAME" down; then + docker compose -f $COMPOSE_MAIN down + if [ $? -eq 0 ]; then echo -e "${GREEN}Container stopped successfully${NC}" start_service else @@ -171,7 +194,14 @@ case "$1" in ;; *) echo -e "${RED}Unknown command: $1${NC}" - echo -e "${YELLOW}Use '--help' for usage information${NC}" + echo -e "${YELLOW}Use 'help' for usage information${NC}" if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then exit 1; else return 1; fi ;; esac + +if [ $? -eq 0 ]; then + echo -e "${GREEN}Done!${NC}" +else + echo -e "${RED}Operation failed. Check the logs above for details.${NC}" + if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then exit 1; else return 1; fi +fi \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/Dockerfile b/metro-ai-suite/smart-route-planning-agent/src/Dockerfile index cd80a4cb58..7bf0ecff4c 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/Dockerfile +++ b/metro-ai-suite/smart-route-planning-agent/src/Dockerfile @@ -1,4 +1,4 @@ -# Copyright (C) 2026 Intel Corporation +# Copyright (C) 2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 FROM python:3.12-slim @@ -33,4 +33,4 @@ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ CMD uv run python health_check.py 7860 || exit 1 # Run the application -CMD ["uv", "run", "python", "main.py"] +CMD ["uv", "run", "python", "main.py"] \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/agents/planner_state.py b/metro-ai-suite/smart-route-planning-agent/src/agents/planner_state.py index a9157008d8..376040ac69 100644 --- 
a/metro-ai-suite/smart-route-planning-agent/src/agents/planner_state.py +++ b/metro-ai-suite/smart-route-planning-agent/src/agents/planner_state.py @@ -1,10 +1,6 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - from operator import add from typing import Annotated, List, TypedDict - from config import CongestionLevel, StaticOptimizerName, WeatherStatus from schema import GeoCoordinates, LiveTrafficData @@ -18,21 +14,13 @@ class RouteState(TypedDict): distance: float # Total distance covered by the route -class OptimalRouteState(RouteState, total=False): - """ - Extended route state with optional traffic/weather information. - """ - +class OptimalRouteState(RouteState): traffic_history: CongestionLevel weather_status: WeatherStatus event_name: str -class LiveTrafficState(RouteState, total=False): - """ - Live traffic state with optional fields for traffic details. - """ - +class LiveTrafficState(RouteState): intersection_name: str # Name of the intersection where traffic is being reported timestamp: str # Time of recording the live traffic data location_coordinates: GeoCoordinates # Latitude and Longitude for the traffic @@ -43,12 +31,7 @@ class LiveTrafficState(RouteState, total=False): ] # Base64 encoded images from the intersection's cameras -class RoutePlannerState(TypedDict, total=False): - """ - Main state for the route planner agent. - only returns the keys it wants to update, not the entire state. 
- """ - +class RoutePlannerState(TypedDict): source: str destination: str no_fly_list: Annotated[ @@ -61,6 +44,13 @@ class RoutePlannerState(TypedDict, total=False): ] # List of Route optimizers to be applied live_traffic: LiveTrafficState # Details of live traffic recieved during real-time route optimization is_sub_optimal: bool # Flag to indicate if the optimal route is sub-optimal + is_unique_route: bool # Flag to indicate if only one unique route exists + blocked_routes: List[ + str + ] # List of routes blocked due to correct game moves by user based on actual route issues in the route + blocked_routes_invalid: List[ + str + ] # List of routes blocked due to incorrect game moves by user i.e., setting an incorrect weather or incident in route all_routes_data: List[ LiveTrafficData ] # Complete list of LiveTrafficData for all Routes diff --git a/metro-ai-suite/smart-route-planning-agent/src/agents/route_planner.py b/metro-ai-suite/smart-route-planning-agent/src/agents/route_planner.py index 4559d14dba..bb97755897 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/agents/route_planner.py +++ b/metro-ai-suite/smart-route-planning-agent/src/agents/route_planner.py @@ -1,24 +1,25 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - +from pathlib import Path from typing import List, Optional from langgraph.graph import END, START, StateGraph -from langgraph.graph.state import CompiledStateGraph from agents import RoutePlannerState as State -from agents.planner_state import LiveTrafficState, OptimalRouteState, RouteState from config import ( ADVERSE_WEATHER_CONDITIONS, GPX_DIR, IGNORED_ROUTES, + WEATHER_ISSUE_MAP, + INCIDENT_ISSUE_MAP, CongestionLevel, + IncidentStatus, PlannerNode, StaticOptimizerName, + WeatherStatus, ) from controllers import ( LiveTrafficController, StaticRouteOptimizerFactory, + RouteStatusInterface, ThresholdController, ) from schema import LiveTrafficData, RouteCondition @@ -39,12 +40,12 @@ class 
RoutePlanner: def __init__(self): self.graph = StateGraph(State) - self.all_routes: list[str] = route_files() + self.all_routes: list[Path] = route_files() # Construct all required nodes and edges and compile the graph - self.compiled_graph = self._build_graph() + self.graph = self._build_graph() - self.live_traffic_status_list: list[LiveTrafficState] = [] + self.live_traffic_status_list: list[dict] = [] def _find_new_shortest_available_route( self, source: str, destination: str, no_fly_list: list[str] @@ -89,18 +90,13 @@ def find_direct_route(self, state: State) -> State: logger.info("Finding direct shortest route ...") logger.debug(f"============= State of the state : {state} =============") shortest_route, shortest_distance = self._find_new_shortest_available_route( - state.get("source", ""), - state.get("destination", ""), + state["source"], + state["destination"], state.get("no_fly_list", IGNORED_ROUTES), ) # Update the direct_route dict with required information - direct_route_state: RouteState = { - "route_name": shortest_route, - "distance": shortest_distance, - } - # For optimal_route, use OptimalRouteState (which extends RouteState) - initial_optimal_route: OptimalRouteState = { + direct_route_state = { "route_name": shortest_route, "distance": shortest_distance, } @@ -108,7 +104,7 @@ def find_direct_route(self, state: State) -> State: return { "direct_route": direct_route_state, - "optimal_route": initial_optimal_route, # Initially, optimal route is same as direct route + "optimal_route": direct_route_state, # Initially, optimal route is same as direct route # "static_optimizers": STATIC_ROUTE_OPTIMIZER_STACK, Disabled static optimizers for now "no_fly_list": [*IGNORED_ROUTES], } @@ -121,13 +117,11 @@ def find_optimal_route(self, state: State) -> State: logger.info("Finding optimal routes based on static data ...") route_status: RouteCondition | None = None - static_optimizers = state.get("static_optimizers") - if static_optimizers: - optimizer_name: 
StaticOptimizerName = static_optimizers.pop() - # Get the controller constructor from factory - route_optimizer_constructor = ( - StaticRouteOptimizerFactory.get_optimizer_class(optimizer_name) - ) + if state.get("static_optimizers"): + optimizer_name: StaticOptimizerName = state.get("static_optimizers").pop() + route_optimizer: RouteStatusInterface = StaticRouteOptimizerFactory[ + optimizer_name + ] else: logger.error( "Optimal route node invoked when no static optimizers are available!" @@ -135,42 +129,25 @@ def find_optimal_route(self, state: State) -> State: return state current_optimal_route = state.get("optimal_route", {}) - optimal_route_name: str = current_optimal_route.get("route_name", "") - optimal_distance: float = current_optimal_route.get("distance", 0.0) - - if not optimal_route_name: - logger.error("No optimal route name found in state") - return state - - # Initialize optimal_route_state with current values - optimal_route_state: OptimalRouteState = { - "route_name": optimal_route_name, - "distance": optimal_distance, - } + optimal_route_name = current_optimal_route.get("route_name") + optimal_distance = current_optimal_route.get("distance") temp_parser = MapDataParser(GPX_DIR / optimal_route_name) route_data = temp_parser.get_route_data() for track in route_data["tracks"]: for track_point in track["track_points"]: - # Instantiate the controller and call fetch_route_status - controller_instance = route_optimizer_constructor( + route_status = route_optimizer( track_point["lat"], track_point["lon"] - ) - fetched_status = controller_instance.fetch_route_status() - # Handle different return types from fetch_route_status - if isinstance(fetched_status, list): - route_status = fetched_status[0] if fetched_status else None - else: - route_status = fetched_status + ).fetch_route_status() if route_status: # check if route_status has a required attributes and proceed accordingly if hasattr(route_status, "weather_condition"): if route_status.weather_condition 
in ADVERSE_WEATHER_CONDITIONS: optimal_route_name, optimal_distance = ( self._find_new_shortest_available_route( - state.get("source", ""), - state.get("destination", ""), + state["source"], + state["destination"], state.get("no_fly_list", []), ) ) @@ -187,8 +164,8 @@ def find_optimal_route(self, state: State) -> State: ]: optimal_route_name, optimal_distance = ( self._find_new_shortest_available_route( - state.get("source", ""), - state.get("destination", ""), + state["source"], + state["destination"], state.get("no_fly_list", []), ) ) @@ -205,7 +182,7 @@ def find_optimal_route(self, state: State) -> State: return { "optimal_route": optimal_route_state, - "no_fly_list": [optimal_route_name] if optimal_route_name else [], + "no_fly_list": [optimal_route_name], } def update_optimal_route_realtime(self, state: State) -> State: @@ -221,18 +198,11 @@ def update_optimal_route_realtime(self, state: State) -> State: local_no_fly_list = state.get("no_fly_list", []).copy() # Default values for graph state to be returned if no traffic issues or new optimal routes are found - current_optimal = state.get("optimal_route", {}) - optimal_route_state: OptimalRouteState = { - "route_name": current_optimal.get("route_name", ""), - "distance": current_optimal.get("distance", 0.0), - } - live_traffic_state: LiveTrafficState = { - "route_name": "", - "distance": 0.0, - } + optimal_route_state = state.get("optimal_route", {}) + live_traffic_state = {} # If none of the routes are optimal, we store sub-optimal route here. 
- sub_optimal_route: OptimalRouteState | None = None + sub_optimal_route: dict[str, str] = {} sub_optimal_density: int = 0 # fetch the available live traffic data @@ -241,6 +211,14 @@ def update_optimal_route_realtime(self, state: State) -> State: live_traffic_controller.fetch_route_status() ) + # Storage for valid blocked routes and invalid blocked routes + # Invalid blocked routes are those which are blocked due to incorrect game moves by user on intersections along the route + blocked_routes: list[str] = [] + blocked_routes_invalid: list[str] = [] + # logger.debug(f"Available Intersections: {intersection_list}") + + available_route_count: int = 0 + unique_route: bool = False # Iterate till no new routes are available while True: route_not_optimal: bool = False @@ -249,16 +227,20 @@ def update_optimal_route_realtime(self, state: State) -> State: # Get next available shortest route next_shortest_route_name, next_shortest_distance = ( self._find_new_shortest_available_route( - state.get("source", ""), - state.get("destination", ""), - local_no_fly_list, + state["source"], state["destination"], local_no_fly_list ) ) if not next_shortest_route_name or not next_shortest_distance: + total_blocked_routes = len(blocked_routes) + len(blocked_routes_invalid) + if available_route_count - total_blocked_routes == 1: + unique_route = True + live_traffic_state = {} logger.info("No more alternate routes available.") break + available_route_count += 1 + # Parse the next available shortest route map_parser = MapDataParser(GPX_DIR / next_shortest_route_name) route_data = map_parser.get_route_data() @@ -269,11 +251,19 @@ def update_optimal_route_realtime(self, state: State) -> State: route_data.get("tracks", [{}])[0].get("track_points", []) ) + num_intersections_in_route: int = 0 + intersection_blocked_count_valid: int = ( + 0 # Intersection blocked due to correct game move by user + ) + intersection_blocked_count_invalid: int = ( + 0 # Intersection blocked due to incorrect game 
move by user + ) logger.debug(f"Analyzing route: {next_shortest_route_name}") for i, trackpoint in enumerate(trackpoints): # If route has been found not to be optimal break out of loop - if route_not_optimal: - break + # UPDATE: Disabling for finding all intersections along route irrespective of traffic density + # if route_not_optimal: + # break # Iterate over all routes/intersection found by live traffic controller and proceed with only those which # match the lats and longs of current trackpoint @@ -290,8 +280,38 @@ def update_optimal_route_realtime(self, state: State) -> State: ) <= live_traffic_controller.proximity_factor ): + # Count the number of intersections in the current route + num_intersections_in_route += 1 + + # Verify if traffic status from Intersection API reflects the actual recorded scenario at the intersection if ( - traffic_status.traffic_density + WEATHER_ISSUE_MAP.get(next_shortest_route_name) + == traffic_status.weather_status + or INCIDENT_ISSUE_MAP.get(next_shortest_route_name) + == traffic_status.incident_status + ): + intersection_blocked_count_valid += 1 + elif ( + traffic_status.weather_status != WeatherStatus.CLEAR + or traffic_status.incident_status != IncidentStatus.CLEAR + ): + intersection_blocked_count_invalid += 1 + + logger.debug( + "Getting blocked routes when intersection is found to be in current route ..." 
+ ) + logger.debug(f"Blocked routes valid : {blocked_routes}") + logger.debug( + f"Blocked routes invalid : {blocked_routes_invalid}" + ) + + # Do not try to update sub_optimal_route or live_traffic_state if route is already blocked + if ( + next_shortest_route_name + not in state.get("blocked_routes", []) + and next_shortest_route_name + not in state.get("blocked_routes_invalid", []) + and traffic_status.traffic_density > ThresholdController.TRAFFIC_DENSITY_THRESHOLD ): # If traffic is above threshold, stop looking for more trackpoints in current route @@ -300,7 +320,7 @@ def update_optimal_route_realtime(self, state: State) -> State: ) route_not_optimal = True - # Every route having density greater than threshold and is a "potential" sub-optimal route. + # Every route having density greater than threshold and is a "potential" sub-optimal route. if ( not sub_optimal_route or sub_optimal_density > traffic_status.traffic_density @@ -337,12 +357,67 @@ def update_optimal_route_realtime(self, state: State) -> State: ) break + if ( + 0 + < intersection_blocked_count_valid + intersection_blocked_count_invalid + ): + logger.info( + f"Some intersections in route {next_shortest_route_name} report issues. Considering route as non-optimal." + ) + route_not_optimal = True + + # Remove blocked route traffic details from live_traffic_status_list if present + self.live_traffic_status_list = [ + t + for t in self.live_traffic_status_list + if t.get("route_name") != next_shortest_route_name + ] + + # Discard sub-optimal ad optimal route if it is current route + if ( + sub_optimal_route + and sub_optimal_route.get("route_name") == next_shortest_route_name + ): + sub_optimal_route = {} + + if ( + optimal_route_state + and optimal_route_state.get("route_name") + == next_shortest_route_name + ): + optimal_route_state = {} + + # Keep it in blocked_route_invalid list, as long as at least one intersection is blocked due to incorrect game move by user. 
+ # If all intersections in route are blocked due to correct game move by user, put it in blocked_routes list. + # blocked_route_invalid or blocked_route list required to : + # 1. Color the route yellow or red, respectively on map UI + # 2. Refrain the agent from taking this route again in current iteration + if intersection_blocked_count_valid == num_intersections_in_route: + blocked_routes.append(next_shortest_route_name) + if next_shortest_route_name in blocked_routes_invalid: + blocked_routes_invalid.remove(next_shortest_route_name) + else: + blocked_routes_invalid.append(next_shortest_route_name) + if next_shortest_route_name in blocked_routes: + blocked_routes.remove(next_shortest_route_name) + + else: + # If in some other iterations different intersection_blocked_count zero out, remove route from blocked states. + if next_shortest_route_name in blocked_routes: + blocked_routes.remove(next_shortest_route_name) + if next_shortest_route_name in blocked_routes_invalid: + blocked_routes_invalid.remove(next_shortest_route_name) + + logger.debug("getting blocked routes when current route analysis done ...") + logger.debug(f"Blocked routes valid : {blocked_routes}") + logger.debug(f"Blocked routes invalid : {blocked_routes_invalid}") + if i == len(trackpoints) - 1 and not route_not_optimal: # If we reached the last trackpoint without finding high traffic, consider route to be optimal logger.info(f"Route {next_shortest_route_name} is optimal.") # Potential (Sub-Optimal Route) Wasted. Go for the best route, when you have it. Get rid of the second best. 
- sub_optimal_route = None + sub_optimal_route = {} # Update the optimal_route_state for the graph state optimal_route_state = { @@ -355,46 +430,34 @@ def update_optimal_route_realtime(self, state: State) -> State: # Add current route to local no_fly_list and try next shortest route if any local_no_fly_list.append(next_shortest_route_name) - # If live traffic status (the issues in traffic) is for same route as that of sub_optimal_route, then pick the live - # traffic status of previous route. (Makes sure, the intersection marked red is never present in the current route selected) + # If live traffic status (the issues in traffic) is for same route as that of sub_optimal_route + # pick the live traffic status of previous route if ( sub_optimal_route and self.live_traffic_status_list - and sub_optimal_route.get("route_name") - == live_traffic_state.get("route_name") + and sub_optimal_route["route_name"] == live_traffic_state.get("route_name") ): logger.info( "Picking previous live traffic status as current optimal route is sub-optimal" ) # Picks second last entry from list if num of entries > 1 else picks the only entry available. 
- prev_traffic = self.live_traffic_status_list[ + live_traffic_state = self.live_traffic_status_list[ len(self.live_traffic_status_list) - 2 ] - # Update live_traffic_state with previous traffic data - prev_location = prev_traffic.get("location_coordinates") - live_traffic_state = { - "route_name": prev_traffic.get("route_name", ""), - "distance": prev_traffic.get("distance", 0.0), - "intersection_name": prev_traffic.get("intersection_name", ""), - "timestamp": prev_traffic.get("timestamp", ""), - "traffic_density": prev_traffic.get("traffic_density", 0), - } - # Only add location_coordinates if it exists - if prev_location is not None: - live_traffic_state["location_coordinates"] = prev_location return { - "optimal_route": sub_optimal_route - if sub_optimal_route - else optimal_route_state, + "optimal_route": sub_optimal_route or optimal_route_state, "live_traffic": live_traffic_state, "is_sub_optimal": bool(sub_optimal_route), + "blocked_routes": blocked_routes, + "blocked_routes_invalid": blocked_routes_invalid, + "is_unique_route": unique_route, "all_routes_data": all_routes_data, } def _should_rerun_static_route_optimizers(self, state: State) -> bool: """Re-run static route optimizers until optimizer stack is empty""" - return len(state.get("static_optimizers", [])) > 0 + return len(state["static_optimizers"]) > 0 def _route_optimizers_selector(self, state: State) -> str: """ @@ -404,13 +467,13 @@ def _route_optimizers_selector(self, state: State) -> str: if not state.get("direct_route"): return PlannerNode.DIRECT.value # if static optimizers are available, run static optimization node - elif state.get("static_optimizers", []): + elif state.get("static_optimizers"): return PlannerNode.OPTIMAL.value # Otherwise run realtime route optimization node else: return PlannerNode.REALTIME.value - def _build_graph(self) -> CompiledStateGraph: + def _build_graph(self) -> StateGraph: """Builds the state graph using different nodes and edges.""" # Added all three tools as 
nodes in Graph @@ -454,12 +517,12 @@ def plan_route( logger.info(f"Planning route from {source} to {destination}") - current_state: State = {"source": source, "destination": destination} + current_state = {"source": source, "destination": destination} if previous_state: - current_state = {**current_state, **previous_state} + current_state.update(previous_state) # Execute the graph to find the best route - route_detail = self.compiled_graph.invoke(current_state) + route_detail = self.graph.invoke(current_state) return route_detail diff --git a/metro-ai-suite/smart-route-planning-agent/src/compose.yaml b/metro-ai-suite/smart-route-planning-agent/src/compose.yaml index c2fe6237e3..0cd1db6cf4 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/compose.yaml +++ b/metro-ai-suite/smart-route-planning-agent/src/compose.yaml @@ -6,7 +6,7 @@ networks: driver: bridge services: - agent: + route-planner-agent: image: ${REGISTRY:-}smart-route-planning-agent:${TAG:-latest} build: context: . @@ -17,17 +17,22 @@ services: ports: - "${AI_ROUTE_PLANNER_PORT:-7864}:7860" environment: + # Proxy settings - http_proxy=${http_proxy} - https_proxy=${https_proxy} - no_proxy=${no_proxy},${HOST_IP} - NO_PROXY=${no_proxy},${HOST_IP} - HTTP_PROXY=${http_proxy} - HTTPS_PROXY=${https_proxy} + # Application settings - GRADIO_SERVER_NAME=0.0.0.0 - GRADIO_SERVER_PORT=7860 - PYTHONPATH=/app + # Scene Intelligence API Configuration + - SI_API_BASE=http://${HOST_IP}:${SCENE_INTELLIGENCE_PORT:-8082} volumes: - ./data/config.json:/app/config.json:ro + - ./data/game.json:/app/game.json:ro networks: - routeplanner healthcheck: @@ -37,3 +42,4 @@ services: retries: 3 start_period: 30s restart: unless-stopped + diff --git a/metro-ai-suite/smart-route-planning-agent/src/config.py b/metro-ai-suite/smart-route-planning-agent/src/config.py index 7d31526045..f8a5dfb226 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/config.py +++ b/metro-ai-suite/smart-route-planning-agent/src/config.py @@ -1,6 
+1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - from enum import Enum from pathlib import Path @@ -29,6 +26,15 @@ ROUTE_STATUS_DIR: Path = Path(__file__).parent / "data" / "csv" CONFIG_FILE: Path = Path(__file__).parent / "data" / "config.json" +# Real-time traffic API endpoint +# Get the API BASE from env var or a default value is picked +# SCENE_INTELLIGENCE_API_BASE = os.getenv("SI_API_BASE", "http://localhost:8082") +# SCENE_INTELLIGENCE_ENDPOINTS = { +# "traffic_summary": "/api/v1/traffic/directional/summary", +# "update_threshold": "/api/v1/config/vlm/threshold" +# } +# UPDATE : API Endpoints and Base now come from config file + class CongestionLevel(Enum): LOW = "Low" @@ -56,11 +62,6 @@ class WeatherStatus(Enum): FIRE = "Roadside Fire" FLOOD = "Flash Floods" - # Add a generic handler here so that any unknown weather status maps to CLEAR - @classmethod - def _missing_(cls, _): - return cls.CLEAR - # Weather conditions that trigger alternate route search ADVERSE_WEATHER_CONDITIONS = [ @@ -78,10 +79,9 @@ def _missing_(cls, _): } INCIDENT_ISSUE_MAP: dict[str, IncidentStatus] = { - "berkeley-sanbruno.gpx": IncidentStatus.MAINTENANCE, + "berkeley-sanbruno.gpx": IncidentStatus.MAINTENANCE, } - class StaticOptimizerName(Enum): """ An Enum to id all static route optimizers diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/__init__.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/__init__.py index 1b804c2296..4d8f62d737 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/__init__.py +++ b/metro-ai-suite/smart-route-planning-agent/src/controllers/__init__.py @@ -1,16 +1,10 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - +from .static_optimizer_factory import StaticRouteOptimizerFactory from .live_traffic import LiveTrafficController from .planned_events import PlannedEventsController -from .route_interface import RouteStatusInterface -from 
.static_optimizer_factory import ( - RouteOptimizerConstructor, - StaticRouteOptimizerFactory, -) -from .threshold import ThresholdController from .traffic_trends import TrafficTrendsController from .weather_report import WeatherReportController +from .route_interface import RouteStatusInterface +from .threshold import ThresholdController __all__ = [ "PlannedEventsController", @@ -19,6 +13,5 @@ "LiveTrafficController", "ThresholdController", "RouteStatusInterface", - "RouteOptimizerConstructor", "StaticRouteOptimizerFactory", ] diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/live_traffic.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/live_traffic.py index 108c84a7c7..dc714b3d93 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/live_traffic.py +++ b/metro-ai-suite/smart-route-planning-agent/src/controllers/live_traffic.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import requests from typing import Optional, List @@ -69,22 +66,22 @@ def fetch_route_status(self) -> List[LiveTrafficData]: logger.debug( f"Sending request to Intersection API: {host}{api_endpoint}" ) - http_response = requests.get(f"{host}{api_endpoint}") - http_response.raise_for_status() + response = requests.get(f"{host}{api_endpoint}") + response.raise_for_status() # Raise an exception for HTTP errors # Parse the response - api_responses.append(http_response.json()) + api_responses.append(response.json()) except requests.RequestException as e: logger.error( f"Error fetching data from intersection at {host}: {e}" ) # List to store the final response as list of LiveTrafficData - live_traffic_intersection_records: list[LiveTrafficData] = [] + live_traffic_intersection_records = [] # Look for intersections that match our current coordinates - for response_data in api_responses: + for response in api_responses: # Check if intersection data is present - intersection_data = 
response_data.get("data", {}) + intersection_data = response.get("data", {}) if not intersection_data: continue @@ -101,10 +98,10 @@ def fetch_route_status(self) -> List[LiveTrafficData]: traffic_density = intersection_data.get("total_density", 0) # Get weather and incident status if available - weather_status = response_data.get("weather_data", {}).get( + weather_status = response.get("weather_data", {}).get( "short_forecast", WeatherStatus.CLEAR ) - incident_status = response_data.get("incident", {}).get( + incident_status = response.get("incident", {}).get( "incident_type", IncidentStatus.CLEAR ) diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/planned_events.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/planned_events.py index 90e6329b7d..da027311d1 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/planned_events.py +++ b/metro-ai-suite/smart-route-planning-agent/src/controllers/planned_events.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import csv from typing import Optional diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/route_interface.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/route_interface.py index 957fa868a3..4d4c16e879 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/route_interface.py +++ b/metro-ai-suite/smart-route-planning-agent/src/controllers/route_interface.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - from abc import ABC, abstractmethod from typing import Optional, List diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/static_optimizer_factory.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/static_optimizer_factory.py index e78f6d591b..f39287a074 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/static_optimizer_factory.py +++ 
b/metro-ai-suite/smart-route-planning-agent/src/controllers/static_optimizer_factory.py @@ -1,48 +1,14 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Callable - from config import StaticOptimizerName as route_optimizer from .planned_events import PlannedEventsController -from .route_interface import RouteStatusInterface from .traffic_trends import TrafficTrendsController from .weather_report import WeatherReportController """ StaticRouteOptimizerFactory maps route optimizer names to their respective controller classes. """ - -# Type alias for a callable that creates a RouteStatusInterface instance -RouteOptimizerConstructor = Callable[[float, float], RouteStatusInterface] - - -class StaticRouteOptimizerFactory: - """A factory class to get static route optimizer controller classes based on optimizer names.""" - - _factory: dict[route_optimizer, RouteOptimizerConstructor] = { - route_optimizer.TRAFFIC: TrafficTrendsController, - route_optimizer.WEATHER: WeatherReportController, - route_optimizer.PLANNED_EVENTS: PlannedEventsController, - } - - @classmethod - def get_optimizer_class( - cls, optimizer_name: route_optimizer - ) -> RouteOptimizerConstructor: - """Get the optimizer constructor for the given optimizer name. - - Args: - optimizer_name: The name of the optimizer to get. - - Returns: - A callable that creates a RouteStatusInterface instance when called with (lat, lon). - - Raises: - ValueError: If no controller is found for the given optimizer name. 
- """ - concrete_class = cls._factory.get(optimizer_name) - if concrete_class is None: - raise ValueError(f"No controller found for optimizer: {optimizer_name}") - return concrete_class +StaticRouteOptimizerFactory = { + route_optimizer.TRAFFIC: TrafficTrendsController, + route_optimizer.WEATHER: WeatherReportController, + route_optimizer.PLANNED_EVENTS: PlannedEventsController, +} diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/threshold.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/threshold.py index f9f06d3a99..ae4d3f2568 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/threshold.py +++ b/metro-ai-suite/smart-route-planning-agent/src/controllers/threshold.py @@ -1,5 +1,4 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 +from typing import Dict, Any from utils.logging_config import get_logger @@ -11,9 +10,41 @@ class ThresholdController: Controller for updating traffic threshold values via the Scene Intelligence API. """ - TRAFFIC_DENSITY_THRESHOLD: int = 10 + TRAFFIC_DENSITY_THRESHOLD: int = 5 def __init__(self): # self.api_base = SCENE_INTELLIGENCE_API_BASE # self.threshold_endpoint = SCENE_INTELLIGENCE_ENDPOINTS["update_threshold"] pass + + def update_threshold(self, threshold_value: int) -> Dict[str, Any]: + """ + Update the traffic density threshold value in the Scene Intelligence API. 
+ + Args: + threshold_value (int): The new threshold value (1-15) + + Returns: + Dict[str, Any]: The API response containing status information + """ + # try: + # logger.info(f"Updating traffic density threshold to {threshold_value}...") + + # ThresholdController.TRAFFIC_DENSITY_THRESHOLD = threshold_value + + # api_url = f"{self.api_base}{self.threshold_endpoint}" + # payload = { + # "threshold": threshold_value + # } + + # response = requests.put(api_url, json=payload) + # response.raise_for_status() + + # data = response.json() + # logger.info(f"Successfully updated traffic density threshold to {threshold_value}") + # return data + + # except Exception as e: + # logger.error(f"Error updating threshold value: {e}") + # return {"error": str(e), "success": False} + pass diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/traffic_trends.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/traffic_trends.py index d66236edc0..0f4e5b8c01 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/traffic_trends.py +++ b/metro-ai-suite/smart-route-planning-agent/src/controllers/traffic_trends.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import csv from typing import Optional diff --git a/metro-ai-suite/smart-route-planning-agent/src/controllers/weather_report.py b/metro-ai-suite/smart-route-planning-agent/src/controllers/weather_report.py index 33dad549ae..160b329d12 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/controllers/weather_report.py +++ b/metro-ai-suite/smart-route-planning-agent/src/controllers/weather_report.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import csv from typing import Optional diff --git a/metro-ai-suite/smart-route-planning-agent/src/data/config.json b/metro-ai-suite/smart-route-planning-agent/src/data/config.json index 0037838f6c..c9fd0ca764 100644 --- 
a/metro-ai-suite/smart-route-planning-agent/src/data/config.json +++ b/metro-ai-suite/smart-route-planning-agent/src/data/config.json @@ -2,13 +2,40 @@ "api_endpoint": "/api/v1/traffic/current?images=false", "api_hosts": [ { - "host": "http://localhost:8081" + "name": "Intersection-X", + "host": "http://10.0.1.135:8081" }, { - "host": "http://localhost:8082" + "name": "Intersection-X", + "host": "http://10.0.1.154:8081" }, { - "host": "http://localhost:8083" + "name": "Intersection-X", + "host": "http://10.0.1.24:8081" + }, + { + "name": "Intersection-X", + "host": "http://10.0.1.235:8081" + }, + { + "name": "Intersection-X", + "host": "http://10.0.2.124:8081" + }, + { + "name": "Intersection-X", + "host": "http://10.0.2.145:8081" + }, + { + "name": "Intersection-X", + "host": "http://10.0.2.164:8081" + }, + { + "name": "Intersection-X", + "host": "http://10.0.2.186:8081" + }, + { + "name": "Intersection-X", + "host": "http://10.0.2.120:8081" } ] -} +} \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/data/game.json b/metro-ai-suite/smart-route-planning-agent/src/data/game.json new file mode 100644 index 0000000000..11cae1492f --- /dev/null +++ b/metro-ai-suite/smart-route-planning-agent/src/data/game.json @@ -0,0 +1,125 @@ +{ + "fire_emojis": [ + { + "id": 1, + "emoji": "🔥", + "latitude": 37.735341, + "longitude": -122.116071, + "label": "Fire Zone 1" + }, + { + "id": 2, + "emoji": "🔥", + "latitude": 37.436072, + "longitude": -121.843004, + "label": "Fire Zone 2" + }, + { + "id": 3, + "emoji": "🔥", + "latitude": 37.672323, + "longitude": -121.966501, + "label": "Fire Zone 3" + }, + { + "id": 4, + "emoji": "🔥", + "latitude": 37.762145, + "longitude": -122.125834, + "label": "Fire Zone 4" + }, + { + "id": 5, + "emoji": "🔥", + "latitude": 37.718522, + "longitude": -122.089316, + "label": "Fire Zone 5" + }, + { + "id": 6, + "emoji": "🔥", + "latitude": 37.462891, + "longitude": -121.856742, + "label": "Fire Zone 6" + }, + { + "id": 
7, + "emoji": "🔥", + "latitude": 37.411234, + "longitude": -121.828156, + "label": "Fire Zone 7" + }, + { + "id": 8, + "emoji": "🔥", + "latitude": 37.697845, + "longitude": -121.981274, + "label": "Fire Zone 8" + }, + { + "id": 9, + "emoji": "🔥", + "latitude": 37.647912, + "longitude": -121.952318, + "label": "Fire Zone 39" + } + ], + "flood_emojis": [ + { + "id": 1, + "emoji": "🌊", + "latitude": 37.594019, + "longitude": -122.110582, + "label": "Flood Zone 1" + }, + { + "id": 2, + "emoji": "🌊", + "latitude": 37.776600, + "longitude": -122.260151, + "label": "Flood Zone 2" + }, + { + "id": 3, + "emoji": "🌊", + "latitude": 37.487305, + "longitude": -121.991201, + "label": "Flood Zone 3" + }, + { + "id": 4, + "emoji": "🌊", + "latitude": 37.619842, + "longitude": -122.095317, + "label": "Flood Zone 4" + }, + { + "id": 5, + "emoji": "🌊", + "latitude": 37.741000, + "longitude": -122.221783, + "label": "Flood Zone 5" + }, + { + "id": 6, + "emoji": "🌊", + "latitude": 37.751892, + "longitude": -122.244783, + "label": "Flood Zone 6" + }, + { + "id": 7, + "emoji": "🌊", + "latitude": 37.514672, + "longitude": -122.005438, + "label": "Flood Zone 7" + }, + { + "id": 8, + "emoji": "🌊", + "latitude": 37.461823, + "longitude": -121.975964, + "label": "Flood Zone 8" + } + ] +} diff --git a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-dublin-sanjose.gpx b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-dublin-sanjose.gpx index dc89aebdc6..84bdc1ca07 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-dublin-sanjose.gpx +++ b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-dublin-sanjose.gpx @@ -19684,4 +19684,4 @@ - + \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-eastpaloalto.gpx b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-eastpaloalto.gpx index 1f69e6c066..79597f1490 100644 --- 
a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-eastpaloalto.gpx +++ b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-eastpaloalto.gpx @@ -12567,4 +12567,4 @@ - + \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-oakland-i880.gpx b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-oakland-i880.gpx index 59057b2bbe..bf4b02af51 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-oakland-i880.gpx +++ b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-oakland-i880.gpx @@ -13522,4 +13522,4 @@ - + \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno-sunnyvale.gpx b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno-sunnyvale.gpx index 4eb451f60a..205fbd8431 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno-sunnyvale.gpx +++ b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno-sunnyvale.gpx @@ -14410,4 +14410,4 @@ - + \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno.gpx b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno.gpx index 5fa07d9a0f..036e40c5a2 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno.gpx +++ b/metro-ai-suite/smart-route-planning-agent/src/data/routes/berkeley-sanbruno.gpx @@ -14739,4 +14739,4 @@ - + \ No newline at end of file diff --git a/metro-ai-suite/smart-route-planning-agent/src/health_check.py b/metro-ai-suite/smart-route-planning-agent/src/health_check.py index 33014392da..3c2324aa30 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/health_check.py +++ b/metro-ai-suite/smart-route-planning-agent/src/health_check.py @@ -1,8 +1,4 @@ #!/usr/bin/env python3 - -# Copyright (C) 2026 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - """ Health check script for AI Route Planner. This script checks if the Gradio application is running and responsive. diff --git a/metro-ai-suite/smart-route-planning-agent/src/main.py b/metro-ai-suite/smart-route-planning-agent/src/main.py index 3a64ee05c7..996c71e828 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/main.py +++ b/metro-ai-suite/smart-route-planning-agent/src/main.py @@ -1,13 +1,14 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import base64 +import json import queue import threading import time +from pathlib import Path +from typing import Optional from io import BytesIO import gradio as gr +from gradio_toggle import Toggle from PIL import Image from config import APP_DETAILS, INITIAL_MAP_HTML @@ -21,16 +22,37 @@ optimization_active = False optimization_thread = None curr_agent_iteration = 1 +game_mode_enabled = False # Global flag for game mode UI_UPDATE_INTERVAL = 8 # Poll interval for new updates from data_queue used by thread OPTIMIZATION_INTERVAL = 12 # Seconds between agent invocations # Queue for passing data between agent thread and UI -data_queue: queue.Queue[dict] = queue.Queue() +data_queue = queue.Queue() # Lock for thread-safe access to shared variables thread_lock = threading.Lock() +def load_game_data(): + """Load game mode emoji data from JSON file""" + game_data_path = Path(__file__).parent / "data" / "game.json" + try: + with open(game_data_path, "r") as f: + return json.load(f) + except Exception as e: + logger.error(f"Error loading game data: {e}") + return {"fire_emojis": [], "flood_emojis": []} + + +def toggle_game_mode(enabled: bool) -> str: + """Toggle game mode on/off""" + global game_mode_enabled + game_mode_enabled = enabled + status = "Game Mode: ON" if enabled else "Game Mode: OFF" + logger.info(status) + return status + + def get_direct_route(source: str, destination: str) -> tuple[str, str, str]: """ Uses RouteService to trigger 
RoutePlanner agent and gets direct route between source and destination. @@ -39,17 +61,16 @@ def get_direct_route(source: str, destination: str) -> tuple[str, str, str]: # Validate input is_valid, error_message = route_service.validate_route_request(source, destination) if not is_valid: - return ( - error_message, - "", - route_service.get_fallback_map_html( - "Select locations to see the route map" - ), + return error_message, "", route_service.get_fallback_map_html( + "Select locations to see the route map" ) + # Get game data if game mode is enabled + game_data = load_game_data() if game_mode_enabled else None + # Start planning the route next_data_source, distance, main_route_map = route_service.create_direct_route_map( - source, destination + source, destination, game_data ) thinking_message = ( @@ -64,7 +85,9 @@ def get_direct_route(source: str, destination: str) -> tuple[str, str, str]: return agent_status_msg, thinking_message, main_route_map -def get_optimal_route(source: str, destination: str) -> tuple[str, str, str]: +def get_optimal_route( + source: str, destination: str +) -> tuple[str, str, Optional[dict[str, str]]]: """ Uses RouteService to trigger RoutePlanner agent and gets optimized route. 
""" @@ -77,12 +100,15 @@ def get_optimal_route(source: str, destination: str) -> tuple[str, str, str]: route_service.get_fallback_map_html( "Select locations to see the route map" ), - "", + None, ) + # Get game data if game mode is enabled + game_data = load_game_data() if game_mode_enabled else None + # Start planning the route next_data_source, route_issue, distance, is_sub_optimal, optimized_route_map = ( - route_service.create_alternate_route_map(source, destination) + route_service.create_alternate_route_map(source, destination, game_data) ) thinking_message: str = f"\n #### Route: {source} -> {destination}\n\n" @@ -312,17 +338,17 @@ def create_gradio_interface() -> gr.Blocks: css = """ /* Modern Font Import */ @import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap'); - + /* Global Font Styles */ body, .gradio-container, .gradio-container *, .gradio-container label, .gradio-container input, .gradio-container textarea, .gradio-container select, .gradio-container button { font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif !important; } - + h1, h2, h3, h4, h5, h6 { font-family: 'Inter', sans-serif !important; font-weight: 600; } - + .map-container { border-radius: 12px; overflow: hidden; @@ -401,7 +427,7 @@ def create_gradio_interface() -> gr.Blocks: .settings-panel .block { width: 100%; } - + /* Styling for the thinking output markdown component */ .thinking-output { border-radius: 10px; @@ -412,7 +438,7 @@ def create_gradio_interface() -> gr.Blocks: max-height: 50vh; line-height: 1.6; } - + .thinking-output h1, .thinking-output h2, .thinking-output h3 { color: #b112cd; margin-top: 1em; @@ -429,7 +455,7 @@ def create_gradio_interface() -> gr.Blocks: .thinking-output h4 { color: #950d85 } - + .thinking-output h5, .thinking-output h6 { color: #b942ab; } @@ -441,7 +467,7 @@ def create_gradio_interface() -> gr.Blocks: 
font-family: 'Roboto Mono', monospace; font-size: 0.9em; } - + .thinking-output pre { background-color: #f8fafc; padding: 12px; @@ -449,15 +475,15 @@ def create_gradio_interface() -> gr.Blocks: border-left: 3px solid #4f46e5; overflow-x: auto; } - + .thinking-output em, .thinking-output i { color: #8d3419; } - + .thinking-output strong, .thinking-output b { color: #262E9E; } - + .status-indicator { padding: 10px 16px; border-radius: 4px; @@ -509,6 +535,19 @@ def create_gradio_interface() -> gr.Blocks: with gr.Column(scale=1): with gr.Row(): + game_mode_toggle = Toggle( + label="Game Mode", + value=False, + color="#2FFF2F", + show_label=True, + container=False, + radius="lg", + interactive=True, + ) + game_mode_status = gr.Markdown( + "Game Mode: OFF", elem_id="game-mode-status" + ) + with gr.Column(scale=1): search_btn = gr.Button( "Find Route", @@ -589,6 +628,10 @@ def create_gradio_interface() -> gr.Blocks: intersection_image4, ] + game_mode_toggle.change( + fn=toggle_game_mode, inputs=[game_mode_toggle], outputs=[game_mode_status] + ) + # Connect the search button with initial route display and start the Route Planner agent search_btn.click( fn=start_agent, @@ -602,6 +645,11 @@ def create_gradio_interface() -> gr.Blocks: fn=lambda: gr.update(elem_classes=["status-indicator", "status-active"]), inputs=None, outputs=agent_status, + ).then( + # Disable game mode toggle while route planning is active + fn=lambda: gr.update(interactive=False), + inputs=None, + outputs=game_mode_toggle, ) stop_agent_btn.click( @@ -618,6 +666,11 @@ def create_gradio_interface() -> gr.Blocks: fn=lambda: gr.update(elem_classes=["status-indicator", "status-inactive"]), inputs=None, outputs=agent_status, + ).then( + # Re-enable game mode toggle when planning stops + fn=lambda: gr.update(interactive=True), + inputs=None, + outputs=game_mode_toggle, ) app.load( @@ -645,7 +698,9 @@ def create_gradio_interface() -> gr.Blocks: # Get configuration from environment variables server_name = 
os.getenv("GRADIO_SERVER_NAME", "0.0.0.0") - server_port = int(os.getenv("GRADIO_SERVER_PORT", "7860")) + server_port = int( + os.getenv("GRADIO_SERVER_PORT", "7860") + ) # Changed default to match Dockerfile server_config = { "server_name": server_name, diff --git a/metro-ai-suite/smart-route-planning-agent/src/pyproject.toml b/metro-ai-suite/smart-route-planning-agent/src/pyproject.toml index 380e0fba42..d0fa95c0a3 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/pyproject.toml +++ b/metro-ai-suite/smart-route-planning-agent/src/pyproject.toml @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - [project] name = "smart-route-planning-agent" version = "0.1.0" @@ -17,7 +14,7 @@ dependencies = [ "isort", "langgraph==1.0.7", "mypy", - "numpy>1.24.3", + "numpy", "pandas", "pillow>=10.4.0", "pydantic", diff --git a/metro-ai-suite/smart-route-planning-agent/src/schema.py b/metro-ai-suite/smart-route-planning-agent/src/schema.py index eb8ec836cd..eb0ce8bf6e 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/schema.py +++ b/metro-ai-suite/smart-route-planning-agent/src/schema.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - from typing import List, Optional from pydantic import BaseModel, Field diff --git a/metro-ai-suite/smart-route-planning-agent/src/services/route_service.py b/metro-ai-suite/smart-route-planning-agent/src/services/route_service.py index c91e2f91b1..f27033f616 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/services/route_service.py +++ b/metro-ai-suite/smart-route-planning-agent/src/services/route_service.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - from typing import Any, Dict, List, Optional, Tuple from agents import RoutePlanner, RoutePlannerState @@ -14,7 +11,7 @@ from utils.gpx_parser import MapDataParser from utils.logging_config import get_logger from utils.map_creator 
import MapCreator -from schema import LiveTrafficData +from schema import GeoCoordinates, LiveTrafficData logger = get_logger(__name__) @@ -34,6 +31,7 @@ def __init__(self): self.alternate_route: Optional[Dict] = None self.alternate_route_names: List[str] = [] # Keeps track of all alt route names self.new_alt_route_idx: int = 0 # Needed to identify new alt route and color it differently than others in list + self.blocked_routes: Dict[str, List[Dict[str, Any]]] = {} self.alt_route_trackpoints: list[list] = [] self.route_state: Optional[RoutePlannerState] = None @@ -42,16 +40,16 @@ def __init__(self): def _load_direct_shortest_route( self, source: str, destination: str - ) -> MapDataParser | None: + ) -> MapDataParser: """Load the shortest trivial route data using the RoutePlanner agent at startup""" try: # Running the agent for first time - finds direct trivial route. - self.route_state = self.route_planner.plan_route(source, destination) - - direct_route_name = self.route_state.get("direct_route", {}).get( - "route_name", "" + self.route_state: RoutePlannerState = self.route_planner.plan_route( + source, destination ) + + direct_route_name = self.route_state["direct_route"]["route_name"] map_data_parser = MapDataParser(GPX_DIR / direct_route_name) self.main_route = map_data_parser.get_route_data() @@ -68,9 +66,8 @@ def _load_direct_shortest_route( except Exception as e: logger.error(f"Error loading direct route file: {e}") self.main_route = None - return None - def _setup_locations(self, map_data_parser: MapDataParser | None) -> None: + def _setup_locations(self, map_data_parser: MapDataParser) -> None: """Setup location lists based on GPX data if available""" if map_data_parser: start_location, end_location = map_data_parser.get_start_end_locations() @@ -89,7 +86,7 @@ def _load_alternate_route(self, source: str, destination: str) -> None: try: # Pass the previous saved route_state and get the updated state as result - self.route_state = 
self.route_planner.plan_route( + self.route_state: RoutePlannerState = self.route_planner.plan_route( source, destination, self.route_state ) @@ -97,7 +94,7 @@ def _load_alternate_route(self, source: str, destination: str) -> None: # Instantiate object for alternate route based on optimal route name recieved from route_state alternate_route_name = self.route_state.get("optimal_route", {}).get( - "route_name", "" + "route_name" ) if alternate_route_name: # Push the new route name to list if not already present. Get index of new route anyway. @@ -112,6 +109,28 @@ def _load_alternate_route(self, source: str, destination: str) -> None: temp_parser = MapDataParser(GPX_DIR / alternate_route_name) self.alternate_route = temp_parser.get_route_data() + # Instantitate objects for blocked routes based on blocked route names recieved from route_state + blocked_route_names: list[str] = self.route_state.get("blocked_routes", []) + blocked_route_invalid_names: list[str] = self.route_state.get("blocked_routes_invalid", []) + + self.blocked_routes: Dict[str, List[Dict[str, Any]]] = {"valid": [], "invalid": []} + + # Update valid blocked routes. Valid because user set correct weather/incident data to block it. + for blocked_route in blocked_route_names: + logger.debug( + f"Route blocked due to issues at intersection: {blocked_route}" + ) + temp_parser = MapDataParser(GPX_DIR / blocked_route) + self.blocked_routes["valid"].append(temp_parser.get_route_data()) + + # Update invalid blocked routes. Invalid because user set incorrect weather/incident data to block it. 
+ for blocked_route in blocked_route_invalid_names: + logger.debug( + f"Route blocked due to incorrect weather/incident setting by user at intersection: {blocked_route}" + ) + temp_parser = MapDataParser(GPX_DIR / blocked_route) + self.blocked_routes["invalid"].append(temp_parser.get_route_data()) + logger.info( f"Successfully loaded alternate route file: {alternate_route_name}" ) @@ -123,6 +142,8 @@ def _load_alternate_route(self, source: str, destination: str) -> None: f"Found {len(self.alternate_route['tracks'][0]['track_points'])} track points" ) except Exception as e: + import traceback + traceback.print_exc() logger.error(f"Error loading alternate route : {e}") self.alternate_route = None @@ -184,10 +205,7 @@ def _get_route_issue_detail(self) -> str: else: if event_name := optimal_route_data.get("event_name"): congestion_level = optimal_route_data.get("traffic_history") - if congestion_level is not None: - route_issue = f"planned event '{event_name}' with expected {congestion_level.value} traffic congestion on the route." - else: - route_issue = f"planned event '{event_name}' on the route." + route_issue = f"planned event '{event_name}' with expected {congestion_level.value} traffic congestion on the route." elif congestion_level := optimal_route_data.get("traffic_history"): route_issue = f"'{congestion_level.value}' historical traffic trends on the route." elif weather_condition := optimal_route_data.get("weather_status"): @@ -201,9 +219,10 @@ def _get_route_issue_detail(self) -> str: route_issue = f"high traffic density of {live_traffic.get('traffic_density')} at {live_traffic.get('intersection_name')}" - traffic_description = live_traffic.get("traffic_description") - if traffic_description: - route_issue += f" - {traffic_description[:900]} ..." + if live_traffic.get("traffic_description"): + route_issue += ( + f" - {live_traffic.get('traffic_description')[:900]} ..." 
+ ) return route_issue def _get_next_data_source(self) -> str: @@ -212,11 +231,9 @@ def _get_next_data_source(self) -> str: This information is used to display what conditions agent is going to analyze next. """ - if self.route_state and self.route_state.get("static_optimizers"): + if self.route_state.get("static_optimizers"): # If any static optimizer is available, it will be used as next data source to optimize route - optimizer: StaticOptimizerName = self.route_state.get( - "static_optimizers", [] - )[-1] + optimizer: StaticOptimizerName = self.route_state["static_optimizers"][-1] # Get description respective to the StaticOptimizerName return optimizer.get_description() else: @@ -232,10 +249,10 @@ def get_default_locations(self) -> Tuple[str, str]: ) def create_direct_route_map( - self, start_location: str, end_location: str + self, start_location: str, end_location: str, game_data: Optional[dict] = None ) -> tuple[str, float, str]: """Create initial map showing only the main route before AI analysis""" - map_data_parser: MapDataParser | None = self._load_direct_shortest_route( + map_data_parser: MapDataParser = self._load_direct_shortest_route( start_location, end_location ) @@ -244,16 +261,16 @@ def create_direct_route_map( # Get the next data source to be used for route optimization and current route map next_data_source = self._get_next_data_source() - direct_route_map = self.create_route_map(start_location, end_location) + direct_route_map = self.create_route_map( + start_location, end_location, game_data=game_data + ) distance = ( - self.route_state.get("optimal_route", {}).get("distance", 0.0) - if self.route_state - else 0.0 + self.route_state["optimal_route"]["distance"] if self.route_state else 0.0 ) return next_data_source, distance, direct_route_map def create_alternate_route_map( - self, start_location: str, end_location: str + self, start_location: str, end_location: str, game_data: Optional[dict] = None ) -> tuple[str, str, float, bool, str]: 
"""Create map showing alternative route""" @@ -266,11 +283,9 @@ def create_alternate_route_map( next_data_source = self._get_next_data_source() # Get intersection images and lat and long for route incidents (if any) from live traffic data - incident_location: Optional[dict[str, Any]] = None + incident_location: Optional[GeoCoordinates] = None # intersection_images: Optional[dict[str, str]] = None - if self.route_state and ( - live_traffic := self.route_state.get("live_traffic", {}) - ): + if live_traffic := self.route_state.get("live_traffic", {}): # intersection_images = live_traffic.get("intersection_images") incident_location = { "name": live_traffic.get("intersection_name"), @@ -278,22 +293,14 @@ def create_alternate_route_map( } # Get the complete live traffic data for all intersections - all_routes: List[LiveTrafficData] = ( - self.route_state.get("all_routes_data", []) if self.route_state else [] - ) + all_routes: List[LiveTrafficData] = self.route_state.get("all_routes_data", []) # Create alternate route map for the alternate route alternate_map = self.create_route_map( - start_location, end_location, incident_location, all_routes - ) - distance = ( - self.route_state.get("optimal_route", {}).get("distance", 0.0) - if self.route_state - else 0.0 - ) - is_sub_optimal = ( - self.route_state.get("is_sub_optimal", False) if self.route_state else False + start_location, end_location, incident_location, game_data, all_routes ) + distance = self.route_state.get("optimal_route", {}).get("distance", 0.0) + is_sub_optimal = self.route_state.get("is_sub_optimal", False) return ( next_data_source, @@ -308,6 +315,7 @@ def create_route_map( start_location: str, end_location: str, incident_location: Optional[dict[str, Any]] = None, + game_data: Optional[dict] = None, all_routes: Optional[List[LiveTrafficData]] = None, ) -> str: """Create a complete route map with all routes and markers""" @@ -352,6 +360,22 @@ def create_route_map( f"length of alt_route_trackpoints: 
{len(self.alt_route_trackpoints)}" ) + blocked_routes_trackpoints_valid: list[list] = [] + # Load valid blocked routes, if any (blocked by setting correct weather/incident data). To be shown in red. + if self.route_state and (valid_blocked_routes := self.blocked_routes.get("valid")): + for blocked_route in valid_blocked_routes: + blocked_routes_trackpoints_valid.append( + self._get_route_trackpoints(blocked_route) + ) + + blocked_routes_trackpoints_invalid: list[list] = [] + # Load invalid blocked routes, if any (blocked by setting incorrect weather/incident data). To be shown in yellow. + if self.route_state and (invalid_blocked_routes := self.blocked_routes.get("invalid")): + for blocked_route in invalid_blocked_routes: + blocked_routes_trackpoints_invalid.append( + self._get_route_trackpoints(blocked_route) + ) + # Calculate map center and zoom all_points = main_route_trackpoints[:] if self.alt_route_trackpoints: @@ -392,6 +416,24 @@ def create_route_map( f"Direct Shortest Route from {start_location} to {end_location}", ) + # Paint the valid blocked routes in red (valid because user set correct weather/incident data to block it) + for blocked_route_trackpoint in blocked_routes_trackpoints_valid: + self.map_creator.add_route_line( + map_obj, + blocked_route_trackpoint, + MAP_COLORS["blocked_routes_valid"], + f"Correctly Blocked Route from {start_location} to {end_location}", + ) + + # Paint the invalid blocked routes in yellow (invalid because user set incorrect weather/incident data to block it) + for blocked_route_trackpoint in blocked_routes_trackpoints_invalid: + self.map_creator.add_route_line( + map_obj, + blocked_route_trackpoint, + MAP_COLORS["blocked_routes_invalid"], + f"Incorrectly Blocked Route from {start_location} to {end_location}", + ) + # Add location markers self.map_creator.add_location_markers( map_obj, start_location, end_location, start_coords, end_coords @@ -399,9 +441,11 @@ def create_route_map( # Add intersection markers with incident 
location (location of high traffic congestion) if available if all_routes or incident_location: - self.map_creator.add_intersection_marker( - map_obj, incident_location, all_routes - ) + self.map_creator.add_intersection_marker(map_obj, incident_location, all_routes) + + # Add game mode markers if game data provided + if game_data: + self.map_creator.add_game_mode_markers(map_obj, game_data) # Add waypoint markers for longer routes using trackpoints if main_route_trackpoints and len(main_route_trackpoints) > 10: diff --git a/metro-ai-suite/smart-route-planning-agent/src/tests/test_logging.py b/metro-ai-suite/smart-route-planning-agent/src/tests/test_logging.py new file mode 100644 index 0000000000..3bade1be4e --- /dev/null +++ b/metro-ai-suite/smart-route-planning-agent/src/tests/test_logging.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +""" +Test script to verify logging functionality in the route agent +""" + +import logging +import os +import sys + +from agents.route_planner import get_optimal_route + +# Add the current directory to the Python path +sys.path.append(os.path.dirname(os.path.abspath(__file__))) + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + handlers=[logging.FileHandler("test_route_app.log"), logging.StreamHandler()], +) + + +def test_route_agent(): + """Test the route agent with logging""" + print("Testing route agent with logging...") + + def mock_progress_callback(text, progress): + print(f"Progress: {progress:.0%} - {text.split()[-10:]}") # Show last 10 words + + # Test route planning + route_info, thinking_output = get_optimal_route( + "Berkeley, California", + "Santa Clara, California", + progress_callback=mock_progress_callback, + ) + + print("\n" + "=" * 50) + print("ROUTE INFO:") + print(f"GPX File: {route_info['gpx_file']}") + print(f"Has Accident: {route_info['has_accident']}") + print(f"Has Fire: {route_info['has_fire']}") + print("\n" + "=" * 50) + 
print("THINKING OUTPUT (first 500 chars):") + print( + thinking_output[:500] + "..." if len(thinking_output) > 500 else thinking_output + ) + + +if __name__ == "__main__": + test_route_agent() diff --git a/metro-ai-suite/deterministic-threat-detection/docs/dictionary_append.txt b/metro-ai-suite/smart-route-planning-agent/src/tests/test_route_selection.py similarity index 100% rename from metro-ai-suite/deterministic-threat-detection/docs/dictionary_append.txt rename to metro-ai-suite/smart-route-planning-agent/src/tests/test_route_selection.py diff --git a/metro-ai-suite/smart-route-planning-agent/src/utils/gpx_parser.py b/metro-ai-suite/smart-route-planning-agent/src/utils/gpx_parser.py index 9f1bbebb49..04390f3458 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/utils/gpx_parser.py +++ b/metro-ai-suite/smart-route-planning-agent/src/utils/gpx_parser.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - from pathlib import Path from typing import Any, Dict, Optional, Tuple @@ -43,10 +40,7 @@ def _load_gpx_file(self) -> bool: """ gpx_file_path = self.gpx_file_path - if gpx_file_path is None: - raise ValueError("GPX file path is not set") - - if not Path(gpx_file_path).exists(): + if not Path.exists(gpx_file_path): raise FileNotFoundError(f"GPX file not found: {gpx_file_path}") try: diff --git a/metro-ai-suite/smart-route-planning-agent/src/utils/helper.py b/metro-ai-suite/smart-route-planning-agent/src/utils/helper.py index b1ff13dc99..7057f251cf 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/utils/helper.py +++ b/metro-ai-suite/smart-route-planning-agent/src/utils/helper.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import json from pathlib import Path from typing import Optional @@ -12,7 +9,7 @@ logger = get_logger(__name__) -def get_all_available_route_files() -> list[str]: +def get_all_available_route_files() -> list[Path]: """ Get a list of 
all available GPX route files in the GPX_DIR. diff --git a/metro-ai-suite/smart-route-planning-agent/src/utils/logging_config.py b/metro-ai-suite/smart-route-planning-agent/src/utils/logging_config.py index 5fd81baeb7..f2c5fb2450 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/utils/logging_config.py +++ b/metro-ai-suite/smart-route-planning-agent/src/utils/logging_config.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import logging import os from datetime import datetime diff --git a/metro-ai-suite/smart-route-planning-agent/src/utils/map_creator.py b/metro-ai-suite/smart-route-planning-agent/src/utils/map_creator.py index cc3c4d55d7..bbb00e3bd9 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/utils/map_creator.py +++ b/metro-ai-suite/smart-route-planning-agent/src/utils/map_creator.py @@ -1,6 +1,3 @@ -# Copyright (C) 2026 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - import math import random from typing import Any, Dict, List, Optional, Tuple @@ -176,14 +173,12 @@ def add_intersection_marker( """ # Iterate through all intersections and add location markers/popup based on weather, incident and traffic - for intersection in all_routes or []: + for intersection in all_routes: intersection_name = intersection.intersection_name latitude = intersection.location_coordinates.latitude longitude = intersection.location_coordinates.longitude - logger.debug( - f"Adding marker for intersection: {intersection_name} at ({latitude}, {longitude})" - ) + logger.debug(f"Adding marker for intersection: {intersection_name} at ({latitude}, {longitude})") # Create popup content with traffic data popup_html = f""" @@ -227,7 +222,7 @@ def add_intersection_marker(
Weather Alert: {intersection.weather_status.value.title()}
- """ + """ else: popup_html += f"""
@@ -239,7 +234,7 @@ def add_intersection_marker( if incident_location and intersection_name == incident_location.get("name"): icon_html = incident_icon_html # Add some description about high traffic congestion - popup_html += """ + popup_html += f"""
Route affected due to high traffic congestion!
@@ -260,6 +255,49 @@ def add_intersection_marker( ), ).add_to(map_obj) + + def add_game_mode_markers(self, map_obj: folium.Map, game_data: dict) -> None: + """Add fire and flood emoji markers for game mode""" + # Add fire emojis + for fire in game_data.get("fire_emojis", []): + fire_html = f""" +
{fire["emoji"]}
+ """ + folium.Marker( + location=[fire["latitude"], fire["longitude"]], + popup=folium.Popup( + f"{fire['label']}
{fire['emoji']} Fire hazard area", + max_width=200, + ), + icon=folium.DivIcon( + html=fire_html, icon_size=(30, 30), icon_anchor=(15, 15) + ), + ).add_to(map_obj) + + # Add flood emojis + for flood in game_data.get("flood_emojis", []): + flood_html = f""" +
{flood["emoji"]}
+ """ + folium.Marker( + location=[flood["latitude"], flood["longitude"]], + popup=folium.Popup( + f"{flood['label']}
{flood['emoji']} Flood risk area", + max_width=200, + ), + icon=folium.DivIcon( + html=flood_html, icon_size=(30, 30), icon_anchor=(15, 15) + ), + ).add_to(map_obj) + def add_location_markers( self, map_obj: folium.Map, @@ -447,7 +485,7 @@ def calculate_route_distance(self, route_points: List[List[float]]) -> float: if len(route_points) <= 1: return 0.0 - total_distance = 0.0 + total_distance = 0 for i in range(len(route_points) - 1): lat1, lon1 = route_points[i] lat2, lon2 = route_points[i + 1] diff --git a/metro-ai-suite/smart-route-planning-agent/src/uv.lock b/metro-ai-suite/smart-route-planning-agent/src/uv.lock index ec52770be9..ef09e66203 100644 --- a/metro-ai-suite/smart-route-planning-agent/src/uv.lock +++ b/metro-ai-suite/smart-route-planning-agent/src/uv.lock @@ -1870,7 +1870,7 @@ requires-dist = [ { name = "isort" }, { name = "langgraph", specifier = "==1.0.7" }, { name = "mypy" }, - { name = "numpy", specifier = ">1.24.3" }, + { name = "numpy" }, { name = "pandas" }, { name = "pillow", specifier = ">=10.4.0" }, { name = "pydantic" }, diff --git a/metro-ai-suite/smart-route-planning-agent/tests/test_logging.py b/metro-ai-suite/smart-route-planning-agent/tests/test_logging.py deleted file mode 100644 index 457eb00025..0000000000 --- a/metro-ai-suite/smart-route-planning-agent/tests/test_logging.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python3 -""" -Test script to verify logging functionality in the route agent -""" - -import logging -import os -import sys - -# Add the src directory to the Python path for imports -sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")) - -from src.agents.route_planner import RoutePlanner - -# Configure logging -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", - handlers=[logging.FileHandler("test_route_app.log"), logging.StreamHandler()], -) - - -def test_route_agent() -> None: - """Test the route agent with logging""" - 
print("Testing route agent with logging...") - - # Initialize the route planner - planner = RoutePlanner() - - # Test route planning using the plan_route method - route_state = planner.plan_route( - source="Berkeley, California", - destination="Santa Clara, California", - ) - - print("\n" + "=" * 50) - print("ROUTE INFO:") - print(f"Direct Route: {route_state.get('direct_route', {})}") - print(f"Optimal Route: {route_state.get('optimal_route', {})}") - print(f"Is Sub-Optimal: {route_state.get('is_sub_optimal', False)}") - print("=" * 50) - - -if __name__ == "__main__": - test_route_agent() diff --git a/metro-ai-suite/smart-traffic-intersection-agent/README.md b/metro-ai-suite/smart-traffic-intersection-agent/README.md index 28885ab617..3b162a0515 100644 --- a/metro-ai-suite/smart-traffic-intersection-agent/README.md +++ b/metro-ai-suite/smart-traffic-intersection-agent/README.md @@ -8,7 +8,7 @@ The Smart Traffic Intersection Agent application analyzes various traffic scenar ## How It Works - [Overview](docs/user-guide/index.md): A high-level introduction to the agent. - - [Build from Source](docs/user-guide/build-from-source.md): Instructions for building the agent from source code. + - [How It Works](docs/user-guide/how-it-works.md): Instructions for building the agent from source code. ## Learn More - [Release Notes](docs/user-guide/release-notes.md): Information on the latest updates, improvements, and bug fixes. diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/toc.rst b/metro-ai-suite/smart-traffic-intersection-agent/docs/toc.rst new file mode 100644 index 0000000000..a8fb32670f --- /dev/null +++ b/metro-ai-suite/smart-traffic-intersection-agent/docs/toc.rst @@ -0,0 +1,3 @@ +.. 
toctree:: + + user-guide/index diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/index.md b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/Overview.md similarity index 56% rename from metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/index.md rename to metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/Overview.md index f8bd5cf13e..34c83860a3 100644 --- a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/index.md +++ b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/Overview.md @@ -1,33 +1,36 @@ # Smart Traffic Intersection Agent -The Smart Traffic Intersection Agent is a comprehensive traffic analysis service that provides real-time intersection monitoring, directional traffic density analysis, and Vision Language Model (VLM) powered traffic insights. -It processes MQTT traffic data, manages camera images, and delivers intelligent traffic analysis through RESTful APIs. +The Smart Traffic Intersection Agent is a comprehensive traffic analysis service that provides real-time intersection monitoring, directional traffic density analysis, and Vision Language Model (VLM) powered traffic insights. It processes MQTT traffic data, manages camera images, and delivers intelligent traffic analysis through RESTful APIs. -The agent supports sliding-window analysis, sustained traffic detection, and intelligent management of camera images to enhance traffic insights. +## Overview -The following figure shows the high-level architecture of the Smart Traffic Intersection Agent, showcasing its core components and their interactions with external systems. +The microservice processes real-time traffic data from MQTT streams and provides advanced analytics including directional traffic density monitoring, VLM-powered traffic scene analysis, and comprehensive traffic summaries. 
It supports sliding window analysis, sustained traffic detection, and intelligent camera image management for enhanced traffic insights. + +## How It Works + +The diagram below illustrates the high-level architecture of the Smart Traffic Intelligence Agent, showcasing its core components and their interactions with external systems.

- Architecture + Architecture

The Smart Traffic UI below shows how traffic, weather data is analyzed and summary, alerts are shown to the user.

- Traffic Intersection Agent UI + Traffic Intersection Agent UI

## Components -The Smart Traffic Intersection stack includes the following containerized services: +The Smart Traffic Intelligence stack includes these containerized services: -- **MQTT Broker** (Eclipse Mosquitto message broker) - Message broker for traffic data +- **MQTT Broker** (Eclipse Mosquitto) - Message broker for traffic data - **DL Streamer Pipeline Server** - Video analytics and AI inference - **SceneScape Database** - Configuration and metadata storage - **SceneScape Web Server** - Management interface - **SceneScape Controller** - Orchestration service - **VLM OpenVINO Serving** - Vision Language Model inference -- **Traffic Intelligence** - Real-time traffic analysis with dual interface (API and UI) +- **Traffic Intelligence** - Real-time traffic analysis with dual interface (API + UI) ### Key Integration Points @@ -38,22 +41,8 @@ The Smart Traffic Intersection stack includes the following containerized servic - **Health Monitoring**: All services include health check endpoints -## Learn More +## Supporting Resources - [Get Started Guide](get-started.md) - [API Reference](api-reference.md) - [System Requirements](system-requirements.md) - - - \ No newline at end of file diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/ITT_architecture.png b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/ITT_architecture.png similarity index 100% rename from metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/ITT_architecture.png rename to metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/ITT_architecture.png diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/service_endpoints.png b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/service_endpoints.png similarity index 100% rename from metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/service_endpoints.png rename to 
metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/service_endpoints.png diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/traffic_agent.png b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/traffic_agent.png similarity index 100% rename from metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/traffic_agent.png rename to metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/traffic_agent.png diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/traffic_agent_ui.png b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/traffic_agent_ui.png similarity index 100% rename from metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_assets/traffic_agent_ui.png rename to metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/_images/traffic_agent_ui.png diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/get-started.md b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/get-started.md index 10c696aec8..a59b43fb33 100644 --- a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/get-started.md +++ b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/get-started.md @@ -168,7 +168,7 @@ After the setup process completes, the URLs for all services are displayed on th The following is a sample response that you might get at script completion, which displays the URLs for accessing the relevant services: -![alt text](./_assets/service_endpoints.png) +![alt text](./_images/service_endpoints.png) ## Troubleshooting diff --git a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/build-from-source.md b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/how-it-works.md similarity index 98% rename from metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/build-from-source.md rename to 
metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/how-it-works.md index f032d9c804..d916dc74e9 100644 --- a/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/build-from-source.md +++ b/metro-ai-suite/smart-traffic-intersection-agent/docs/user-guide/how-it-works.md @@ -1,4 +1,4 @@ -# Build from Source +# How It Works This section shows how to build the Smart Traffic Intersection Agent from source to customize or extend its functionality. diff --git a/metro-ai-suite/smart-traffic-intersection-agent/setup.sh b/metro-ai-suite/smart-traffic-intersection-agent/setup.sh index 530e0eab64..35cca1f124 100755 --- a/metro-ai-suite/smart-traffic-intersection-agent/setup.sh +++ b/metro-ai-suite/smart-traffic-intersection-agent/setup.sh @@ -180,16 +180,6 @@ fi # END Dependencies # ============================================================================ -# Export required environment variables (HOST_IP already set above) -export TAG=${TAG:-latest} -# Construct registry path properly to avoid double slashes -if [[ -n "$REGISTRY" ]]; then - export REGISTRY="${REGISTRY%/}/" -fi - -# Traffic Intersection Agent Configuration -export TRAFFIC_INTELLIGENCE_PORT=${TRAFFIC_INTELLIGENCE_PORT:-8081} -export TRAFFIC_INTELLIGENCE_UI_PORT=${TRAFFIC_INTELLIGENCE_UI_PORT:-7860} # Export environment variables required by application (HOST_IP already set above) export LOG_LEVEL=${LOG_LEVEL:-INFO} export REFRESH_INTERVAL=${REFRESH_INTERVAL:-15} diff --git a/metro-ai-suite/video-processing-for-nvr/README.md b/metro-ai-suite/video-processing-for-nvr/README.md index 17848e309e..bae3132a83 100644 --- a/metro-ai-suite/video-processing-for-nvr/README.md +++ b/metro-ai-suite/video-processing-for-nvr/README.md @@ -14,7 +14,8 @@ The sample application depends on VPP SDK, OpenVINO and live555 * [License](#license) * [System requirements](#system-requirements) - * [How to run](#how-to-run-in-docker-container) + * [How to build](#how-to-build) + * [Known 
limitations](#known-limitations) ## License The sample application is licensed under [APACHE 2.0](https://github.com/open-edge-platform/edge-ai-suites/blob/main/LICENSE). @@ -30,10 +31,7 @@ The sample application is licensed under [APACHE 2.0](https://github.com/open-ed **Hardware:** * Intel® platforms with iGPU and dGPU -## How to run in docker container -Please refer to [docker guide](./docker/README.md) to run the video analytic workflow - -## How to install VPPSDK in bare metal +## How to install 1. Install VPPSDK and dependencies ``` @@ -42,7 +40,7 @@ echo "deb [signed-by=/usr/share/keyrings/sed-archive-keyring.gpg] https://eci.in echo "deb-src [signed-by=/usr/share/keyrings/sed-archive-keyring.gpg] https://eci.intel.com/sed-repos/$(source /etc/os-release && echo $VERSION_CODENAME) sed main" | sudo tee -a /etc/apt/sources.list.d/sed.list sudo bash -c 'echo -e "Package: *\nPin: origin eci.intel.com\nPin-Priority: 1000" > /etc/apt/preferences.d/sed' sudo apt update -sudo apt -y install intel-vppsdk +sudo apt install intel-vppsdk sudo bash /opt/intel/vppsdk/install_vppsdk_dependencies.sh source /opt/intel/vppsdk/env.sh @@ -52,7 +50,10 @@ source /opt/intel/vppsdk/env.sh 3. Run `svet2/live555_install.sh` to install live555 -4. Run `build.sh` in sub-folerds to build specific component depending on use case +4. Run `build.sh` in sub-folerds to build each component + +## How to run +Please refer to [docker guide](./docker/README.md) to run the video analytic workflow ## Known limitations diff --git a/metro-ai-suite/video-processing-for-nvr/svet2/build.sh b/metro-ai-suite/video-processing-for-nvr/svet2/build.sh index 9f99d9a339..c5b59f422f 100755 --- a/metro-ai-suite/video-processing-for-nvr/svet2/build.sh +++ b/metro-ai-suite/video-processing-for-nvr/svet2/build.sh @@ -1,5 +1,4 @@ #!/bin/bash -set -e topdir=`pwd` if [ ! 
-d "/opt/intel/vppsdk" ] diff --git a/metro-ai-suite/video-processing-for-nvr/svet2/live555_install.sh b/metro-ai-suite/video-processing-for-nvr/svet2/live555_install.sh index 5e95997ff9..03ec0841aa 100755 --- a/metro-ai-suite/video-processing-for-nvr/svet2/live555_install.sh +++ b/metro-ai-suite/video-processing-for-nvr/svet2/live555_install.sh @@ -1,7 +1,5 @@ #!/bin/bash -set -e -apt update -apt install -y wget libssl-dev unzip cmake build-essential +sudo apt install -y wget libssl-dev if [ -d "live555-master" ]; then rm -rf live555-master diff --git a/metro-ai-suite/visual-search-question-and-answering/README.md b/metro-ai-suite/visual-search-question-and-answering/README.md index a2e7f33c44..7ad4d90a82 100644 --- a/metro-ai-suite/visual-search-question-and-answering/README.md +++ b/metro-ai-suite/visual-search-question-and-answering/README.md @@ -2,8 +2,8 @@ Combination of a multi-modal search engine and a visual Q&A assistant, allowing users to add search results as context for more related answers. -## Learn More -- [Overview](./docs/user-guide/index.md) +## Learn More +- [Overview](./docs/user-guide/Overview.md) - [Get started guide](./docs/user-guide/get-started.md) - [Tutorials](./docs/user-guide/tutorials.md) diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/toc.rst b/metro-ai-suite/visual-search-question-and-answering/docs/toc.rst new file mode 100644 index 0000000000..67928b90bb --- /dev/null +++ b/metro-ai-suite/visual-search-question-and-answering/docs/toc.rst @@ -0,0 +1,3 @@ +.. 
toctree:: + + user-guide/index \ No newline at end of file diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/Overview.md b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/Overview.md new file mode 100644 index 0000000000..afdfb7858c --- /dev/null +++ b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/Overview.md @@ -0,0 +1,79 @@ +# Visual Search and QA + +Combination of a multi-modal search engine and a visual Q&A assistant, allowing users to add search results as context for more related answers. + +## Overview + +We deliver a Reference Implementation, named "Visual Search and QA". It is mainly composed of three parts: a multi-modal search engine, a multi-modal visual QnA chatbot which can answer questions based on the search results, and a fronted web UI which allows users to interact with and examine the search engine and chatbot. + +The search engine is equipped with a data preparation microservice and a retriever microservice. Together they support a typical workflow: images and videos data are processed and stored into a database, then users can start a query with text description, the images and videos that fit the description would be found in the database and returned to users. + +The visual QnA chatbot is a large vision language model that can take text and/or visual content (image/video) as input to chat with. In this RI, we further support adding the results from the search engine directly to the chatbot, and let the chatbot answer questions based on the context. + +- **Programming Language:** Python + +## How It Works + +The high-level architecture is shown below + +![Architecture](./_images/visual_search_qa_design.png) + +Figure 1: Architecture Diagram + +### Dataprep + +The dataprep microservice processes images and videos, extracts their embeddings using the image encoder from the CLIP model, and stores them in a vector database. 
+ +#### Video Processing: + +- Extract frames at configurable intervals. + +#### Image/Frame Processing: + +- Resize, convert colors, normalize, and apply object detection with cropping. + +> **Note:** +> Object detection and cropping improve retrieval performance for large-scale scene images (e.g., high-resolution surveillance images with multiple objects). +> Since the image encoder input size is 224x224, resizing may render some objects (e.g., humans, vehicles) unrecognizable. +> Object detection and cropping preserve these objects as clear targets in separate cropped images. Metadata links the original image to its cropped versions. During retrieval, if a cropped image matches, the original image is returned. + +Instead of uploading data, users can specify directories on the host machine as data sources. This approach is more efficient for large datasets, which are common in the application's target scenarios. Assuming the application is self-hosted, users have certain access to the server. Then users know where the files are stored on the host machine, and can provide the file directory as input so that the microservice can process one-after-another or in batches. + +### Retriever + +The retriever microservice consists of a local multi-modal embedding model (same as the dataprep microservice) and a vector DB search engine. + +#### Workflow: + +1. The embedding model generates text embeddings for input descriptions (e.g., "traffic jam"). +2. The search engine searches the vector database for the top-k most similar matches. + +### Model Serving + +Check the [model serving doc](https://github.com/open-edge-platform/edge-ai-libraries/tree/main/microservices) for more details. + +### Web UI + +The UI, built with `streamlit`, allows users to: + +- Enter search queries. +- View matched results. +- Interact with the LVM in a chatbox with upload tools. 
+ +#### Visual Search and QA UI Initial Interface: + +![Visual Search and QA UI Init Interface](./_images/web_ui.png) + +Figure 2: Initial Web UI + +#### Visual Search and QA UI Example: + +![Visual Search and QA UI Example](./_images/web_ui_res.png) + +Figure 3: Web UI with an example + +## Learn More + +- Check the [System requirements](./system-requirements.md) +- Start with the [Get Started](./get-started.md). +- Deploy with [Helm chart](./deploy-with-helm.md) diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/deduplicate_after.png b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/deduplicate_after.png similarity index 100% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/deduplicate_after.png rename to metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/deduplicate_after.png diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/deduplicate_before.png b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/deduplicate_before.png similarity index 100% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/deduplicate_before.png rename to metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/deduplicate_before.png diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/filter_after.png b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/filter_after.png similarity index 100% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/filter_after.png rename to metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/filter_after.png diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/filter_before.png 
b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/filter_before.png similarity index 100% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/filter_before.png rename to metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/filter_before.png diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/visual_search_qa_design.png b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/visual_search_qa_design.png similarity index 100% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/visual_search_qa_design.png rename to metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/visual_search_qa_design.png diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/web_ui.png b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/web_ui.png similarity index 100% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/web_ui.png rename to metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/web_ui.png diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/web_ui_res.png b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/web_ui_res.png similarity index 100% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_assets/web_ui_res.png rename to metro-ai-suite/visual-search-question-and-answering/docs/user-guide/_images/web_ui_res.png diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/get-started/deploy-with-helm.md b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/deploy-with-helm.md similarity index 98% rename from metro-ai-suite/visual-search-question-and-answering/docs/user-guide/get-started/deploy-with-helm.md rename to 
metro-ai-suite/visual-search-question-and-answering/docs/user-guide/deploy-with-helm.md index 2321645e98..f51c01e44e 100644 --- a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/get-started/deploy-with-helm.md +++ b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/deploy-with-helm.md @@ -1,18 +1,16 @@ -# Deploy with Helm +# How to deploy with Helm Chart This section shows how to deploy the Visual Search and QA Application using Helm chart. ## Prerequisites - Before you begin, ensure that you have the following: - - Kubernetes\* cluster set up and running. - The cluster must support **dynamic provisioning of Persistent Volumes (PV)**. Refer to the [Kubernetes Dynamic Provisioning Guide](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) for more details. - Install `kubectl` on your system. See the [Installation Guide](https://kubernetes.io/docs/tasks/tools/install-kubectl/). Ensure access to the Kubernetes cluster. - Helm chart installed on your system. See the [Installation Guide](https://helm.sh/docs/intro/install/). -## Steps to deploy with Helm +## Steps to deploy with Helm Do the following to deploy VSQA using Helm chart. ### Step 1: Acquire the helm chart @@ -68,6 +66,7 @@ Edit the `values.yaml` file to set the necessary environment variables. At minim | `global.registry` | Remote registry to pull images from. Default as blank | `intel/` | | `global.env.keeppvc` | Set to true to persist the storage. Default is false | false | + ### Step 3: Build Helm Dependencies Navigate to the chart directory and build the Helm dependencies using the following command: @@ -103,7 +102,7 @@ Check the pods status with `kubectl get po -n milvus`. `RESTARTS` are possible, ### Step 5: Prepare host directories for models and data -```sh +``` mkdir -p $HOME/data ``` @@ -125,6 +124,7 @@ Install helm install vsqa . 
--values values.yaml -n vsqa ``` + ### Step 7: Verify the Deployment Check the status of the deployed resources to ensure everything is running correctly: @@ -146,6 +146,7 @@ kubectl port-forward -n vsqa svc/visual-search-qa-app 17580:17580 Leave the session alive, then access `http://localhost:17580` to view the application. + ### Step 9: Uninstall the Application To uninstall, use the following command: @@ -162,7 +163,6 @@ helm uninstall my-milvus -n milvus ## Troubleshooting - If you encounter any issues during the deployment process, check the Kubernetes logs for errors: - ```bash kubectl logs -n ``` @@ -170,5 +170,4 @@ helm uninstall my-milvus -n milvus - If the data preparation pod shows error while loading a large dataset, it might be caused by too large of the dataset size. Try breaking the dataset into smaller subsets and ingest each of them instead. ## Related links - -- [Get started with docker-compose](../get-started.md) \ No newline at end of file +- [Get started with docker-compose](./get-started.md) \ No newline at end of file diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/get-started.md b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/get-started.md index 869983feda..a47811e7bb 100644 --- a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/get-started.md +++ b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/get-started.md @@ -1,20 +1,18 @@ # Get Started Guide -- **Time to Complete:** 30 mins -- **Programming Language:** Python +- **Time to Complete:** 30 mins +- **Programming Language:** Python ## Get Started ### Prerequisites - -- Install Docker: [Installation Guide](https://docs.docker.com/get-docker/). -- Install Docker Compose: [Installation Guide](https://docs.docker.com/compose/install/). -- Install Intel Client GPU driver: [Installation Guide](https://dgpu-docs.intel.com/driver/client/overview.html). 
+- Install Docker: [Installation Guide](https://docs.docker.com/get-docker/). +- Install Docker Compose: [Installation Guide](https://docs.docker.com/compose/install/). +- Install Intel Client GPU driver: [Installation Guide](https://dgpu-docs.intel.com/driver/client/overview.html). ### Step 1: Get the docker images -#### Option 1: Build from source - +#### Option 1: build from source Clone the source code repository if you don't have it ```bash @@ -62,7 +60,7 @@ export TAG="latest" ### Step 2: Prepare host directories for models and data -```sh +``` mkdir -p $HOME/data ``` @@ -70,7 +68,7 @@ If you would like to test the application with a demo dataset, please continue a Otherwise, if you would like to use your own data (images and video), make sure to put them all in the created data directory (`$HOME/data` in the example commands above) and make sure the created path matches with the `HOST_DATA_PATH` variable in `deployment/docker-compose/env.sh` BEFORE deploying the services. -> **Note:** Supported media types are jpg, png, and mp4. +Note: supported media types: jpg, png, mp4 ### Step 3: Deploy @@ -83,71 +81,61 @@ Otherwise, if you would like to use your own data (images and video), make sure cd deployment/docker-compose/ ``` -2. Set up environment variables. - - > **Note:** You need to set models first. +2. Set up environment variables, note that you need to set models first - - **Ubuntu**: - - ``` bash - export EMBEDDING_MODEL_NAME="CLIP/clip-vit-h-14" # Replace with other models if needed - export VLM_MODEL_NAME="Qwen/Qwen2.5-VL-7B-Instruct" # Replace with other models if needed - source env.sh - ``` - - > **Important:** You must set `EMBEDDING_MODEL_NAME` and `VLM_MODEL_NAME` before running `env.sh`. 
See - > [multimodal-embedding-serving's supported models](https://github.com/open-edge-platform/edge-ai-libraries/blob/main/microservices/multimodal-embedding-serving/docs/user-guide/supported-models.md) for available embedding models, and - > [vlm-openvino-serving's supported models](https://github.com/open-edge-platform/edge-ai-libraries/blob/main/microservices/vlm-openvino-serving/docs/user-guide/Overview.md#models-supported) for available vlm models. + ``` bash + export EMBEDDING_MODEL_NAME="CLIP/clip-vit-h-14" # Replace with other models if needed + export VLM_MODEL_NAME="Qwen/Qwen2.5-VL-7B-Instruct" # Replace with other models if needed + source env.sh + ``` - You might want to pay some attention to `DEVICE`, `VLM_DEVICE` and `EMBEDDING_DEVICE` in `env.sh`. By default, they are `GPU.1`, which applies to a standard hardware platform with an integrated GPU as `GPU.0` and a discrete GPU as `GPU.1`. You can refer to [OpenVINO's query device sample](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/hello-query-device.html) to learn more about how to identify which GPU index should be set. + **Important**: You must set `EMBEDDING_MODEL_NAME` and `VLM_MODEL_NAME` before running `env.sh`. See [multimodal-embedding-serving's supported models](https://github.com/open-edge-platform/edge-ai-libraries/blob/main/microservices/multimodal-embedding-serving/docs/user-guide/supported-models.md) for available embedding models, and [vlm-openvino-serving's supported models](https://github.com/open-edge-platform/edge-ai-libraries/blob/main/microservices/vlm-openvino-serving/docs/user-guide/Overview.md#models-supported) for available vlm models. - Note that the default volume directory for Milvus (the vector DB) data is under `/opt/volumes`. - If this directory is under constraint or you simply would like to store the data in a - different location, please set the environment variable via `export DOCKER_VOLUME_DIRECTORY=`. 
- The Milvus data will be stored at `${DOCKER_VOLUME_DIRECTORY}/volumes` in such case. - - **EMT-S**: + You might want to pay some attention to `DEVICE`, `VLM_DEVICE` and `EMBEDDING_DEVICE` in `env.sh`. By default, they are `GPU.1`, which applies to a standard hardware platform with an integrated GPU as `GPU.0` and a discrete GPU as `GPU.1`. You can refer to [OpenVINO's query device sample](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/hello-query-device.html) to learn more about how to identify which GPU index should be set. - If you are on an EMT-S platform, set up the variables correspondingly by running: - ``` bash - cd emt-s # go to emt-s specific files - export EMBEDDING_MODEL_NAME="CLIP/clip-vit-h-14" # Replace with other models if needed - export VLM_MODEL_NAME="Qwen/Qwen2.5-VL-7B-Instruct" # Replace with other models if needed - source env.sh - ``` + **Note**: The default volume directory for Milvus (the vector DB) data is under `/opt/volumes`. If this directory is under constraint or you simply would like to store the data in a diffrent location, please set the environment variable via `export DOCKER_VOLUME_DIRECTORY=`. The Milvus data will be stored at `${DOCKER_VOLUME_DIRECTORY}/volumes` in such case. -3. Deploy with docker compose +
+ For EMT-S platform + If you are on an EMT-S platform, please set up the variables correspondingly by running ``` bash - docker compose -f compose_milvus.yaml up -d + cd emt-s # go to emt-s specific files + export EMBEDDING_MODEL_NAME="CLIP/clip-vit-h-14" # Replace with other models if needed + export VLM_MODEL_NAME="Qwen/Qwen2.5-VL-7B-Instruct" # Replace with other models if needed + source env.sh ``` +
- It might take a while to start the services for the first time, as there are some models to be prepared. +3. Deploy with docker compose - Check if all microservices are up and runnning with + ``` bash + docker compose -f compose_milvus.yaml up -d + ``` - ```sh - docker compose -f compose_milvus.yaml ps - ``` +It might take a while to start the services for the first time, as there are some models to be prepared. - Output: - - ```text - NAME COMMAND SERVICE STATUS PORTS - dataprep-visualdata-milvus "uvicorn dataprep_vi…" dataprep-visualdata-milvus running (healthy) 0.0.0.0:9990->9990/tcp, :::9990->9990/tcp - milvus-etcd "etcd -advertise-cli…" milvus-etcd running (healthy) 2379-2380/tcp - milvus-minio "/usr/bin/docker-ent…" milvus-minio running (healthy) 0.0.0.0:9000-9001->9000-9001/tcp, :::9000-9001->9000-9001/tcp - milvus-standalone "/tini -- milvus run…" milvus-standalone running (healthy) 0.0.0.0:9091->9091/tcp, 0.0.0.0:19530->19530/tcp, :::9091->9091/tcp, :::19530->19530/tcp - multimodal-embedding gunicorn -b 0.0.0.0:8000 - ... 
Up (unhealthy) 0.0.0.0:9777->8000/tcp,:::9777->8000/tcp - retriever-milvus "uvicorn retriever_s…" retriever-milvus running (healthy) 0.0.0.0:7770->7770/tcp, :::7770->7770/tcp - visual-search-qa-app "streamlit run app.p…" visual-search-qa-app running (healthy) 0.0.0.0:17580->17580/tcp, :::17580->17580/tcp - vlm-openvino-serving "/bin/bash -c '/app/…" vlm-openvino-serving running (healthy) 0.0.0.0:9764->8000/tcp, :::9764->8000/tcp - ``` +Check if all microservices are up and running with `docker compose -f compose_milvus.yaml ps` + +Output +``` +NAME COMMAND SERVICE STATUS PORTS +dataprep-visualdata-milvus "uvicorn dataprep_vi…" dataprep-visualdata-milvus running (healthy) 0.0.0.0:9990->9990/tcp, :::9990->9990/tcp +milvus-etcd "etcd -advertise-cli…" milvus-etcd running (healthy) 2379-2380/tcp +milvus-minio "/usr/bin/docker-ent…" milvus-minio running (healthy) 0.0.0.0:9000-9001->9000-9001/tcp, :::9000-9001->9000-9001/tcp +milvus-standalone "/tini -- milvus run…" milvus-standalone running (healthy) 0.0.0.0:9091->9091/tcp, 0.0.0.0:19530->19530/tcp, :::9091->9091/tcp, :::19530->19530/tcp +multimodal-embedding gunicorn -b 0.0.0.0:8000 - ... Up (unhealthy) 0.0.0.0:9777->8000/tcp,:::9777->8000/tcp +retriever-milvus "uvicorn retriever_s…" retriever-milvus running (healthy) 0.0.0.0:7770->7770/tcp, :::7770->7770/tcp +visual-search-qa-app "streamlit run app.p…" visual-search-qa-app running (healthy) 0.0.0.0:17580->17580/tcp, :::17580->17580/tcp +vlm-openvino-serving "/bin/bash -c '/app/…" vlm-openvino-serving running (healthy) 0.0.0.0:9764->8000/tcp, :::9764->8000/tcp +``` #### Option2: Deploy in Kubernetes -Refer to [Deploy with helm](./get-started/deploy-with-helm.md) for details. +Please refer to [Deploy with helm](./deploy-with-helm.md) for details. + ## Try with a demo dataset @@ -156,8 +144,7 @@ Refer to [Deploy with helm](./get-started/deploy-with-helm.md) for details.
### Prepare demo dataset [DAVIS](https://davischallenge.org/davis2017/code.html) Create a `prepare_demo_dataset.sh` script as following - -```text +``` CONTAINER_IDS=$(docker ps -a --filter "status=running" -q | xargs -r docker inspect --format '{{.Config.Image}} {{.Id}}' | grep "dataprep-visualdata-milvus" | awk '{print $2}') # Check if any containers were found @@ -174,7 +161,6 @@ exit 0 ``` Run the script and check your host data directory `$HOME/data`, see if `DAVIS` is there. - ```bash bash prepare_demo_dataset.sh ``` @@ -184,7 +170,6 @@ In order to save time, only a subset of the dataset would be processed. They are This script only works when the `dataprep-visualdata-milvus` service is available. ### Use it on Web UI - Go to `http://{host_ip}:17580` with a browser. Put the exact path to the subset of demo dataset (usually`/home/user/data/DAVIS/subset`, may vary according to your local username) into `file directory on host`. Click `UpdataDB` and wait for the uploading done. Try searching with query text `tractor`, see if the results are correct. @@ -209,18 +194,80 @@ In this get started guide, you learned how to: ## Learn More -- Check [System requirements](./get-started/system-requirements.md). -- Learn how to deploy the application with [Helm](./get-started/deploy-with-helm.md). +- Check the [System requirements](./system-requirements.md) - Explore more functionalities in [Tutorials](./tutorials.md). -- Understand the components, services, architecture, and data flow, in [Overview](./index.md). -- Check [Troubleshooting](./troubleshooting.md). +- Understand the components, services, architecture, and data flow, in the [Overview](./Overview.md). 
+ + +## Troubleshooting + +### Error Logs + +- Check the container log if a microservice shows mal-functional behaviours +```bash +docker logs +``` + +- Click `showInfo` button on the web UI to get essential information about microservices + +### VLM Microservice Model Loading Issues + +**Problem**: VLM microservice fails to load or save models with permission errors, or you see errors related to model access in the logs. + +**Cause**: This issue occurs when the `ov-models` Docker volume was created with incorrect ownership (root user) in previous versions of the application. The VLM microservice runs as a non-root user and requires proper permissions to read/write models. + +**Symptoms**: +- VLM microservice container fails to start or crashes during model loading +- Permission denied errors in VLM service logs +- Model conversion or caching failures +- Error messages mentioning `/home/appuser/.cache/huggingface` or `/app/ov-model` access issues + +**Solution**: +1. Stop the running application: + ```bash + docker compose -f compose_milvus.yaml down + ``` + +2. Remove the existing `ov-models`: + ```bash + docker volume rm ov-models + ``` + +3. Restart the application (the volume will be recreated with correct permissions): + ```bash + source env.sh + docker compose -f compose_milvus.yaml up -d + ``` + +**Note**: Removing the `ov-models` volume will delete any previously cached/converted models. The VLM service will automatically re-download and convert models on the next startup, which may take additional time depending on your internet connection and the model size. + +### Embedding Model Changed Issues + +**Problem**: Dataprep microservice API fails and "mismatch" is found in logs. + +**Cause**: If the application is re-deployed with a different embedding model set for the multimodal embedding service other than the previous deployment, it is possible that the embedding dimension has changed as well, leading to a vector dimension mismatch in vector DB. 
+ +**Solution**: +1. Stop the running application: + ```bash + docker compose -f compose_milvus.yaml down + ``` + +2. Remove the existing Milvus volumes: + ```bash + sudo rm -rf /volumes/milvus + sudo rm -rf /volumes/minio + sudo rm -rf /volumes/etcd + ``` + +3. Restart the application: + ```bash + source env.sh + docker compose -f compose_milvus.yaml up -d + ``` + - diff --git a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/index.md b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/index.md index 7c80dc7894..390efc0bdb 100644 --- a/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/index.md +++ b/metro-ai-suite/visual-search-question-and-answering/docs/user-guide/index.md @@ -38,7 +38,7 @@ questions based on the context. The high-level architecture is shown below -![architecture diagram](./_assets/visual_search_qa_design.png) +![architecture diagram](./_images/visual_search_qa_design.png) ### Dataprep @@ -97,27 +97,30 @@ The UI, built with `streamlit`, allows users to: #### Visual Search and QA UI Initial Interface -![initial web UI image](./_assets/web_ui.png)\ -*Figure 1: Initial Web UI* +![initial web UI image](./_images/web_ui.png) + + Figure 1: Initial Web UI #### Visual Search and QA UI Example -![web UI with example](./_assets/web_ui_res.png)\ -*Figure 2: Web UI with an example* +![web UI with example](./_images/web_ui_res.png) + + Figure 2: Web UI with an example ## Learn More -- Check the [System requirements](./get-started/system-requirements.md). +- Check the [System requirements](./system-requirements.md). - Start with the [Get Started](./get-started.md). -- Deploy with [Helm chart](./get-started/deploy-with-helm.md). +- Deploy with [Helm chart](./deploy-with-helm.md).
planner_server:
@@ -202,14 +188,14 @@ The Ackermann steering version of this plugin utilizes some additional parameter
     use_sim_time: True
     planner_plugins: ["GridBased"]
     GridBased:
-      plugin: "its_planner::ITSPlanner"
+      plugin: "its_planner/ITSPlanner"
       interpolation_resolution: 0.05
       catmull_spline: False
       smoothing_window: 15
       buffer_size: 10
       build_road_map_once: True
       min_samples: 250
-      roadmap: "PROBABILISTIC"
+      roadmap: "PROBABLISTIC"
       w: 32
       h: 32
       n: 2
diff --git a/robotics-ai-suite/components/its-planner/collab_slam/run_collab.sh b/robotics-ai-suite/components/its-planner/collab_slam/run_collab.sh
index a42eca2cf5..d0e00c8002 100755
--- a/robotics-ai-suite/components/its-planner/collab_slam/run_collab.sh
+++ b/robotics-ai-suite/components/its-planner/collab_slam/run_collab.sh
@@ -2,52 +2,16 @@
 # SPDX-License-Identifier: Apache-2.0
 # Copyright (C) 2025 Intel Corporation
 
-current_dir=$(dirname "$(realpath "${BASH_SOURCE[0]}")")
-
-# Auto-detect ROS distribution instead of hardcoding
-if [ -n "$ROS_DISTRO" ]; then
-    echo "Using ROS distribution: $ROS_DISTRO"
-    source /opt/ros/"$ROS_DISTRO"/setup.bash
-else
-    echo "ROS_DISTRO not set, attempting to detect..."
-    if [ -f "/opt/ros/jazzy/setup.bash" ]; then
-        echo "Found Jazzy, using it"
-        source /opt/ros/jazzy/setup.bash
-    elif [ -f "/opt/ros/humble/setup.bash" ]; then
-        echo "Found Humble, using it"  
-        source /opt/ros/humble/setup.bash
-    else
-        echo "Error: No ROS installation found!"
-        exit 1
-    fi
-fi
-
-# Source workspace - use absolute path to avoid directory dependency issues
-workspace_dir="$(realpath "${current_dir}"/../..)"
-source "${workspace_dir}"/install/setup.bash
+current_dir=$(dirname "$(realpath ${BASH_SOURCE[0]})")
 
+source /opt/ros/humble/setup.bash
+source ../../install/setup.bash
 export TURTLEBOT3_MODEL=waffle
-
-# Set Gazebo model path (variable name differs between distributions)
-if [ "$ROS_DISTRO" = "jazzy" ]; then
-    export GZ_SIM_RESOURCE_PATH=$GZ_SIM_RESOURCE_PATH:/opt/ros/"$ROS_DISTRO"/share/turtlebot3_gazebo/models
-else
-    export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/"$ROS_DISTRO"/share/turtlebot3_gazebo/models
-fi
+export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
 
 if [ "$1" = "localization" ]; then
-    echo "Starting collaborative localization..."
-    cd "${current_dir}" || exit  # Ensure we're in the right directory for launch files
-    ros2 launch collab_localization.launch.py remap_map_id:='/univloc_server/map' default_bt_xml_filename:="${workspace_dir}"/its_planner/navigate_w_recovery_"${ROS_DISTRO}".xml
+ ros2 launch collab_localization.launch.py remap_map_id:='/univloc_server/map'  default_bt_xml_filename:=/${current_dir}/../its_planner/navigate_w_recovery.xml
 elif [ "$1" = "mapping" ]; then
-    echo "Starting collaborative mapping..."
-    # Use proper ROS2 launch - TODO: move launch files to installed package  
-    cd "${current_dir}" || exit  # Ensure we're in the right directory for launch files
-    ros2 launch collab_mapping.launch.py remap_map_id:='/univloc_tracker_0/map'
-else
-    echo "Usage: $0 {localization|mapping}"
-    echo "  localization - Start collaborative localization"
-    echo "  mapping      - Start collaborative mapping"
-    exit 1
+ ros2 launch collab_mapping.launch.py remap_map_id:='/univloc_tracker_0/map'
 fi
 
diff --git a/robotics-ai-suite/components/its-planner/docs/README.md b/robotics-ai-suite/components/its-planner/docs/README.md
index 4dd94d316f..4b07e51573 100644
--- a/robotics-ai-suite/components/its-planner/docs/README.md
+++ b/robotics-ai-suite/components/its-planner/docs/README.md
@@ -55,9 +55,9 @@ The source code of this component can be found here:
 ## Getting Started
 
 Robotics AI Dev Kit provides a ROS 2 Deb package for the application,
-supported by the following platforms:
+supported by the following platform:
 
-- ROS 2 version: humble or jazzy
+- ROS 2 version: humble
 
 ## Prerequisites
 
@@ -69,45 +69,33 @@ supported by the following platforms:
 
 ## Install Deb package
 
-Install the `ros-${ROS_DISTRO}-its-planner` Deb package from the Intel®
+Install the `ros-humble-its-planner` Deb package from the Intel®
 Robotics AI Dev Kit APT repository
 
 > ``` bash
-> sudo apt install ros-${ROS_DISTRO}-its-planner
+> sudo apt install ros-humble-its-planner
 > ```
 
 Run the following script to set environment variables:
 
 > ``` bash
-> source /opt/ros/$ROS_DISTRO/setup.bash        # ROS_DISTRO=humble or jazzy
-> export TURTLEBOT3_MODEL=waffle
-> 
-> # Set Gazebo model path (variable name differs between distributions)
-> if [ "$ROS_DISTRO" = "jazzy" ]; then
->     export GZ_SIM_RESOURCE_PATH=$GZ_SIM_RESOURCE_PATH:/opt/ros/$ROS_DISTRO/share/turtlebot3_gazebo/models
-> else
->     export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/$ROS_DISTRO/share/turtlebot3_gazebo/models
-> fi
+> source /opt/ros/humble/setup.bash
+> export TURTLEBOT3_MODEL=waffle_pi
+> export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
 > ```
 
 To launch the default ITS planner which is based on differential drive
 robot, run:
 
 > ``` bash
-> ros2 launch nav2_bringup tb3_simulation_launch.py \
->   headless:=False \
->   params_file:=/opt/ros/$ROS_DISTRO/share/its_planner/nav2_params_${ROS_DISTRO}.yaml \
->   default_bt_xml_filename:=/opt/ros/$ROS_DISTRO/share/its_planner/navigate_w_recovery_${ROS_DISTRO}.xml
+> ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False params_file:=/opt/ros/humble/share/its_planner/nav2_params.yaml default_bt_xml_filename:=/opt/ros/humble/share/its_planner/navigate_w_recovery.xml
 > ```
 
 ITS Planner also supports Ackermann steering; to launch the Ackermann
 ITS planner run:
 
 > ``` bash
-> ros2 launch nav2_bringup tb3_simulation_launch.py \
->   headless:=False \
->   params_file:=/opt/ros/$ROS_DISTRO/share/its_planner/nav2_params_dubins_${ROS_DISTRO}.yaml \
->   default_bt_xml_filename:=/opt/ros/$ROS_DISTRO/share/its_planner/navigate_w_recovery_${ROS_DISTRO}.xml
+> ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False params_file:=/opt/ros/humble/share/its_planner/nav2_params_dubins.yaml default_bt_xml_filename:=/opt/ros/humble/share/its_planner/navigate_w_recovery.xml
 > ```
 
 [!NOTE]
diff --git a/robotics-ai-suite/components/its-planner/its_planner/global_planner_plugin.xml b/robotics-ai-suite/components/its-planner/its_planner/global_planner_plugin.xml
index 41ef41d890..b22f9c5bea 100755
--- a/robotics-ai-suite/components/its-planner/its_planner/global_planner_plugin.xml
+++ b/robotics-ai-suite/components/its-planner/its_planner/global_planner_plugin.xml
@@ -5,7 +5,7 @@ SPDX-License-Identifier: Apache-2.0
 -->
 
 
-	
+	
 	  This is a plugin which utilizes ITS method to find global path.
 	
 
diff --git a/robotics-ai-suite/components/its-planner/its_planner/nav2_params.yaml b/robotics-ai-suite/components/its-planner/its_planner/nav2_params.yaml
new file mode 100644
index 0000000000..03456abbe9
--- /dev/null
+++ b/robotics-ai-suite/components/its-planner/its_planner/nav2_params.yaml
@@ -0,0 +1,359 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (C) 2025 Intel Corporation
+---
+amcl:
+  ros__parameters:
+    use_sim_time: True
+    alpha1: 0.2
+    alpha2: 0.2
+    alpha3: 0.2
+    alpha4: 0.2
+    alpha5: 0.2
+    base_frame_id: "base_footprint"
+    beam_skip_distance: 0.5
+    beam_skip_error_threshold: 0.9
+    beam_skip_threshold: 0.3
+    do_beamskip: false
+    global_frame_id: "map"
+    lambda_short: 0.1
+    laser_likelihood_max_dist: 2.0
+    laser_max_range: 100.0
+    laser_min_range: -1.0
+    laser_model_type: "likelihood_field"
+    max_beams: 60
+    max_particles: 2000
+    min_particles: 500
+    odom_frame_id: "odom"
+    pf_err: 0.05
+    pf_z: 0.99
+    recovery_alpha_fast: 0.0
+    recovery_alpha_slow: 0.0
+    resample_interval: 1
+    robot_model_type: "nav2_amcl::DifferentialMotionModel"
+    save_pose_rate: 0.5
+    sigma_hit: 0.2
+    tf_broadcast: true
+    transform_tolerance: 1.0
+    update_min_a: 0.2
+    update_min_d: 0.25
+    z_hit: 0.5
+    z_max: 0.05
+    z_rand: 0.5
+    z_short: 0.05
+    scan_topic: scan
+
+bt_navigator:
+  ros__parameters:
+    use_sim_time: True
+    global_frame: map
+    robot_base_frame: base_link
+    odom_topic: /odom
+    bt_loop_duration: 10
+    default_server_timeout: 20
+    # 'default_nav_through_poses_bt_xml' and 'default_nav_to_pose_bt_xml' are use defaults:
+    # nav2_bt_navigator/navigate_to_pose_w_replanning_and_recovery.xml
+    # nav2_bt_navigator/navigate_through_poses_w_replanning_and_recovery.xml
+    # They can be set here or via a RewrittenYaml remap from a parent launch file to Nav2.
+    plugin_lib_names:
+      - nav2_compute_path_to_pose_action_bt_node
+      - nav2_compute_path_through_poses_action_bt_node
+      - nav2_smooth_path_action_bt_node
+      - nav2_follow_path_action_bt_node
+      - nav2_spin_action_bt_node
+      - nav2_wait_action_bt_node
+      - nav2_assisted_teleop_action_bt_node
+      - nav2_back_up_action_bt_node
+      - nav2_drive_on_heading_bt_node
+      - nav2_clear_costmap_service_bt_node
+      - nav2_is_stuck_condition_bt_node
+      - nav2_goal_reached_condition_bt_node
+      - nav2_goal_updated_condition_bt_node
+      - nav2_globally_updated_goal_condition_bt_node
+      - nav2_is_path_valid_condition_bt_node
+      - nav2_initial_pose_received_condition_bt_node
+      - nav2_reinitialize_global_localization_service_bt_node
+      - nav2_rate_controller_bt_node
+      - nav2_distance_controller_bt_node
+      - nav2_speed_controller_bt_node
+      - nav2_truncate_path_action_bt_node
+      - nav2_truncate_path_local_action_bt_node
+      - nav2_goal_updater_node_bt_node
+      - nav2_recovery_node_bt_node
+      - nav2_pipeline_sequence_bt_node
+      - nav2_round_robin_node_bt_node
+      - nav2_transform_available_condition_bt_node
+      - nav2_time_expired_condition_bt_node
+      - nav2_path_expiring_timer_condition
+      - nav2_distance_traveled_condition_bt_node
+      - nav2_single_trigger_bt_node
+      - nav2_goal_updated_controller_bt_node
+      - nav2_is_battery_low_condition_bt_node
+      - nav2_navigate_through_poses_action_bt_node
+      - nav2_navigate_to_pose_action_bt_node
+      - nav2_remove_passed_goals_action_bt_node
+      - nav2_planner_selector_bt_node
+      - nav2_controller_selector_bt_node
+      - nav2_goal_checker_selector_bt_node
+      - nav2_controller_cancel_bt_node
+      - nav2_path_longer_on_approach_bt_node
+      - nav2_wait_cancel_bt_node
+      - nav2_spin_cancel_bt_node
+      - nav2_back_up_cancel_bt_node
+      - nav2_assisted_teleop_cancel_bt_node
+      - nav2_drive_on_heading_cancel_bt_node
+
+bt_navigator_navigate_through_poses_rclcpp_node:
+  ros__parameters:
+    use_sim_time: True
+
+bt_navigator_navigate_to_pose_rclcpp_node:
+  ros__parameters:
+    use_sim_time: True
+
+controller_server:
+  ros__parameters:
+    use_sim_time: True
+    controller_frequency: 20.0
+    min_x_velocity_threshold: 0.001
+    min_y_velocity_threshold: 0.5
+    min_theta_velocity_threshold: 0.001
+    failure_tolerance: 0.3
+    progress_checker_plugin: "progress_checker"
+    goal_checker_plugins: ["general_goal_checker"] # "precise_goal_checker"
+    controller_plugins: ["FollowPath"]
+
+    # Progress checker parameters
+    progress_checker:
+      plugin: "nav2_controller::SimpleProgressChecker"
+      required_movement_radius: 0.5
+      movement_time_allowance: 10.0
+    # Goal checker parameters
+    #precise_goal_checker:
+    #  plugin: "nav2_controller::SimpleGoalChecker"
+    #  xy_goal_tolerance: 0.25
+    #  yaw_goal_tolerance: 0.25
+    #  stateful: True
+    general_goal_checker:
+      stateful: True
+      plugin: "nav2_controller::SimpleGoalChecker"
+      xy_goal_tolerance: 0.25
+      yaw_goal_tolerance: 0.25
+    # DWB parameters
+    FollowPath:
+      plugin: "dwb_core::DWBLocalPlanner"
+      debug_trajectory_details: True
+      min_vel_x: 0.0
+      min_vel_y: 0.0
+      max_vel_x: 0.26
+      max_vel_y: 0.0
+      max_vel_theta: 1.0
+      min_speed_xy: 0.0
+      max_speed_xy: 0.26
+      min_speed_theta: 0.0
+      # Add high threshold velocity for turtlebot 3 issue.
+      # https://github.com/ROBOTIS-GIT/turtlebot3_simulations/issues/75
+      acc_lim_x: 2.5
+      acc_lim_y: 0.0
+      acc_lim_theta: 3.2
+      decel_lim_x: -2.5
+      decel_lim_y: 0.0
+      decel_lim_theta: -3.2
+      vx_samples: 20
+      vy_samples: 5
+      vtheta_samples: 20
+      sim_time: 1.7
+      linear_granularity: 0.05
+      angular_granularity: 0.025
+      transform_tolerance: 0.2
+      xy_goal_tolerance: 0.25
+      trans_stopped_velocity: 0.25
+      short_circuit_trajectory_evaluation: True
+      stateful: True
+      critics: ["RotateToGoal", "Oscillation", "BaseObstacle", "GoalAlign", "PathAlign", "PathDist", "GoalDist"]
+      BaseObstacle.scale: 0.02
+      PathAlign.scale: 32.0
+      PathAlign.forward_point_distance: 0.1
+      GoalAlign.scale: 24.0
+      GoalAlign.forward_point_distance: 0.1
+      PathDist.scale: 32.0
+      GoalDist.scale: 24.0
+      RotateToGoal.scale: 32.0
+      RotateToGoal.slowing_factor: 5.0
+      RotateToGoal.lookahead_time: -1.0
+
+local_costmap:
+  local_costmap:
+    ros__parameters:
+      update_frequency: 5.0
+      publish_frequency: 2.0
+      global_frame: odom
+      robot_base_frame: base_link
+      use_sim_time: True
+      rolling_window: true
+      width: 3
+      height: 3
+      resolution: 0.05
+      robot_radius: 0.22
+      plugins: ["voxel_layer", "inflation_layer"]
+      inflation_layer:
+        plugin: "nav2_costmap_2d::InflationLayer"
+        cost_scaling_factor: 3.0
+        inflation_radius: 0.55
+      voxel_layer:
+        plugin: "nav2_costmap_2d::VoxelLayer"
+        enabled: True
+        publish_voxel_map: True
+        origin_z: 0.0
+        z_resolution: 0.05
+        z_voxels: 16
+        max_obstacle_height: 2.0
+        mark_threshold: 0
+        observation_sources: scan
+        scan:
+          topic: /scan
+          max_obstacle_height: 2.0
+          clearing: True
+          marking: True
+          data_type: "LaserScan"
+          raytrace_max_range: 3.0
+          raytrace_min_range: 0.0
+          obstacle_max_range: 2.5
+          obstacle_min_range: 0.0
+      static_layer:
+        plugin: "nav2_costmap_2d::StaticLayer"
+        map_subscribe_transient_local: True
+      always_send_full_costmap: True
+
+global_costmap:
+  global_costmap:
+    ros__parameters:
+      update_frequency: 1.0
+      publish_frequency: 1.0
+      global_frame: map
+      robot_base_frame: base_link
+      use_sim_time: True
+      robot_radius: 0.22
+      resolution: 0.05
+      track_unknown_space: true
+      plugins: ["static_layer", "obstacle_layer", "inflation_layer"]
+      obstacle_layer:
+        plugin: "nav2_costmap_2d::ObstacleLayer"
+        enabled: True
+        observation_sources: scan
+        scan:
+          topic: /scan
+          max_obstacle_height: 2.0
+          clearing: True
+          marking: True
+          data_type: "LaserScan"
+          raytrace_max_range: 3.0
+          raytrace_min_range: 0.0
+          obstacle_max_range: 2.5
+          obstacle_min_range: 0.0
+      static_layer:
+        plugin: "nav2_costmap_2d::StaticLayer"
+        map_subscribe_transient_local: True
+      inflation_layer:
+        plugin: "nav2_costmap_2d::InflationLayer"
+        cost_scaling_factor: 3.0
+        inflation_radius: 0.55
+      always_send_full_costmap: True
+
+map_server:
+  ros__parameters:
+    use_sim_time: True
+    # Overridden in launch by the "map" launch configuration or provided default value.
+    # To use in yaml, remove the default "map" value in the tb3_simulation_launch.py file & provide full path to map below.
+    yaml_filename: ""
+
+map_saver:
+  ros__parameters:
+    use_sim_time: True
+    save_map_timeout: 5.0
+    free_thresh_default: 0.25
+    occupied_thresh_default: 0.65
+    map_subscribe_transient_local: True
+
+planner_server:
+  ros__parameters:
+    expected_planner_frequency: 0.01
+    use_sim_time: True
+    planner_plugins: ["GridBased"]
+    GridBased:
+      plugin: "its_planner/ITSPlanner"
+      interpolation_resolution: 0.05
+      catmull_spline: False
+      smoothing_window: 15
+      buffer_size: 10
+      build_road_map_once: True
+      enable_k: False
+      min_samples: 250
+      roadmap: "PROBABLISTIC"
+      w: 32
+      h: 32
+      n: 2
+
+smoother_server:
+  ros__parameters:
+    use_sim_time: True
+    smoother_plugins: ["simple_smoother"]
+    simple_smoother:
+      plugin: "nav2_smoother::SimpleSmoother"
+      tolerance: 1.0e-10
+      max_its: 1000
+      do_refinement: True
+
+behavior_server:
+  ros__parameters:
+    costmap_topic: local_costmap/costmap_raw
+    footprint_topic: local_costmap/published_footprint
+    cycle_frequency: 10.0
+    behavior_plugins: ["spin", "backup", "drive_on_heading", "assisted_teleop", "wait"]
+    spin:
+      plugin: "nav2_behaviors/Spin"
+    backup:
+      plugin: "nav2_behaviors/BackUp"
+    drive_on_heading:
+      plugin: "nav2_behaviors/DriveOnHeading"
+    wait:
+      plugin: "nav2_behaviors/Wait"
+    assisted_teleop:
+      plugin: "nav2_behaviors/AssistedTeleop"
+    global_frame: odom
+    robot_base_frame: base_link
+    transform_tolerance: 0.1
+    use_sim_time: true
+    simulate_ahead_time: 2.0
+    max_rotational_vel: 1.0
+    min_rotational_vel: 0.4
+    rotational_acc_lim: 3.2
+
+robot_state_publisher:
+  ros__parameters:
+    use_sim_time: True
+
+waypoint_follower:
+  ros__parameters:
+    use_sim_time: True
+    loop_rate: 20
+    stop_on_failure: false
+    waypoint_task_executor_plugin: "wait_at_waypoint"
+    wait_at_waypoint:
+      plugin: "nav2_waypoint_follower::WaitAtWaypoint"
+      enabled: True
+      waypoint_pause_duration: 200
+
+velocity_smoother:
+  ros__parameters:
+    use_sim_time: True
+    smoothing_frequency: 20.0
+    scale_velocities: False
+    feedback: "OPEN_LOOP"
+    max_velocity: [0.26, 0.0, 1.0]
+    min_velocity: [-0.26, 0.0, -1.0]
+    max_accel: [2.5, 0.0, 3.2]
+    max_decel: [-2.5, 0.0, -3.2]
+    odom_topic: "odom"
+    odom_duration: 0.1
+    deadband_velocity: [0.0, 0.0, 0.0]
+    velocity_timeout: 1.0
diff --git a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins.yaml b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins.yaml
new file mode 100644
index 0000000000..5d759a25fc
--- /dev/null
+++ b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins.yaml
@@ -0,0 +1,364 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (C) 2025 Intel Corporation
+---
+amcl:
+  ros__parameters:
+    use_sim_time: True
+    alpha1: 0.2
+    alpha2: 0.2
+    alpha3: 0.2
+    alpha4: 0.2
+    alpha5: 0.2
+    base_frame_id: "base_footprint"
+    beam_skip_distance: 0.5
+    beam_skip_error_threshold: 0.9
+    beam_skip_threshold: 0.3
+    do_beamskip: false
+    global_frame_id: "map"
+    lambda_short: 0.1
+    laser_likelihood_max_dist: 2.0
+    laser_max_range: 100.0
+    laser_min_range: -1.0
+    laser_model_type: "likelihood_field"
+    max_beams: 60
+    max_particles: 2000
+    min_particles: 500
+    odom_frame_id: "odom"
+    pf_err: 0.05
+    pf_z: 0.99
+    recovery_alpha_fast: 0.0
+    recovery_alpha_slow: 0.0
+    resample_interval: 1
+    robot_model_type: "nav2_amcl::DifferentialMotionModel"
+    save_pose_rate: 0.5
+    sigma_hit: 0.2
+    tf_broadcast: true
+    transform_tolerance: 1.0
+    update_min_a: 0.2
+    update_min_d: 0.25
+    z_hit: 0.5
+    z_max: 0.05
+    z_rand: 0.5
+    z_short: 0.05
+    scan_topic: scan
+
+bt_navigator:
+  ros__parameters:
+    use_sim_time: True
+    global_frame: map
+    robot_base_frame: base_link
+    odom_topic: /odom
+    bt_loop_duration: 10
+    default_server_timeout: 20
+    # 'default_nav_through_poses_bt_xml' and 'default_nav_to_pose_bt_xml' are use defaults:
+    # nav2_bt_navigator/navigate_to_pose_w_replanning_and_recovery.xml
+    # nav2_bt_navigator/navigate_through_poses_w_replanning_and_recovery.xml
+    # They can be set here or via a RewrittenYaml remap from a parent launch file to Nav2.
+    plugin_lib_names:
+      - nav2_compute_path_to_pose_action_bt_node
+      - nav2_compute_path_through_poses_action_bt_node
+      - nav2_smooth_path_action_bt_node
+      - nav2_follow_path_action_bt_node
+      - nav2_spin_action_bt_node
+      - nav2_wait_action_bt_node
+      - nav2_assisted_teleop_action_bt_node
+      - nav2_back_up_action_bt_node
+      - nav2_drive_on_heading_bt_node
+      - nav2_clear_costmap_service_bt_node
+      - nav2_is_stuck_condition_bt_node
+      - nav2_goal_reached_condition_bt_node
+      - nav2_goal_updated_condition_bt_node
+      - nav2_globally_updated_goal_condition_bt_node
+      - nav2_is_path_valid_condition_bt_node
+      - nav2_initial_pose_received_condition_bt_node
+      - nav2_reinitialize_global_localization_service_bt_node
+      - nav2_rate_controller_bt_node
+      - nav2_distance_controller_bt_node
+      - nav2_speed_controller_bt_node
+      - nav2_truncate_path_action_bt_node
+      - nav2_truncate_path_local_action_bt_node
+      - nav2_goal_updater_node_bt_node
+      - nav2_recovery_node_bt_node
+      - nav2_pipeline_sequence_bt_node
+      - nav2_round_robin_node_bt_node
+      - nav2_transform_available_condition_bt_node
+      - nav2_time_expired_condition_bt_node
+      - nav2_path_expiring_timer_condition
+      - nav2_distance_traveled_condition_bt_node
+      - nav2_single_trigger_bt_node
+      - nav2_goal_updated_controller_bt_node
+      - nav2_is_battery_low_condition_bt_node
+      - nav2_navigate_through_poses_action_bt_node
+      - nav2_navigate_to_pose_action_bt_node
+      - nav2_remove_passed_goals_action_bt_node
+      - nav2_planner_selector_bt_node
+      - nav2_controller_selector_bt_node
+      - nav2_goal_checker_selector_bt_node
+      - nav2_controller_cancel_bt_node
+      - nav2_path_longer_on_approach_bt_node
+      - nav2_wait_cancel_bt_node
+      - nav2_spin_cancel_bt_node
+      - nav2_back_up_cancel_bt_node
+      - nav2_assisted_teleop_cancel_bt_node
+      - nav2_drive_on_heading_cancel_bt_node
+
+bt_navigator_navigate_through_poses_rclcpp_node:
+  ros__parameters:
+    use_sim_time: True
+
+bt_navigator_navigate_to_pose_rclcpp_node:
+  ros__parameters:
+    use_sim_time: True
+
+controller_server:
+  ros__parameters:
+    use_sim_time: True
+    controller_frequency: 20.0
+    min_x_velocity_threshold: 0.001
+    min_y_velocity_threshold: 0.5
+    min_theta_velocity_threshold: 0.001
+    failure_tolerance: 0.3
+    progress_checker_plugin: "progress_checker"
+    goal_checker_plugins: ["general_goal_checker"] # "precise_goal_checker"
+    controller_plugins: ["FollowPath"]
+
+    # Progress checker parameters
+    progress_checker:
+      plugin: "nav2_controller::SimpleProgressChecker"
+      required_movement_radius: 0.5
+      movement_time_allowance: 10.0
+    # Goal checker parameters
+    #precise_goal_checker:
+    #  plugin: "nav2_controller::SimpleGoalChecker"
+    #  xy_goal_tolerance: 0.15
+    #  yaw_goal_tolerance: 0.25
+    #  stateful: True
+    general_goal_checker:
+      stateful: True
+      plugin: "nav2_controller::SimpleGoalChecker"
+      xy_goal_tolerance: 0.15
+      yaw_goal_tolerance: 0.25
+    # DWB parameters
+    FollowPath:
+      plugin: "dwb_core::DWBLocalPlanner"
+      debug_trajectory_details: True
+      min_vel_x: 0.0
+      min_vel_y: 0.0
+      max_vel_x: 0.26
+      max_vel_y: 0.0
+      max_vel_theta: 1.0
+      min_speed_xy: 0.0
+      max_speed_xy: 0.26
+      min_speed_theta: 0.0
+      # Add high threshold velocity for turtlebot 3 issue.
+      # https://github.com/ROBOTIS-GIT/turtlebot3_simulations/issues/75
+      acc_lim_x: 2.5
+      acc_lim_y: 0.0
+      acc_lim_theta: 3.2
+      decel_lim_x: -2.5
+      decel_lim_y: 0.0
+      decel_lim_theta: -3.2
+      vx_samples: 20
+      vy_samples: 5
+      vtheta_samples: 20
+      sim_time: 1.7
+      linear_granularity: 0.05
+      angular_granularity: 0.025
+      transform_tolerance: 0.2
+      xy_goal_tolerance: 0.15
+      trans_stopped_velocity: 0.25
+      short_circuit_trajectory_evaluation: True
+      stateful: True
+      critics: ["RotateToGoal", "Oscillation", "BaseObstacle", "GoalAlign", "PathAlign", "PathDist", "GoalDist"]
+      BaseObstacle.scale: 0.02
+      PathAlign.scale: 256.0
+      PathAlign.forward_point_distance: 0.1
+      GoalAlign.scale: 3.0
+      GoalAlign.forward_point_distance: 0.1
+      PathDist.scale: 32.0
+      GoalDist.scale: 24.0
+      RotateToGoal.scale: 24.0
+      RotateToGoal.slowing_factor: 5.0
+      RotateToGoal.lookahead_time: -1.0
+
+local_costmap:
+  local_costmap:
+    ros__parameters:
+      update_frequency: 5.0
+      publish_frequency: 2.0
+      global_frame: odom
+      robot_base_frame: base_link
+      use_sim_time: True
+      rolling_window: true
+      width: 3
+      height: 3
+      resolution: 0.05
+      robot_radius: 0.25
+      plugins: ["voxel_layer", "inflation_layer"]
+      inflation_layer:
+        plugin: "nav2_costmap_2d::InflationLayer"
+        cost_scaling_factor: 3.0
+        inflation_radius: 0.55
+      voxel_layer:
+        plugin: "nav2_costmap_2d::VoxelLayer"
+        enabled: True
+        publish_voxel_map: True
+        origin_z: 0.0
+        z_resolution: 0.05
+        z_voxels: 16
+        max_obstacle_height: 2.0
+        mark_threshold: 0
+        observation_sources: scan
+        scan:
+          topic: /scan
+          max_obstacle_height: 2.0
+          clearing: True
+          marking: True
+          data_type: "LaserScan"
+          raytrace_max_range: 3.0
+          raytrace_min_range: 0.0
+          obstacle_max_range: 2.5
+          obstacle_min_range: 0.0
+      static_layer:
+        plugin: "nav2_costmap_2d::StaticLayer"
+        map_subscribe_transient_local: True
+      always_send_full_costmap: True
+
+global_costmap:
+  global_costmap:
+    ros__parameters:
+      update_frequency: 1.0
+      publish_frequency: 1.0
+      global_frame: map
+      robot_base_frame: base_link
+      use_sim_time: True
+      robot_radius: 0.25
+      resolution: 0.05
+      track_unknown_space: true
+      plugins: ["static_layer", "obstacle_layer", "inflation_layer"]
+      obstacle_layer:
+        plugin: "nav2_costmap_2d::ObstacleLayer"
+        enabled: True
+        observation_sources: scan
+        scan:
+          topic: /scan
+          max_obstacle_height: 2.0
+          clearing: True
+          marking: True
+          data_type: "LaserScan"
+          raytrace_max_range: 3.0
+          raytrace_min_range: 0.0
+          obstacle_max_range: 2.5
+          obstacle_min_range: 0.0
+      static_layer:
+        plugin: "nav2_costmap_2d::StaticLayer"
+        map_subscribe_transient_local: True
+      inflation_layer:
+        plugin: "nav2_costmap_2d::InflationLayer"
+        cost_scaling_factor: 3.0
+        inflation_radius: 0.55
+      always_send_full_costmap: True
+
+map_server:
+  ros__parameters:
+    use_sim_time: True
+    # Overridden in launch by the "map" launch configuration or provided default value.
+    # To use in yaml, remove the default "map" value in the tb3_simulation_launch.py file & provide full path to map below.
+    yaml_filename: ""
+
+map_saver:
+  ros__parameters:
+    use_sim_time: True
+    save_map_timeout: 5000
+    free_thresh_default: 0.25
+    occupied_thresh_default: 0.65
+    map_subscribe_transient_local: True
+
+planner_server:
+  ros__parameters:
+    expected_planner_frequency: 0.01
+    use_sim_time: True
+    planner_plugins: ["GridBased"]
+    GridBased:
+      plugin: "its_planner/ITSPlanner"
+      interpolation_resolution: 0.05
+      catmull_spline: False
+      smoothing_window: 15
+      buffer_size: 1
+      build_road_map_once: True
+      enable_k: False
+      min_samples: 250
+      roadmap: "PROBABLISTIC"
+      w: 40
+      h: 40
+      n: 2
+      dubins_path: True
+      turn_radius: .22
+      robot_radius: .25
+      yaw_tolerance: .125
+      use_final_heading: True
+
+smoother_server:
+  ros__parameters:
+    use_sim_time: True
+    smoother_plugins: ["simple_smoother"]
+    simple_smoother:
+      plugin: "nav2_smoother::SimpleSmoother"
+      tolerance: 1.0e-10
+      max_its: 1000
+      do_refinement: True
+
+behavior_server:
+  ros__parameters:
+    costmap_topic: local_costmap/costmap_raw
+    footprint_topic: local_costmap/published_footprint
+    cycle_frequency: 10.0
+    behavior_plugins: ["spin", "backup", "drive_on_heading", "assisted_teleop", "wait"]
+    spin:
+      plugin: "nav2_behaviors/Spin"
+    backup:
+      plugin: "nav2_behaviors/BackUp"
+    drive_on_heading:
+      plugin: "nav2_behaviors/DriveOnHeading"
+    wait:
+      plugin: "nav2_behaviors/Wait"
+    assisted_teleop:
+      plugin: "nav2_behaviors/AssistedTeleop"
+    global_frame: odom
+    robot_base_frame: base_link
+    transform_tolerance: 0.1
+    use_sim_time: true
+    simulate_ahead_time: 2.0
+    max_rotational_vel: 1.0
+    min_rotational_vel: 0.4
+    rotational_acc_lim: 3.2
+
+robot_state_publisher:
+  ros__parameters:
+    use_sim_time: True
+
+waypoint_follower:
+  ros__parameters:
+    use_sim_time: True
+    loop_rate: 20
+    stop_on_failure: false
+    waypoint_task_executor_plugin: "wait_at_waypoint"
+    wait_at_waypoint:
+      plugin: "nav2_waypoint_follower::WaitAtWaypoint"
+      enabled: True
+      waypoint_pause_duration: 200
+
+velocity_smoother:
+  ros__parameters:
+    use_sim_time: True
+    smoothing_frequency: 20.0
+    scale_velocities: False
+    feedback: "OPEN_LOOP"
+    max_velocity: [0.26, 0.0, 1.0]
+    min_velocity: [-0.26, 0.0, -1.0]
+    max_accel: [2.5, 0.0, 3.2]
+    max_decel: [-2.5, 0.0, -3.2]
+    odom_topic: "odom"
+    odom_duration: 0.1
+    deadband_velocity: [0.0, 0.0, 0.0]
+    velocity_timeout: 1.0
diff --git a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_humble.yaml b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_humble.yaml
index c193f0dfec..5d759a25fc 100644
--- a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_humble.yaml
+++ b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_humble.yaml
@@ -280,7 +280,7 @@ planner_server:
     use_sim_time: True
     planner_plugins: ["GridBased"]
     GridBased:
-      plugin: "its_planner::ITSPlanner"
+      plugin: "its_planner/ITSPlanner"
       interpolation_resolution: 0.05
       catmull_spline: False
       smoothing_window: 15
@@ -288,7 +288,7 @@ planner_server:
       build_road_map_once: True
       enable_k: False
       min_samples: 250
-      roadmap: "PROBABILISTIC"
+      roadmap: "PROBABLISTIC"
       w: 40
       h: 40
       n: 2
diff --git a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_jazzy.yaml b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_jazzy.yaml
index cf80a0d97f..2d97f1df5a 100644
--- a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_jazzy.yaml
+++ b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_dubins_jazzy.yaml
@@ -97,7 +97,6 @@ controller_server:
     min_x_velocity_threshold: 0.001
     min_y_velocity_threshold: 0.5
     min_theta_velocity_threshold: 0.001
-    enable_stamped_cmd_vel: False
     failure_tolerance: 0.3
     progress_checker_plugins: ["progress_checker"]
     goal_checker_plugins: ["general_goal_checker"]
@@ -305,7 +304,7 @@ planner_server:
     planner_plugins: ["GridBased"]
     costmap_update_timeout: 1.0
     GridBased:
-      plugin: "its_planner::ITSPlanner"
+      plugin: "its_planner/ITSPlanner"
       interpolation_resolution: 0.05
       catmull_spline: False
       smoothing_window: 15
@@ -313,7 +312,7 @@ planner_server:
       build_road_map_once: True
       enable_k: False
       min_samples: 250
-      roadmap: "PROBABILISTIC"
+      roadmap: "PROBABLISTIC"
       w: 20
       h: 20
       n: 2
@@ -338,7 +337,6 @@ smoother_server:
 behavior_server:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
     local_costmap_topic: local_costmap/costmap_raw
     global_costmap_topic: global_costmap/costmap_raw
     local_footprint_topic: local_costmap/published_footprint
@@ -407,7 +405,6 @@ route_server:
 velocity_smoother:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
     smoothing_frequency: 20.0
     scale_velocities: False
     feedback: "OPEN_LOOP"
@@ -424,8 +421,6 @@ velocity_smoother:
 collision_monitor:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
-    cmd_vel_out_stamped: False
     base_frame_id: "base_footprint"
     odom_frame_id: "odom"
     cmd_vel_in_topic: "cmd_vel_smoothed"
@@ -457,7 +452,6 @@ collision_monitor:
 docking_server:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
     controller_frequency: 50.0
     initial_perception_timeout: 5.0
     wait_charge_timeout: 5.0
diff --git a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_humble.yaml b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_humble.yaml
index 3111d4f4b3..03456abbe9 100644
--- a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_humble.yaml
+++ b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_humble.yaml
@@ -280,7 +280,7 @@ planner_server:
     use_sim_time: True
     planner_plugins: ["GridBased"]
     GridBased:
-      plugin: "its_planner::ITSPlanner"
+      plugin: "its_planner/ITSPlanner"
       interpolation_resolution: 0.05
       catmull_spline: False
       smoothing_window: 15
@@ -288,7 +288,7 @@ planner_server:
       build_road_map_once: True
       enable_k: False
       min_samples: 250
-      roadmap: "PROBABILISTIC"
+      roadmap: "PROBABLISTIC"
       w: 32
       h: 32
       n: 2
diff --git a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_jazzy.yaml b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_jazzy.yaml
index bb1fd178bc..2633d62e8c 100644
--- a/robotics-ai-suite/components/its-planner/its_planner/nav2_params_jazzy.yaml
+++ b/robotics-ai-suite/components/its-planner/its_planner/nav2_params_jazzy.yaml
@@ -97,7 +97,6 @@ controller_server:
     min_x_velocity_threshold: 0.001
     min_y_velocity_threshold: 0.5
     min_theta_velocity_threshold: 0.001
-    enable_stamped_cmd_vel: False
     failure_tolerance: 0.3
     progress_checker_plugins: ["progress_checker"]
     goal_checker_plugins: ["general_goal_checker"]
@@ -312,7 +311,7 @@ planner_server:
     planner_plugins: ["GridBased"]
     costmap_update_timeout: 1.0
     GridBased:
-      plugin: "its_planner::ITSPlanner"
+      plugin: "its_planner/ITSPlanner"
       interpolation_resolution: 0.05
       catmull_spline: False
       smoothing_window: 15
@@ -320,7 +319,7 @@ planner_server:
       build_road_map_once: True
       enable_k: False
       min_samples: 250
-      roadmap: "PROBABILISTIC"
+      roadmap: "PROBABLISTIC"
       w: 32
       h: 32
       n: 2
@@ -340,7 +339,6 @@ smoother_server:
 behavior_server:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
     local_costmap_topic: local_costmap/costmap_raw
     global_costmap_topic: global_costmap/costmap_raw
     local_footprint_topic: local_costmap/published_footprint
@@ -409,7 +407,6 @@ route_server:
 velocity_smoother:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
     smoothing_frequency: 20.0
     scale_velocities: False
     feedback: "OPEN_LOOP"
@@ -426,7 +423,6 @@ velocity_smoother:
 collision_monitor:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
     base_frame_id: "base_footprint"
     odom_frame_id: "odom"
     cmd_vel_in_topic: "cmd_vel_smoothed"
@@ -458,7 +454,6 @@ collision_monitor:
 docking_server:
   ros__parameters:
     use_sim_time: True
-    enable_stamped_cmd_vel: False
     controller_frequency: 50.0
     initial_perception_timeout: 5.0
     wait_charge_timeout: 5.0
diff --git a/robotics-ai-suite/components/its-planner/its_planner/navigate_w_recovery.xml b/robotics-ai-suite/components/its-planner/its_planner/navigate_w_recovery.xml
new file mode 100644
index 0000000000..cf52ce7e3a
--- /dev/null
+++ b/robotics-ai-suite/components/its-planner/its_planner/navigate_w_recovery.xml
@@ -0,0 +1,43 @@
+<!-- SPDX-License-Identifier: Apache-2.0 -->
+<!-- Copyright (C) 2025 Intel Corporation -->
+<!-- NOTE(review): the XML tags of this file were stripped in the diff; the tree below is
+     reconstructed from the standard nav2 navigate-with-replanning-and-recovery tree.
+     Verify against the intended behavior tree before merging. -->
+<root main_tree_to_execute="MainTree">
+    <BehaviorTree ID="MainTree">
+        <RecoveryNode number_of_retries="6" name="NavigateRecovery">
+            <PipelineSequence name="NavigateWithReplanning">
+                <RateController hz="1.0">
+                    <RecoveryNode number_of_retries="1" name="ComputePathToPose">
+                        <ComputePathToPose goal="{goal}" path="{path}" planner_id="GridBased"/>
+                        <ClearEntireCostmap name="ClearGlobalCostmap-Context" service_name="global_costmap/clear_entirely_global_costmap"/>
+                    </RecoveryNode>
+                </RateController>
+                <RecoveryNode number_of_retries="1" name="FollowPath">
+                    <FollowPath path="{path}" controller_id="FollowPath"/>
+                    <ClearEntireCostmap name="ClearLocalCostmap-Context" service_name="local_costmap/clear_entirely_local_costmap"/>
+                </RecoveryNode>
+            </PipelineSequence>
+            <ReactiveFallback name="RecoveryFallback">
+                <GoalUpdated/>
+                <RoundRobin name="RecoveryActions">
+                    <Sequence name="ClearingActions">
+                        <ClearEntireCostmap name="ClearLocalCostmap-Subtree" service_name="local_costmap/clear_entirely_local_costmap"/>
+                        <ClearEntireCostmap name="ClearGlobalCostmap-Subtree" service_name="global_costmap/clear_entirely_global_costmap"/>
+                    </Sequence>
+                    <Spin spin_dist="1.57"/>
+                    <Wait wait_duration="5"/>
+                    <BackUp backup_dist="0.15" backup_speed="0.025"/>
+                </RoundRobin>
+            </ReactiveFallback>
+        </RecoveryNode>
+    </BehaviorTree>
+</root>
+
diff --git a/robotics-ai-suite/components/its-planner/its_planner/src/its_planner.cpp b/robotics-ai-suite/components/its-planner/its_planner/src/its_planner.cpp
index 0ec86f8ff6..a25aa8402b 100644
--- a/robotics-ai-suite/components/its-planner/its_planner/src/its_planner.cpp
+++ b/robotics-ai-suite/components/its-planner/its_planner/src/its_planner.cpp
@@ -48,9 +48,9 @@ void ITSPlanner::configure(
   node_->get_parameter(name_ + ".interpolation_resolution", interpolation_resolution_);
   if (interpolation_resolution_ <= 0.0) {interpolation_resolution_ = 0.05;}
   nav2_util::declare_parameter_if_not_declared(
-    node_, name_ + ".roadmap", rclcpp::ParameterValue("PROBABILISTIC"));
+    node_, name_ + ".roadmap", rclcpp::ParameterValue("PROBABLISTIC"));
   node_->get_parameter(name_ + ".roadmap", roadmap_);
-  if (roadmap_ != "PROBABILISTIC" && roadmap_ != "DETERMINISTIC") {roadmap_ = "PROBABILISTIC";}
+  if (roadmap_ != "PROBABLISTIC" && roadmap_ != "DETERMINISTIC") {roadmap_ = "PROBABLISTIC";}
   nav2_util::declare_parameter_if_not_declared(
     node_, name_ + ".n", rclcpp::ParameterValue(2));
   node_->get_parameter(name_ + ".n", n_);
diff --git a/robotics-ai-suite/components/its-planner/its_planner/src/prm/prm.cpp b/robotics-ai-suite/components/its-planner/its_planner/src/prm/prm.cpp
index bea23cd68b..668e45690b 100755
--- a/robotics-ai-suite/components/its-planner/its_planner/src/prm/prm.cpp
+++ b/robotics-ai-suite/components/its-planner/its_planner/src/prm/prm.cpp
@@ -74,7 +74,7 @@ PRM::generateMilestones(
   vector> visited(map_size.first, vector(map_size.second, false));
   std::random_device rd;
   while (count < n) {
-    if (plan == "PROBABILISTIC") {
+    if (plan == "PROBABLISTIC") {
       std::uniform_int_distribution dist_one(0, map_size.first - 1);
       std::uniform_int_distribution dist_two(0, map_size.second - 1);
       MapLocation rand_pos = {dist_one(rd), dist_two(rd)};
diff --git a/robotics-ai-suite/components/its-planner/its_planner/test/test_dynamic_parameters.cpp b/robotics-ai-suite/components/its-planner/its_planner/test/test_dynamic_parameters.cpp
index ec802664d1..e491015986 100644
--- a/robotics-ai-suite/components/its-planner/its_planner/test/test_dynamic_parameters.cpp
+++ b/robotics-ai-suite/components/its-planner/its_planner/test/test_dynamic_parameters.cpp
@@ -33,7 +33,7 @@ TEST(ITSPlannerTest, testDynamicParameter)
 
   auto results = rec_param->set_parameters_atomically(
     {rclcpp::Parameter("test.interpolation_resolution", 0.05),
-      rclcpp::Parameter("test.roadmap", "PROBABILISTIC"),
+      rclcpp::Parameter("test.roadmap", "PROBABLISTIC"),
       rclcpp::Parameter("test.n", 2),
       rclcpp::Parameter("test.w", 32),
       rclcpp::Parameter("test.h", 32),
@@ -49,7 +49,7 @@ TEST(ITSPlannerTest, testDynamicParameter)
     results);
 
   EXPECT_EQ(node->get_parameter("test.interpolation_resolution").as_double(), 0.05);
-  EXPECT_EQ(node->get_parameter("test.roadmap").as_string(), "PROBABILISTIC");
+  EXPECT_EQ(node->get_parameter("test.roadmap").as_string(), "PROBABLISTIC");
   EXPECT_EQ(node->get_parameter("test.n").as_int(), 2);
   EXPECT_EQ(node->get_parameter("test.w").as_int(), 32);
   EXPECT_EQ(node->get_parameter("test.h").as_int(), 32);
diff --git a/robotics-ai-suite/components/its-planner/its_planner/test/test_prm.cpp b/robotics-ai-suite/components/its-planner/its_planner/test/test_prm.cpp
index 603eea0182..1aba7724a3 100644
--- a/robotics-ai-suite/components/its-planner/its_planner/test/test_prm.cpp
+++ b/robotics-ai-suite/components/its-planner/its_planner/test/test_prm.cpp
@@ -74,7 +74,7 @@ TEST(PRMTest, buildRoadmap_Test) {
   EXPECT_EQ(plan.milestones_.size(), 0);
   EXPECT_EQ(plan.adj_list_.size(), 0);
 
-  plan.buildRoadMap(costmap_2d, 500, "PROBABILISTIC", inflation_map, 1);
+  plan.buildRoadMap(costmap_2d, 500, "PROBABLISTIC", inflation_map, 1);
 
   const string filename = "road_map.txt";
   std::ifstream file;
diff --git a/robotics-ai-suite/components/its-planner/relocalization/README.md b/robotics-ai-suite/components/its-planner/relocalization/README.md
index 5d8502df16..b1a86d81db 100644
--- a/robotics-ai-suite/components/its-planner/relocalization/README.md
+++ b/robotics-ai-suite/components/its-planner/relocalization/README.md
@@ -12,40 +12,33 @@ To address this problem, we innovated a compute and memory efficient re-localiza
 
 ## Getting Started
 
-Robotics AI Suite provides a ROS2 Debian package for the application, supported by the following platforms:
+Robotics AI Suite provides a ROS2 Debian package for the application, supported by the following platform:
 
-- OS: Ubuntu 22.04 (Humble) or Ubuntu 24.04 (Jazzy)
+- OS: Ubuntu 22.04
 
-- ROS version: humble or jazzy
+- ROS version: humble
 
 ## Install Debian Package
 
-Install the ``ros-${ROS_DISTRO}-its-relocalization-bringup`` Debian package from the Intel Robotics AI Suite APT repo
+Install the ``ros-humble-its-relocalization-bringup`` Debian package from the Intel Robotics AI Suite APT repo
 
 ```sh
-sudo apt install ros-${ROS_DISTRO}-its-relocalization-bringup
+sudo apt install ros-humble-its-relocalization-bringup
 ```
 
 Run the following script to set environment variables and bringup ROS2 navigation, and Turtlebot3 in Gazebo:
 
 ```sh
-source /opt/ros/$ROS_DISTRO/setup.bash        # ROS_DISTRO=humble or jazzy
-export TURTLEBOT3_MODEL=waffle
-
-# Set Gazebo model path (variable name differs between distributions)
-if [ "$ROS_DISTRO" = "jazzy" ]; then
-    export GZ_SIM_RESOURCE_PATH=$GZ_SIM_RESOURCE_PATH:/opt/ros/$ROS_DISTRO/share/turtlebot3_gazebo/models
-else
-    export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/$ROS_DISTRO/share/turtlebot3_gazebo/models
-fi
-
+source /opt/ros/humble/setup.bash
+export TURTLEBOT3_MODEL=waffle_pi
+export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
 ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False
 ```
 
 Once the ROS2 navigation is running in Gazebo, open a new terminal to bring up the re-localization:
 
 ```sh
-source /opt/ros/$ROS_DISTRO/setup.bash
+source /opt/ros/humble/setup.bash
 ros2 launch relocalization_bringup relocalization.launch.xml 
 ```
 
diff --git a/robotics-ai-suite/components/its-planner/run_its.sh b/robotics-ai-suite/components/its-planner/run_its.sh
old mode 100644
new mode 100755
index c656a5acec..121040d61f
--- a/robotics-ai-suite/components/its-planner/run_its.sh
+++ b/robotics-ai-suite/components/its-planner/run_its.sh
@@ -2,54 +2,49 @@
 # SPDX-License-Identifier: Apache-2.0
 # Copyright (C) 2025 Intel Corporation
 
-current_dir=$(dirname "$(realpath "${BASH_SOURCE[0]}")")
-
-# Auto-detect ROS distribution instead of hardcoding
-if [ -n "$ROS_DISTRO" ]; then
-    echo "Using ROS distribution: $ROS_DISTRO"
-    source /opt/ros/"$ROS_DISTRO"/setup.bash
-else
-    echo "ROS_DISTRO not set, attempting to detect..."
-    if [ -f "/opt/ros/jazzy/setup.bash" ]; then
-        echo "Found Jazzy, using it"
-        source /opt/ros/jazzy/setup.bash
-    elif [ -f "/opt/ros/humble/setup.bash" ]; then
-        echo "Found Humble, using it"
-        source /opt/ros/humble/setup.bash
-    else
-        echo "Error: No ROS installation found!"
-        exit 1
-    fi
+current_dir=$(dirname "$(realpath "${BASH_SOURCE[0]}")")
+source /opt/ros/humble/setup.bash
+
+if [ ! -d "./build/" ] 
+then
+    echo "Building ITS-Planner."
+    colcon build --packages-select nav2_msgs
+    colcon build --packages-select nav2_amcl
+    colcon build --packages-select its_planner
+    source install/setup.bash
+    colcon build --packages-select send_localization
+    colcon build --packages-select pose_checker
+    colcon build --packages-select benchmarking
 fi
 
 print_usage() {
-  printf "Usage: %s [-d]\n" "$0"
-  printf "  -d   Use Ackermann (Dubins) params\n"
+  printf "Usage: %s [-d]\n  -d   use the Ackermann (Dubins) parameter file\n" "$0"
 }
 
-yaml_file="nav2_params_${ROS_DISTRO}.yaml"
-bt_xml_file="navigate_w_recovery_${ROS_DISTRO}.xml"
-
-while getopts 'dh' flag; do
+yaml_file='nav2_params.yaml'
+while getopts 'd' flag; do
   case "${flag}" in
-    d) yaml_file="nav2_params_dubins_${ROS_DISTRO}.yaml" ;;
-    h) print_usage ; exit 0 ;;
-    *) print_usage ; exit 1 ;;
+    d) yaml_file='nav2_params_dubins.yaml' ;;
+    *) exit 1 ;;
   esac
 done
 
-source "${current_dir}"/install/setup.bash
+source install/setup.bash 
+#export TURTLEBOT3_MODEL=waffle
+export TURTLEBOT3_MODEL=waffle_pi
+export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
 
-export TURTLEBOT3_MODEL=waffle
+#ros2 launch nav2_bringup tb3_simulation_launch.py
 
-# Set Gazebo model path (variable name differs between distributions)
-if [ "$ROS_DISTRO" = "jazzy" ]; then
-    export GZ_SIM_RESOURCE_PATH=$GZ_SIM_RESOURCE_PATH:/opt/ros/"$ROS_DISTRO"/share/turtlebot3_gazebo/models
-else
-    export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/"$ROS_DISTRO"/share/turtlebot3_gazebo/models
-fi
+#ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False
+
+ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False params_file:="${current_dir}"/its_planner/${yaml_file} default_bt_xml_filename:="${current_dir}"/its_planner/navigate_w_recovery.xml
+
+#ros2 launch nav2_bringup tb3_simulation_launch.py params_file:=/"${current_dir}"/its_planner/${yaml_file} default_bt_xml_filename:=/"${current_dir}"/its_planner/navigate_w_recovery.xml map_server:=/home/awm/ati_deliverables/applications.robotics.mobile.its-planner/nav2_bringup/bringup/maps/turtlebot3_house.pgm
+
+#ros2 launch pose_checker.yaml
+
+#ros2 launch turtlebot3_gazebo turtlebot3_house.launch.py
+
+#ros2 launch nav2_bringup tb3_simulation_launch.py slam:=True
 
-ros2 launch nav2_bringup tb3_simulation_launch.py \
-  headless:=False \
-  params_file:="${current_dir}/its_planner/${yaml_file}" \
-  default_bt_xml_filename:="${current_dir}/its_planner/${bt_xml_file}"
diff --git a/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/its-path-planner-plugin.md b/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/its-path-planner-plugin.md
index 686d670295..f5e17361ea 100644
--- a/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/its-path-planner-plugin.md
+++ b/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/its-path-planner-plugin.md
@@ -107,7 +107,7 @@ Run the following script to set environment variables:
 
 ```bash
 source /opt/ros/jazzy/setup.bash
-export TURTLEBOT3_MODEL=waffle
+export TURTLEBOT3_MODEL=waffle_pi
 export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/jazzy/share/turtlebot3_gazebo/models
 ```
 
@@ -117,7 +117,7 @@ export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/jazzy/share/turtlebot3_gaze
 
 ```bash
 source /opt/ros/humble/setup.bash
-export TURTLEBOT3_MODEL=waffle
+export TURTLEBOT3_MODEL=waffle_pi
 export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
 ```
 
diff --git a/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/navigation-relocalization.md b/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/navigation-relocalization.md
index b726d97d58..47c58c5a2c 100644
--- a/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/navigation-relocalization.md
+++ b/robotics-ai-suite/docs/robotics/dev_guide/tutorials_amr/navigation/navigation-relocalization.md
@@ -55,7 +55,7 @@ navigation, and TurtleBot3 robot in Gazebo simulation:
 
 ```bash
 source /opt/ros/jazzy/setup.bash
-export TURTLEBOT3_MODEL=waffle
+export TURTLEBOT3_MODEL=waffle_pi
 export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/jazzy/share/turtlebot3_gazebo/models
 ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False
 ```
@@ -66,7 +66,7 @@ ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False
 
 ```bash
 source /opt/ros/humble/setup.bash
-export TURTLEBOT3_MODEL=waffle
+export TURTLEBOT3_MODEL=waffle_pi
 export GAZEBO_MODEL_PATH=$GAZEBO_MODEL_PATH:/opt/ros/humble/share/turtlebot3_gazebo/models
 ros2 launch nav2_bringup tb3_simulation_launch.py headless:=False
 ```