Commit a7dab2e

[ViPPET] Implementing a dedicated script for managing model installation (open-edge-platform#1000)
1 parent 6a4ec7a commit a7dab2e

14 files changed (+712, -252 lines)

Lines changed: 2 additions & 9 deletions
@@ -1,11 +1,4 @@
 __pycache__
-emissions.csv
-.collector.out
-.collector.run
-free_output.log
-qmassa_output.json
-turbostat_output.log
-videos/
-models/*
 .coverage
-.public_models_env
+models/output/*
+videos/

tools/visual-pipeline-and-platform-evaluation-tool/Dockerfile.vippet

Lines changed: 3 additions & 2 deletions
@@ -41,14 +41,15 @@ COPY --chown=dlstreamer:dlstreamer \
     app.css \
     benchmark.py \
     chart.py \
-    cleanup_models.sh \
     device.py \
     explore.py \
     gstpipeline.py \
+    models/supported_models.lst \
+    models.py \
     optimize.py \
     utils.py \
     /home/dlstreamer/vippet/

 COPY --chown=dlstreamer:dlstreamer pipelines/ /home/dlstreamer/vippet/pipelines

-CMD ["/bin/bash", "-c", "./cleanup_models.sh; python app.py"]
+CMD ["/bin/bash", "-c", "python app.py"]

tools/visual-pipeline-and-platform-evaluation-tool/Makefile

Lines changed: 21 additions & 25 deletions
@@ -7,7 +7,8 @@ SHELL := bash -eu -o pipefail
 .DEFAULT_GOAL := help
 .PHONY: all lint build mdlint ruff fix-linter format pyright run build-dev run-dev test \
   shell shell-vippet shell-models shell-collector shell-videogenerator stop clean help \
-  public_models_choice reset_public_models_choice build-videogenerator docker-build
+  build-videogenerator build-models docker-build check-device-type \
+  install-models-once install-models-force install-models-all

 # Code Versions
 VERSION := v1.2
@@ -23,8 +24,6 @@ DEV_COMPOSE_FILE := compose.dev.yml
 # Python venv Target
 VENV_DIR := .venv

-PUBLIC_MODELS_ENV := .public_models_env
-
 # Placeholder to prevent warnings about missing variables.
 # This value is overridden by setup_env.sh in relevant targets.
 export RENDER_GROUP_ID := 1000
@@ -81,35 +80,20 @@ build-dev: check-device-type $(VENV_DIR) ## Build core dev images (vippet-app, v
 build-videogenerator: ## Build videogenerator image
     DOCKER_TAG=$(VERSION) docker compose build videogenerator

+build-models: ## Build model image
+    DOCKER_TAG=$(VERSION) docker compose build models
+
 docker-build: ## Build all images
     $(MAKE) build DEVICE_TYPE=$(or $(DEVICE_TYPE),CPU)
     $(MAKE) build-videogenerator
+    $(MAKE) build-models

-public_models_choice: ## Ask if public models shall be downloaded
-    @if [ -f $(PUBLIC_MODELS_ENV) ]; then \
-        printf "Choice on downloading public models was already made, skipping prompt. "; \
-        printf "To reset choice run \`make reset_public_models_choice\`.\n"; \
-        exit 0; \
-    fi; \
-    read -p "Do you want to download public models? (y/n): " yn; \
-    case $$yn in \
-        y|Y|[yY][eE][sS]) DOWNLOAD_PUBLIC_MODELS=true ;; \
-        *) DOWNLOAD_PUBLIC_MODELS=false ;; \
-    esac; \
-    echo "DOWNLOAD_PUBLIC_MODELS set to $$DOWNLOAD_PUBLIC_MODELS" ;\
-    echo "DOWNLOAD_PUBLIC_MODELS=$$DOWNLOAD_PUBLIC_MODELS" > $(PUBLIC_MODELS_ENV)
-
-reset_public_models_choice: ## Reset choice on public models download
-    rm -rf $(PUBLIC_MODELS_ENV)
-
-run: check-device-type public_models_choice ## Run the docker compose services
+run: check-device-type install-models-once ## Run the docker compose services
     source setup_env.sh -d $(DEVICE_TYPE) && \
-    mkdir -p models && chmod o+w models && \
     DOCKER_TAG=$(VERSION) docker compose up -d

-run-dev: check-device-type public_models_choice ## Run the docker compose services for development
+run-dev: check-device-type install-models-once ## Run the docker compose services for development
     source setup_env.sh -d $(DEVICE_TYPE) && \
-    mkdir -p models && chmod o+w models && \
     DOCKER_TAG=$(VERSION) docker compose -f $(COMPOSE_FILE) -f $(DEV_COMPOSE_FILE) up -d

 test: $(VENV_DIR) ## Run tests and generate coverage report
@@ -119,6 +103,18 @@ test: $(VENV_DIR) ## Run tests and generate coverage report
     python -m coverage run --source=./ --data-file=/tmp/.vippet-coverage -m unittest discover -v -s ./tests -p '*_test.py' && \
     python -m coverage report --data-file=/tmp/.vippet-coverage --omit=*/config-3.py,*/config.py,*_test.py"

+install-models-once: ## Handle models download and installation (once)
+    mkdir -p models/output && chmod o+w models/output
+    MODEL_INSTALLATION=once DOCKER_TAG=$(VERSION) docker compose run --rm -it models
+
+install-models-force: ## Handle models download and installation (force)
+    mkdir -p models/output && chmod o+w models/output
+    MODEL_INSTALLATION=force DOCKER_TAG=$(VERSION) docker compose run --rm -it models
+
+install-models-all: ## Install all models automatically (no dialog)
+    mkdir -p models/output && chmod o+w models/output
+    MODEL_INSTALLATION=all DOCKER_TAG=$(VERSION) docker compose run --rm -it models
+
 shell: ## Open shell in specified container (i.e. make shell SERVICE=vippet-gpu)
     docker exec -it $(SERVICE) bash

@@ -140,7 +136,7 @@ stop: ## Stop the docker compose services
     DOCKER_TAG=$(VERSION) docker compose down models collector videogenerator $$VIPPET_SERVICE

 clean: ## Clean all build artifacts
-    rm -rf .collector-signals/ models/ videos/
+    rm -rf .collector-signals/ models/output/ videos/

 help: ## Print help for each target
     @echo ViPPET make targets
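
Note: the three `install-models-*` targets differ only in the `MODEL_INSTALLATION` value they pass to a one-shot, interactive `docker compose run` of the `models` service, and `run`/`run-dev` now depend on `install-models-once` instead of the removed `public_models_choice` prompt. A minimal usage sketch, assuming the `once` mode (implemented in `model_manager.sh`, which is not shown in this diff) skips the dialog when models are already installed:

```bash
# Interactive model selection on first use; the "once" mode is expected to
# skip the dialog when an installation already exists (model_manager.sh behaviour)
make install-models-once

# Re-open the selection dialog to add or remove installed models
make install-models-force

# Install all models automatically, with no dialog (useful for unattended setups)
make install-models-all
```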

tools/visual-pipeline-and-platform-evaluation-tool/app.py

Lines changed: 40 additions & 35 deletions
@@ -1,5 +1,6 @@
 import logging
 import os
+import sys
 from datetime import datetime
 from typing import Dict, List, Optional, Tuple

@@ -16,6 +17,7 @@
 from optimize import PipelineOptimizer
 from gstpipeline import GstPipeline, PipelineLoader
 from utils import prepare_video_and_constants
+from models import SupportedModelsManager

 logging.getLogger("httpx").setLevel(logging.WARNING)

@@ -41,6 +43,11 @@
 )
 gst_inspector = GstInspector()
 device_discovery = DeviceDiscovery()
+try:
+    supported_models_manager = SupportedModelsManager()
+except Exception as e:
+    logging.error(str(e))
+    sys.exit(1)

 # Device detection and chart title logic
 charts: List[Chart] = create_charts(device_discovery.list_devices())
@@ -674,17 +681,8 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
         # Mapping of these choices to actual model path in utils.py
         object_detection_model = gr.Dropdown(
             label="Object Detection Model",
-            choices=[
-                "SSDLite MobileNet V2 (INT8)",
-                "YOLO v5m 416x416 (INT8)",
-                "YOLO v5s 416x416 (INT8)",
-                "YOLO v5m 640x640 (INT8)",
-                "YOLO v10s 640x640 (FP16)",
-                "YOLO v10m 640x640 (FP16)",
-                "YOLO v8 License Plate Detector (FP32)",
-            ],
-            value="YOLO v5s 416x416 (INT8)",
             elem_id="object_detection_model",
+            # choices and value will be set for each pipeline later
         )

         # Object detection device
@@ -732,16 +730,8 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
         # Mapping of these choices to actual model path in utils.py
         object_classification_model = gr.Dropdown(
             label="Object Classification Model",
-            choices=[
-                "Disabled",
-                "EfficientNet B0 (INT8)",
-                "MobileNet V2 PyTorch (FP16)",
-                "ResNet-50 TF (INT8)",
-                "PaddleOCR (FP32)",
-                "Vehicle Attributes Recognition Barrier 0039 (FP16)",
-            ],
-            value="ResNet-50 TF (INT8)",
             elem_id="object_classification_model",
+            # choices and value will be set for each pipeline later
         )

         # Object classification device
@@ -1112,23 +1102,38 @@ def create_interface(title: str = "Visual Pipeline and Platform Evaluation Tool"
             None,
             inferencing_channels,
         ).then(
+            # Read supported models and update the model dropdowns every time a new pipeline is selected
+            # (list of models may change)
             lambda: [
-                gr.Dropdown(
-                    choices=current_pipeline[1]["parameters"][
-                        "inference"
-                    ]["detection_models"],
-                    value=current_pipeline[1]["parameters"][
-                        "inference"
-                    ]["detection_model_default"],
-                ),
-                gr.Dropdown(
-                    choices=current_pipeline[1]["parameters"][
-                        "inference"
-                    ]["classification_models"],
-                    value=current_pipeline[1]["parameters"][
-                        "inference"
-                    ]["classification_model_default"],
-                ),
+                *(
+                    lambda det, cls: [
+                        gr.Dropdown(
+                            choices=det[0],
+                            value=det[1],
+                        ),
+                        gr.Dropdown(
+                            choices=cls[0],
+                            value=cls[1],
+                        ),
+                    ]
+                )(
+                    supported_models_manager.filter_detection_models(
+                        current_pipeline[1]["parameters"][
+                            "inference"
+                        ]["detection_models"],
+                        current_pipeline[1]["parameters"][
+                            "inference"
+                        ]["detection_model_default"],
+                    ),
+                    supported_models_manager.filter_classification_models(
+                        current_pipeline[1]["parameters"][
+                            "inference"
+                        ]["classification_models"],
+                        current_pipeline[1]["parameters"][
+                            "inference"
+                        ]["classification_model_default"],
+                    ),
+                )
             ],
             outputs=[
                 object_detection_model,

tools/visual-pipeline-and-platform-evaluation-tool/cleanup_models.sh

Lines changed: 0 additions & 19 deletions
This file was deleted.

tools/visual-pipeline-and-platform-evaluation-tool/compose.yml

Lines changed: 17 additions & 17 deletions
@@ -14,41 +14,42 @@ x-vippet: &vippet
     interval: 30s
     timeout: 10s
     retries: 5
-  depends_on:
-    models:
-      condition: service_completed_successfully
   restart: on-failure:5
   environment:
     GST_DEBUG: "1"
     TZ: America/Los_Angeles
     PIPELINE: smartnvr
+    MODELS_PATH: /home/dlstreamer/vippet/models
     http_proxy: ${http_proxy}
     https_proxy: ${https_proxy}
     no_proxy: ${no_proxy}
-  env_file:
-    - path: '.public_models_env'
-      required: false
   volumes:
     - collector-signals:/home/dlstreamer/vippet/.collector-signals
-    - ./models:/home/dlstreamer/vippet/models
+    - ./models/output:/home/dlstreamer/vippet/models

 services:
   models:
-    image: intel/dlstreamer:2025.1.2-ubuntu24
+    profiles: [do-not-start]
+    image: docker.io/intel/vippet-models:${DOCKER_TAG}
+    build:
+      context: models
+      dockerfile: Dockerfile
+      args:
+        http_proxy:
+        https_proxy:
+        no_proxy:
+    stdin_open: true
+    tty: true
     container_name: models
     volumes:
-      - ./models/:/output
-      - ./models.sh:/home/dlstreamer/models.sh
+      - ./models/output/:/output
     environment:
-      - MODELS_PATH=/output/
+      - MODELS_PATH=/output
+      - MODEL_INSTALLATION=${MODEL_INSTALLATION:-once}
      - http_proxy=${http_proxy}
      - https_proxy=${https_proxy}
      - no_proxy=${no_proxy}
-    env_file:
-      - path: '.public_models_env'
-        required: false
-    command: './models.sh'
-    restart: on-failure:5
+    command: './model_manager.sh'
   vippet-cpu:
     <<: *vippet
     container_name: vippet-cpu
@@ -123,4 +124,3 @@ volumes:
     driver_opts:
       type: tmpfs
       device: tmpfs
-
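
Note: the `models` service is now built from the new `models/` context and sits behind the `do-not-start` profile, so `docker compose up` never launches it and the `vippet-*` services no longer wait on its completion; naming the service explicitly activates the profile and runs it on demand. Roughly the manual equivalent of the Makefile targets, assuming `DOCKER_TAG` matches the Makefile's `VERSION` (v1.2 here):

```bash
# Build the model-manager image and run it once, interactively;
# installed model files land in ./models/output, which the vippet services mount
mkdir -p models/output && chmod o+w models/output
DOCKER_TAG=v1.2 docker compose build models
MODEL_INSTALLATION=force DOCKER_TAG=v1.2 docker compose run --rm -it models
```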

tools/visual-pipeline-and-platform-evaluation-tool/docs/user-guide/get-started.md

Lines changed: 18 additions & 4 deletions
@@ -23,7 +23,7 @@ By following this guide, you will learn how to:
 - Create and navigate to the directory:

   ```bash
-  mkdir visual-pipeline-and-platform-evaluation-tool
+  mkdir -p visual-pipeline-and-platform-evaluation-tool/models
   cd visual-pipeline-and-platform-evaluation-tool
   ```

@@ -33,15 +33,17 @@
   curl -LO "https://github.com/open-edge-platform/edge-ai-libraries/raw/refs/heads/main/tools/visual-pipeline-and-platform-evaluation-tool/setup_env.sh"
   curl -LO "https://github.com/open-edge-platform/edge-ai-libraries/raw/refs/heads/main/tools/visual-pipeline-and-platform-evaluation-tool/compose.yml"
   curl -LO "https://github.com/open-edge-platform/edge-ai-libraries/raw/refs/heads/main/tools/visual-pipeline-and-platform-evaluation-tool/Makefile"
-  curl -LO "https://github.com/open-edge-platform/edge-ai-libraries/raw/refs/heads/main/tools/visual-pipeline-and-platform-evaluation-tool/models.sh"
-  chmod +x models.sh
+  curl -Lo models/Dockerfile "https://github.com/open-edge-platform/edge-ai-libraries/raw/refs/heads/main/tools/visual-pipeline-and-platform-evaluation-tool/models/Dockerfile"
+  curl -Lo models/model_manager.sh "https://github.com/open-edge-platform/edge-ai-libraries/raw/refs/heads/main/tools/visual-pipeline-and-platform-evaluation-tool/models/model_manager.sh"
+  curl -Lo models/supported_models.lst "https://github.com/open-edge-platform/edge-ai-libraries/raw/refs/heads/main/tools/visual-pipeline-and-platform-evaluation-tool/models/supported_models.lst"
+  chmod +x models/model_manager.sh
   ```

 2. **Start the Application**:
    - Set the appropriate device type (CPU, GPU, or NPU) and run the following command:

    ```bash
-   make run DEVICE_TYPE=<CPU/GPU/NPU>
+   make build-models run DEVICE_TYPE=<CPU/GPU/NPU>
    ```

 3. **Verify the Application**:
@@ -70,6 +72,18 @@ For alternative ways to set up the sample application, see:

 - [How to Build from Source](./how-to-build-source.md)

+### Model Installation and Management
+
+When you first launch the Visual Pipeline and Platform Evaluation Tool,
+you will be prompted to select and install the models you wish to use.
+This step allows you to choose only the models relevant to your intended pipelines.
+
+If you want to manage your installed models again, run the following command:
+
+```bash
+make install-models-force
+```
+
 ### Known Issues

 - **Issue 1**: The Visual Pipeline and Platform Evaluation Tool container fails to start the analysis when the "Run"
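
Note: for scripted or unattended setups, the interactive selection step can be replaced with the `install-models-all` target added to the Makefile in this commit. One possible flow, assuming the `once` installation triggered by `make run` detects the already-populated `models/output` directory and does not prompt again:

```bash
make build-models
make install-models-all
make run DEVICE_TYPE=<CPU/GPU/NPU>
```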
