diff --git a/docs/user-guide/other-topics/how-to-configure-dlstreamer-video-pipeline.md b/docs/user-guide/other-topics/how-to-configure-dlstreamer-video-pipeline.md index 246861098..214b93a0b 100644 --- a/docs/user-guide/other-topics/how-to-configure-dlstreamer-video-pipeline.md +++ b/docs/user-guide/other-topics/how-to-configure-dlstreamer-video-pipeline.md @@ -30,11 +30,10 @@ Model chaining allows you to combine multiple AI models in a single pipeline to ##### Prerequisites -By default, only a limited number of models is downloaded during helm chart installation, which limits the possibilities of model chaining. To enable the full set of models: +All supported models are downloaded during helm chart installation. To configure them: -1. Set `initModels.modelType=all` in `kubernetes/scenescape-chart/values.yaml`. -2. Configure desired model precisions (e.g., `initModels.modelPrecisions=FP16`) in `kubernetes/scenescape-chart/values.yaml`. -3. (Re)deploy Intel® SceneScape to download the additional models. +1. Configure desired model precisions (e.g., `initModels.modelPrecisions=FP16`) in `kubernetes/scenescape-chart/values.yaml`. +2. (Re)deploy Intel® SceneScape to download the supported models. 
##### Chaining Syntax @@ -50,28 +49,14 @@ By default, only a limited number of models is downloaded during helm chart inst Use the following short names to refer to each model in the chain: -| Category | Full Model Name | Short Name | Description | -| --------------------- | -------------------------------------------- | ----------- | ----------------------------------------- | -| **Person Detection** | person-detection-retail-0013 | retail | General person detection | -| | pedestrian-and-vehicle-detector-adas-0001 | pedveh | Pedestrian and vehicle detection | -| **Person Analysis** | person-reidentification-retail-0277 | reid | Person re-identification | -| | person-attributes-recognition-crossroad-0238 | personattr | Person attributes (age, gender, clothing) | -| | age-gender-recognition-retail-0013 | agegender | Age and gender classification | -| | human-pose-estimation-0001 | pose | Human pose estimation | -| **Vehicle Detection** | vehicle-detection-0200 | veh0200 | Vehicle detection (newer model) | -| | vehicle-detection-0201 | veh0201 | Vehicle detection (alternative) | -| | vehicle-detection-0202 | veh0202 | Vehicle detection (alternative) | -| | vehicle-detection-adas-0002 | vehadas | ADAS vehicle detection | -| | person-vehicle-bike-detection-2000 | pvb2000 | Multi-class detection | -| | person-vehicle-bike-detection-2001 | pvb2001 | Multi-class detection (v2) | -| | person-vehicle-bike-detection-2002 | pvb2002 | Multi-class detection (v3) | -| | person-vehicle-bike-detection-crossroad-0078 | pvbcross78 | Crossroad detection | -| | person-vehicle-bike-detection-crossroad-1016 | pvbcross16 | Crossroad detection (v2) | -| **Vehicle Analysis** | vehicle-attributes-recognition-barrier-0042 | vehattr | Vehicle attributes (color, type) | -| | vehicle-license-plate-detection-barrier-0106 | platedetect | License plate detection | -| **Text Analysis** | horizontal-text-detection-0001 | textdetect | Text detection | -| | text-recognition-0012 | textrec | Text 
recognition | -| | text-recognition-resnet-fc | textresnet | ResNet-based text recognition | +| Category | Full Model Name | Short Name | Description | +| -------------------- | -------------------------------------------- | ---------- | ----------------------------------------- | +| **Person Detection** | person-detection-retail-0013 | retail | General person detection | +| | person-vehicle-bike-detection-crossroad-1016 | pvbcross16 | Crossroad multi-class detection | +| **Person Analysis** | person-reidentification-retail-0277 | reid | Person re-identification | +| | person-attributes-recognition-crossroad-0238 | personattr | Person attributes (age, gender, clothing) | +| | age-gender-recognition-retail-0013 | agegender | Age and gender classification | +| **Vehicle Analysis** | vehicle-attributes-recognition-barrier-0042 | vehattr | Vehicle attributes (color, type) | ##### Common Chaining Patterns @@ -93,23 +78,20 @@ retail=GPU+agegender=GPU ``` # Vehicle detection with re-identification -veh0200=GPU+reid=GPU +pvbcross16=GPU+reid=GPU # Vehicle detection with attributes -veh0200+vehattr - -# Vehicle detection with license plate detection -veh0200+platedetect +pvbcross16+vehattr ``` **Multi-Class Detection:** ``` # Detect people, vehicles, and bikes -pvb2000=GPU +pvbcross16=GPU # Multi-class detection with re-identification -pvb2000=GPU+reid=GPU +pvbcross16=GPU+reid=GPU ``` #### Advanced Configuration diff --git a/kubernetes/scenescape-chart/templates/model-installer/job.yaml b/kubernetes/scenescape-chart/templates/model-installer/job.yaml index 5ff7d1982..78b353525 100644 --- a/kubernetes/scenescape-chart/templates/model-installer/job.yaml +++ b/kubernetes/scenescape-chart/templates/model-installer/job.yaml @@ -2,9 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 {{- if .Values.hooks.enabled }} -{{- if not (has .Values.initModels.modelType (list "default" "ocr" "all")) }} -{{- fail "initModels.modelType must be one of: default, ocr, all" }} -{{- end }} {{- if not 
(kindIs "bool" .Values.initModels.modelProc) }} {{- fail "initModels.modelProc must be a boolean (true or false)" }} {{- end }} @@ -45,8 +42,6 @@ spec: imagePullPolicy: {{ .Values.initModels.image.pullPolicy }} name: {{ .Release.Name }}-init-models-container env: - - name: MODEL_TYPE - value: "{{ .Values.initModels.modelType }}" - name: MODEL_PRECISIONS value: "{{ .Values.initModels.modelPrecisions }}" - name: MODEL_PROC diff --git a/kubernetes/scenescape-chart/values.yaml b/kubernetes/scenescape-chart/values.yaml index 8e07d934a..b2bb51720 100644 --- a/kubernetes/scenescape-chart/values.yaml +++ b/kubernetes/scenescape-chart/values.yaml @@ -17,8 +17,6 @@ initModels: name: python tag: 3.13-slim@sha256:4c2cf9917bd1cbacc5e9b07320025bdb7cdf2df7b0ceaccb55e9dd7e30987419 pullPolicy: IfNotPresent - # Model type to download - MUST be one of: default, ocr, all - modelType: default # Model precisions - comma-separated list of: FP32, FP16, INT8 modelPrecisions: FP32 modelProc: true diff --git a/model_installer/Makefile b/model_installer/Makefile index c3f07baed..bb70a7f9a 100644 --- a/model_installer/Makefile +++ b/model_installer/Makefile @@ -28,7 +28,6 @@ install-omz-models: create-models-volume build-image ; APPDIR=/workspace \ ; HOSTDIR=$$PWD \ ; IMAGE=$(IMAGE):latest \ - ; MODELS=--$${MODELS:-default} \ ; PRECISIONS=$${PRECISIONS:-FP32} \ ; docker run --rm -v $$HOSTDIR:$$APPDIR:z \ -v $${COMPOSE_PROJECT_NAME}_vol-models:/opt/intel/openvino/deployment_tools/intel_models \ @@ -39,7 +38,7 @@ install-omz-models: create-models-volume build-image -e MODEL_DIR=/opt/intel/openvino/deployment_tools/intel_models \ -u $$UID \ -l user=$$USER $$IMAGE \ - /workspace/model_installer/src/install-omz-models $$MODELS --precisions $$PRECISIONS --model_proc + /workspace/model_installer/src/install-omz-models --precisions $$PRECISIONS --model_proc @echo "DONE ==> Installing Open Model Zoo models" copy-config-files: create-models-volume diff --git a/model_installer/src/README.md 
b/model_installer/src/README.md index 2457fe59f..947ab14bc 100644 --- a/model_installer/src/README.md +++ b/model_installer/src/README.md @@ -9,16 +9,16 @@ The models and configuration files are downloaded into a models volume that is a ## Configuration -Model installer can be configured to download a specific set of models using the following parameters: +Model installer downloads the supported model set defined in `install-omz-models` (`_DEFAULT_MODELS`) and can be configured with the following parameters: -| Parameter | Allowed Values | Format | Description | -| ------------ | ----------------------- | -------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `models` | `default`, `ocr`, `all` | Single value | Specifies which set of models to download. `default` includes person detection, re-identification, and pose estimation models. `ocr` includes text detection and recognition models. `all` downloads both default and OCR models. | -| `precisions` | `FP32`, `FP16`, `INT8` | Comma-separated list | Model precision formats to download. Multiple precisions can be specified for the same model (e.g., `FP16,FP32`). The first one will be used as preferred when generating `model-config.json` | +| Parameter | Allowed Values | Format | Description | +| ------------ | ---------------------- | -------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `precisions` | `FP32`, `FP16`, `INT8` | Comma-separated list | Model precision formats to download. Multiple precisions can be specified for the same model (e.g., `FP16,FP32`). 
The first one will be used as preferred when generating `model-config.json` | +| `model_proc` | `true`, `false` | Single value | When enabled, attempts to download model-proc JSON files for each supported model and precision. | -For Kubernetes deployment refer to the `initModels` section in [Helm chart values](../../kubernetes/scenescape-chart/values.yaml), for example use `--set initModels.modelType=all --set initModels.modelPrecisions=FP16,FP32` when installing the Helm chart. +For Kubernetes deployment refer to the `initModels` section in [Helm chart values](../../kubernetes/scenescape-chart/values.yaml), for example use `--set initModels.modelPrecisions=FP16,FP32 --set initModels.modelProc=true` when installing the Helm chart. -For Docker deployment use `MODELS` and `PRECISIONS` environment variables when building, e.g.: `make install-models MODELS=all` or `make install-models MODELS=all PRECISIONS="FP16,FP8"`. +For Docker deployment use the `PRECISIONS` environment variable when building, e.g.: `make install-models` or `make install-models PRECISIONS="FP16,FP32"`. ## Models Volume Folder Structure diff --git a/model_installer/src/entrypoint-k8s.sh b/model_installer/src/entrypoint-k8s.sh index 00769f78b..aabff9f2b 100644 --- a/model_installer/src/entrypoint-k8s.sh +++ b/model_installer/src/entrypoint-k8s.sh @@ -11,25 +11,8 @@ apt-get update && apt-get install -y --no-install-recommends wget && rm -rf /var pip install --no-cache-dir -r /workspace/requirements-runtime.txt # Run the entrypoint script to download models -echo "Starting model installation with MODEL_TYPE=${MODEL_TYPE}, PRECISIONS=${MODEL_PRECISIONS}, MODEL_PROC=${MODEL_PROC}" -# Build arguments for install-omz-models -ARGS="" -case "${MODEL_TYPE}" in - "default") - ARGS="--default" - ;; - "ocr") - ARGS="--ocr" - ;; - "all") - ARGS="--all" - ;; - *) - echo "Unknown MODEL_TYPE: ${MODEL_TYPE}. Using default." 
- ARGS="--default" - ;; -esac -ARGS="${ARGS} --precisions ${MODEL_PRECISIONS}" +echo "Starting model installation with PRECISIONS=${MODEL_PRECISIONS}, MODEL_PROC=${MODEL_PROC}" +ARGS="--precisions ${MODEL_PRECISIONS}" # Add model_proc flag if enabled if [ "${MODEL_PROC}" = "true" ]; then ARGS="${ARGS} --model_proc" @@ -41,7 +24,7 @@ echo "Copying config files..." python /workspace/copy-config-files /workspace ${MODEL_DIR} echo "Model installation completed successfully" echo "Models installed in: ${MODEL_DIR}" -ls -la "${MODEL_DIR}" || true`` +ls -la "${MODEL_DIR}" || true if [ -d "/workspace/models-storage/models/" ]; then echo "Models downloaded successfully" diff --git a/model_installer/src/generate_model_config.py b/model_installer/src/generate_model_config.py index 5ac835f6c..6edbf90e3 100644 --- a/model_installer/src/generate_model_config.py +++ b/model_installer/src/generate_model_config.py @@ -22,26 +22,11 @@ _MODEL_NAME_MAP = { # Intel models "age-gender-recognition-retail-0013": "agegender", - "horizontal-text-detection-0001": "textdetect", - "human-pose-estimation-0001": "pose", - "pedestrian-and-vehicle-detector-adas-0001": "pedveh", "person-attributes-recognition-crossroad-0238": "personattr", "person-detection-retail-0013": "retail", "person-reidentification-retail-0277": "reid", - "person-vehicle-bike-detection-2000": "pvb2000", - "person-vehicle-bike-detection-2001": "pvb2001", - "person-vehicle-bike-detection-2002": "pvb2002", - "person-vehicle-bike-detection-crossroad-0078": "pvbcross78", "person-vehicle-bike-detection-crossroad-1016": "pvbcross16", - "text-recognition-0012": "textrec", - "vehicle-license-plate-detection-barrier-0106": "platedetect", "vehicle-attributes-recognition-barrier-0042": "vehattr", - "vehicle-detection-0200": "veh0200", - "vehicle-detection-0201": "veh0201", - "vehicle-detection-0202": "veh0202", - "vehicle-detection-adas-0002": "vehadas", - # Public models - "text-recognition-resnet-fc": "textresnet", } diff --git 
a/model_installer/src/install-omz-models b/model_installer/src/install-omz-models index bbdde9e65..0f64e6f6d 100755 --- a/model_installer/src/install-omz-models +++ b/model_installer/src/install-omz-models @@ -12,61 +12,24 @@ from argparse import ArgumentParser from generate_model_config import generate_model_config TIMEOUT = 600 -NUM_RETRIES = "6" -OMZ_DIR = "/usr/local/lib/open_model_zoo" MODEL_DIR = os.environ.get("MODEL_DIR") if "MODEL_DIR" in os.environ else "/workspace/models" OVMS_MODEL_DIR = os.path.join(MODEL_DIR, "ovms") MODEL_CACHE_DIR = os.path.join(OVMS_MODEL_DIR, "cache") OVMS_CONFIGFILE = f"{MODEL_DIR}/ovms-config.json" OMZ_BASE_URL = "https://storage.openvinotoolkit.org/repositories/open_model_zoo/2023.0/models_bin/1" -# Special case: some public models have a different base URL -MODEL_BASE_URLS = { - "text-recognition-resnet-fc": "https://storage.openvinotoolkit.org/repositories/open_model_zoo/public/text-recognition-resnet-fc" -} MODEL_PROC_FOLDER_URL = "https://raw.githubusercontent.com/open-edge-platform/edge-ai-libraries/refs/heads/release-2025.2.0/libraries/dl-streamer/samples/gstreamer/model_proc/intel/" _DEFAULT_MODELS = [ 'person-detection-retail-0013', - 'person-reidentification-retail-0277', - 'human-pose-estimation-0001', -] - -_OCR_MODELS = [ - 'horizontal-text-detection-0001', - 'text-recognition-resnet-fc', - 'text-recognition-0012' -] - -_ALL_MODELS = _DEFAULT_MODELS + _OCR_MODELS + [ - 'pedestrian-and-vehicle-detector-adas-0001', - 'person-vehicle-bike-detection-2000', - 'person-vehicle-bike-detection-2001', - 'person-vehicle-bike-detection-2002', - 'person-vehicle-bike-detection-crossroad-0078', 'person-vehicle-bike-detection-crossroad-1016', - 'person-attributes-recognition-crossroad-0238', + 'person-reidentification-retail-0277', 'age-gender-recognition-retail-0013', - 'vehicle-detection-0200', - 'vehicle-detection-0201', - 'vehicle-detection-0202', - 'vehicle-detection-adas-0002', - 
'vehicle-license-plate-detection-barrier-0106', - 'vehicle-attributes-recognition-barrier-0042' -] - -# TODO: use download_public_models.sh for downloading supported models -# see: https://github.com/open-edge-platform/edge-ai-libraries/tree/main/libraries/dl-streamer/samples -_PUBLIC_MODELS = [ - # This is 1-channel model that is not supported by DLStreamer - # TODO: use ch_PP-OCRv4_rec_infer - 'text-recognition-resnet-fc', + 'person-attributes-recognition-crossroad-0238', + 'vehicle-attributes-recognition-barrier-0042', ] def _build_argparser(): parser = ArgumentParser() - parser.add_argument("--default", action="store_true", help="Download default models") - parser.add_argument("--ocr", action="store_true", help="Download OCR models") - parser.add_argument("--all", action="store_true", help="Download all models") parser.add_argument("--precisions", help="Donwload models with specific precisions." "Comma separated value, with no extra spaces.", type=str, default="FP32") @@ -105,7 +68,7 @@ def _download_model(model, precisions, model_subdir, download_proc): exit_code = 0 for prec in precisions.split(","): prec = prec.strip() - model_url_dir = MODEL_BASE_URLS.get(model, f"{OMZ_BASE_URL}/{model}/{prec}") + model_url_dir = f"{OMZ_BASE_URL}/{model}/{prec}" dest_dir = os.path.join(MODEL_DIR, model_subdir, model, prec) _make_dir(dest_dir) try: @@ -153,12 +116,7 @@ def _copy_files(source, destination): def main(): args = _build_argparser().parse_args() - if args.all: - models = _ALL_MODELS - elif args.ocr: - models = _OCR_MODELS - else: - models = _DEFAULT_MODELS + models = _DEFAULT_MODELS precisions = args.precisions proc_files = args.model_proc @@ -174,7 +132,7 @@ def main(): ovms_config_len = len(ovms_config["model_config_list"]) for model in models: - model_subdir = "public" if model in _PUBLIC_MODELS else "intel" + model_subdir = "intel" if not os.path.isdir(os.path.join(MODEL_DIR, model_subdir, model)): # Download model _download_model(model, precisions, 
model_subdir, proc_files) diff --git a/tools/ppl_runner/README.md b/tools/ppl_runner/README.md index 63ae0b028..12ab7967c 100644 --- a/tools/ppl_runner/README.md +++ b/tools/ppl_runner/README.md @@ -16,7 +16,7 @@ Building Intel® SceneScape will perform all the above steps and additionally bu The commands below will perform all the above steps and additionally build all images (adjust environment variables if needed): ``` -make MODELS=all PRECISIONS=FP32 +make install-models PRECISIONS=FP32 make init-sample-data ```