-
Notifications
You must be signed in to change notification settings - Fork 185
Expand file tree
/
Copy path: geti_sample.sh
More file actions
executable file
·131 lines (118 loc) · 5.38 KB
/
geti_sample.sh
File metadata and controls
executable file
·131 lines (118 loc) · 5.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
#!/bin/bash
# ==============================================================================
# Copyright (C) 2021-2025 Intel Corporation
#
# SPDX-License-Identifier: MIT
# ==============================================================================
# This sample refers to a video file by Rihard-Clement-Ciprian Diac via Pexels
# (https://www.pexels.com)
# ==============================================================================
# Fail fast: abort on errors, on use of unset variables, and on any failing
# stage of a pipeline (the rest of the script guards optional vars with :-).
set -euo pipefail

# MODELS_PATH must point at the directory tree that holds the model files;
# MODEL_PATH (parameter 2) is resolved relative to it below.
if [ -z "${MODELS_PATH:-}" ]; then
    echo "Error: MODELS_PATH is not set." >&2
    exit 1
else
    echo "MODELS_PATH: $MODELS_PATH"
fi
# ---------------------------------------------------------------------------
# Optional positional parameters:
#   $1 MODEL_TYPE       rotated-detection, instance-segmentation, detection,
#                       geti-detection, classification, geti-obb,
#                       geti-segmentation, geti-classification-single,
#                       geti-classification-multi, anomaly-detection
#   $2 MODEL_PATH       model XML path, resolved relative to MODELS_PATH
#   $3 DEVICE           CPU, GPU or NPU
#   $4 PREPROC_BACKEND  ie/opencv for CPU, va/va-surface-sharing for GPU,
#                       va for NPU
#   $5 INPUT            file path, URL, or video device (e.g. /dev/video0)
#   $6 OUTPUT           file, display, fps, json, display-and-json
# ---------------------------------------------------------------------------
MODEL_TYPE="${1:-detection}"
MODEL_PATH="${2:-/home/path/to/your/model.xml}"
DEVICE="${3:-CPU}"
PREPROC_BACKEND="${4:-ie}"
INPUT="${5:-https://videos.pexels.com/video-files/1192116/1192116-sd_640_360_30fps.mp4}"
OUTPUT="${6:-file}"
# Validate MODEL_TYPE.  The error message lists the SAME set of values the
# regex accepts (the original omitted all geti-* variants from the message).
if [[ ! $MODEL_TYPE =~ ^(rotated-detection|instance-segmentation|detection|geti-detection|classification|geti-classification-single|geti-classification-multi|geti-obb|geti-segmentation|anomaly-detection)$ ]]; then
    echo "Error: Invalid MODEL_TYPE. Supported values: rotated-detection, instance-segmentation, detection, geti-detection, classification, geti-classification-single, geti-classification-multi, geti-obb, geti-segmentation, anomaly-detection." >&2
    exit 1
fi
# Validate DEVICE.
if [[ ! $DEVICE =~ ^(CPU|GPU|NPU)$ ]]; then
    echo "Error: Invalid DEVICE. Supported values: CPU, GPU, NPU." >&2
    exit 1
fi
# Validate PREPROC_BACKEND.  An empty value is accepted here (trailing '?');
# the per-device defaulting logic later fills it in.
if [[ ! $PREPROC_BACKEND =~ ^(ie|opencv|va|va-surface-sharing)?$ ]]; then
    echo "Error: Invalid PREPROC_BACKEND. Supported values: ie/opencv for CPU or va/va-surface-sharing GPU or va for NPU" >&2
    exit 1
fi
# Validate OUTPUT.  An empty value is accepted and later treated as "display".
if [[ ! $OUTPUT =~ ^(file|display|fps|json|display-and-json)?$ ]]; then
    echo "Error: Invalid OUTPUT. Supported values: file, display, fps, json, display-and-json." >&2
    exit 1
fi
# Resolve the model path relative to MODELS_PATH and fail early if missing.
FULL_MODEL_PATH="${MODELS_PATH}/${MODEL_PATH}"
echo "FULL_MODEL_PATH: $FULL_MODEL_PATH"
# Quote the path (it may contain spaces) and exit non-zero on this fatal
# error — the original bare 'exit' reported success (status 0).
if [ ! -f "$FULL_MODEL_PATH" ]; then
    echo "Model not found: ${FULL_MODEL_PATH}" >&2
    exit 1
fi
# Pick the GStreamer source element from the shape of INPUT:
# a V4L2 device node, a URI scheme, or a plain file path.
case "$INPUT" in
    /dev/video*)
        SOURCE_ELEMENT="v4l2src device=${INPUT}"
        ;;
    *://*)
        SOURCE_ELEMENT="urisourcebin buffer-size=4096 uri=${INPUT}"
        ;;
    *)
        SOURCE_ELEMENT="filesrc location=${INPUT}"
        ;;
esac
# Software decode by default; on GPU/NPU route frames through VA memory.
DECODE_ELEMENT="! decodebin3 !"
if [[ "$DEVICE" == "GPU" || "$DEVICE" == "NPU" ]]; then
    DECODE_ELEMENT="! decodebin3 ! vapostproc ! video/x-raw(memory:VAMemory) !"
fi
# Default or validate PREPROC_BACKEND per device.  The original printed an
# error for an invalid value but kept running; now it exits non-zero, and the
# message goes to stderr.
if [[ -z "$PREPROC_BACKEND" ]]; then
    case "$DEVICE" in
        GPU) PREPROC_BACKEND="va-surface-sharing" ;;  # default for GPU
        NPU) PREPROC_BACKEND="va" ;;                  # default for NPU
        *)   PREPROC_BACKEND="ie" ;;                  # default for CPU
    esac
elif [[ ! $PREPROC_BACKEND =~ ^(ie|opencv|va|va-surface-sharing)$ ]]; then
    echo "Error wrong value for PREPROC_BACKEND parameter. Supported values: ie/opencv for CPU | va/va-surface-sharing/opencv for GPU/NPU" >&2
    exit 1
fi
# Classification-style and anomaly models run full-frame through gvaclassify;
# everything else uses gvadetect.  (Substring match, as in the original.)
INFERENCE_ELEMENT="gvadetect"
case "$MODEL_TYPE" in
    *classification*|*anomaly-detection*)
        INFERENCE_ELEMENT="gvaclassify inference-region=full-frame"
        ;;
esac
# Rotated-detection models need oriented bounding boxes in the watermark.
WT_OBB_ELEMENT=" "
case "$MODEL_TYPE" in
    *rotated-detection*)
        WT_OBB_ELEMENT=" obb=true "
        ;;
esac
# Build the sink branch of the pipeline for the requested OUTPUT mode.
if [[ $OUTPUT == "file" ]]; then
    FILE="$(basename "${INPUT%.*}")"
    rm -f "geti_${FILE}_${MODEL_TYPE}_${DEVICE}.mp4"
    # Probe the VA plugin for an H.264 encoder; prefer vah264enc, fall back
    # to the low-power variant.  grep -q in the condition replaces the
    # [[ $(... | grep ...) ]] string-test idiom.
    if gst-inspect-1.0 va | grep -q vah264enc; then
        ENCODER="vah264enc"
    elif gst-inspect-1.0 va | grep -q vah264lpenc; then
        ENCODER="vah264lpenc"
    else
        # Fatal: exit non-zero (the original bare 'exit' returned status 0).
        echo "Error - VA-API H.264 encoder not found." >&2
        exit 1
    fi
    SINK_ELEMENT="gvawatermark${WT_OBB_ELEMENT} ! gvafpscounter ! ${ENCODER} ! h264parse ! mp4mux ! filesink location=geti_${FILE}_${MODEL_TYPE}_${DEVICE}.mp4"
elif [[ $OUTPUT == "display" ]] || [[ -z $OUTPUT ]]; then
    SINK_ELEMENT="gvawatermark${WT_OBB_ELEMENT} ! videoconvertscale ! gvafpscounter ! autovideosink sync=false"
elif [[ $OUTPUT == "fps" ]]; then
    SINK_ELEMENT="gvafpscounter ! fakesink async=false"
elif [[ $OUTPUT == "json" ]]; then
    rm -f output.json
    SINK_ELEMENT="gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! fakesink async=false"
elif [[ $OUTPUT == "display-and-json" ]]; then
    rm -f output.json
    SINK_ELEMENT="gvawatermark${WT_OBB_ELEMENT}! gvametaconvert add-tensor-data=true ! gvametapublish file-format=json-lines file-path=output.json ! videoconvert ! gvafpscounter ! autovideosink sync=false"
else
    # Defensive: OUTPUT was already validated above, but exit non-zero here
    # too instead of falling through with status 0.
    echo "Error wrong value for SINK_ELEMENT parameter" >&2
    echo 'Valid values: "file" - render to file, "display" - render to screen, "fps" - print FPS, "json" - write to output.json, "display-and-json" - render to screen and write to output.json' >&2
    exit 1
fi
# Assemble the full gst-launch-1.0 command as a single string.
# NOTE(review): it is executed below via unquoted expansion, which relies on
# word splitting — element properties must not contain whitespace or glob
# characters for this to stay correct.
PIPELINE="gst-launch-1.0 $SOURCE_ELEMENT $DECODE_ELEMENT \
$INFERENCE_ELEMENT model=$FULL_MODEL_PATH device=$DEVICE pre-process-backend=$PREPROC_BACKEND ! queue ! \
$SINK_ELEMENT"
# Echo the exact command for reproducibility, then run it.
echo "${PIPELINE}"
$PIPELINE