forked from open-edge-platform/dlstreamer
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsample_gvagenai.sh
More file actions
executable file
·187 lines (170 loc) · 5.53 KB
/
sample_gvagenai.sh
File metadata and controls
executable file
·187 lines (170 loc) · 5.53 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
#!/bin/bash
# ==============================================================================
# Copyright (C) 2025 Intel Corporation
#
# SPDX-License-Identifier: MIT
# ==============================================================================
#
# Video summarization sample: runs a GStreamer pipeline with the gvagenai
# element against a MiniCPM-V, Phi-4-multimodal-instruct or Gemma 3 model.
# Requires the GENAI_MODEL_PATH environment variable to point at the model
# directory. Run with -h/--help for the option list.

# Fail fast: abort on command errors, unset variables, and failed pipe stages.
set -euo pipefail

# Default values, overridable via command-line options (readonly: these are
# constants; the effective settings are copied into mutable variables later).
readonly DEFAULT_SOURCE="https://videos.pexels.com/video-files/1192116/1192116-sd_640_360_30fps.mp4"
readonly DEFAULT_DEVICE="CPU"
readonly DEFAULT_PROMPT="Describe what you see in this video."
readonly DEFAULT_FRAME_RATE="1"
readonly DEFAULT_CHUNK_SIZE="10"
readonly DEFAULT_MAX_NEW_TOKENS="100"
readonly DEFAULT_METRICS="false"
# Print the help text (usage, options, examples) to stdout.
show_usage() {
  # Unquoted delimiter so $0 expands; everything else is literal.
  cat <<EOF
Usage: $0 [OPTIONS]

Video Summarization with MiniCPM-V, Phi-4-multimodal-instruct or Gemma 3 model using gvagenai element

Options:
 -s, --source FILE/URL/CAMERA Input source (file path, URL or web camera)
 -d, --device DEVICE Inference device (CPU, GPU, NPU)
 -p, --prompt TEXT Text prompt for the model
 -r, --frame-rate RATE Frame sampling rate (fps)
 -c, --chunk-size NUM Chunk size, or frames per inference call
 -t, --max-tokens NUM Maximum new tokens to generate
 -m, --metrics Include performance metrics in JSON output
 -h, --help Show this help message

Examples:
 $0 --source video.mp4 --device GPU
 $0 --chunk-size 1 --frame-rate 10
 $0 --prompt "Describe what do you see in this video?"
 $0 --metrics --max-tokens 200

EOF
}
# Abort early unless the caller exported GENAI_MODEL_PATH; the :- expansion
# keeps the test safe under set -u when the variable is unset.
if [ -z "${GENAI_MODEL_PATH:-}" ]; then
  {
    echo "ERROR - GENAI_MODEL_PATH environment variable is not set."
    echo "Please set it to the path where your MiniCPM-V, Phi-4-multimodal-instruct or Gemma 3 model is located."
    echo "Examples: export GENAI_MODEL_PATH=/path/to/minicpm-v-model"
    echo " export GENAI_MODEL_PATH=/path/to/Phi-4-multimodal"
    echo " export GENAI_MODEL_PATH=/path/to/gemma-3-model"
  } >&2
  exit 1
fi
# Seed the effective settings from the defaults; the option parser below
# overwrites whichever ones the user supplies. (Plain assignments never
# word-split, so the right-hand sides need no quotes.)
INPUT=$DEFAULT_SOURCE
DEVICE=$DEFAULT_DEVICE
PROMPT=$DEFAULT_PROMPT
FRAME_RATE=$DEFAULT_FRAME_RATE
CHUNK_SIZE=$DEFAULT_CHUNK_SIZE
MAX_NEW_TOKENS=$DEFAULT_MAX_NEW_TOKENS
METRICS=$DEFAULT_METRICS
# Parse command line arguments.
# _require_value FLAG ARGC — exit with a clear message when an option that
# needs a value is the last argument. Without this guard, "$2" under
# `set -u` aborts the script with a cryptic "unbound variable" error.
_require_value() {
  if [ "$2" -lt 2 ]; then
    echo "ERROR - option $1 requires a value" >&2
    exit 1
  fi
}
while [[ $# -gt 0 ]]; do
  case $1 in
    -s|--source)
      _require_value "$1" "$#"
      INPUT="$2"
      shift 2
      ;;
    -d|--device)
      _require_value "$1" "$#"
      DEVICE="$2"
      shift 2
      ;;
    -p|--prompt)
      _require_value "$1" "$#"
      PROMPT="$2"
      shift 2
      ;;
    -r|--frame-rate)
      _require_value "$1" "$#"
      FRAME_RATE="$2"
      shift 2
      ;;
    -t|--max-tokens)
      _require_value "$1" "$#"
      MAX_NEW_TOKENS="$2"
      shift 2
      ;;
    -c|--chunk-size)
      _require_value "$1" "$#"
      CHUNK_SIZE="$2"
      shift 2
      ;;
    -m|--metrics)
      METRICS="true"
      shift
      ;;
    -h|--help)
      show_usage
      exit 0
      ;;
    *)
      # Legacy positional arguments (source, device, prompt) for backwards
      # compatibility. The original `[ $# -ge 1 ]` guards were dropped: the
      # loop condition already guarantees at least one argument remains.
      if [ -z "${INPUT_SET:-}" ]; then
        INPUT="$1"
        INPUT_SET=1
      elif [ -z "${DEVICE_SET:-}" ]; then
        DEVICE="$1"
        DEVICE_SET=1
      elif [ -z "${PROMPT_SET:-}" ]; then
        PROMPT="$1"
        PROMPT_SET=1
      else
        echo "Unknown option: $1" >&2
        show_usage
        exit 1
      fi
      shift
      ;;
  esac
done
# Only the supported OpenVINO device names are accepted.
case "$DEVICE" in
  CPU|GPU|NPU)
    ;;
  *)
    echo "ERROR - Invalid device: $DEVICE. Use CPU, GPU, or NPU." >&2
    exit 1
    ;;
esac
# With --metrics, turn up GStreamer logging (GST_DEBUG=4 = INFO level).
if [[ "$METRICS" == "true" ]]; then
  export GST_DEBUG=4
fi
# Echo the effective configuration before launching the pipeline.
# Unquoted delimiter so the $variables expand; the text is otherwise literal.
cat <<CONFIG
=== sample gvagenai configuration ===
Model Path: $GENAI_MODEL_PATH
Source: $INPUT
Device: $DEVICE
Prompt: $PROMPT
Frame Rate: $FRAME_RATE fps
Chunk Size: $CHUNK_SIZE
Max New Tokens: $MAX_NEW_TOKENS
Metrics: $METRICS
===========================================
CONFIG
# The model path must be an existing directory.
if [ ! -d "$GENAI_MODEL_PATH" ]; then
  echo "ERROR - Model directory not found: $GENAI_MODEL_PATH" >&2
  exit 1
fi
# Pick the GStreamer source element that matches the input kind:
# a V4L2 device node, anything with a URI scheme, or a plain file path.
case "$INPUT" in
  /dev/video*)
    SOURCE_ELEMENT="v4l2src device=${INPUT}"
    ;;
  *"://"*)
    SOURCE_ELEMENT="urisourcebin buffer-size=4096 uri=${INPUT}"
    ;;
  *)
    SOURCE_ELEMENT="filesrc location=${INPUT}"
    ;;
esac
# Generation configuration passed straight through to the gvagenai element.
GENERATION_CONFIG="max_new_tokens=${MAX_NEW_TOKENS}"
# gvametapublish writes the JSON results here.
OUTPUT_FILE="genai_output.json"
# Build the gst-launch-1.0 argument list as an array instead of a string fed
# to eval: the eval'd string broke (or could execute arbitrary commands) when
# PROMPT or the model path contained quotes, spaces, or shell metacharacters.
# SOURCE_ELEMENT deliberately holds "element prop=value ..." words, so it is
# split once here under our control.
read -r -a SOURCE_ARGS <<< "$SOURCE_ELEMENT"
PIPELINE_ARGS=(
  "${SOURCE_ARGS[@]}" !
  decodebin3 !
  videoconvert !
  video/x-raw,format=RGB !
  gvagenai
  device="$DEVICE"
  model-path="$GENAI_MODEL_PATH"
  prompt="$PROMPT"
  generation-config="$GENERATION_CONFIG"
  frame-rate="$FRAME_RATE"
  chunk-size="$CHUNK_SIZE"
  metrics="$METRICS" !
  gvametapublish file-path="$OUTPUT_FILE" !
  fakesink async=false
)
echo ""
echo "Running gvagenai inference pipeline..."
echo "Pipeline: gst-launch-1.0 ${PIPELINE_ARGS[*]}"
echo ""
# Execute directly; each array element is passed as its own argument, so no
# re-quoting or eval is needed.
gst-launch-1.0 "${PIPELINE_ARGS[@]}"
echo ""
echo "Pipeline execution completed."
echo "Results saved to: $OUTPUT_FILE"