@@ -10,29 +10,42 @@ source "$SCRIPT_DIR/test_utils.sh"
1010LLAMA_STACK_BASE_URL="http://127.0.0.1:8321"
1111
1212function start_and_wait_for_llama_stack_container {
13+ # Build docker run command with base arguments
14+ docker_args=(
15+ -d
16+ --pull=never
17+ --net=host
18+ -p 8321:8321
19+ --env "INFERENCE_MODEL=$VLLM_INFERENCE_MODEL"
20+ --env "EMBEDDING_MODEL=$EMBEDDING_MODEL"
21+ --env "VLLM_URL=$VLLM_URL"
22+ --env "ENABLE_SENTENCE_TRANSFORMERS=True"
23+ --env "EMBEDDING_PROVIDER=sentence-transformers"
24+ --env "TRUSTYAI_LMEVAL_USE_K8S=False"
25+ --env "POSTGRES_HOST=${POSTGRES_HOST:-localhost}"
26+ --env "POSTGRES_PORT=${POSTGRES_PORT:-5432}"
27+ --env "POSTGRES_DB=${POSTGRES_DB:-llamastack}"
28+ --env "POSTGRES_USER=${POSTGRES_USER:-llamastack}"
29+ --env "POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-llamastack}"
30+ )
31+
32+ # Only add Vertex AI configuration if VERTEX_AI_PROJECT is set
33+ if [ -n "${VERTEX_AI_PROJECT:-}" ]; then
34+ docker_args+=(
35+ --env "VERTEX_AI_PROJECT=$VERTEX_AI_PROJECT"
36+ --env "VERTEX_AI_LOCATION=$VERTEX_AI_LOCATION"
37+ --env "GOOGLE_APPLICATION_CREDENTIALS=/run/secrets/gcp-credentials"
38+ )
39+ # Only mount credentials if the file exists
40+ if [ -n "${GOOGLE_APPLICATION_CREDENTIALS:-}" ] && [ -f "$GOOGLE_APPLICATION_CREDENTIALS" ]; then
41+ docker_args+=(--volume "$GOOGLE_APPLICATION_CREDENTIALS:/run/secrets/gcp-credentials:ro")
42+ fi
43+ fi
44+
45+ docker_args+=(--name llama-stack "$IMAGE_NAME:$GITHUB_SHA")
46+
1347 # Start llama stack
14- docker run \
15- -d \
16- --pull=never \
17- --net=host \
18- -p 8321:8321 \
19- --env INFERENCE_MODEL="$VLLM_INFERENCE_MODEL" \
20- --env EMBEDDING_MODEL="$EMBEDDING_MODEL" \
21- --env VLLM_URL="$VLLM_URL" \
22- --env ENABLE_SENTENCE_TRANSFORMERS=True \
23- --env EMBEDDING_PROVIDER=sentence-transformers \
24- --env TRUSTYAI_LMEVAL_USE_K8S=False \
25- --env VERTEX_AI_PROJECT="$VERTEX_AI_PROJECT" \
26- --env VERTEX_AI_LOCATION="$VERTEX_AI_LOCATION" \
27- --env GOOGLE_APPLICATION_CREDENTIALS="/run/secrets/gcp-credentials" \
28- --env POSTGRES_HOST="${POSTGRES_HOST:-localhost}" \
29- --env POSTGRES_PORT="${POSTGRES_PORT:-5432}" \
30- --env POSTGRES_DB="${POSTGRES_DB:-llamastack}" \
31- --env POSTGRES_USER="${POSTGRES_USER:-llamastack}" \
32- --env POSTGRES_PASSWORD="${POSTGRES_PASSWORD:-llamastack}" \
33- --volume "$GOOGLE_APPLICATION_CREDENTIALS:/run/secrets/gcp-credentials:ro" \
34- --name llama-stack \
35- "$IMAGE_NAME:$GITHUB_SHA"
48+ docker run "${docker_args[@]}"
3649  echo "Started Llama Stack container..."
3750
3851 # Wait for llama stack to be ready by doing a health check
@@ -153,15 +166,28 @@ main() {
153166 # Track failures
154167 failed_checks=()
155168
169+ # Build list of models to test based on available configuration
170+ models_to_test=("$VLLM_INFERENCE_MODEL" "$EMBEDDING_MODEL")
171+ inference_models_to_test=("$VLLM_INFERENCE_MODEL")
172+
173+ # Only include Vertex AI models if VERTEX_AI_PROJECT is set
174+ if [ -n "${VERTEX_AI_PROJECT:-}" ]; then
175+ echo "===> VERTEX_AI_PROJECT is set, including Vertex AI models in tests"
176+ models_to_test+=("$VERTEX_AI_INFERENCE_MODEL")
177+ inference_models_to_test+=("$VERTEX_AI_INFERENCE_MODEL")
178+ else
179+ echo "===> VERTEX_AI_PROJECT is not set, skipping Vertex AI models"
180+ fi
181+
156182  echo "===> Testing model list for all models..."
157- for model in "$VLLM_INFERENCE_MODEL" "$VERTEX_AI_INFERENCE_MODEL" "$EMBEDDING_MODEL"; do
183+ for model in "${models_to_test[@]}"; do
158184  if ! test_model_list "$model"; then
159185  failed_checks+=("model_list:$model")
160186  fi
161187  done
162188
163189  echo "===> Testing inference for all models..."
164- for model in "$VLLM_INFERENCE_MODEL" "$VERTEX_AI_INFERENCE_MODEL"; do
190+ for model in "${inference_models_to_test[@]}"; do
165191  if ! test_model_openai_inference "$model"; then
166192  failed_checks+=("inference:$model")
167193  fi
167193 fi
0 commit comments