Skip to content

Commit 12440ba

Browse files
derekhiggins and leseb
authored and committed
Add integration tests to CI workflow
o Added new integration test step that runs llama-stack pytest tests o Modified smoke test workflow to include cleanup step o Created run_integration_tests.sh script to clone upstream repo and run tests o Moved docker cleanup from smoke.sh to workflow cleanup step Signed-off-by: Derek Higgins <derekh@redhat.com>
1 parent 9f1bd63 commit 12440ba

File tree

3 files changed

+96
-5
lines changed

3 files changed

+96
-5
lines changed

.github/workflows/redhat-distro-container.yml

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,9 @@ env:
2424
jobs:
2525
build-test-push:
2626
runs-on: ubuntu-latest
27+
env:
28+
INFERENCE_MODEL: meta-llama/Llama-3.2-1B-Instruct
29+
VLLM_URL: http://localhost:8000/v1
2730
strategy:
2831
matrix:
2932
platform: [linux/amd64] # TODO: enable other arch once all pip packages are available.
@@ -32,6 +35,12 @@ jobs:
3235
- name: Checkout repository
3336
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
3437

38+
- name: Install uv
39+
uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca # v6.0.1
40+
with:
41+
python-version: 3.12
42+
version: 0.7.6
43+
3544
- name: Set up QEMU
3645
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
3746

@@ -55,14 +64,22 @@ jobs:
5564
id: vllm
5665
uses: ./.github/actions/setup-vllm
5766

58-
- name: Smoke test image
67+
- name: Start and smoke test LLS distro image
5968
id: smoke-test
6069
shell: bash
61-
env:
62-
INFERENCE_MODEL: meta-llama/Llama-3.2-1B-Instruct
63-
VLLM_URL: http://localhost:8000/v1
6470
run: ./tests/smoke.sh
6571

72+
- name: Integration tests
73+
id: integration-tests
74+
shell: bash
75+
run: ./tests/run_integration_tests.sh
76+
77+
- name: cleanup
78+
if: always()
79+
shell: bash
80+
run: |
81+
docker rm -f vllm llama-stack
82+
6683
- name: Log in to Quay.io
6784
id: login
6885
if: github.event_name == 'push'

tests/run_integration_tests.sh

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
#!/usr/bin/env bash
#
# Run the upstream llama-stack integration test suite against the locally
# running distro. Clones the upstream repo at the tag matching the
# llama-stack version pinned in distribution/Containerfile.in, then runs
# the inference integration tests via uv/pytest.
#
# Env: INFERENCE_MODEL — model to test against (defaulted below).

set -euo pipefail

# Configuration
LLAMA_STACK_REPO="https://github.com/meta-llama/llama-stack.git"
WORK_DIR="/tmp/llama-stack-integration-tests"
INFERENCE_MODEL="${INFERENCE_MODEL:-meta-llama/Llama-3.2-1B-Instruct}"

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Get version dynamically from Containerfile.in (look in parent directory)
CONTAINERFILE_IN="$SCRIPT_DIR/../distribution/Containerfile.in"
if [ ! -f "$CONTAINERFILE_IN" ]; then
    echo "Error: Containerfile.in not found at $CONTAINERFILE_IN" >&2
    exit 1
fi
# NOTE: the '|| true' is required — under 'set -euo pipefail' a non-matching
# grep would otherwise abort the script silently, making the explicit error
# message below unreachable.
LLAMA_STACK_VERSION=$(grep -o 'llama-stack==[0-9]\+\.[0-9]\+\.[0-9]\+' "$CONTAINERFILE_IN" | cut -d'=' -f3 || true)
if [ -z "$LLAMA_STACK_VERSION" ]; then
    echo "Error: Could not extract llama-stack version from Containerfile.in" >&2
    exit 1
fi
20+
#######################################
# Fetch the llama-stack sources and check out the release tag matching the
# pinned version. An existing clone in $WORK_DIR is reused.
# Globals:   LLAMA_STACK_REPO, WORK_DIR, LLAMA_STACK_VERSION (all read)
# Outputs:   git progress; on failure, an error plus recent tags
# Returns:   0 on success; exits 1 if the tag cannot be checked out
#######################################
function clone_llama_stack() {
    # Only clone when no previous checkout is present.
    if [ ! -d "$WORK_DIR" ]; then
        git clone "$LLAMA_STACK_REPO" "$WORK_DIR"
    fi

    cd "$WORK_DIR"

    # Fetch in case the reused clone predates the tag we need.
    git fetch origin

    # Pin to the exact upstream release tag.
    if ! git checkout "v$LLAMA_STACK_VERSION"; then
        echo "Error: Could not checkout tag v$LLAMA_STACK_VERSION"
        echo "Available tags:"
        git tag | grep "^v" | tail -10
        exit 1
    fi
}
37+
38+
#######################################
# Run the upstream inference integration tests with pytest via uv.
# Globals:   WORK_DIR, SCRIPT_DIR, INFERENCE_MODEL (all read)
# Outputs:   pytest output on stdout; errors on stderr
# Returns:   0 on success; exits 1 if the stack config is missing;
#            pytest's status propagates via 'set -e' on test failure
#######################################
function run_integration_tests() {
    # 'local' keeps these from leaking into global scope — previously they
    # were set as globals as a side effect of calling this function.
    local skip_tests stack_config_path

    echo "Running integration tests..."

    cd "$WORK_DIR"

    # Tests to skip (not applicable to this distro's configuration).
    skip_tests="test_text_chat_completion_tool_calling_tools_not_in_request or test_inference_store_tool_calls"

    # Dynamically determine the path to run.yaml from the original script directory
    stack_config_path="$SCRIPT_DIR/../distribution/run.yaml"
    if [ ! -f "$stack_config_path" ]; then
        echo "Error: Could not find stack config at $stack_config_path" >&2
        exit 1
    fi

    uv run pytest -s -v tests/integration/inference/ \
        --stack-config=server:"$stack_config_path" \
        --text-model=vllm-inference/"$INFERENCE_MODEL" \
        -k "not ($skip_tests)"
}
58+
59+
#######################################
# Entry point: announce configuration, prepare sources, run the tests.
# Globals:   LLAMA_STACK_VERSION, LLAMA_STACK_REPO, WORK_DIR,
#            INFERENCE_MODEL (all read)
#######################################
function main() {
    echo "Starting llama-stack integration tests"
    # Print the effective configuration up front for easier CI debugging.
    printf '%s\n' \
        "Configuration:" \
        "  LLAMA_STACK_VERSION: $LLAMA_STACK_VERSION" \
        "  LLAMA_STACK_REPO: $LLAMA_STACK_REPO" \
        "  WORK_DIR: $WORK_DIR" \
        "  INFERENCE_MODEL: $INFERENCE_MODEL"

    clone_llama_stack
    run_integration_tests

    echo "Integration tests completed successfully!"
}


main "$@"
exit 0

tests/smoke.sh

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,5 @@ main() {
6969
echo "===> Smoke test completed successfully!"
7070
}
7171

72-
trap 'docker rm -f -v llama-stack >/dev/null 2>&1 || true' EXIT
7372
main "$@"
7473
exit 0

0 commit comments

Comments
 (0)