# Copyright (C) 2025 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
name: Run deploy and benchmark on K8S
on:
  workflow_dispatch:
    inputs:
      runner:
        default: "aise-perf"
        description: "Runner label to run the workflow on"
        required: true
        type: string
      clean_up:
        default: true
        description: "Clean up the example deployment after running the workflow"
        required: false
        type: boolean
      example_branch:
        default: "main"
        description: "GenAIExamples branch providing the test scripts and config YAML"
        required: true
        type: string
      eval_branch:
        default: "main"
        description: "GenAIEval branch to build from source for the test; leave empty to use the latest release version"
        required: true
        type: string
      example:
        default: "ChatQnA"
        description: "The example to deploy and benchmark [ChatQnA/DocSum]"
        required: true
        type: string
      test_mode:
        default: "oob"
        description: "The test mode, e.g., [oob|tune]"
        required: true
        type: string
      target_node:
        default: "1"
        description: "The target node count for the test; if empty, use the default value in the config YAML"
        required: false
        type: string
      deploy_args:
        default: "version=0-latest#namespace=default#with_rerank=False#engine=vllm#max_batch_size=[8]#max_num_seqs=[8]"
        description: "The deploy arguments, separated by '#'"
        required: false
        type: string
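      # Illustration: since the description says '#' separates the arguments, the
      # default above expands to these key=value pairs (the actual parsing is
      # assumed to live in run_deploy_benchmark.sh):
      #   version=0-latest, namespace=default, with_rerank=False,
      #   engine=vllm, max_batch_size=[8], max_num_seqs=[8]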
      benchmark_args:
        default: "bench_target=[chatqna_qlist_pubmed]#dataset=[\"/home/sdp/opea_benchmark/pubmed_10.txt\"]#prompt=[10]#max_token_size=[128]#user_queries=[512]#concurrency=[128]"
        description: "The benchmark arguments, separated by '#'"
        required: false
        type: string
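      # A minimal bash sketch of splitting such a string on '#', assuming the
      # consumer script parses it this way (run_deploy_benchmark.sh's internals
      # are not shown here):
      #   IFS='#' read -ra kv_pairs <<< "${benchmark_args}"
      #   for kv in "${kv_pairs[@]}"; do echo "${kv%%=*} -> ${kv#*=}"; done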
jobs:
  run-deploy-and-benchmark:
    runs-on: "${{ inputs.runner }}"
    env:
      conda_env_name: "OPEA_perf"
    steps:
      - name: Show All Inputs
        run: |
          echo "Inputs:"
          echo '${{ toJson(inputs) }}'
      - name: Clean Up Working Directory
        run: |
          sudo rm -rf ${{ github.workspace }}/*
          export PATH=${HOME}/miniforge3/bin/:$PATH
          # Reuse the conda env if it already exists; otherwise create it
          if conda info --envs | grep -q "${conda_env_name}"; then
            echo "${conda_env_name} exists!"
          else
            conda create -n ${conda_env_name} python=3.12 -y
          fi
      - name: Checkout Validation
        uses: actions/checkout@v4
        with:
          path: Validation
      - name: Checkout GenAIExamples
        uses: actions/checkout@v4
        with:
          repository: opea-project/GenAIExamples
          ref: ${{ inputs.example_branch }}
          path: GenAIExamples
      - name: Checkout GenAIEval
        if: ${{ inputs.eval_branch != '' }}
        uses: actions/checkout@v4
        with:
          repository: opea-project/GenAIEval
          ref: ${{ inputs.eval_branch }}
          path: GenAIEval
      - name: Set up running config yaml
        working-directory: ./Validation
        env:
          HF_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
          MODEL_PATH: "opea_benchmark/model"
        run: |
          set -e
          export PATH=${HOME}/miniforge3/bin/:$PATH
          source activate ${conda_env_name}
          # Drop the opea-eval requirement so a source checkout of GenAIEval can be used instead
          sed -i '/^opea-eval/d' ../GenAIExamples/requirements.txt
          pip install -r ../GenAIExamples/requirements.txt
          pip list
          bash .github/scripts/run_deploy_benchmark.sh --generate_config ${{ inputs.example }} ${{ inputs.deploy_args }} ${{ inputs.benchmark_args }}
      - name: Run deploy and benchmark
        working-directory: ./Validation
        run: |
          set -e
          export PATH=${HOME}/miniforge3/bin/:$PATH
          source activate ${conda_env_name}
          # Set the GenAIEval source code path if the branch is not empty
          if [ -n "${{ inputs.eval_branch }}" ]; then
            export EVAL_PATH="${{ github.workspace }}/GenAIEval"
            export PYTHONPATH="${EVAL_PATH}"
            echo "EVAL_PATH=${EVAL_PATH}"
          fi
          bash .github/scripts/run_deploy_benchmark.sh --run ${{ inputs.example }} ${{ inputs.test_mode }} ${{ inputs.target_node }} ${{ inputs.clean_up }}
      - name: Generate report
        working-directory: ./Validation
        run: |
          set -e
          export PATH=${HOME}/miniforge3/bin/:$PATH
          source activate ${conda_env_name}
          bash .github/scripts/run_deploy_benchmark.sh --generate_report ${{ github.workspace }}/GenAIExamples/benchmark_output
      - name: Publish pipeline artifact
        if: ${{ !cancelled() }}
        uses: actions/upload-artifact@v4
        with:
          path: ${{ github.workspace }}/GenAIExamples/benchmark_output
          name: benchmark_output
          overwrite: true
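# A hypothetical manual trigger via the GitHub CLI (the workflow can be
# addressed by its name; adjust inputs as needed):
#   gh workflow run "Run deploy and benchmark on K8S" \
#     -f runner=aise-perf -f example=ChatQnA -f test_mode=oob -f target_node=1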