Skip to content

ci: add automated performance benchmark workflow for pull requests #3

ci: add automated performance benchmark workflow for pull requests

ci: add automated performance benchmark workflow for pull requests #3

Workflow file for this run

---
# Benchmark comparison for pull requests.
#
# Job 1 (benchmark-pr) runs a matrix of pytest-benchmark shards twice —
# once on the PR head SHA and once on the base SHA — and uploads one
# artifact per shard. Job 2 (report) downloads every shard, merges the
# JSON results, renders a markdown comparison table, and uploads it
# (plus the PR number) for a downstream workflow to post as a comment.
name: Performance Comparison for Pull Requests

on:
  pull_request:
    branches: [master]

# Least privilege: this workflow only checks out code and uploads
# artifacts, so read access to repository contents is sufficient.
permissions:
  contents: read

jobs:
  benchmark-pr:
    name: Run Benchmark
    runs-on: ubuntu-latest
    strategy:
      # Keep running the remaining shards even if one fails, so the
      # report job can still merge whatever completed successfully.
      fail-fast: false
      matrix:
        # Each entry is one benchmark shard: `target` selects the pytest
        # file, `extra_args` optionally narrows it with a -k expression
        # (the large model benchmarks get their own shard).
        include:
          - name: benchmark_adapter
            target: benchmark_adapter.py
            extra_args: ""
          - name: benchmark_management_api
            target: benchmark_management_api.py
            extra_args: ""
          - name: benchmark_model_large
            target: benchmark_model.py
            extra_args: "-k large"
          - name: benchmark_model_others
            target: benchmark_model.py
            extra_args: "-k 'not large'"
          - name: benchmark_role_manager
            target: benchmark_role_manager.py
            extra_args: ""
    steps:
      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          # Pin to the head SHA (not the merge ref) so the benchmark
          # measures exactly the commits in the PR.
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt -r requirements_dev.txt

      # Run benchmark on PR branch; -o python_files re-points pytest
      # collection at the single matrix target file.
      - name: Run benchmark on PR branch
        run: |
          pytest -o "python_files=${{ matrix.target }}" ${{ matrix.extra_args }} tests/benchmarks --benchmark-json=pr-${{ matrix.name }}.json

      # Checkout base branch into a subdirectory; clean: false keeps the
      # pr-*.json result produced above in the workspace root.
      - name: Checkout base branch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.base.sha }}
          clean: false
          path: base

      - name: Run benchmark on base branch
        working-directory: base
        run: |
          pip install -r requirements.txt -r requirements_dev.txt
          pytest -o "python_files=${{ matrix.target }}" ${{ matrix.extra_args }} tests/benchmarks --benchmark-json=../base-${{ matrix.name }}.json

      # Both JSON files end up in the workspace root (the base run wrote
      # to ../base-*.json); upload them as one shard artifact.
      - name: Upload benchmark shard
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-shard-${{ matrix.name }}
          path: |
            pr-${{ matrix.name }}.json
            base-${{ matrix.name }}.json

  report:
    name: Generate Report
    needs: benchmark-pr
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      # merge-multiple flattens every benchmark-shard-* artifact into a
      # single benchmark_data/ directory.
      - name: Download all benchmark shards
        uses: actions/download-artifact@v4
        with:
          pattern: benchmark-shard-*
          merge-multiple: true
          path: benchmark_data

      - name: Merge Benchmark Results
        run: |
          python .github/scripts/merge_benchmarks.py base.json benchmark_data/base-*.json
          python .github/scripts/merge_benchmarks.py pr.json benchmark_data/pr-*.json

      # Save short commit SHAs for display in the report header.
      - name: Save commit info
        id: commits
        run: |
          BASE_SHA="${{ github.event.pull_request.base.sha }}"
          HEAD_SHA="${{ github.event.pull_request.head.sha }}"
          echo "base_short=${BASE_SHA:0:7}" >> $GITHUB_OUTPUT
          echo "head_short=${HEAD_SHA:0:7}" >> $GITHUB_OUTPUT

      # Compare benchmarks using script; benchstat failure is tolerated
      # (|| true) so a partial report is still produced.
      - name: Compare benchmarks
        id: benchstat
        run: |
          cat > comparison.md << 'EOF'
          ## Benchmark Comparison
          Comparing base branch (`${{ steps.commits.outputs.base_short }}`)
          vs PR branch (`${{ steps.commits.outputs.head_short }}`)
          ```
          EOF
          python3 .github/scripts/pytest_benchstat.py base.json pr.json >> comparison.md || true
          echo '```' >> comparison.md
          # Post-process to append percentage + emoji column (🚀 faster < -10%, 🐌 slower > +10%, otherwise ➡️)
          if [ ! -f comparison.md ]; then
            echo "comparison.md not found after benchstat." >&2
            exit 1
          fi
          python3 .github/scripts/benchmark_formatter.py

      # Save PR number so a privileged follow-up workflow (workflow_run)
      # knows where to post the comment.
      - name: Save PR number
        run: |
          PR_NUMBER="${{ github.event.pull_request.number }}"
          if [ -z "$PR_NUMBER" ]; then
            echo "Error: Pull request number is not available in event payload." >&2
            exit 1
          fi
          echo "$PR_NUMBER" > pr_number.txt

      # Upload benchmark results
      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: |
            comparison.md
            pr_number.txt