# Workflow run for PR "Add --repeat to examples" (#111)
name: Benchmarks

on:
  workflow_dispatch:
  # TODO: Remove pull_request trigger after testing
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches: [main]

permissions:
  id-token: write # Required for OIDC authentication to AWS
  contents: read

env:
  # Number of repetitions passed to run_benchmarks.py via --repeat.
  BENCHMARK_REPEAT_COUNT: 5
jobs:
  # Run the benchmark suite on both x86 and ARM bare-metal runners,
  # upload perfetto traces to S3, and publish a report to the job summary.
  benchmarks:
    name: benchmarks-${{ matrix.name }}
    runs-on: ${{ matrix.runner }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: 'c7i-16xlarge'
            name: 'x86-intel-64cpu-128gb'
          - runner: 'c8g-16xlarge'
            name: 'arm-graviton4-64cpu-128gb'
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref || github.ref }}
      - name: Set safe directory
        run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
      - name: Install deps
        run: |
          sudo yum -y install gcc openssl-devel curl jq z3-devel clang pkg-config python3 python3-pip
          pip3 install -r .github/scripts/requirements.txt
      - name: Setup Rust
        uses: actions-rust-lang/setup-rust-toolchain@v1
      - name: Cache Rust dependencies
        uses: Swatinem/rust-cache@v2
        with:
          key: benchmarks-${{ matrix.name }}
          cache-on-failure: true
      - name: Run platform diagnostics
        run: cargo test -p binius-utils --features platform-diagnostics test_platform_diagnostics -- --nocapture
        env:
          RUSTFLAGS: "-C target-cpu=native"
      - name: Run benchmarks
        # BENCHMARK_REPEAT_COUNT is workflow-level env, so it is already in the
        # shell environment; read it there rather than inlining an expression.
        run: |
          python3 .github/scripts/run_benchmarks.py --repeat "$BENCHMARK_REPEAT_COUNT"
        env:
          PERFETTO_PLATFORM_NAME: "${{ matrix.runner }}"
          RUSTFLAGS: "-C target-cpu=native"
      - name: List perfetto traces and summaries
        if: always()
        run: |
          echo "=== Perfetto traces directory structure ==="
          if [ -d perfetto_traces ]; then
            find perfetto_traces -type f | sort
          else
            echo "DEBUG: No perfetto_traces directory found"
          fi
          echo ""
          echo "=== Benchmark summaries ==="
          if [ -d benchmark_summaries ]; then
            find benchmark_summaries -type f | sort
          else
            echo "DEBUG: No benchmark_summaries directory found"
          fi
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: ${{ secrets.AWS_UPLOAD_ROLE }}
          aws-region: us-east-1
      - name: Upload perfetto traces to S3
        env:
          # NOTE(review): branch names are attacker-controlled on PRs; pass the
          # value through env instead of interpolating ${{ }} into the script
          # body, which would allow shell injection.
          RAW_BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
        run: |
          BRANCH_NAME="$RAW_BRANCH_NAME"
          echo "DEBUG: Branch name: $BRANCH_NAME"
          if [ "$BRANCH_NAME" = "main" ]; then
            BRANCH_PATH="main"
          else
            # Sanitize branch name
            # Lowercase, keep a-z 0-9 . _ -, replace others with dash, remove leading/trailing dashes
            SANITIZED_BRANCH=$(echo "$BRANCH_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9._-]/-/g' | sed 's/^-*//g' | sed 's/-*$//g' | sed 's/--*/-/g')
            BRANCH_PATH="branch-${SANITIZED_BRANCH}"
          fi
          echo "DEBUG: Branch path: $BRANCH_PATH"
          if [ -d perfetto_traces ]; then
            echo "DEBUG: Uploading perfetto traces to S3.."
            aws s3 cp "perfetto_traces/" "${{ secrets.PERFETTO_BUCKET }}/traces/binius64/${BRANCH_PATH}/" --recursive
            echo "DEBUG: Traces uploaded to: ${{ secrets.PERFETTO_BUCKET }}/traces/binius64/${BRANCH_PATH}/"
          else
            echo "DEBUG: No perfetto_traces directory to upload"
          fi
          # Expose BRANCH_PATH to the report-generation step below.
          echo "BRANCH_PATH=${BRANCH_PATH}" >> "$GITHUB_ENV"
      - name: Generate benchmark report for GitHub Summary
        if: always()
        run: |
          echo "DEBUG: Generating benchmark report..."
          echo "DEBUG: BRANCH_PATH=${BRANCH_PATH}"
          echo "DEBUG: Current directory contents:"
          ls -la
          echo "DEBUG: Running report generation script..."
          python3 .github/scripts/generate_benchmark_report.py \
            --branch-path "${BRANCH_PATH}" \
            >> "$GITHUB_STEP_SUMMARY"
          echo "DEBUG: Report generation completed, exit code: $?"
      - name: Upload benchmark summaries as artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-summaries-${{ matrix.name }}
          path: |
            benchmark_summaries/*.json
            perfetto_traces/**/metrics.json
          retention-days: 30
publish-benchmark-results:
name: Publish benchmark results to binius-benchmark repo
# TODO: Re-enable condition
# if: github.ref == 'refs/heads/main'
needs: [benchmarks]
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- name: Download all benchmark artifacts
uses: actions/download-artifact@v4
with:
path: ./artifacts
pattern: benchmark-summaries-*
merge-multiple: true
# GitHub App authentication
- name: Create GitHub App token
uses: actions/create-github-app-token@v2
id: app-token
with:
app-id: ${{ vars.BENCHMARK_PUBLISHER_APP_ID }}
private-key: ${{ secrets.BENCHMARK_PUBLISHER_PRIVATE_KEY }}
owner: IrreducibleOSS
repositories: binius-benchmark
- name: Get GitHub App User ID
id: get-user-id
run: echo "user-id=$(gh api "/users/${{ steps.app-token.outputs.app-slug }}[bot]" --jq .id)" >> "$GITHUB_OUTPUT"
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
- name: Configure git credentials globally
run: |
git config --global user.name '${{ steps.app-token.outputs.app-slug }}[bot]'
git config --global user.email '${{ steps.get-user-id.outputs.user-id }}+${{ steps.app-token.outputs.app-slug }}[bot]@users.noreply.github.com'
- name: Publish benchmark results with retry
uses: nick-fields/retry@v3
with:
timeout_minutes: 10
max_attempts: 5
retry_wait_seconds: 30
command: |
set -e # Exit on any error to trigger retry
# Clone and setup
rm -rf /tmp/binius-benchmark
git clone "https://x-access-token:${GH_TOKEN}@github.com/IrreducibleOSS/binius-benchmark.git" /tmp/binius-benchmark
cd /tmp/binius-benchmark
git pull origin main
# Process all benchmark-results files
echo "Processing benchmark results..."
python3 "${{ env.BENCHMARK_PROCESSOR_SCRIPT }}" "$GITHUB_WORKSPACE"/artifacts/benchmark_summaries/benchmark-results-*.json
# Commit if changes exist
git add .
if ! git diff --staged --quiet; then
git commit -m "Update benchmark data - binius64@${{ github.sha }}"
git push origin main
echo "🎉 Successfully pushed changes"
else
echo "ℹ️ No changes to commit"
fi
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
BENCHMARK_PROCESSOR_SCRIPT: scripts/update.py