# Workflow file for run #99 — "[ci] Update config to binius64 (#926)"

name: Benchmarks

on:
  workflow_dispatch:
  # TODO: Enable pull_request trigger for testing
  # pull_request:
  #   types: [opened, synchronize, reopened]
  push:
    branches: [main]

permissions:
  id-token: write  # Required for OIDC authentication to AWS
  contents: read

jobs:
  # Produces the machine matrix, a shared timestamp, and the per-benchmark
  # run count consumed by the `benchmarks` job below.
  setup:
    name: Setup benchmark configuration
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      timestamp: ${{ steps.set-timestamp.outputs.timestamp }}
      runs: ${{ steps.set-runs.outputs.runs }}
    steps:
      - name: Generate timestamp
        id: set-timestamp
        run: |
          TIMESTAMP=$(date -u +%Y%m%d-%H%M%S)
          echo "timestamp=$TIMESTAMP" >> $GITHUB_OUTPUT
          echo "Generated timestamp: $TIMESTAMP"

      - name: Set number of benchmark runs
        id: set-runs
        # PRs get a single run for fast feedback; main/manual runs get 5 for
        # statistically meaningful averages.
        run: |
          if [ "${{ github.event_name }}" = "pull_request" ]; then
            echo "runs=1" >> $GITHUB_OUTPUT
            echo "PR detected: Running benchmarks 1 time"
          else
            echo "runs=5" >> $GITHUB_OUTPUT
            echo "Main/manual run: Running benchmarks 5 times"
          fi

      - name: Generate matrix configuration
        id: set-matrix
        uses: actions/github-script@v7
        with:
          script: |
            const prMachines = [
              {runner: 'c7i-16xlarge', name: 'x86-intel-64cpu-128gb'},
              {runner: 'c8g-16xlarge', name: 'arm-graviton4-64cpu-128gb'}
            ];
            const additionalMachines = [
              // Additional high-memory instances disabled for cost optimization
              // Uncomment these lines to enable more instance types in the future
              // {runner: 'm7i-16xlarge', name: 'x86-intel-64cpu-256gb'},
              // {runner: 'r7i-16xlarge', name: 'x86-intel-64cpu-512gb'},
              // {runner: 'm8g-16xlarge', name: 'arm-graviton4-64cpu-256gb'},
              // {runner: 'r8g-16xlarge', name: 'arm-graviton4-64cpu-512gb'}
            ];
            const isPR = context.eventName === 'pull_request';
            const selected = isPR ? prMachines : [...prMachines, ...additionalMachines];
            console.log(`Event type: ${context.eventName}`);
            console.log(`Is PR: ${isPR}`);
            console.log(`Selected ${selected.length} machines:`);
            selected.forEach(m => console.log(`  - ${m.name} (${m.runner})`));
            core.setOutput('matrix', JSON.stringify({instance: selected}));

      - name: Display configuration summary
        run: |
          echo "=== Benchmark Configuration ==="
          echo "Timestamp: ${{ steps.set-timestamp.outputs.timestamp }}"
          echo "Event: ${{ github.event_name }}"
          echo "Branch: ${{ github.head_ref || github.ref_name }}"
          echo "Commit: ${{ github.sha }}"
          echo "Runs per benchmark: ${{ steps.set-runs.outputs.runs }}"
          echo ""
          echo "Matrix configuration:"
          echo '${{ steps.set-matrix.outputs.matrix }}' | jq .

  # Runs the benchmark suite once per matrix instance, uploads Perfetto
  # traces to S3, and publishes trace links + metrics to the step summary.
  benchmarks:
    name: benchmarks-${{ matrix.instance.name }}
    needs: setup
    runs-on: ${{ matrix.instance.runner }}
    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.setup.outputs.matrix) }}
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref || github.ref }}

      - name: Set safe directory
        run: git config --global --add safe.directory "$GITHUB_WORKSPACE"

      - name: Install deps
        # yum: self-hosted AWS runners are Amazon Linux based.
        run: sudo yum -y install gcc openssl-devel curl jq z3-devel clang pkg-config python3

      - name: Setup Rust
        uses: actions-rust-lang/setup-rust-toolchain@v1

      - name: Cache Rust dependencies
        uses: Swatinem/rust-cache@v2
        with:
          key: benchmarks-${{ matrix.instance.name }}
          cache-on-failure: true

      - name: Run platform diagnostics
        run: cargo test -p binius-utils --features platform-diagnostics test_platform_diagnostics -- --nocapture
        env:
          RUSTFLAGS: "-C target-cpu=native"

      - name: Run benchmarks
        run: |
          RUN_ID="${{ needs.setup.outputs.timestamp }}-$(git rev-parse --short HEAD)"
          python3 .github/scripts/run_benchmarks.py \
            --runs ${{ needs.setup.outputs.runs }} \
            --run-id "$RUN_ID" \
            --generate-stats
        env:
          PERFETTO_PLATFORM_NAME: "${{ matrix.instance.runner }}"

      - name: List perfetto traces
        if: always()
        run: |
          echo "=== Trace directory structure ==="
          find perfetto_traces -type d | sort
          echo ""
          echo "=== Summary ==="
          echo "Total files: $(find perfetto_traces -name "*.perfetto-trace" | wc -l)"
          echo "Total size: $(du -sh perfetto_traces | cut -f1)"

      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: ${{ secrets.AWS_UPLOAD_ROLE }}
          aws-region: us-east-1

      - name: Upload perfetto traces to S3
        # NOTE(review): PERFETTO_BUCKET is expected to include the s3:// prefix
        # (e.g. s3://my-bucket) — confirm against the repository secret.
        run: |
          BRANCH_NAME="${{ github.head_ref || github.ref_name }}"
          if [ "$BRANCH_NAME" = "main" ]; then
            BRANCH_PATH="main"
          else
            BRANCH_PATH="branch-${BRANCH_NAME//\//-}"
          fi
          aws s3 cp "perfetto_traces/" "${{ secrets.PERFETTO_BUCKET }}/traces/binius64/${BRANCH_PATH}/" --recursive
          echo "BRANCH_PATH=${BRANCH_PATH}" >> $GITHUB_ENV

      - name: Generate Perfetto UI URLs
        # RUN_ID must be computed in the shell: `env:` values are NOT subject
        # to command substitution, so `$(git rev-parse ...)` in an env value
        # would be passed through as a literal string.
        run: |
          RUN_ID="${{ needs.setup.outputs.timestamp }}-$(git rev-parse --short HEAD)"
          export RUN_ID
          python3 - <<'EOF' >> $GITHUB_STEP_SUMMARY
          import os
          import urllib.parse
          from pathlib import Path

          perfetto_host = "https://perfetto.irreducible.com"
          branch_path = os.environ["BRANCH_PATH"]
          run_id = os.environ.get("RUN_ID", "")
          runs = int("${{ needs.setup.outputs.runs }}")

          print(f"## 📊 Perfetto Traces for ${{ matrix.instance.name }}")
          print()
          traces_dir = Path("perfetto_traces")
          # Layout assumed: perfetto_traces/<benchmark>/<variant>/<mode>-run<N>-*.perfetto-trace
          for bench_dir in sorted(traces_dir.glob("*/*")):
              if not bench_dir.is_dir():
                  continue
              benchmark = bench_dir.parent.name
              print(f"### {benchmark}")
              for mode in ["multi-fusion", "multi", "single-fusion", "single"]:
                  traces = sorted(bench_dir.glob(f"{mode}-run*.perfetto-trace"))
                  if traces:
                      links = []
                      for run in range(1, runs + 1):
                          trace = next((t for t in traces if f"run{run}-" in t.name), None)
                          if trace:
                              s3_key = f"traces/binius64/{branch_path}/{benchmark}/{bench_dir.name}/{trace.name}"
                              trace_url = f"{perfetto_host}/{s3_key}"
                              encoded = urllib.parse.quote_plus(trace_url)
                              ui_url = f"{perfetto_host}/#!/?url={encoded}"
                              links.append(f"[#{run}]({ui_url})")
                      if links:
                          print(f"- **{mode}**: {' '.join(links)}")
              print()
          EOF

      - name: Extract benchmark metrics
        # Parses Proving/Verifying timings out of the benchmark logs (after
        # stripping ANSI color codes) and emits avg/stdev per benchmark/mode.
        run: |
          python3 - <<'EOF' | tee -a $GITHUB_STEP_SUMMARY
          import json
          import re
          import subprocess
          import statistics
          from pathlib import Path

          def parse_time(s):
              # "1.5s" -> 1500.0 ms, "250ms" -> 250.0; None if unparseable.
              m = re.match(r'([\d.]+)\s*(s|ms)', s)
              if m:
                  val = float(m.group(1))
                  return val * 1000 if m.group(2) == 's' else val
              return None

          metrics = {
              "timestamp": "${{ needs.setup.outputs.timestamp }}",
              "commit": subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode().strip(),
              "branch": "${{ github.head_ref || github.ref_name }}",
              "machine": "${{ matrix.instance.name }}",
              "benchmarks": {}
          }

          proving_pattern = r'Proving \[ ([\d.]+(?:s|ms))'
          verifying_pattern = r'Verifying \[ ([\d.]+(?:s|ms))'

          # Log files are named <benchmark>_<mode>.log.
          for log_file in Path("benchmark_logs").glob("*.log"):
              parts = log_file.stem.split("_", 1)
              if len(parts) == 2:
                  bench, mode = parts
                  if bench not in metrics["benchmarks"]:
                      metrics["benchmarks"][bench] = {}
                  with open(log_file) as f:
                      content = re.sub(r'\x1b\[[0-9;]*m', '', f.read())
                  # Filter on `is not None` (not truthiness) so a legitimate
                  # 0.0 ms sample is not silently dropped.
                  proving = [parse_time(m) for m in re.findall(proving_pattern, content)]
                  proving = [t for t in proving if t is not None]
                  verifying = [parse_time(m) for m in re.findall(verifying_pattern, content)]
                  verifying = [t for t in verifying if t is not None]
                  metrics["benchmarks"][bench][mode] = {
                      "proving_avg_ms": round(statistics.mean(proving), 2) if proving else None,
                      "proving_std_ms": round(statistics.stdev(proving), 2) if len(proving) > 1 else None,
                      "verifying_avg_ms": round(statistics.mean(verifying), 2) if verifying else None,
                      "verifying_std_ms": round(statistics.stdev(verifying), 2) if len(verifying) > 1 else None
                  }

          with open("benchmark_metrics.json", "w") as f:
              json.dump(metrics, f, indent=2)

          print("## 📊 Benchmark Performance Metrics")
          print("```json")
          print(json.dumps(metrics, indent=2))
          print("```")
          EOF

      - name: Add circuit stats to GitHub summary
        if: always()
        run: |
          if [ -f circuit_stats.md ]; then
            echo "## 📈 Circuit Statistics for ${{ matrix.instance.name }}" >> $GITHUB_STEP_SUMMARY
            cat circuit_stats.md >> $GITHUB_STEP_SUMMARY
          fi

      - name: Upload artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: perfetto-traces-${{ matrix.instance.name }}
          path: |
            perfetto_traces/**/*.perfetto-trace
            circuit_stats.md
            benchmark_results.json
            benchmark_metrics.json
          retention-days: 30