
fix: debug benchmarks #21

Workflow file for this run

name: Benchmarks
on:
  pull_request:
    paths-ignore:
      - README.md
      - CONTRIBUTING.md
      - CITATION.cff
      - LICENSE
      - .readthedocs.yml
      - docs-img/**
      - docs/**
      - awkward-cpp/docs/**
      - studies/**
  workflow_dispatch:
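# One benchmark run per branch at a time: a new push to the same PR cancels
# the in-flight run instead of queueing behind it.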
concurrency:
  group: "benchmark-${{ github.head_ref || github.run_id }}"
  cancel-in-progress: true
jobs:
  run-benchmarks:
    name: Run Benchmarks
    runs-on: self-hosted
    env:
      PIP_ONLY_BINARY: numpy
    # Required for miniconda to activate conda
    defaults:
      run:
        shell: bash -l {0}
    steps:
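      # Self-hosted runners keep state between runs, so scrub the previous
      # checkout and any leftover micromamba installs before starting fresh.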
      - name: Clean the workspace and mamba
        run: |
          rm -rf * .[!.]* || echo "Nothing to clean"
          rm -rf ~/micromamba* || echo "Nothing to clean"
      - uses: actions/checkout@v4
        with:
          submodules: true
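      # Micromamba provides an isolated environment on the bare runner;
      # Python 3.13 is pinned here, which is also why the cramjam/PyO3
      # workaround further down is needed.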
      - name: Get micromamba
        uses: mamba-org/setup-micromamba@v2
        with:
          environment-name: test-env
          init-shell: bash
          create-args: >-
            python=3.13
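      # The nox `prepare` session generates the headers, kernel signatures,
      # and test files that the awkward-cpp build below consumes.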
      - name: Generate build files
        run: |
          pip install pipx
          pipx run nox -s prepare -- --headers --signatures --tests
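      # Compiling the C++ wheel is the slow part, so cache it keyed on the
      # full awkward-cpp/ tree: any source change invalidates the cache and
      # triggers a rebuild.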
      - name: Cache awkward-cpp wheel
        id: cache-awkward-cpp-wheel
        uses: actions/cache@v4
        with:
          path: awkward-cpp/dist
          key: ${{ github.job }}-${{ hashFiles('awkward-cpp/**') }}
      - name: Build awkward-cpp wheel
        if: steps.cache-awkward-cpp-wheel.outputs.cache-hit != 'true'
        run: |
          python -m pip install build
          python -m build -w awkward-cpp
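      # Resolve the wheel's exact filename (it encodes version and platform
      # tags) so the install step below can reference it directly.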
      - name: Find built wheel
        uses: tj-actions/glob@v22
        id: find-wheel
        with:
          files: |
            awkward-cpp/dist/*.whl
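      # PYO3_USE_ABI3_FORWARD_COMPATIBILITY lets PyO3-based packages (here,
      # cramjam) build against a Python newer than their PyO3 release
      # officially supports.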
      - name: Add workaround for 3.13 + cramjam
        run: echo 'PYO3_USE_ABI3_FORWARD_COMPATIBILITY=1' >> $GITHUB_ENV
        shell: bash
      - name: Install awkward, awkward-cpp, and dependencies
        run: python -m pip install -v . ${{ steps.find-wheel.outputs.paths }} pytest-github-actions-annotate-failures
      - name: Setup Benchmark Env
        run: python -m pip install -r benchmarks/requirements-benchmark.txt
      - name: Print versions
        run: python -m pip list
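      # run_action.sh is expected to benchmark BRANCH_NAME against
      # TARGET_BRANCH and write $BASE_OUTPUT_DIR/comparison.md; the heredoc
      # below forwards that (multiline) markdown as a step output.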
      - name: Run Benchmark and Comparisons
        id: benchmark_and_compare
        run: |
          cd benchmarks/
          ./run_action.sh
          COMPARISON=$(cat "$BASE_OUTPUT_DIR"/comparison.md)
          {
            echo "comparison<<EOF"
            echo "${COMPARISON}"
            echo "EOF"
          } >> $GITHUB_OUTPUT
          cd ..
        env:
          TARGET_BRANCH: ${{ github.event.pull_request.base.ref }} # usually: main
          BRANCH_NAME: ${{ github.head_ref || github.ref_name }} # feature branch
          BASE_OUTPUT_DIR: results
        continue-on-error: true # failed benchmarking shouldn't stop the rest of the steps
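      # Post the comparison as a PR comment. The markdown travels via an env
      # var rather than template interpolation into the script, which avoids
      # script-injection issues with untrusted content.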
      - name: Comment on PR
        uses: actions/github-script@v7
        if: ${{ hashFiles('benchmarks/results/comparison.md') != '' }} # if there's no comparison.md, we won't post anything
        with:
          script: |
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: process.env.COMPARISON
            })
        env:
          COMPARISON: ${{ steps.benchmark_and_compare.outputs.comparison }}
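      # Remove the results directory so a stale comparison.md can't be picked
      # up by the next run on this self-hosted machine.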
      - name: Cleanup Benchmark Outputs
        run: rm -r benchmarks/$BASE_OUTPUT_DIR
        env:
          BASE_OUTPUT_DIR: results