# Workflow source (last touched in "update changesets (#1512)").
name: "benchmark_cargo_cmp"

on:
  # Run using manual triggers from GitHub UI:
  # https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow
  workflow_dispatch:
    inputs:
      dryRun:
        description: "Attempt a local run and report results here, without updating the bencher dashboard."
        type: "boolean"
        required: true
        default: true
      bencherProject:
        description: "Bencher project:"
        type: "string"

  # Run on pushes to 'main' branch:
  push:
    branches:
      - "main"

# Queue up benchmark workflows for the same branch, so that results are reported in order:
concurrency:
  group: "${{ github.workflow }}-${{ github.ref_name }}"
  cancel-in-progress: false
jobs:
  benchmark:
    runs-on: "ubuntu-22.04" # _SLANG_DEV_CONTAINER_BASE_IMAGE_ (keep in sync)

    # Only run on the main repo (not forks), unless it is a dry run:
    if: "${{ github.repository == 'NomicFoundation/slang' || inputs.dryRun == true }}"

    steps:
      - name: "Checkout Repository"
        uses: "actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683"

      - name: "Restore Cache"
        uses: "./.github/actions/cache/restore"

      - name: "infra setup cargo pipenv"
        run: "./scripts/bin/infra setup cargo pipenv"

      # Export the chosen Bencher project only when one was provided via workflow_dispatch:
      - name: "Set bencher project"
        if: "${{ inputs.bencherProject }}"
        run: |
          echo "SLANG_BENCHER_PROJECT=${{ inputs.bencherProject }}" >> $GITHUB_ENV

      # '--dry-run' suppresses publishing results to the bencher dashboard:
      - name: "infra perf cargo comparison"
        run: "./scripts/bin/infra perf cargo ${{ inputs.dryRun == true && '--dry-run' || '' }} comparison"
        env:
          BENCHER_API_TOKEN: "${{ secrets.BENCHER_API_TOKEN }}"

      - name: "Upload Benchmarking Data"
        uses: "actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02"
        with:
          name: "benchmarking-data"
          path: "target/iai"
          if-no-files-found: "error"