# Workflow file for run: "Create a workflow to run benchmarks" (#6)
---
# Benchmarks workflow: checks out jax-fork and XLA, installs JAX GPU deps,
# then runs a JAX multiprocess GPU test and builds/runs an XLA GPU atomic test.
name: Benchmarks

on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      halt-for-connection:
        description: 'Should this workflow run wait for a remote connection?'
        type: choice
        required: true
        default: 'no'
        options:
          - 'yes'
          - 'no'

jobs:
  build:
    strategy:
      matrix:
        # Single GPU runner today; matrix kept so more runners can be added.
        runner: ["linux-x86-g2-48-l4-4gpu"]
    runs-on: ${{ matrix.runner }}
    container:
      image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
    env:
      # Quoted: unquoted 3.11 would be parsed as a float by generic YAML tools.
      JAXCI_HERMETIC_PYTHON_VERSION: "3.11"
    steps:
      - name: Checkout JAX Fork
        uses: actions/checkout@v3
        with:
          repository: 'google-ml-infra/jax-fork'
          path: jax-fork
      - name: Checkout XLA
        uses: actions/checkout@v3
        with:
          repository: 'openxla/xla'  # Or your XLA fork
          path: xla
      - name: Install JAX Dependencies
        working-directory: jax-fork
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install absl-py
          pip install "jax[cuda12_pip]"  # Adjust CUDA version if needed
          pip install google-benchmark
      # continue-on-error on the test steps: benchmark runs should not fail
      # the whole workflow on a flaky/failing test.
      - name: Run JAX Multiprocess GPU Test
        working-directory: jax-fork
        continue-on-error: true
        run: python -m pytest tests/multiprocess_gpu_test.py
      - name: Build XLA GPU Atomic Test
        working-directory: xla
        continue-on-error: true
        run: bazel build -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test
      - name: Run XLA GPU Atomic Test
        working-directory: xla
        continue-on-error: true
        run: bazel test -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test