
Create a workflow to run benchmarks #41

Workflow file for this run

name: Benchmarks
on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      halt-for-connection:
        description: 'Should this workflow run wait for a remote connection?'
        type: choice
        required: true
        default: 'no'
        options:
          - 'yes'
          - 'no'
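# Manual-dispatch sketch (assumptions: the GitHub CLI is installed, and
# 'Benchmarks' is resolved from the workflow name above):
#   gh workflow run Benchmarks -f halt-for-connection=yes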
jobs:
  # jax-build:
  #   strategy:
  #     matrix:
  #       runner: ["linux-x86-g2-48-l4-4gpu"]
  #   runs-on: ${{ matrix.runner }}
  #   container:
  #     image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
  #   env:
  #     JAXCI_HERMETIC_PYTHON_VERSION: 3.11
  #   steps:
  #     - name: Checkout JAX Fork
  #       uses: actions/checkout@v3
  #       with:
  #         repository: 'google-ml-infra/jax-fork'
  #         path: jax-fork
  #     - name: Install JAX Dependencies
  #       working-directory: jax-fork
  #       run: |
  #         python -m pip install --upgrade pip
  #         pip install pytest
  #         pip install absl-py
  #         pip install "jax[cuda12_pip]"  # Adjust CUDA version if needed
  #         pip install google-benchmark
  #     - name: Run JAX Multiprocess GPU Test
  #       working-directory: jax-fork
  #       continue-on-error: true
  #       run: python -m pytest tests/multiprocess_gpu_test.py
  #     - name: Build XLA GPU Atomic Test
  #       working-directory: xla
  #       continue-on-error: true
  #       run: bazel build -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test
  #     - name: Run XLA GPU Atomic Test
  #       working-directory: xla
  #       continue-on-error: true
  #       run: bazel test -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test
  build-xla-gpu:
    runs-on: linux-x86-g2-48-l4-4gpu  # Use a GPU-enabled runner
    container:
      image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
      options: --gpus all --privileged  # Might need privileged mode, use with caution
    steps:
      - name: Checkout XLA
        uses: actions/checkout@v3
        with:
          repository: openxla/xla  # Replace with your fork if needed
          path: xla
      # - name: Build XLA with GPU support
      #   working-directory: xla
      #   continue-on-error: true
      #   run: bazel build --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --nobuild -- //xla/... //build_tools/... @tsl//tsl/...
      # - name: Run XLA tests with GPU
      #   working-directory: xla
      #   continue-on-error: true
      #   run: bazel test --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async -- //xla/... //build_tools/... @tsl//tsl/...
      # - name: Run Profile Analysis
      #   working-directory: xla
      #   continue-on-error: true
      #   run: bazel analyze-profile profile.json.gz
      # - name: Get GPU spec
      #   working-directory: xla
      #   continue-on-error: true
      #   run: nvidia-smi
      - name: Wait For Connection
        uses: google-ml-infra/actions/ci_connection@main
        with:
          halt-dispatch-input: ${{ inputs.halt-for-connection }}
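      # Per the input description above, this step is assumed to pause the run
      # and wait for a remote connection when the workflow was dispatched with
      # halt-for-connection set to 'yes'; on pull_request triggers no input is
      # set and the run proceeds.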
      - name: Configure XLA
        working-directory: xla
        run: ./configure.py --backend CUDA --nccl
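      # Assumption: configure.py emits the CUDA/NCCL Bazel settings (a
      # generated bazelrc fragment) that the bazel invocations below pick up.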
      - name: Set TF_CPP_MAX_VLOG_LEVEL
        working-directory: xla
        run: echo "TF_CPP_MAX_VLOG_LEVEL=1" >> $GITHUB_ENV
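      # Appending to $GITHUB_ENV exports the variable to every subsequent step
      # in this job; TF_CPP_MAX_VLOG_LEVEL=1 enables VLOG(1) logging in XLA.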
      - name: Build hlo_runner_main
        working-directory: xla
        run: bazel build -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main
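      # The built binary lands under bazel-bin/, so a single HLO module can be
      # replayed by hand the same way the loop below does, e.g.:
      #   bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main path/to/module.hlo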
      - name: Run XLA GPU microbenchmarks with hlo_runner_main
        working-directory: xla
        continue-on-error: true
        run: |
          for file in xla/tools/multihost_hlo_runner/data/*.hlo; do
            filename=$(basename "$file")
            echo "Running benchmark on $filename"
            bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main "$file"
          done
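      # Optional sketch, kept commented out like the steps above: the same loop
      # with per-benchmark wall time written to the job log. The step name and
      # log format are assumptions, not part of the original workflow.
      # - name: Run microbenchmarks with timing
      #   working-directory: xla
      #   continue-on-error: true
      #   run: |
      #     for file in xla/tools/multihost_hlo_runner/data/*.hlo; do
      #       start=$(date +%s)
      #       bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main "$file"
      #       end=$(date +%s)
      #       echo "$(basename "$file"): $((end - start))s"
      #     done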