
Create a workflow to run benchmarks #129

Workflow file for this run

name: Benchmarks
on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      halt-for-connection:
        description: 'Should this workflow run wait for a remote connection?'
        type: choice
        required: true
        default: 'no'
        options:
          - 'yes'
          - 'no'
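# A manual run can also be started from the CLI (a sketch, assuming the file is
# saved as .github/workflows/benchmarks.yml on the default branch):
#   gh workflow run benchmarks.yml -f halt-for-connection=no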
jobs:
  build-xla-gpu-and-test:
    runs-on: linux-x86-g2-48-l4-4gpu # Use a GPU-enabled runner
    container:
      image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
      options: --gpus all --privileged # Might need privileged mode, use with caution
    steps:
      - name: Checkout XLA
        uses: actions/checkout@v3
        with:
          repository: openxla/xla # Replace with your fork if needed
          path: xla
      # - name: Checkout repository
      #   uses: actions/checkout@v3
      #   with:
      #     repository: juliagmt-google/xla
      #     path: xla
      # - name: Wait For Connection
      #   uses: google-ml-infra/actions/ci_connection@main
      #   with:
      #     halt-dispatch-input: ${{ inputs.halt-for-connection }}
      - name: Print machine specs
        run: |
          lscpu
          free -h   # Memory information
          df -h     # Disk space information
          uname -a  # Kernel information
      - name: Create results directory
        working-directory: xla
        run: mkdir results
      - name: Set up Python 3.10 # Choose your desired Python version
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'
      - name: Create and activate virtual environment
        shell: bash # Force the use of bash
        run: |
          python -m venv xla/venv
          source xla/venv/bin/activate
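      # Note: each `run:` step starts its own shell, so activating the venv here
      # does not persist to later steps; the benchmark steps below re-activate it
      # explicitly.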
      - name: Run setup.sh for E2E benchmarks flax_2b (within venv)
        working-directory: xla/xla/backends/cpu/benchmarks/e2e/gemma2/flax_2b
        run: |
          # Activate the venv created above, as the step name implies.
          source "$GITHUB_WORKSPACE/xla/venv/bin/activate"
          bash setup.sh
      - name: Run run.sh for E2E benchmarks flax_2b (within venv)
        working-directory: xla/xla/backends/cpu/benchmarks/e2e/gemma2/flax_2b
        timeout-minutes: 30
        shell: bash
        run: |
          # GITHUB_WORKSPACE points at the checkout root, which is less error-prone
          # than counting "../" segments from this deeply nested directory.
          source "$GITHUB_WORKSPACE/xla/venv/bin/activate"
          output=$(bash run.sh)
          echo "$output"
          # Extract metrics using Python and regex
          python - << EOF

          import re
          import json
          import os
          text = """${output}"""
          ttft_pattern = r"TTFT: ([\d.]+) ms ± ([\d.]+)%"
          e2e_latency_pattern = r"E2E Latency: ([\d.]+) ms ± ([\d.]+)%"
          tpot_pattern = r"TPOT: ([\d.]+) ms"
          ttft_match = re.search(ttft_pattern, text)
          e2e_latency_match = re.search(e2e_latency_pattern, text)
          tpot_match = re.search(tpot_pattern, text)
          metrics = {
              "TTFT": {"value": ttft_match.group(1) if ttft_match else None, "std_dev": ttft_match.group(2) if ttft_match else None},
              "E2E Latency": {"value": e2e_latency_match.group(1) if e2e_latency_match else None, "std_dev": e2e_latency_match.group(2) if e2e_latency_match else None},
              "TPOT": {"value": tpot_match.group(1) if tpot_match else None},
          }
          with open("metrics.json", "w") as f:
              json.dump(metrics, f, indent=4)
          # ::set-output is deprecated; append to the GITHUB_OUTPUT file instead.
          with open(os.environ["GITHUB_OUTPUT"], "a") as gh_out:
              gh_out.write(f"metrics={json.dumps(metrics)}\n")
          EOF
          # Upload metrics as an artifact (optional)
          # Create the results directory if it doesn't exist
          mkdir -p "$GITHUB_WORKSPACE/xla/results"
          # Copy the metrics.json file to the results directory
          cp metrics.json "$GITHUB_WORKSPACE/xla/results/"
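      # (Sketch) To consume the extracted metrics from a later step, give the step
      # above an id (e.g. `id: run-flax-2b`, a hypothetical name) and reference
      # `${{ steps.run-flax-2b.outputs.metrics }}`.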
      - name: Wait For Connection
        uses: google-ml-infra/actions/ci_connection@main
        with:
          halt-dispatch-input: ${{ inputs.halt-for-connection }}
      # - name: Get GPU spec
      #   working-directory: xla
      #   continue-on-error: true
      #   run: nvidia-smi
      # - name: Configure XLA
      #   working-directory: xla
      #   run: ./configure.py --backend CUDA --nccl
      # - name: Set TF_CPP_MAX_VLOG_LEVEL
      #   working-directory: xla
      #   run: echo "TF_CPP_MAX_VLOG_LEVEL=1" >> $GITHUB_ENV # Use GITHUB_ENV to persist across steps
      # - name: Check TF_CPP_MAX_VLOG_LEVEL
      #   working-directory: xla
      #   run: echo "$TF_CPP_MAX_VLOG_LEVEL"
      # - name: Build hlo_runner_main
      #   working-directory: xla
      #   run: bazel build -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main
      # - name: Wait For Connection
      #   uses: google-ml-infra/actions/ci_connection@main
      #   with:
      #     halt-dispatch-input: ${{ inputs.halt-for-connection }}
      # - name: Create gpu_hlo_backend.hlo
      #   working-directory: xla
      #   run: |
      #     cat << EOF > gpu_hlo_backend.hlo
      #     HloModule module
      #     // CHECK: is_scheduled=true
      #     ENTRY computation {
      #       p = f32[5000,6000]{1,0} parameter(0)
      #       e = f32[5000,6000]{1,0} sqrt(p)
      #       c = f32[6000,5000] transpose(p), dimensions={1,0}
      #       r = f32[300,20,5000] reshape(c)
      #       ROOT out = (f32[5000,6000], f32[300,20,5000]) tuple(e,r)
      #     }
      #     EOF
      # - name: Wait For Connection
      #   uses: google-ml-infra/actions/ci_connection@main
      #   with:
      #     halt-dispatch-input: ${{ inputs.halt-for-connection }}
      # - name: Run an HLO file
      #   working-directory: xla
      #   run: |
      #     ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main --device_type=gpu --log_output=True --use_spmd_partitioning gpu_hlo_backend.hlo &> results/gpu_hlo_backend.log
      # - name: Wait For Connection
      #   uses: google-ml-infra/actions/ci_connection@main
      #   with:
      #     halt-dispatch-input: ${{ inputs.halt-for-connection }}
      # - name: Download parse_xla_logs.py
      #   working-directory: xla
      #   run: wget https://raw.githubusercontent.com/juliagmt-google/xla/main/.github/workflows/parse_xla_logs.py
      # - name: Parse XLA logs
      #   working-directory: xla
      #   run: python parse_xla_logs.py results/gpu_hlo_backend.log
      - name: Upload Results
        uses: actions/upload-artifact@v4
        with:
          name: gpu-xla-benchmarks
          path: xla/results
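  # (Sketch) The uploaded artifact can be fetched locally with the GitHub CLI,
  # e.g. `gh run download <run-id> -n gpu-xla-benchmarks`.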
  # jax-build-and-test:
  #   runs-on: linux-x86-g2-48-l4-4gpu # Use a GPU-enabled runner
  #   container:
  #     image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
  #   env:
  #     JAXCI_HERMETIC_PYTHON_VERSION: 3.11
  #   steps:
  #     - name: Checkout JAX Fork
  #       uses: actions/checkout@v3
  #       with:
  #         repository: 'google-ml-infra/jax-fork'
  #         path: jax-fork
  #     - name: Install JAX Dependencies
  #       working-directory: jax-fork
  #       run: |
  #         python -m pip install --upgrade pip
  #         pip install pytest
  #         pip install absl-py
  #         pip install "jax[cuda12_pip]" # Adjust CUDA version if needed
  #         pip install google-benchmark
  #     - name: Run JAX Multiprocess GPU Test
  #       working-directory: jax-fork
  #       continue-on-error: true
  #       run: python -m pytest tests/multiprocess_gpu_test.py
  #     - name: Run HLO Module Benchmarks with GPU in xla/tests/fuzz
  #       working-directory: xla
  #       continue-on-error: true
  #       run: |
  #         for file in xla/tests/fuzz/*.hlo; do
  #           filename=$(basename "$file")
  #           # Skip expected failed hlo files.
  #           if [[ "$filename" == "rand_000060.hlo" || "$filename" == "rand_000067.hlo" || "$filename" == "rand_000072.hlo" ]]; then
  #             echo "Skipping benchmark on $file"
  #             continue
  #           fi
  #           echo "Running benchmark on $file" &> results/"$filename".log
  #           ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main --device_type=gpu --use_spmd_partitioning "$file" &> results/"$filename".log
  #         done
  #     - name: Upload Results
  #       uses: actions/upload-artifact@v4
  #       with:
  #         name: gpu-xla-benchmarks
  #         path: xla/results
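
For local inspection, the metrics.json copied into xla/results can be read back with a few lines of Python (a minimal sketch; the layout matches what the run.sh step above writes):

    import json

    # Load the metrics file produced by the benchmark step and uploaded in the
    # gpu-xla-benchmarks artifact.
    with open("metrics.json") as f:
        metrics = json.load(f)

    # Each value may be None if the corresponding pattern was not found in the
    # benchmark output.
    for name, values in metrics.items():
        print(name, values)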