Create a workflow to run benchmarks #67

Workflow file for this run

name: Benchmarks
on:
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      halt-for-connection:
        description: 'Should this workflow run wait for a remote connection?'
        type: choice
        required: true
        default: 'no'
        options:
          - 'yes'
          - 'no'
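# To trigger the workflow manually, something like the following should work
# (a sketch, assuming the GitHub CLI is installed and this file is on the
# default branch):
#   gh workflow run Benchmarks -f halt-for-connection=yes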
jobs:
  jax-build-and-test:
    runs-on: linux-x86-g2-48-l4-4gpu # Use a GPU-enabled runner
    container:
      image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
    env:
      JAXCI_HERMETIC_PYTHON_VERSION: 3.11
    steps:
      - name: Checkout JAX Fork
        uses: actions/checkout@v3
        with:
          repository: 'google-ml-infra/jax-fork'
          path: jax-fork
      - name: Install JAX Dependencies
        working-directory: jax-fork
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install absl-py
          pip install "jax[cuda12_pip]" # Adjust CUDA version if needed
          pip install google-benchmark
      - name: Run JAX Multiprocess GPU Test
        working-directory: jax-fork
        continue-on-error: true
        run: python -m pytest tests/multiprocess_gpu_test.py
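      # A minimal sanity-check step (a sketch, not part of the original
      # workflow) to confirm JAX can enumerate the runner's GPUs before the
      # test runs:
      # - name: Verify JAX sees the GPUs
      #   working-directory: jax-fork
      #   run: python -c "import jax; print(jax.devices())"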
  build-xla-gpu-and-test:
    runs-on: linux-x86-g2-48-l4-4gpu # Use a GPU-enabled runner
    container:
      image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
      options: --gpus all --privileged # Might need privileged mode; use with caution
    steps:
      - name: Checkout XLA
        uses: actions/checkout@v3
        with:
          repository: openxla/xla # Replace with your fork if needed
          path: xla
      - name: Create results directory
        working-directory: xla
        run: mkdir -p results
      - name: Get GPU spec
        working-directory: xla
        continue-on-error: true
        run: nvidia-smi
      - name: Configure XLA
        working-directory: xla
        run: ./configure.py --backend CUDA --nccl
      - name: Set TF_CPP_MAX_VLOG_LEVEL
        working-directory: xla
        run: echo "TF_CPP_MAX_VLOG_LEVEL=1" >> $GITHUB_ENV # Use GITHUB_ENV to persist across steps
      - name: Check TF_CPP_MAX_VLOG_LEVEL
        working-directory: xla
        run: echo "$TF_CPP_MAX_VLOG_LEVEL"
      - name: Build hlo_runner_main
        working-directory: xla
        run: bazel build -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main
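      # Bazel writes the binary to bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main,
      # which is the path the run steps below invoke.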
      - name: Create gemm_006f564ad71b327343de5f090e801883.hlo
        working-directory: xla
        run: |
          cat << EOF > gemm_006f564ad71b327343de5f090e801883.hlo
          HloModule gemm_fusion_dot.166, entry_computation_layout={(bf16[8,12,2048,2048]{3,2,1,0}, bf16[16384,128]{1,0}, bf16[128]{0})->bf16[8,12,2048,128]{3,2,1,0}}

          %gemm_fusion_dot.166_computation.clone (parameter_0.167: bf16[8,12,2048,2048], parameter_1.167: bf16[16384,128], parameter_2.18: bf16[128]) -> bf16[8,12,2048,128] {
            %parameter_0.167 = bf16[8,12,2048,2048]{3,2,1,0} parameter(0)
            %bitcast.22615 = bf16[8,24576,2048]{2,1,0} bitcast(bf16[8,12,2048,2048]{3,2,1,0} %parameter_0.167)
            %parameter_1.167 = bf16[16384,128]{1,0} parameter(1)
            %parameter_2.18 = bf16[128]{0} parameter(2)
            %broadcast.9073 = bf16[16384,128]{1,0} broadcast(bf16[128]{0} %parameter_2.18), dimensions={1}, metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/value/mul" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=228}
            %multiply.7656 = bf16[16384,128]{1,0} multiply(bf16[16384,128]{1,0} %parameter_1.167, bf16[16384,128]{1,0} %broadcast.9073), metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/value/mul" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=228}
            %bitcast.22616 = bf16[8,2048,128]{2,1,0} bitcast(bf16[16384,128]{1,0} %multiply.7656)
            %dot.1454 = bf16[8,24576,128]{2,1,0} dot(bf16[8,24576,2048]{2,1,0} %bitcast.22615, bf16[8,2048,128]{2,1,0} %bitcast.22616), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={1}, metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/self_attention._dot_atten/pv_einsum/BNTS,BSH->BNTH/dot_general[dimension_numbers=(((3,), (1,)), ((0,), (0,))) precision=None preferred_element_type=None]" source_file="third_party/py/praxis/layers/base_ops.py" source_line=28}
            ROOT %bitcast.22617 = bf16[8,12,2048,128]{3,2,1,0} bitcast(bf16[8,24576,128]{2,1,0} %dot.1454), metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/self_attention._dot_atten/transpose[permutation=(0, 2, 1, 3)]" source_file="third_party/py/praxis/layers/multi_query_attention.py" source_line=454}
          }

          ENTRY %entry_computation (convert.8139: bf16[8,12,2048,2048], gemm_fusion_dot.163: bf16[16384,128], Arg_23.24: bf16[128]) -> bf16[8,12,2048,128] {
            %convert.8139 = bf16[8,12,2048,2048]{3,2,1,0} parameter(0)
            %gemm_fusion_dot.163 = bf16[16384,128]{1,0} parameter(1)
            %Arg_23.24 = bf16[128]{0} parameter(2)
            ROOT %micro_kernel = bf16[8,12,2048,128]{3,2,1,0} fusion(bf16[8,12,2048,2048]{3,2,1,0} %convert.8139, bf16[16384,128]{1,0} %gemm_fusion_dot.163, bf16[128]{0} %Arg_23.24), kind=kCustom, calls=%gemm_fusion_dot.166_computation.clone, metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/self_attention._dot_atten/pv_einsum/BNTS,BSH->BNTH/dot_general[dimension_numbers=(((3,), (1,)), ((0,), (0,))) precision=None preferred_element_type=None]" source_file="third_party/py/praxis/layers/base_ops.py" source_line=28}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
          }
          EOF
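      # The module above wraps a single kCustom fusion whose backend config
      # requests the "__triton_gemm" kind: a bf16 batched GEMM with a
      # broadcast-multiply on the right-hand operand, extracted so the runner
      # can benchmark this one kernel in isolation.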
      - name: Wait For Connection
        uses: google-ml-infra/actions/ci_connection@main
        with:
          halt-dispatch-input: ${{ inputs.halt-for-connection }}
      - name: Run specific HLO file
        working-directory: xla
        run: ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main --device_type=gpu --use_spmd_partitioning gemm_006f564ad71b327343de5f090e801883.hlo &> results/gemm_006f564ad71b327343de5f090e801883.hlo.log
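      # To inspect the captured output after downloading the artifact, e.g.
      # (a sketch; the log contents depend on the runner's verbosity):
      #   tail -n 50 results/gemm_006f564ad71b327343de5f090e801883.hlo.log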
      # - name: Run HLO Module Benchmarks with GPU in xla/tests/fuzz
      #   working-directory: xla
      #   continue-on-error: true
      #   run: |
      #     for file in xla/tests/fuzz/*.hlo; do
      #       filename=$(basename "$file")
      #       # Skip HLO files that are expected to fail.
      #       if [[ "$filename" == "rand_000060.hlo" || "$filename" == "rand_000067.hlo" || "$filename" == "rand_000072.hlo" ]]; then
      #         echo "Skipping benchmark on $file"
      #         continue
      #       fi
      #       echo "Running benchmark on $file"
      #       ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main --device_type=gpu --use_spmd_partitioning "$file" &> results/"$filename".log
      #     done
      - name: Upload Results
        uses: actions/upload-artifact@v4
        with:
          name: gpu-xla-benchmarks
          path: xla/results
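      # After the run completes, the artifact can be fetched with the GitHub
      # CLI (a sketch; requires the run id):
      #   gh run download <run-id> -n gpu-xla-benchmarks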