Skip to content

Commit 392d152

Browse files
Update benchmarks.yml
1 parent 724b7d7 commit 392d152

File tree

1 file changed

+23
-17
lines changed

.github/workflows/benchmarks.yml

Lines changed: 23 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -72,34 +72,40 @@ jobs:
7272
repository: openxla/xla # Replace with your fork if needed
7373
path: xla
7474

75+
# - name: Build XLA with GPU support
76+
# working-directory: xla
77+
# continue-on-error: true
78+
# run: bazel build --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --nobuild -- //xla/... //build_tools/... @tsl//tsl/...
79+
80+
# - name: Run XLA tests with GPU
81+
# working-directory: xla
82+
# continue-on-error: true
83+
# run: bazel test --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async -- //xla/... //build_tools/... @tsl//tsl/...
84+
85+
# - name: Run Profile Analysis
86+
# working-directory: xla
87+
# continue-on-error: true
88+
# run: bazel analyze-profile profile.json.gz
89+
90+
# - name: Get GPU spec
91+
# working-directory: xla
92+
# continue-on-error: true
93+
# run: nvidia-smi
7594
# - name: Wait For Connection
7695
# uses: google-ml-infra/actions/ci_connection@main
7796
# with:
7897
# halt-dispatch-input: ${{ inputs.halt-for-connection }}
7998

80-
- name: Build XLA with GPU support
99+
- name: Configure XLA
81100
working-directory: xla
82-
continue-on-error: true
83-
run: bazel build --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --nobuild -- //xla/... //build_tools/... @tsl//tsl/...
84-
85-
- name: Run XLA tests with GPU
86-
working-directory: xla
87-
continue-on-error: true
88-
run: bazel test --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async -- //xla/... //build_tools/... @tsl//tsl/...
101+
run: ./configure.py --backend CUDA --nccl
89102

90-
- name: Run Profile Analysis
103+
- name: Set TF_CPP_MAX_VLOG_LEVEL
91104
working-directory: xla
92-
continue-on-error: true
93-
run: bazel analyze-profile profile.json.gz
94-
95-
- name: Get GPU spec
96-
working-directory: xla
97-
continue-on-error: true
98-
run: nvidia-smi
105+
run: echo "TF_CPP_MAX_VLOG_LEVEL=1" >> $GITHUB_ENV
99106

100107
- name: Build hlo_runner_main
101108
working-directory: xla
102-
continue-on-error: true
103109
run: bazel build -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main
104110

105111
- name: Run XLA GPU microbenchmarks with hlo_runner_main

0 commit comments

Comments (0)