
Commit f7eba41

Update benchmarks.yml
1 parent 6c3bf18 commit f7eba41

File tree

1 file changed: +28 −14 lines

1 file changed

+28
-14
lines changed

.github/workflows/benchmarks.yml

Lines changed: 28 additions & 14 deletions
@@ -35,12 +35,6 @@ jobs:
           repository: 'google-ml-infra/jax-fork'
           path: jax-fork
 
-      - name: Checkout XLA
-        uses: actions/checkout@v3
-        with:
-          repository: 'openxla/xla' # Or your XLA fork
-          path: xla
-
       - name: Install JAX Dependencies
         working-directory: jax-fork
         run: |
@@ -55,12 +49,32 @@
         continue-on-error: true
         run: python -m pytest tests/multiprocess_gpu_test.py
 
-      - name: Build XLA GPU Atomic Test
-        working-directory: xla
-        continue-on-error: true
-        run: bazel build -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test
+      # - name: Build XLA GPU Atomic Test
+      #   working-directory: xla
+      #   continue-on-error: true
+      #   run: bazel build -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test
 
-      - name: Run XLA GPU Atomic Test
-        working-directory: xla
-        continue-on-error: true
-        run: bazel test -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test
+      # - name: Run XLA GPU Atomic Test
+      #   working-directory: xla
+      #   continue-on-error: true
+      #   run: bazel test -c opt --config=cuda //xla/service/gpu/tests:gpu_atomic_test
+  xla-gpu-ci:
+    runs-on: linux-x86-g2-48-l4-4gpu
+    steps:
+      - name: Checkout XLA
+        uses: actions/checkout@v3
+        with:
+          repository: 'openxla/xla'
+          path: xla
+
+      - name: Pull Docker image (with parallel)
+        run: parallel --ungroup --retries 3 --delay 15 --nonall -- docker pull us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/ml-build:latest
+
+      - name: Run Docker container for build and test
+        run: |
+          docker run --detach --name=xla_ci --rm --interactive --tty --volume=./xla:/github/xla --workdir=/github/xla us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/ml-build:latest bash
+          docker exec xla_ci parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --nobuild -- //xla/... //build_tools/... @tsl//tsl/...
+          docker exec xla_ci bazel test --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//tools/ci_build/gpu_build:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async -- //xla/... //build_tools/... @tsl//tsl/...
+          docker exec xla_ci bazel analyze-profile profile.json.gz
+          docker stop xla_ci
+
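A note on the new xla-gpu-ci job: GNU parallel is used here not for parallelism but as a retry wrapper around a single failure-prone command (the image pull, and the analysis-only bazel invocation inside the container). Below is a minimal sketch of that pattern, assuming GNU parallel is available on the runner; the flag descriptions are general GNU parallel behavior, not something this commit defines.

# Retry-wrapper sketch: run one command and re-run it if it fails.
#   --nonall     run the command once, without reading any input arguments
#   --retries 3  retry the command on a non-zero exit, up to 3 tries
#   --delay 15   wait about 15 seconds between job starts, spacing out retries
#   --ungroup    stream output immediately instead of buffering it per job
parallel --ungroup --retries 3 --delay 15 --nonall -- \
  docker pull us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/ml-build:latest

Inside the container, the first bazel invocation is wrapped the same way and passes --nobuild, so it only runs bazel's loading and analysis phases; the subsequent bazel test call performs the actual build and test run, relying on --flaky_test_attempts=3 for per-test retries.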

0 commit comments
