
Commit 5533e6e

Update benchmarks.yml
1 parent 808d88e · commit 5533e6e

File tree

1 file changed (+16, -8 lines)

.github/workflows/benchmarks.yml

Lines changed: 16 additions & 8 deletions
@@ -54,15 +54,18 @@ jobs:
       - name: Check TF_CPP_MAX_VLOG_LEVEL
         working-directory: xla
         run: echo "$TF_CPP_MAX_VLOG_LEVEL"
-      - name: Wait For Connection
-        uses: google-ml-infra/actions/ci_connection@main
-        with:
-          halt-dispatch-input: ${{ inputs.halt-for-connection }}
+      # - name: Wait For Connection
+      #   uses: google-ml-infra/actions/ci_connection@main
+      #   with:
+      #     halt-dispatch-input: ${{ inputs.halt-for-connection }}
 
 
-      - name: Build hlo_runner_main
+      - name: Build hlo_runner_main_gpu
         working-directory: xla
         run: bazel build -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main_gpu
+      - name: Build hlo_runner_main
+        working-directory: xla
+        run: bazel build -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main
 
       - name: Build test_gpu_profiler
         working-directory: xla
@@ -93,15 +96,20 @@ jobs:
       # - name: Run specific HLO file
       #   working-directory: xla
       #   run: |
-      #     nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > gpu_utilization.log & ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main_gpu --device_type=gpu --log_output=True --use_spmd_partitioning gemm_00881937d6d49056045c3325a12b108b.hlo &> results/gemm_00881937d6d49056045c3325a12b108b.log
+      #     nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > gpu_utilization.log & bazel run -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main_gpu -- --device_type=gpu --log_output=True --use_spmd_partitioning isolated_convolution.hlo &> results/isolated_convolution.log
       # - name: Wait For Connection
       #   uses: google-ml-infra/actions/ci_connection@main
       #   with:
       #     halt-dispatch-input: ${{ inputs.halt-for-connection }}
-      - name: Run specific HLO file
+      - name: Run specific HLO file hlo_runner_main_gpu
+        working-directory: xla
+        run: |
+          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results.gpu_utilization.log & bazel run -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main_gpu -- --device_type=gpu --log_output=True --use_spmd_partitioning isolated_convolution.hlo &> results/hlo_runner_main_gpu_isolated_convolution.log
+
+      - name: Run specific HLO file hlo_runner_main
         working-directory: xla
         run: |
-          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results/gpu_utilization.log & ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main_gpu --device_type=gpu --log_output=True --use_spmd_partitioning isolated_convolution.hlo &> results/isolated_convolution.hlo.log
+          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results/gpu_utilization_v2.log & bazel run -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main -- --device_type=gpu --log_output=True --use_spmd_partitioning isolated_convolution.hlo &> results/hlo_runner_main_isolated_convolution.log
 
       - name: Run test_gpu_profiler
         working-directory: xla
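
For reference, a sketch of how the two new run steps read once this commit is applied, reconstructed from the added lines above. The steps: key and indentation are assumed from the hunk context, and the inline comments are explanatory additions, not part of the committed file:

    steps:
      # The trailing "&" backgrounds nvidia-smi so it keeps sampling GPU
      # utilization once per second while the HLO runner executes; "&>"
      # captures the runner's stdout and stderr into the results log.
      - name: Run specific HLO file hlo_runner_main_gpu
        working-directory: xla
        run: |
          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results.gpu_utilization.log & bazel run -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main_gpu -- --device_type=gpu --log_output=True --use_spmd_partitioning isolated_convolution.hlo &> results/hlo_runner_main_gpu_isolated_convolution.log

      # Same HLO through the hlo_runner_main target, writing to separate
      # utilization and result logs so the two runs can be compared.
      - name: Run specific HLO file hlo_runner_main
        working-directory: xla
        run: |
          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results/gpu_utilization_v2.log & bazel run -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main -- --device_type=gpu --log_output=True --use_spmd_partitioning isolated_convolution.hlo &> results/hlo_runner_main_isolated_convolution.log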
