1 file changed: +0 −55 lines
       - name: Configure XLA
         working-directory: xla
         run: ./configure.py --backend CUDA --nccl
-
-      - name: Set TF_CPP_MAX_VLOG_LEVEL
-        working-directory: xla
-        run: export TF_CPP_MAX_VLOG_LEVEL=1
-
-      - name: Check TF_CPP_MAX_VLOG_LEVEL
-        working-directory: xla
-        run: echo "$TF_CPP_MAX_VLOG_LEVEL"
-
-      - name: Build hlo_runner_main
-        working-directory: xla
-        run: bazel build -c opt --config=cuda --dynamic_mode=off //xla/tools/multihost_hlo_runner:hlo_runner_main
-name: Benchmarks
-
-on:
-  pull_request:
-    branches:
-      - main
-  workflow_dispatch:
-    inputs:
-      halt-for-connection:
-        description: 'Should this workflow run wait for a remote connection?'
-        type: choice
-        required: true
-        default: 'no'
-        options:
-          - 'yes'
-          - 'no'

-jobs:
-  build-xla-gpu-and-test:
-    runs-on: linux-x86-g2-48-l4-4gpu
-    container:
-      image: "gcr.io/tensorflow-testing/nosla-cuda12.3-cudnn9.1-ubuntu20.04-manylinux2014-multipython:latest"
-      options: --gpus all --privileged
-
-    steps:
-      - name: Checkout XLA
-        uses: actions/checkout@v3
-        with:
-          repository: openxla/xla
-          path: xla
-
-      - name: Create results directory
-        working-directory: xla
-        run: mkdir -p results
-
-      - name: Get GPU spec
-        working-directory: xla
-        continue-on-error: true
-        run: nvidia-smi
-
-      - name: Configure XLA
-        working-directory: xla
-        run: ./configure.py --backend CUDA --nccl
-
       - name: Set TF_CPP_MAX_VLOG_LEVEL
         working-directory: xla
         run: echo "TF_CPP_MAX_VLOG_LEVEL=1" >> $GITHUB_ENV  # Use GITHUB_ENV to persist across steps
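
Why the surviving step writes to $GITHUB_ENV instead of using export: each `run` step executes in its own shell, so a variable exported in one step is gone by the time the next step starts, whereas appending `KEY=value` to the file at $GITHUB_ENV makes the variable part of the environment of every subsequent step in the job. A minimal sketch of the difference (the workflow and step names below are illustrative, not part of this PR):

# demo.yml -- illustrative only, not from this PR
name: env-persistence-demo
on: workflow_dispatch

jobs:
  demo:
    runs-on: ubuntu-latest
    steps:
      - name: Export only (scoped to this step's shell)
        run: export TF_CPP_MAX_VLOG_LEVEL=1

      - name: Persist via GITHUB_ENV
        run: echo "TF_CPP_MAX_VLOG_LEVEL=1" >> $GITHUB_ENV

      - name: Verify
        # Prints "1": only the GITHUB_ENV value survives into this step;
        # the exported value from the first step is already gone.
        run: echo "$TF_CPP_MAX_VLOG_LEVEL"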