@@ -23,15 +23,15 @@ jobs:
       options: --gpus all --privileged # Might need privileged mode, use with caution

     steps:
-      - name: Checkout XLA
-        uses: actions/checkout@v3
-        with:
-          repository: openxla/xla # Replace with your fork if needed
-      # - name: Checkout repository
+      # - name: Checkout XLA
       #   uses: actions/checkout@v3
       #   with:
-      #     repository: juliagmt-google/xla
-      #     path: xla
+      #     repository: openxla/xla # Replace with your fork if needed
+      - name: Checkout repository
+        uses: actions/checkout@v3
+        with:
+          repository: juliagmt-google/xla
+          path: xla

       # - name: Wait For Connection
       #   uses: google-ml-infra/actions/ci_connection@main
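With this change the workflow clones the juliagmt-google/xla fork into an xla/ subdirectory of the workspace via the path input of actions/checkout, which is what lets later steps set working-directory: xla. A minimal sketch of that pairing, reconstructed from this diff (the second step name is illustrative, not part of the workflow):

      - name: Checkout repository
        uses: actions/checkout@v3
        with:
          repository: juliagmt-google/xla
          path: xla                # clone lands in $GITHUB_WORKSPACE/xla

      - name: Example step using the checkout
        working-directory: xla     # matches the path above
        run: ls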
@@ -159,18 +159,18 @@ jobs:
       # - name: Create gpu_hlo_backend.hlo
       #   working-directory: xla
       #   run: |
-      #     cat << EOF > gpu_hlo_backend.hlo
-      #     HloModule module
-      #     // CHECK: is_scheduled=true
+          cat << EOF > gpu_hlo_backend.hlo
+          HloModule module
+          // CHECK: is_scheduled=true

-      #     ENTRY computation {
-      #       p = f32[5000,6000]{1,0} parameter(0)
-      #       e = f32[5000,6000]{1,0} sqrt(p)
-      #       c = f32[6000,5000] transpose(p), dimensions={1,0}
-      #       r = f32[300,20,5000] reshape(c)
-      #       ROOT out = (f32[5000,6000], f32[300,20,5000]) tuple(e,r)
-      #     }
-      #     EOF
+          ENTRY computation {
+            p = f32[5000,6000]{1,0} parameter(0)
+            e = f32[5000,6000]{1,0} sqrt(p)
+            c = f32[6000,5000] transpose(p), dimensions={1,0}
+            r = f32[300,20,5000] reshape(c)
+            ROOT out = (f32[5000,6000], f32[300,20,5000]) tuple(e,r)
+          }
+          EOF

       # - name: Wait For Connection
       #   uses: google-ml-infra/actions/ci_connection@main
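The now-uncommented heredoc writes the HLO module to gpu_hlo_backend.hlo inside the xla checkout. A follow-up step would typically feed that file to XLA's hlo-opt tool and verify the // CHECK line with LLVM's FileCheck; the sketch below is an assumption rather than part of this diff, and presumes hlo-opt and FileCheck are already built and on PATH and that hlo-opt accepts --platform/--stage flags (verify with hlo-opt --help):

      # Hypothetical follow-up step (not in this workflow): compile the HLO and
      # check the scheduled-module output against the // CHECK pattern.
      - name: Run gpu_hlo_backend.hlo through hlo-opt
        working-directory: xla
        run: |
          hlo-opt --platform=gpu --stage=hlo gpu_hlo_backend.hlo | FileCheck gpu_hlo_backend.hlo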