@@ -64,25 +64,19 @@ jobs:
         # with:
         #   halt-dispatch-input: ${{ inputs.halt-for-connection }}
 
-      - name: Create gemm_006f564ad71b327343de5f090e801883.hlo
+      - name: Create gpu_hlo_backend.hlo
         working-directory: xla
         run: |
-          cat << EOF > gemm_00881937d6d49056045c3325a12b108b.hlo
-          HloModule gemm_fusion_dot.542, entry_computation_layout={(bf16[1,8192,3072]{2,1,0}, s8[3072,6,512]{2,1,0})->bf16[1,8192,6,512]{3,2,1,0}}
-          %gemm_fusion_dot.542_computation.clone (parameter_0.543: bf16[1,8192,3072], parameter_1.543: s8[3072,6,512]) -> bf16[1,8192,6,512] {
-            %parameter_0.543 = bf16[1,8192,3072]{2,1,0} parameter(0)
-            %bitcast.69925 = bf16[8192,3072]{1,0} bitcast(bf16[1,8192,3072]{2,1,0} %parameter_0.543)
-            %parameter_1.543 = s8[3072,6,512]{2,1,0} parameter(1)
-            %bitcast.69926 = s8[3072,3072]{1,0} bitcast(s8[3072,6,512]{2,1,0} %parameter_1.543)
-            %convert.18528 = bf16[3072,3072]{1,0} convert(s8[3072,3072]{1,0} %bitcast.69926), metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/query/query.quantized_einsum/ABD,DNH->ABNH/convert_element_type[new_dtype=bfloat16 weak_type=False]" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=220}
-            %dot.4949 = bf16[8192,3072]{1,0} dot(bf16[8192,3072]{1,0} %bitcast.69925, bf16[3072,3072]{1,0} %convert.18528), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/query/query.quantized_einsum/ABD,DNH->ABNH/dot_general[dimension_numbers=(((2,), (0,)), ((), ())) precision=None preferred_element_type=None]" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=220}
-            ROOT %bitcast.69927 = bf16[1,8192,6,512]{3,2,1,0} bitcast(bf16[8192,3072]{1,0} %dot.4949)
-          }
+          cat << EOF > gpu_hlo_backend.hlo
+          HloModule module
+          // CHECK: is_scheduled=true
 
-          ENTRY %entry_computation (multiply.28104: bf16[1,8192,3072], Arg_51.52: s8[3072,6,512]) -> bf16[1,8192,6,512] {
-            %multiply.28104 = bf16[1,8192,3072]{2,1,0} parameter(0)
-            %Arg_51.52 = s8[3072,6,512]{2,1,0} parameter(1)
-            ROOT %micro_kernel = bf16[1,8192,6,512]{3,2,1,0} fusion(bf16[1,8192,3072]{2,1,0} %multiply.28104, s8[3072,6,512]{2,1,0} %Arg_51.52), kind=kCustom, calls=%gemm_fusion_dot.542_computation.clone, metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/query/query.quantized_einsum/ABD,DNH->ABNH/dot_general[dimension_numbers=(((2,), (0,)), ((), ())) precision=None preferred_element_type=None]" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=220}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
+          ENTRY computation {
+            p = f32[5000,6000]{1,0} parameter(0)
+            e = f32[5000,6000]{1,0} sqrt(p)
+            c = f32[6000,5000] transpose(p), dimensions={1,0}
+            r = f32[300,20,5000] reshape(c)
+            ROOT out = (f32[5000,6000], f32[300,20,5000]) tuple(e,r)
           }
          EOF
 
@@ -94,7 +88,7 @@ jobs:
       - name: Run an HLO file
         working-directory: xla
         run: |
-          ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main --device_type=gpu --log_output=True --use_spmd_partitioning gemm_00881937d6d49056045c3325a12b108b.hlo &> results/gemm_00881937d6d49056045c3325a12b108b.hlo.log
+          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results/gpu_utilization_gpu_backend.log & ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main --device_type=gpu --log_output=True --use_spmd_partitioning gpu_hlo_backend.hlo &> results/gpu_hlo_backend.hlo.log
 
       - name: Wait For Connection
         uses: google-ml-infra/actions/ci_connection@main
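
Aside on the new "Run an HLO file" step: the nvidia-smi sampler is launched with `&` and never explicitly killed, so it keeps writing one utilization reading per second until the step's shell exits. A minimal standalone sketch of the same sampling pattern with explicit cleanup; the `sleep 10` workload is a placeholder for hlo_runner_main, and only the nvidia-smi flags are taken from the step above:

    #!/usr/bin/env bash
    set -euo pipefail

    # Sample GPU utilization once per second in the background,
    # using the same query flags as the workflow step above.
    nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > gpu_utilization.log &
    smi_pid=$!

    # Stop the sampler even if the workload fails.
    trap 'kill "$smi_pid" 2>/dev/null || true' EXIT

    # Placeholder workload; the workflow runs hlo_runner_main here.
    sleep 10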
@@ -107,7 +101,7 @@ jobs:
 
       - name: Parse XLA logs
         working-directory: xla
-        run: python parse_xla_logs.py results/gemm_00881937d6d49056045c3325a12b108b.hlo.log
+        run: python parse_xla_logs.py results/gpu_hlo_backend.hlo.log
 
       - name: Upload Results
         uses: actions/upload-artifact@v4
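
For a local repro of the two new steps outside CI, the following sketch assumes an xla checkout in which //xla/tools/multihost_hlo_runner:hlo_runner_main has already been built with bazel; the HLO text and runner flags are taken verbatim from the diff above:

    #!/usr/bin/env bash
    set -euo pipefail
    mkdir -p results

    # Recreate the input exactly as the "Create gpu_hlo_backend.hlo" step does.
    cat << 'EOF' > gpu_hlo_backend.hlo
    HloModule module
    // CHECK: is_scheduled=true

    ENTRY computation {
      p = f32[5000,6000]{1,0} parameter(0)
      e = f32[5000,6000]{1,0} sqrt(p)
      c = f32[6000,5000] transpose(p), dimensions={1,0}
      r = f32[300,20,5000] reshape(c)
      ROOT out = (f32[5000,6000], f32[300,20,5000]) tuple(e,r)
    }
    EOF

    # Run it the same way the "Run an HLO file" step does.
    ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main \
      --device_type=gpu --log_output=True --use_spmd_partitioning \
      gpu_hlo_backend.hlo &> results/gpu_hlo_backend.hlo.log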