@@ -72,23 +72,12 @@ jobs:
       - name: Create gemm_006f564ad71b327343de5f090e801883.hlo
         working-directory: xla
         run: |
-          cat << EOF > gemm_00881937d6d49056045c3325a12b108b.hlo
-          HloModule gemm_fusion_dot.542, entry_computation_layout={(bf16[1,8192,3072]{2,1,0}, s8[3072,6,512]{2,1,0})->bf16[1,8192,6,512]{3,2,1,0}}
-
-          %gemm_fusion_dot.542_computation.clone (parameter_0.543: bf16[1,8192,3072], parameter_1.543: s8[3072,6,512]) -> bf16[1,8192,6,512] {
-            %parameter_0.543 = bf16[1,8192,3072]{2,1,0} parameter(0)
-            %bitcast.69925 = bf16[8192,3072]{1,0} bitcast(bf16[1,8192,3072]{2,1,0} %parameter_0.543)
-            %parameter_1.543 = s8[3072,6,512]{2,1,0} parameter(1)
-            %bitcast.69926 = s8[3072,3072]{1,0} bitcast(s8[3072,6,512]{2,1,0} %parameter_1.543)
-            %convert.18528 = bf16[3072,3072]{1,0} convert(s8[3072,3072]{1,0} %bitcast.69926), metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/query/query.quantized_einsum/ABD,DNH->ABNH/convert_element_type[new_dtype=bfloat16 weak_type=False]" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=220}
-            %dot.4949 = bf16[8192,3072]{1,0} dot(bf16[8192,3072]{1,0} %bitcast.69925, bf16[3072,3072]{1,0} %convert.18528), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/query/query.quantized_einsum/ABD,DNH->ABNH/dot_general[dimension_numbers=(((2,), (0,)), ((), ())) precision=None preferred_element_type=None]" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=220}
-            ROOT %bitcast.69927 = bf16[1,8192,6,512]{3,2,1,0} bitcast(bf16[8192,3072]{1,0} %dot.4949)
-          }
-
-          ENTRY %entry_computation (multiply.28104: bf16[1,8192,3072], Arg_51.52: s8[3072,6,512]) -> bf16[1,8192,6,512] {
-            %multiply.28104 = bf16[1,8192,3072]{2,1,0} parameter(0)
-            %Arg_51.52 = s8[3072,6,512]{2,1,0} parameter(1)
-            ROOT %micro_kernel = bf16[1,8192,6,512]{3,2,1,0} fusion(bf16[1,8192,3072]{2,1,0} %multiply.28104, s8[3072,6,512]{2,1,0} %Arg_51.52), kind=kCustom, calls=%gemm_fusion_dot.542_computation.clone, metadata={op_name="pjit(_wrapped_fn)/jit(main)/tarzan_lm.apply/tarzan_lm.decode_with_params/lm/transformer/x_layers_0/self_attention/query/query.quantized_einsum/ABD,DNH->ABNH/dot_general[dimension_numbers=(((2,), (0,)), ((), ())) precision=None preferred_element_type=None]" source_file="third_party/py/praxis/layers/quantization/operations.py" source_line=220}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
+          cat << EOF > isolated_convolution.hlo
+          HloModule convolution.167:
+          ENTRY %convolution.167 (parameter.0: f32[16,28,28,128], parameter.1: f32[3,3,128,128]) -> f32[16,28,28,128] {
+            %parameter.0 = f32[16,28,28,128]{3,0,2,1} parameter(0)
+            %parameter.1 = f32[3,3,128,128]{3,2,1,0} parameter(1)
+            ROOT %convolution.167 = f32[16,28,28,128]{3,0,2,1} convolution(f32[16,28,28,128]{3,0,2,1} %parameter.0, f32[3,3,128,128]{3,2,1,0} %parameter.1), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01oi->b01f
           }
           EOF

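A note on the heredoc in the added step: with an unquoted EOF delimiter the shell still performs parameter expansion inside the body. The HLO text above contains no $ characters, so it is written out intact, but quoting the delimiter makes the write byte-for-byte regardless of what the module contains. A minimal sketch of the safer form, abbreviated here to just the module header:

    # Quoting the delimiter ('EOF') disables expansion, so the HLO text
    # is written verbatim even if it ever gains a literal $.
    cat <<'EOF' > isolated_convolution.hlo
    HloModule convolution.167:
    EOF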
@@ -108,7 +97,7 @@ jobs:
       - name: Run specific HLO file
         working-directory: xla
         run: |
-          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results/gpu_utilization.log & ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main_gpu --device_type=gpu --log_output=True --use_spmd_partitioning gemm_00881937d6d49056045c3325a12b108b.hlo &> results/gemm_00881937d6d49056045c3325a12b108b.hlo.log
+          nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results/gpu_utilization.log & ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main_gpu --device_type=gpu --log_output=True --use_spmd_partitioning isolated_convolution.hlo &> results/isolated_convolution.hlo.log
       - name: Wait For Connection
         uses: google-ml-infra/actions/ci_connection@main
         with:
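The run step backgrounds nvidia-smi as a once-per-second utilization sampler and relies on the job teardown to stop it, which is fine inside a CI step but leaves a stray process if the same one-liner is pasted into a local shell. A sketch of the equivalent local invocation with the same flags the workflow uses; the only addition is an explicit kill of the sampler once the runner exits:

    # Sample GPU utilization once per second while the HLO runs.
    nvidia-smi --query-gpu=utilization.gpu --format=csv -l 1 > results/gpu_utilization.log &
    SMI_PID=$!

    # Replay the isolated module, capturing stdout and stderr.
    ./bazel-bin/xla/tools/multihost_hlo_runner/hlo_runner_main_gpu \
      --device_type=gpu --log_output=True --use_spmd_partitioning \
      isolated_convolution.hlo &> results/isolated_convolution.hlo.log

    # Stop the background sampler now that the run is finished.
    kill "$SMI_PID"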
@@ -119,7 +108,7 @@ jobs:
 
       - name: Parse XLA logs
         working-directory: xla
-        run: python parse_xla_logs.py results/gemm_00881937d6d49056045c3325a12b108b.hlo.log
+        run: python parse_xla_logs.py results/isolated_convolution.hlo.log
 
       - name: Upload Results
         uses: actions/upload-artifact@v4
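parse_xla_logs.py itself is outside this diff, so only its argument changes here. For a quick manual look at the renamed log without that script, a keyword scan over the runner output is often enough; note that the timing keywords below are an assumption about what hlo_runner_main_gpu prints, not a documented format:

    # Hypothetical quick scan; the keyword list is an assumed log format.
    grep -inE 'execution time|elapsed|duration' results/isolated_convolution.hlo.log ||
      echo 'no timing lines matched; inspect the log directly'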