We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent fddf6b0 · commit 2661015 — Copy full SHA for 2661015
src/sparseml/pytorch/utils/quantization/quantize_qat_export.py
@@ -745,7 +745,9 @@ def _convert_quantizable_matmul_and_add(model: ModelProto):
745
continue
746
if output_quantize_node.op_type != "QuantizeLinear":
747
748
- bias_initializer = get_init_by_name(model, bias_add_node.input[1])
+ bias_initializer = get_init_by_name(model, bias_add_node.input[1]) or (
749
+ get_init_by_name(model, bias_add_node.input[0])
750
+ )
751
if bias_initializer is None:
752
753
0 commit comments