@@ -529,6 +529,74 @@ TEST(ConvTransposeTest, ConvTranspose_InvalidKernelShape) {
                        kDmlExecutionProvider});  // TODO: Unskip when fixed #41968513
 }
 
+TEST(ConvTransposeTest, ConvTranspose_InvalidBiasShape_1) {
+  ConvTransposeOpAttributes attrs = {
+      vector<int64_t>{1, 5},         // kernel_shape
+      {},                            // output_padding
+      vector<int64_t>{2, 1, 1, 14},  // output_shape
+      vector<int64_t>{0, 0, 0, 0},   // pads
+      vector<int64_t>{1, 1},         // strides
+      vector<int64_t>{1, 1},         // dilations
+      1,                             // group
+      "NOTSET"                       // auto_pad
+  };
+  vector<float> X = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+                     10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f};
+  vector<int64_t> X_shape = {2, 1, 1, 10};
+  vector<float> W = {1.0f, 2.0f, 3.0f, 2.0f, 1.0f};
+  vector<int64_t> W_shape = {1, 1, 1, 5};
+  vector<float> B = {1.0f, 2.0f};  // invalid bias shape, should be {1}
+  vector<int64_t> B_shape = {2};   // invalid bias shape, should be {1}
+  vector<int64_t> Y_shape = {2, 1, 1, 14};
+  vector<float> expected_vals = {1.0f, 2.0f, 5.0f, 11.0f, 19.0f, 28.0f, 37.0f, 46.0f, 55.0f, 64.0f, 63.0f, 51.0f, 27.0f, 10.0f,
+                                 11.0f, 32.0f, 65.0f, 91.0f, 109.0f, 118.0f, 127.0f, 136.0f, 145.0f, 154.0f, 143.0f, 111.0f, 57.0f, 20.0f};
+  TestConvTransposeOp(attrs, {X, W, B}, {X_shape, W_shape, B_shape}, expected_vals, Y_shape,
+                      OpTester::ExpectResult::kExpectFailure,
+                      // Just ensure that the error message starts with the expected string.
+                      "Bias shape is not compatible with number of output channels. "
+                      "It should be a 1-D tensor with size num_output_channels(M).",
+                      // The EP exclusions are along the same lines as ConvTranspose_InvalidKernelShape, which
+                      // also tests for invalid shapes. The list also includes XnnPack, which seems to have its
+                      // own way of dealing with an incorrectly shaped bias.
+                      {kTensorrtExecutionProvider, kQnnExecutionProvider,
+                       kDmlExecutionProvider, kXnnpackExecutionProvider,
+                       kWebGpuExecutionProvider});  // Remove when https://github.com/microsoft/onnxruntime/issues/27210 is fixed
+}
+
+TEST(ConvTransposeTest, ConvTranspose_InvalidBiasShape_2) {
+  ConvTransposeOpAttributes attrs = {
+      vector<int64_t>{1, 5},         // kernel_shape
+      {},                            // output_padding
+      vector<int64_t>{2, 1, 1, 14},  // output_shape
+      vector<int64_t>{0, 0, 0, 0},   // pads
+      vector<int64_t>{1, 1},         // strides
+      vector<int64_t>{1, 1},         // dilations
+      1,                             // group
+      "NOTSET"                       // auto_pad
+  };
+  vector<float> X = {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+                     10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f};
+  vector<int64_t> X_shape = {2, 1, 1, 10};
+  vector<float> W = {1.0f, 2.0f, 3.0f, 2.0f, 1.0f};
+  vector<int64_t> W_shape = {1, 1, 1, 5};
+  vector<float> B = {1.0f, 2.0f};
+  vector<int64_t> B_shape = {1, 2};  // invalid bias rank (it should be 1-D)
+  vector<int64_t> Y_shape = {2, 1, 1, 14};
+  vector<float> expected_vals = {1.0f, 2.0f, 5.0f, 11.0f, 19.0f, 28.0f, 37.0f, 46.0f, 55.0f, 64.0f, 63.0f, 51.0f, 27.0f, 10.0f,
+                                 11.0f, 32.0f, 65.0f, 91.0f, 109.0f, 118.0f, 127.0f, 136.0f, 145.0f, 154.0f, 143.0f, 111.0f, 57.0f, 20.0f};
+  TestConvTransposeOp(attrs, {X, W, B}, {X_shape, W_shape, B_shape}, expected_vals, Y_shape,
+                      OpTester::ExpectResult::kExpectFailure,
+                      // Just ensure that the error message starts with the expected string.
+                      "Bias shape is not compatible with number of output channels. "
+                      "It should be a 1-D tensor with size num_output_channels(M).",
+                      // The EP exclusions are along the same lines as ConvTranspose_InvalidKernelShape, which
+                      // also tests for invalid shapes. The list also includes XnnPack, which seems to have its
+                      // own way of dealing with an incorrectly shaped bias.
+                      {kTensorrtExecutionProvider, kQnnExecutionProvider,
+                       kDmlExecutionProvider, kXnnpackExecutionProvider,
+                       kWebGpuExecutionProvider});  // Remove when https://github.com/microsoft/onnxruntime/issues/27210 is fixed
+}
+
 TEST(ConvTransposeTest, ConvTranspose_onnx) {
   ConvTransposeOpAttributes attrs = {
       vector<int64_t>{3, 3},  // kernel_shape
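For reference, here is a minimal standalone sketch of the check these two tests exercise, using a hypothetical `ValidateBiasShape` helper (this is not the actual onnxruntime kernel code): the bias must be a 1-D tensor whose single dimension equals the number of output channels M, which for ConvTranspose is `W.shape[1] * group`.

```cpp
// Hypothetical, self-contained sketch of the validation the tests above expect.
// Names and signature are illustrative assumptions, not onnxruntime's implementation.
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Returns an empty string when the bias is acceptable, otherwise an error
// message that begins with the text the tests match against.
std::string ValidateBiasShape(const std::vector<int64_t>& bias_shape,
                              int64_t num_output_channels_M) {
  if (bias_shape.size() != 1 || bias_shape[0] != num_output_channels_M) {
    return "Bias shape is not compatible with number of output channels. "
           "It should be a 1-D tensor with size num_output_channels(M).";
  }
  return {};
}

int main() {
  // For the tests above: W_shape = {1, 1, 1, 5} and group = 1, so M = 1 * 1 = 1.
  const int64_t M = 1;
  assert(ValidateBiasShape({1}, M).empty());      // valid: 1-D with size M
  assert(!ValidateBiasShape({2}, M).empty());     // InvalidBiasShape_1: wrong size
  assert(!ValidateBiasShape({1, 2}, M).empty());  // InvalidBiasShape_2: wrong rank
  return 0;
}
```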