Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1686,7 +1686,6 @@ void ONNXToZHighLoweringPass::runOnOperation() {
// prohibit the combined ops lowering happened.
RewritePatternSet combinedPatterns(&getContext());
onnx_mlir::getONNXToZHighMultipleOpPatterns(combinedPatterns);

// It's ok to fail.
(void)applyPatternsGreedily(module, std::move(combinedPatterns));

Expand Down
9 changes: 5 additions & 4 deletions src/Accelerators/NNPA/Conversion/ONNXToZHigh/ONNXToZHigh.td
Original file line number Diff line number Diff line change
Expand Up @@ -344,21 +344,22 @@ def replaceONNXMaxPattern : Pat<
// (ZHighInvSqrtOp (ZHighStickOp %X))
//===----------------------------------------------------------------------===//
def replaceDiv1SqrtPattern : Pat<
(ONNXDivOp $a, (ONNXSqrtOp $x)),
(ONNXDivOp:$divOp $a, (ONNXSqrtOp:$sqrtOp $x)),
(ZHighUnstickOp (ZHighInvSqrtOp (ZHighStickOp:$s_x $x, (NoneLayoutAttr), (GetDefaultSaturation)),
(returnType $s_x))),
[(IsCompatibleWithNNPALevelArch15),(IsConstOfOnes:$a),(IsFloatType:$a)]
[(IsCompatibleWithNNPALevelArch15),(IsConstOfOnes:$a),(IsFloatType:$a),
(OpIsBigEnoughForNNPA:$sqrtOp),(OpIsBigEnoughForNNPA:$divOp)]
>;

//===----------------------------------------------------------------------===//
// ONNXReciprocalOp(ONNXSqrtOp %X) = ZHighUnstickOp
// (ZHighInvSqrtOp (ZHighStickOp %X))
//===----------------------------------------------------------------------===//
def replaceReciprocalSqrtPattern : Pat<
(ONNXReciprocalOp (ONNXSqrtOp $x)),
(ONNXReciprocalOp (ONNXSqrtOp:$sqrtOp $x)),
(ZHighUnstickOp (ZHighInvSqrtOp (ZHighStickOp:$s_x $x, (NoneLayoutAttr), (GetDefaultSaturation)),
(returnType $s_x))),
[(IsCompatibleWithNNPALevelArch15)]
[(IsCompatibleWithNNPALevelArch15), (OpIsBigEnoughForNNPA:$sqrtOp)]
>;

//===----------------------------------------------------------------------===//
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,4 +129,13 @@ void configureONNXToZHighLoweringPass(bool optReportNNPAUnsupportedOps,
}
}

/// Returns true when `op` is considered too small to benefit from NNPA
/// offloading, i.e. when every one of its operands is a scalar tensor
/// (per `isScalarTensor`). A null op is never classified as too small.
bool isTooSmallOpForNNPA(mlir::Operation *op) {
  if (op == nullptr)
    return false;
  // If any operand is non-scalar, the op is big enough for NNPA.
  for (mlir::Value operand : op->getOperands()) {
    if (!isScalarTensor(operand))
      return false;
  }
  return true;
}

} // namespace onnx_mlir
61 changes: 35 additions & 26 deletions src/Accelerators/NNPA/Conversion/ONNXToZHigh/ONNXToZHighCommon.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,36 @@ struct ONNXToZHighLoweringConfiguration {
};
};

/// Get transposed tensor by using a permutation array.
mlir::Value emitONNXTranspose(mlir::Location loc,
mlir::PatternRewriter &rewriter, mlir::Value x,
mlir::ArrayRef<int64_t> perms);

/// Get transposed tensor by using a permutation array and a result type.
mlir::Value emitONNXTransposeWithType(mlir::Location loc,
mlir::PatternRewriter &rewriter, mlir::Type transposedType, mlir::Value x,
mlir::ArrayRef<int64_t> perms);

/// Split a tensor along an axis in which each chunk has a size of
/// NNPAGetMaxForDim and the last chunk can be smaller.
mlir::ValueRange splitAlongAxis(
onnx_mlir::MultiDialectBuilder<onnx_mlir::OnnxBuilder> &create,
mlir::Value X, int64_t axis);

// Check if a value is a constant tensor of a single f32 value or not.
bool isF32ScalarConstantTensor(mlir::Value v);

// Get FloatAttr from a constant tensor of a single f32 value.
mlir::FloatAttr getScalarF32AttrFromConstant(mlir::Value v);

// Emit ONNX Concat to store the shape of the input x.
mlir::Value getDynShape(
mlir::Location loc, mlir::PatternRewriter &rewriter, mlir::Value x);

// Check if an operation is too small for NNPA, e.g. an operation with all
// scalar tensors.
bool isTooSmallOpForNNPA(mlir::Operation *op);

template <typename OP_TYPE>
void addDynamicallyLegalOpFor(mlir::ConversionTarget *target,
const onnx_mlir::DimAnalysis *dimAnalysis,
Expand All @@ -66,6 +96,11 @@ void addDynamicallyLegalOpFor(mlir::ConversionTarget *target,

// If not CPU, check if the op is legal for NNPA.
bool isLegalForNNPA = false;

// Operations whose inputs are small, do not use NNPA.
if (isTooSmallOpForNNPA(genericOp))
return true;

if (checkLegalityFn)
isLegalForNNPA = !checkLegalityFn(op, dimAnalysis);
else {
Expand Down Expand Up @@ -97,31 +132,5 @@ void addDynamicallyLegalOpFor(mlir::ConversionTarget *target,
});
}

/// Get transposed tensor by using a permutation array.
mlir::Value emitONNXTranspose(mlir::Location loc,
mlir::PatternRewriter &rewriter, mlir::Value x,
mlir::ArrayRef<int64_t> perms);

/// Get transposed tensor by using a permutation array and a result type.
mlir::Value emitONNXTransposeWithType(mlir::Location loc,
mlir::PatternRewriter &rewriter, mlir::Type transposedType, mlir::Value x,
mlir::ArrayRef<int64_t> perms);

/// Split a tensor along an axis in which each chunk has a size of
/// NNPAGetMaxForDim and the last chunk can be smaller.
mlir::ValueRange splitAlongAxis(
onnx_mlir::MultiDialectBuilder<onnx_mlir::OnnxBuilder> &create,
mlir::Value X, int64_t axis);

// Check if a value is a constant tensor of a single f32 value or not.
bool isF32ScalarConstantTensor(mlir::Value v);

// Get FloatAttr from a constant tensor of a single f32 value.
mlir::FloatAttr getScalarF32AttrFromConstant(mlir::Value v);

// Emit ONNX Concat to store the shape of the input x.
mlir::Value getDynShape(
mlir::Location loc, mlir::PatternRewriter &rewriter, mlir::Value x);

} // namespace onnx_mlir
#endif
Original file line number Diff line number Diff line change
Expand Up @@ -58,4 +58,6 @@ def ACT_RELUAttr: NativeCodeCall<"$_builder.getStringAttr(\"ACT_RELU\")">;

def GetTypeOf : NativeCodeCall<"$0.getType()" >;

// Constraint that holds when the defining op of the bound value is NOT "too
// small" for NNPA (see isTooSmallOpForNNPA: an op all of whose inputs are
// scalar tensors is considered too small to offload).
def OpIsBigEnoughForNNPA: Constraint<CPred<"!isTooSmallOpForNNPA($_self.getDefiningOp())">>;

#endif // ONNX_TO_ZHIGH_COMMON
2 changes: 1 addition & 1 deletion test/accelerators/NNPA/numerical/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ add_numerical_test(TestGRUNNPA


# LeakyRelu
set(TestLeakyReluNNPA_instruction zdnn_mul)
set(TestLeakyReluNNPA_instruction zdnnx_leaky_relu)
# Automatically set following config when using --maccel=NNPA
add_numerical_test(TestLeakyReluNNPA
${ONNX_NUMERICALTEST_SRC_DIR}/TestLeakyRelu.cpp
Expand Down
27 changes: 27 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/add.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -64,3 +64,30 @@ func.func @test_exceed_limit_add(%arg0 : tensor<32769x10xf32>, %arg1 : tensor<32
// CHECK-LABEL: func @test_exceed_limit_add
// CHECK: "onnx.Add"
}

// -----

// Rank-0 (scalar) operands: per the CHECK lines, onnx.Add is kept as-is,
// i.e. it is not lowered to ZHigh/NNPA.
func.func @test_scalar_add_1(%arg0 : tensor<f32>, %arg1 : tensor<f32>) -> tensor<f32> {
%x = "onnx.Add"(%arg0, %arg1) : (tensor<f32>, tensor<f32>) -> tensor<f32>
"func.return"(%x) : (tensor<f32>) -> ()

// CHECK-LABEL: func.func @test_scalar_add_1
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<f32>, [[PARAM_1_:%.+]]: tensor<f32>) -> tensor<f32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Add"([[PARAM_0_]], [[PARAM_1_]]) : (tensor<f32>, tensor<f32>) -> tensor<f32>
// CHECK: return [[VAR_0_]] : tensor<f32>
// CHECK: }
}

// -----

// Single-element (tensor<1xf32>) operands also count as scalar: onnx.Add is
// left unlowered per the CHECK lines.
func.func @test_scalar_add_2(%arg0 : tensor<1xf32>, %arg1 : tensor<1xf32>) -> tensor<1xf32> {
%x = "onnx.Add"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
"func.return"(%x) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_scalar_add_2
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>, [[PARAM_1_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Add"([[PARAM_0_]], [[PARAM_1_]]) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}

14 changes: 14 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/div.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -64,3 +64,17 @@ func.func @test_exceed_limit_div(%arg0 : tensor<32769x10xf32>, %arg1 : tensor<32
// CHECK-LABEL: func @test_exceed_limit_div
// CHECK: "onnx.Div"
}

// -----

// Scalar operands: onnx.Div stays as-is (not lowered to ZHigh) per the CHECK
// lines.
func.func @test_scalar_div(%arg0 : tensor<1xf32>, %arg1 : tensor<1xf32>) -> tensor<1xf32> {
%x = "onnx.Div"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
"func.return"(%x) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_scalar_div
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>, [[PARAM_1_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Div"([[PARAM_0_]], [[PARAM_1_]]) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}

14 changes: 14 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/exp.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,17 @@ func.func @test_exceed_limit_exp(%arg0 : tensor<32769x10xf32>) -> tensor<*xf32>
// CHECK-LABEL: func @test_exceed_limit_exp
// CHECK: "onnx.Exp"
}

// -----

// Scalar operand: onnx.Exp stays as-is (not lowered to ZHigh) per the CHECK
// lines.
func.func @test_scalar_exp(%arg0 : tensor<1xf32>) -> tensor<1xf32> {
%x = "onnx.Exp"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
"func.return"(%x) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_scalar_exp
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Exp"([[PARAM_0_]]) : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}

26 changes: 26 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/gelu.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,29 @@ func.func @test_gelu_tanh_arch15(%arg0 : tensor<1x2xf32>) -> tensor<1x2xf32> {
// CHECK: return [[VAR_2_]] : tensor<1x2xf32>
// CHECK: }
}

// -----

// Scalar operand: onnx.Gelu (approximate = "none") is kept unlowered per the
// CHECK lines.
func.func @test_gelu_erf_arch15_scalar(%arg0 : tensor<1xf32>) -> tensor<1xf32>{
%0 ="onnx.Gelu"(%arg0) {approximate = "none"} : (tensor<1xf32>) -> tensor<1xf32>
"func.return"(%0) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_gelu_erf_arch15_scalar
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Gelu"([[PARAM_0_]]) <{approximate = "none"}> : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}

// -----

// Scalar operand: onnx.Gelu (approximate = "tanh") is kept unlowered per the
// CHECK lines.
func.func @test_gelu_tanh_arch15_scalar(%arg0 : tensor<1xf32>) -> tensor<1xf32>{
%0 ="onnx.Gelu"(%arg0) {approximate = "tanh"} : (tensor<1xf32>) -> tensor<1xf32>
"func.return"(%0) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_gelu_tanh_arch15_scalar
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Gelu"([[PARAM_0_]]) <{approximate = "tanh"}> : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}
18 changes: 18 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/invsqrt.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -43,3 +43,21 @@ func.func @test_invsqrt_div2(%arg0 : tensor<1x2xf32>) -> tensor<*xf32> {
// CHECK: return [[VAR_2_]] : tensor<1x2xf32>
// CHECK: }
}

// -----

// Scalar 1/sqrt(x) pattern (Div of a ones-constant by Sqrt): per the CHECK
// lines it is NOT fused into ZHigh InvSqrt — both onnx.Sqrt and onnx.Div
// remain.
func.func @test_scalar_invsqrt(%arg0 : tensor<1xf32>) -> tensor<1xf32> {
%a = onnx.Constant dense<[1.0]> : tensor<1xf32>
%x = "onnx.Sqrt"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
%y = "onnx.Div"(%a, %x) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
"func.return"(%y) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_scalar_invsqrt
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK-DAG: [[VAR_0_:%.+]] = onnx.Constant dense<1.000000e+00> : tensor<1xf32>
// CHECK-DAG: [[VAR_1_:%.+]] = "onnx.Sqrt"([[PARAM_0_]]) : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: [[VAR_2_:%.+]] = "onnx.Div"([[VAR_0_]], [[VAR_1_]]) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_2_]] : tensor<1xf32>
// CHECK: }
}

Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,15 @@ func.func @test_leakyrelu_default(%arg0 : tensor<10x10xf32>) -> tensor<*xf32> {
// CHECK: }
}

// -----

// Scalar operand: onnx.LeakyRelu remains an ONNX op (with its default alpha
// materialized) rather than being lowered to ZHigh, per the CHECK lines.
func.func @test_leakyrelu_scalar(%arg0 : tensor<1xf32>) -> tensor<*xf32> {
%0 = "onnx.LeakyRelu"(%arg0) : (tensor<1xf32>) -> tensor<*xf32>
"func.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: func.func @test_leakyrelu_scalar
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.LeakyRelu"([[PARAM_0_]]) <{alpha = 0.00999999977 : f32}> : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}
14 changes: 14 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/log.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -36,3 +36,17 @@ func.func @test_exceed_limit_log(%arg0 : tensor<32769x10xf32>) -> tensor<*xf32>
// CHECK-LABEL: func @test_exceed_limit_log
// CHECK: "onnx.Log"
}

// -----

// Scalar operand: onnx.Log stays as-is (not lowered to ZHigh) per the CHECK
// lines.
func.func @test_scalar_log(%arg0 : tensor<1xf32>) -> tensor<1xf32> {
%x = "onnx.Log"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
"func.return"(%x) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_scalar_log
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Log"([[PARAM_0_]]) : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}

13 changes: 13 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/max.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -64,3 +64,16 @@ func.func @test_exceed_limit_max(%arg0 : tensor<32769x10xf32>, %arg1 : tensor<32
// CHECK-LABEL: func @test_exceed_limit_max
// CHECK: "onnx.Max"
}

// -----

// Scalar operands: onnx.Max stays as-is (not lowered to ZHigh) per the CHECK
// lines.
func.func @test_max_scalar(%arg0 : tensor<1xf32>, %arg1 : tensor<1xf32>) -> tensor<*xf32> {
%0 = "onnx.Max"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<*xf32>
"func.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: func.func @test_max_scalar
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>, [[PARAM_1_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Max"([[PARAM_0_]], [[PARAM_1_]]) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}
13 changes: 13 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/min.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -64,3 +64,16 @@ func.func @test_exceed_limit_min(%arg0 : tensor<32769x10xf32>, %arg1 : tensor<32
// CHECK-LABEL: func @test_exceed_limit_min
// CHECK: "onnx.Min"
}

// -----

// Scalar operands: onnx.Min stays as-is (not lowered to ZHigh) per the CHECK
// lines.
func.func @test_min_scalar(%arg0 : tensor<1xf32>, %arg1 : tensor<1xf32>) -> tensor<*xf32> {
%0 = "onnx.Min"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<*xf32>
"func.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: func.func @test_min_scalar
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>, [[PARAM_1_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Min"([[PARAM_0_]], [[PARAM_1_]]) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}
14 changes: 14 additions & 0 deletions test/mlir/accelerators/nnpa/conversion/onnx-to-zhigh/mul.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -64,3 +64,17 @@ func.func @test_exceed_limit_mul(%arg0 : tensor<32769x10xf32>, %arg1 : tensor<32
// CHECK-LABEL: func @test_exceed_limit_mul
// CHECK: "onnx.Mul"
}

// -----

// Scalar operands: onnx.Mul stays as-is (not lowered to ZHigh) per the CHECK
// lines.
func.func @test_scalar_mul(%arg0 : tensor<1xf32>, %arg1 : tensor<1xf32>) -> tensor<1xf32> {
%x = "onnx.Mul"(%arg0, %arg1) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
"func.return"(%x) : (tensor<1xf32>) -> ()

// CHECK-LABEL: func.func @test_scalar_mul
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>, [[PARAM_1_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.Mul"([[PARAM_0_]], [[PARAM_1_]]) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}

Original file line number Diff line number Diff line change
Expand Up @@ -73,3 +73,18 @@ func.func @test_reduce_max_axes_not_lowered_not_multiple_axes(%arg0 : tensor<3x2
// CHECK: return [[VAR_1_]] : tensor<1x2x1xf32>
// CHECK: }
}

// -----

// Scalar input: onnx.ReduceMax (with its axes constant) stays as-is, not
// lowered to ZHigh, per the CHECK lines.
func.func @test_reduce_max_scalar(%arg0 : tensor<1xf32>) -> tensor<*xf32> {
%cst = "onnx.Constant"() {value = dense<[0]> : tensor<1xi64> } : () -> tensor<1xi64>
%0 ="onnx.ReduceMax"(%arg0, %cst) {keepdims = 1 : si64, noop_with_empty_axes = 0 : si64} : (tensor<1xf32>, tensor<1xi64>)-> tensor<*xf32>
"func.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: func.func @test_reduce_max_scalar
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = onnx.Constant dense<0> : tensor<1xi64>
// CHECK: [[VAR_1_:%.+]] = "onnx.ReduceMax"([[PARAM_0_]], [[VAR_0_]]) <{keepdims = 1 : si64, noop_with_empty_axes = 0 : si64}> : (tensor<1xf32>, tensor<1xi64>) -> tensor<1xf32>
// CHECK: return [[VAR_1_]] : tensor<1xf32>
// CHECK: }
}
Original file line number Diff line number Diff line change
Expand Up @@ -70,3 +70,16 @@ func.func @test_exceed_limit_reducemean_v13(%arg0 : tensor<32769x3x5x7xf32>) ->
// CHECK-LABEL: func @test_exceed_limit_reducemean_v13
// CHECK: "onnx.ReduceMeanV13"
}

// -----

// Scalar input: onnx.ReduceMeanV13 stays as-is (not lowered to ZHigh) per the
// CHECK lines.
func.func @test_reduce_mean_scalar(%arg0 : tensor<1xf32>) -> tensor<*xf32> {
%0 ="onnx.ReduceMeanV13"(%arg0) { axes = [0] } : (tensor<1xf32>)-> tensor<*xf32>
"func.return"(%0) : (tensor<*xf32>) -> ()

// CHECK-LABEL: func.func @test_reduce_mean_scalar
// CHECK-SAME: ([[PARAM_0_:%.+]]: tensor<1xf32>) -> tensor<1xf32> {
// CHECK: [[VAR_0_:%.+]] = "onnx.ReduceMeanV13"([[PARAM_0_]]) <{axes = [0], keepdims = 1 : si64}> : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: return [[VAR_0_]] : tensor<1xf32>
// CHECK: }
}
Loading
Loading