Skip to content

Commit 37cef0e

Browse files
committed
[Codegen] Migrate MapStoreOp to VectorizableOpInterface
The revision also deletes VectorizeIREELinalgExtOps, because it is already covered by the GenericVectorization pass. The tests from `vectorize_iree_linalg_ext_ops.mlir` are migrated to `generic_vectorization.mlir`. The map_store vectorization is only handled on LLVMGPU, which requires the DecomposeMapStoreOp pass, so the vectorization is enabled only on LLVMGPU for now -- which matches the current behavior in IREE. It is a step towards https://lists.lfaidata.foundation/g/iree-technical-discussion/message/15 Assisted-by: Claude ci-extra: test_torch Signed-off-by: hanhanW <hanhan0912@gmail.com>
1 parent c1ebfc1 commit 37cef0e

14 files changed

Lines changed: 233 additions & 265 deletions

File tree

compiler/src/iree/compiler/Codegen/Common/GenericVectorization.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include "iree/compiler/Codegen/Dialect/VectorExt/Transforms/Transforms.h"
1212
#include "iree/compiler/Codegen/Interfaces/VectorizableOpInterface.h"
1313
#include "iree/compiler/Codegen/Utils/Utils.h"
14+
#include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtOps.h"
1415
#include "llvm/Support/DebugLog.h"
1516
#include "mlir/Dialect/Affine/LoopUtils.h"
1617
#include "mlir/Dialect/Linalg/Transforms/Hoisting.h"
@@ -172,6 +173,9 @@ void GenericVectorizationPass::runOnOperation() {
172173
isa<linalg::PackOp, linalg::UnPackOp>(op)) {
173174
candidates.push_back(op);
174175
} else if (isa<VectorizableOpInterface>(op)) {
176+
if (!vectorizeMapStore && isa<IREE::LinalgExt::MapStoreOp>(op)) {
177+
return;
178+
}
175179
candidates.push_back(op);
176180
}
177181
});

compiler/src/iree/compiler/Codegen/Common/Passes.td

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -639,7 +639,9 @@ def GenericVectorizationPass :
639639
"Enable folding casting ops into vector.contract.">,
640640
Option<"maxVectorSize", "max-vector-size", "int64_t",
641641
/*default=*/"2147483647",
642-
"Max vector size allowed to avoid creating large vectors.">
642+
"Max vector size allowed to avoid creating large vectors.">,
643+
Option<"vectorizeMapStore", "vectorize-map-store", "bool", /*default=*/"false",
644+
"Enable vectorization of iree_linalg_ext.map_store operations via VectorizableOpInterface.">
643645
];
644646
let dependentDialects = [
645647
"::mlir::arith::ArithDialect"

compiler/src/iree/compiler/Codegen/Common/test/generic_vectorization.mlir

Lines changed: 143 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
// RUN: iree-opt --pass-pipeline="builtin.module(func.func(iree-codegen-generic-vectorization{enable-vector-masking=true}))" --split-input-file %s | FileCheck %s -check-prefix=CHECK-MASK
33
// RUN: iree-opt --pass-pipeline="builtin.module(func.func(iree-codegen-generic-vectorization{fold-cast-into-contract=true}))" --split-input-file %s | FileCheck %s -check-prefix=CHECK-FOLD
44
// RUN: iree-opt --pass-pipeline="builtin.module(func.func(iree-codegen-generic-vectorization{vectorize-to-transfer-gather=true}))" --split-input-file %s | FileCheck %s --check-prefix=CHECK-GATHER
5+
// RUN: iree-opt --pass-pipeline="builtin.module(func.func(iree-codegen-generic-vectorization{vectorize-map-store=true}))" --split-input-file %s | FileCheck %s --check-prefix=CHECK-MAP-STORE
56

67
func.func @matmul(%lhs: tensor<3x4xf16>, %rhs: tensor<4x5xf16>, %acc: tensor<3x5xf32>) -> tensor<3x5xf32> {
78
%result = linalg.matmul ins(%lhs, %rhs: tensor<3x4xf16>, tensor<4x5xf16>) outs(%acc: tensor<3x5xf32>) -> tensor<3x5xf32>
@@ -1001,3 +1002,145 @@ func.func @arg_compare_with_index_base(%input: tensor<4x128xf32>,
10011002
// CHECK: %[[WRITE_VAL:.+]] = vector.transfer_write %[[RESULT_VAL]], %[[OUT_VAL]]
10021003
// CHECK: %[[WRITE_IDX:.+]] = vector.transfer_write %[[RESULT_IDX]], %[[OUT_IDX]]
10031004
// CHECK: return %[[WRITE_VAL]], %[[WRITE_IDX]]
1005+
1006+
// -----
1007+
1008+
func.func @map_store(
1009+
%input: tensor<4x16x64xf32>, %output: tensor<4x16x64xf32>
1010+
) -> tensor<4x16x64xf32> {
1011+
%0 = iree_linalg_ext.map_store %input into %output {
1012+
^bb0(%idx0: index, %idx1: index, %idx2: index):
1013+
%mask = arith.constant true
1014+
iree_linalg_ext.yield %idx0, %idx1, %idx2, %mask : index, index, index, i1
1015+
} : tensor<4x16x64xf32> into tensor<4x16x64xf32> -> tensor<4x16x64xf32>
1016+
return %0 : tensor<4x16x64xf32>
1017+
}
1018+
// CHECK-MAP-STORE-LABEL: @map_store
1019+
// CHECK-MAP-STORE-SAME: %[[INPUT:[a-zA-Z0-9_]+]]
1020+
// CHECK-MAP-STORE-SAME: %[[OUTPUT:[a-zA-Z0-9_]+]]
1021+
// CHECK-MAP-STORE: %[[READ:.+]] = vector.transfer_read %[[INPUT]]
1022+
// CHECK-MAP-STORE: %[[MAP_SCATTER:.+]] = iree_linalg_ext.map_store
1023+
// CHECK-MAP-STORE-SAME: %[[READ]] into %[[OUTPUT]]
1024+
// CHECK-MAP-STORE: : vector<4x16x64xf32> into tensor<4x16x64xf32> -> tensor<4x16x64xf32>
1025+
// CHECK-MAP-STORE: return %[[MAP_SCATTER]] : tensor<4x16x64xf32>
1026+
1027+
// -----
1028+
1029+
func.func @no_vectorize_map_store_dynamic(
1030+
%input: tensor<?xf32>, %output: tensor<64xf32>
1031+
) -> tensor<64xf32> {
1032+
%0 = iree_linalg_ext.map_store %input into %output {
1033+
^bb0(%idx0: index):
1034+
%mask = arith.constant true
1035+
iree_linalg_ext.yield %idx0, %mask : index, i1
1036+
} : tensor<?xf32> into tensor<64xf32> -> tensor<64xf32>
1037+
return %0 : tensor<64xf32>
1038+
}
1039+
// CHECK-MAP-STORE-LABEL: @no_vectorize_map_store_dynamic
1040+
// CHECK-MAP-STORE-NOT: vector
1041+
1042+
// -----
1043+
1044+
func.func @map_store_f4_multiple_of_byte(
1045+
%input: tensor<2x2xf4E2M1FN>, %output: tensor<2x2xf4E2M1FN>
1046+
) -> tensor<2x2xf4E2M1FN> {
1047+
%0 = iree_linalg_ext.map_store %input into %output {
1048+
^bb0(%idx0: index, %idx1: index):
1049+
%mask = arith.constant true
1050+
iree_linalg_ext.yield %idx0, %idx1, %mask : index, index, i1
1051+
} : tensor<2x2xf4E2M1FN> into tensor<2x2xf4E2M1FN> -> tensor<2x2xf4E2M1FN>
1052+
return %0 : tensor<2x2xf4E2M1FN>
1053+
}
1054+
// CHECK-MAP-STORE-LABEL: @map_store_f4_multiple_of_byte
1055+
// CHECK-MAP-STORE-SAME: %[[INPUT:[a-zA-Z0-9_]+]]
1056+
// CHECK-MAP-STORE-SAME: %[[OUTPUT:[a-zA-Z0-9_]+]]
1057+
// CHECK-MAP-STORE: %[[READ:.+]] = vector.transfer_read %[[INPUT]]
1058+
// CHECK-MAP-STORE: %[[MAP_SCATTER:.+]] = iree_linalg_ext.map_store
1059+
// CHECK-MAP-STORE-SAME: %[[READ]] into %[[OUTPUT]]
1060+
// CHECK-MAP-STORE: : vector<2x2xf4E2M1FN> into tensor<2x2xf4E2M1FN> -> tensor<2x2xf4E2M1FN>
1061+
// CHECK-MAP-STORE: return %[[MAP_SCATTER]] : tensor<2x2xf4E2M1FN>
1062+
1063+
// -----
1064+
1065+
func.func @map_store_f4_not_multiple_of_byte(
1066+
%input: tensor<2x1xf4E2M1FN>, %output: tensor<2x2xf4E2M1FN>
1067+
) -> tensor<2x2xf4E2M1FN> {
1068+
%0 = iree_linalg_ext.map_store %input into %output {
1069+
^bb0(%idx0: index, %idx1: index):
1070+
%mask = arith.constant true
1071+
iree_linalg_ext.yield %idx0, %idx1, %mask : index, index, i1
1072+
} : tensor<2x1xf4E2M1FN> into tensor<2x2xf4E2M1FN> -> tensor<2x2xf4E2M1FN>
1073+
return %0 : tensor<2x2xf4E2M1FN>
1074+
}
1075+
// CHECK-MAP-STORE-LABEL: @map_store_f4_not_multiple_of_byte
1076+
// CHECK-MAP-STORE-NOT: vector
1077+
1078+
// -----
1079+
1080+
func.func @map_store_f4_unit_stride(
1081+
%input: tensor<2x2xf4E2M1FN>, %output: tensor<2x4xf4E2M1FN>
1082+
) -> tensor<2x4xf4E2M1FN> {
1083+
%0 = iree_linalg_ext.map_store %input into %output {
1084+
^bb0(%idx0: index, %idx1: index):
1085+
%mask = arith.constant true
1086+
%1 = affine.apply affine_map<(d0) -> (d0 + 2)>(%idx1)
1087+
iree_linalg_ext.yield %idx0, %1, %mask : index, index, i1
1088+
} : tensor<2x2xf4E2M1FN> into tensor<2x4xf4E2M1FN> -> tensor<2x4xf4E2M1FN>
1089+
return %0 : tensor<2x4xf4E2M1FN>
1090+
}
1091+
// CHECK-MAP-STORE-LABEL: @map_store_f4_unit_stride
1092+
// CHECK-MAP-STORE-SAME: %[[INPUT:[a-zA-Z0-9_]+]]
1093+
// CHECK-MAP-STORE-SAME: %[[OUTPUT:[a-zA-Z0-9_]+]]
1094+
// CHECK-MAP-STORE: %[[READ:.+]] = vector.transfer_read %[[INPUT]]
1095+
// CHECK-MAP-STORE: %[[MAP_SCATTER:.+]] = iree_linalg_ext.map_store
1096+
// CHECK-MAP-STORE-SAME: %[[READ]] into %[[OUTPUT]]
1097+
// CHECK-MAP-STORE: : vector<2x2xf4E2M1FN> into tensor<2x4xf4E2M1FN> -> tensor<2x4xf4E2M1FN>
1098+
// CHECK-MAP-STORE: return %[[MAP_SCATTER]] : tensor<2x4xf4E2M1FN>
1099+
1100+
// -----
1101+
1102+
func.func @map_store_f4_not_unit_stride(
1103+
%input: tensor<2x2xf4E2M1FN>, %output: tensor<2x4xf4E2M1FN>
1104+
) -> tensor<2x4xf4E2M1FN> {
1105+
%0 = iree_linalg_ext.map_store %input into %output {
1106+
^bb0(%idx0: index, %idx1: index):
1107+
%mask = arith.constant true
1108+
%1 = affine.apply affine_map<(d0) -> (d0 * 2)>(%idx1)
1109+
iree_linalg_ext.yield %idx0, %1, %mask : index, index, i1
1110+
} : tensor<2x2xf4E2M1FN> into tensor<2x4xf4E2M1FN> -> tensor<2x4xf4E2M1FN>
1111+
return %0 : tensor<2x4xf4E2M1FN>
1112+
}
1113+
// CHECK-MAP-STORE-LABEL: @map_store_f4_not_unit_stride
1114+
// CHECK-MAP-STORE-NOT: vector
1115+
1116+
// -----
1117+
1118+
func.func @map_store_f4_not_index_applied_multiple_times(
1119+
%input: tensor<2x2xf4E2M1FN>, %output: tensor<2x4xf4E2M1FN>
1120+
) -> tensor<2x4xf4E2M1FN> {
1121+
%0 = iree_linalg_ext.map_store %input into %output {
1122+
^bb0(%idx0: index, %idx1: index):
1123+
%mask = arith.constant true
1124+
%1 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%idx1, %idx1)
1125+
iree_linalg_ext.yield %idx0, %1, %mask : index, index, i1
1126+
} : tensor<2x2xf4E2M1FN> into tensor<2x4xf4E2M1FN> -> tensor<2x4xf4E2M1FN>
1127+
return %0 : tensor<2x4xf4E2M1FN>
1128+
}
1129+
// CHECK-MAP-STORE-LABEL: @map_store_f4_not_index_applied_multiple_times
1130+
// CHECK-MAP-STORE-NOT: vector
1131+
1132+
// -----
1133+
1134+
func.func @map_store_f4_mask_depends_on_inner_index(
1135+
%input: tensor<2x2xf4E2M1FN>, %output: tensor<2x4xf4E2M1FN>
1136+
) -> tensor<2x4xf4E2M1FN> {
1137+
%0 = iree_linalg_ext.map_store %input into %output {
1138+
^bb0(%idx0: index, %idx1: index):
1139+
%c1 = arith.constant 1 : index
1140+
%mask = arith.cmpi uge, %idx1, %c1 : index
1141+
iree_linalg_ext.yield %idx0, %idx1, %mask : index, index, i1
1142+
} : tensor<2x2xf4E2M1FN> into tensor<2x4xf4E2M1FN> -> tensor<2x4xf4E2M1FN>
1143+
return %0 : tensor<2x4xf4E2M1FN>
1144+
}
1145+
// CHECK-MAP-STORE-LABEL: @map_store_f4_mask_depends_on_inner_index
1146+
// CHECK-MAP-STORE-NOT: vector

compiler/src/iree/compiler/Codegen/Interfaces/BUILD.bazel

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -233,6 +233,8 @@ iree_compiler_cc_library(
233233
":VectorizableOpInterfaceGen",
234234
"//compiler/src/iree/compiler/Codegen/Dialect/VectorExt/IR:IREEVectorExtDialect",
235235
"//compiler/src/iree/compiler/Dialect/LinalgExt/IR",
236+
"//compiler/src/iree/compiler/Utils",
237+
"@llvm-project//mlir:Analysis",
236238
"@llvm-project//mlir:ArithDialect",
237239
"@llvm-project//mlir:IR",
238240
"@llvm-project//mlir:TensorDialect",

compiler/src/iree/compiler/Codegen/Interfaces/CMakeLists.txt

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -169,13 +169,15 @@ iree_cc_library(
169169
"VectorizableOpInterface.cpp"
170170
DEPS
171171
::VectorizableOpInterfaceGen
172+
MLIRAnalysis
172173
MLIRArithDialect
173174
MLIRIR
174175
MLIRTensorDialect
175176
MLIRUBDialect
176177
MLIRVectorDialect
177178
iree::compiler::Codegen::Dialect::VectorExt::IR::IREEVectorExtDialect
178179
iree::compiler::Dialect::LinalgExt::IR
180+
iree::compiler::Utils
179181
PUBLIC
180182
)
181183

compiler/src/iree/compiler/Codegen/Interfaces/VectorizableOpInterface.cpp

Lines changed: 78 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@
1010
#include "iree/compiler/Codegen/Dialect/VectorExt/IR/VectorExtOps.h"
1111
#include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtDialect.h"
1212
#include "iree/compiler/Dialect/LinalgExt/IR/LinalgExtOps.h"
13+
#include "iree/compiler/Utils/Indexing.h"
14+
#include "mlir/Analysis/SliceAnalysis.h"
1315
#include "mlir/Dialect/Arith/IR/Arith.h"
1416
#include "mlir/Dialect/Tensor/IR/Tensor.h"
1517
#include "mlir/Dialect/UB/IR/UBOps.h"
@@ -385,16 +387,85 @@ struct ToLayoutOpVectorizationModel
385387
}
386388
};
387389

390+
struct MapStoreOpVectorizationModel
391+
: public VectorizableOpInterface::ExternalModel<
392+
MapStoreOpVectorizationModel, IREE::LinalgExt::MapStoreOp> {
393+
394+
bool isVectorizable(Operation *op, ArrayRef<int64_t> vectorSizes,
395+
ArrayRef<bool> scalableDims,
396+
DictionaryAttr options) const {
397+
auto mapStoreOp = cast<IREE::LinalgExt::MapStoreOp>(op);
398+
if (mapStoreOp.isVectorized()) {
399+
return false;
400+
}
401+
ShapedType inputType = mapStoreOp.getInputType();
402+
if (!inputType.hasStaticShape()) {
403+
return false;
404+
}
405+
const int64_t innerSize = inputType.getShape()[inputType.getRank() - 1];
406+
const int64_t bitWidth = inputType.getElementTypeBitWidth();
407+
if ((innerSize * bitWidth % 8) != 0) {
408+
return false;
409+
}
410+
// In case of a sub-byte bitwidth, we check that there is a contiguous copy
411+
// on the inner dimension that is a multiple of a byte. Note that the mask
412+
// shouldn't depend on the inner index for this.
413+
if (bitWidth < 8) {
414+
// First check that the mask is not the forward slice of the inner index.
415+
Value innermostInputIdx =
416+
mapStoreOp.getInputIndex(mapStoreOp.getInputRank() - 1);
417+
SetVector<Operation *> slice;
418+
getForwardSlice(innermostInputIdx, &slice);
419+
Operation *maskOp = mapStoreOp.getMask().getDefiningOp();
420+
if (maskOp && slice.contains(maskOp)) {
421+
return false;
422+
}
423+
// Next check that the inner index of the yield is a unit function of
424+
// the inner input index.
425+
Value innermostOutputIdx =
426+
mapStoreOp.getOutputIndex(mapStoreOp.getOutputRank() - 1);
427+
if (!isUnitFunctionOf(innermostOutputIdx, innermostInputIdx)) {
428+
return false;
429+
}
430+
}
431+
return true;
432+
}
433+
434+
FailureOr<SmallVector<Value>> vectorize(Operation *op, RewriterBase &rewriter,
435+
ArrayRef<int64_t> vectorSizes,
436+
ArrayRef<bool> scalableDims,
437+
DictionaryAttr options) const {
438+
auto mapStoreOp = cast<IREE::LinalgExt::MapStoreOp>(op);
439+
Location loc = mapStoreOp.getLoc();
440+
rewriter.setInsertionPoint(mapStoreOp);
441+
ShapedType inputType = mapStoreOp.getInputType();
442+
Value zero = arith::ConstantIndexOp::create(rewriter, loc, 0);
443+
SmallVector<Value> zeros(inputType.getRank(), zero);
444+
auto inputVectorType =
445+
VectorType::get(inputType.getShape(), inputType.getElementType());
446+
Value inputVector = vector::TransferReadOp::create(
447+
rewriter, loc, inputVectorType, mapStoreOp.getInput(),
448+
/*indices=*/zeros,
449+
/*padding=*/std::nullopt);
450+
auto vectorizedMapStoreOp =
451+
clone(rewriter, mapStoreOp, mapStoreOp.getResultTypes(),
452+
{inputVector, mapStoreOp.getOutput()});
453+
return SmallVector<Value>(vectorizedMapStoreOp->getResults());
454+
}
455+
};
456+
388457
} // namespace
389458

390459
void registerVectorizableOpInterfaceExternalModels(DialectRegistry &registry) {
391-
registry.addExtension(
392-
+[](MLIRContext *ctx, IREE::LinalgExt::IREELinalgExtDialect *dialect) {
393-
IREE::LinalgExt::GatherOp::attachInterface<GatherOpVectorizationModel>(
394-
*ctx);
395-
IREE::LinalgExt::ArgCompareOp::attachInterface<
396-
ArgCompareOpVectorizationModel>(*ctx);
397-
});
460+
registry.addExtension(+[](MLIRContext *ctx,
461+
IREE::LinalgExt::IREELinalgExtDialect *dialect) {
462+
IREE::LinalgExt::GatherOp::attachInterface<GatherOpVectorizationModel>(
463+
*ctx);
464+
IREE::LinalgExt::ArgCompareOp::attachInterface<
465+
ArgCompareOpVectorizationModel>(*ctx);
466+
IREE::LinalgExt::MapStoreOp::attachInterface<MapStoreOpVectorizationModel>(
467+
*ctx);
468+
});
398469
registry.addExtension(+[](MLIRContext *ctx,
399470
IREE::VectorExt::IREEVectorExtDialect *dialect) {
400471
IREE::VectorExt::ToLayoutOp::attachInterface<ToLayoutOpVectorizationModel>(

compiler/src/iree/compiler/Codegen/LLVMGPU/Passes.cpp

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -299,6 +299,7 @@ static void addGPUVectorizationPasses(OpPassManager &funcPassManager,
299299
options.enableCleanup = false;
300300
options.foldCastIntoContract = true;
301301
options.enableVectorMasking = enableMasking;
302+
options.vectorizeMapStore = true;
302303
funcPassManager.addPass(createGenericVectorizationPass(options));
303304
funcPassManager.addPass(createCanonicalizerPass());
304305
funcPassManager.addPass(createCSEPass());
@@ -573,8 +574,6 @@ void addGPUTileAndFusePassPipeline(OpPassManager &funcPassManager,
573574
funcPassManager.addPass(createGPUCombineValueSemanticBarriersPass());
574575

575576
// Step 6. Lower special ops and vectorize.
576-
funcPassManager.addPass(
577-
IREE::LinalgExt::createVectorizeIREELinalgExtOpsPass());
578577
funcPassManager.addPass(IREE::GPU::createVectorizeIREEGPUOpsPass());
579578
addGPUVectorizationPasses(funcPassManager, /*vectorizeCopies=*/false,
580579
/*enableMasking=*/true,
@@ -836,8 +835,6 @@ void addGPUVectorDistributePassPipeline(OpPassManager &funcPassManager,
836835
funcPassManager.addPass(tensor::createFoldTensorSubsetOpsPass());
837836

838837
// Linalg -> Vector
839-
funcPassManager.addPass(
840-
IREE::LinalgExt::createVectorizeIREELinalgExtOpsPass());
841838
addGPUVectorizationPasses(funcPassManager, /*vectorizeCopies=*/true,
842839
/*enableMasking=*/true);
843840

compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/BUILD.bazel

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,6 @@ iree_compiler_cc_library(
4848
"TestReshapeFusion.cpp",
4949
"TileAttention.cpp",
5050
"TransposeFusion.cpp",
51-
"VectorizeIREELinalgExtOps.cpp",
5251
],
5352
hdrs = [
5453
"LoopMappingUtils.h",

compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/CMakeLists.txt

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@ iree_cc_library(
4646
"TestReshapeFusion.cpp"
4747
"TileAttention.cpp"
4848
"TransposeFusion.cpp"
49-
"VectorizeIREELinalgExtOps.cpp"
5049
DEPS
5150
::PassesIncGen
5251
LLVMSupport

compiler/src/iree/compiler/Dialect/LinalgExt/Transforms/Passes.td

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -146,13 +146,4 @@ def TestReshapeFusionPass :
146146
let summary = "Test reshape fusion patterns";
147147
}
148148

149-
def VectorizeIREELinalgExtOpsPass :
150-
InterfacePass<"iree-linalg-ext-vectorize-ops", "mlir::FunctionOpInterface"> {
151-
let summary = "Convert linalg_ext ops into their vector form.";
152-
let dependentDialects = [
153-
"::mlir::vector::VectorDialect",
154-
"::mlir::arith::ArithDialect"
155-
];
156-
}
157-
158149
#endif // IREE_DIALECT_LINALGEXT_PASSES

0 commit comments

Comments
 (0)