//===- TestLinalgFusionTransforms.cpp - Test Linalg fusion patterns -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements logic for testing Linalg fusion patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/Transforms/Transforms.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;
using namespace mlir::linalg;

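// Greedily fuses producers into the operands of every Linalg op in `f`.
// Buffer operands are fused through the dependence graph and the original
// producer is marked for erasure; tensor operands are tiled-and-fused and the
// original producer is left for DCE. Returns success if at least one fusion
// was applied.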
static LogicalResult fuseLinalgOpsGreedily(func::FuncOp f) {
  OpBuilder b(f);
  DenseSet<Operation *> eraseSet;

  // Save original Linalg ops; we only want to make a pass over those.
  SmallVector<LinalgOp, 8> linalgOps;
  f.walk([&](LinalgOp op) {
    // TODO: support multi-results.
    if (op->getNumResults() <= 1)
      linalgOps.push_back(op);
  });

  // Tile-and-fuse for tensor inputs (TODO: all tensor operands).
  bool changed = false;
  for (LinalgOp linalgOp : llvm::reverse(linalgOps)) {
    for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
      if (opOperand->get().getType().isa<MemRefType>()) {
        // TODO: LinalgDependenceGraph should be able to update itself.
        // The current naive and expensive reconstruction of the graph should
        // be removed.
        linalg::Aliases aliases;
        linalg::LinalgDependenceGraph graph(aliases, linalgOps);
        auto info = fuseProducerOfBuffer(b, *opOperand, graph);
        if (failed(info))
          continue;
        auto *originalOp = info->originalProducer.getOperation();
        eraseSet.insert(originalOp);
        auto *originalOpInLinalgOpsVector =
            std::find(linalgOps.begin(), linalgOps.end(), originalOp);
        *originalOpInLinalgOpsVector = info->fusedProducer.getOperation();
        changed = true;
      } else if (opOperand->get().getType().isa<RankedTensorType>()) {
        // Tile and fuse tensor inputs only; skip output tensor operands.
        if (opOperand->getOperandNumber() >= linalgOp.getNumInputs())
          continue;
        auto info = fuseProducerOfTensor(b, *opOperand);
        if (failed(info))
          continue;
        auto *originalOp = info->originalProducer.getOperation();
        auto *originalOpInLinalgOpsVector =
            std::find(linalgOps.begin(), linalgOps.end(), originalOp);
        *originalOpInLinalgOpsVector = info->fusedProducer.getOperation();
        // Don't mark for erasure in the tensor case; let DCE handle this.
        changed = true;
      }
    }
  }
  // The `fuseProducerOfBuffer` function performs structural checks and in
  // particular that no covering read or write exists between the consumer and
  // the producer. As a consequence, the only fusions that may occur preserve
  // subsequent dependences and are guaranteed by construction to produce the
  // whole view. We may thus erase the producer once it is fused.
  for (auto *e : eraseSet)
    e->erase();

  return changed ? success() : failure();
}


namespace {
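// Test pass that interleaves greedy Linalg fusion with canonicalization until
// no further fusion opportunities remain.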
struct TestLinalgGreedyFusion
    : public PassWrapper<TestLinalgGreedyFusion, OperationPass<func::FuncOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgGreedyFusion)

  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<AffineDialect, linalg::LinalgDialect, memref::MemRefDialect,
                    scf::SCFDialect>();
  }
  StringRef getArgument() const final { return "test-linalg-greedy-fusion"; }
  StringRef getDescription() const final {
    return "Test Linalg fusion by applying a greedy test transformation.";
  }
  void runOnOperation() override {
    MLIRContext *context = &getContext();
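    // Cleanup patterns applied between fusion rounds: Linalg tiling
    // canonicalizations, swapping extract_slice with pad operations, and
    // scf.for canonicalizations.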
    RewritePatternSet patterns =
        linalg::getLinalgTilingCanonicalizationPatterns(context);
    patterns.add<ExtractSliceOfPadTensorSwapPattern>(context);
    scf::populateSCFForLoopCanonicalizationPatterns(patterns);
    FrozenRewritePatternSet frozenPatterns(std::move(patterns));
    OpPassManager pm(func::FuncOp::getOperationName());
    pm.addPass(createLoopInvariantCodeMotionPass());
    pm.addPass(createCanonicalizerPass());
    pm.addPass(createCSEPass());
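    // Alternate between cleanup (greedy pattern application followed by the
    // LICM/canonicalize/CSE pipeline) and fusion until fusion makes no more
    // progress.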
    do {
      (void)applyPatternsAndFoldGreedily(getOperation(), frozenPatterns);
      if (failed(runPipeline(pm, getOperation())))
        this->signalPassFailure();
    } while (succeeded(fuseLinalgOpsGreedily(getOperation())));
  }
};
} // namespace

namespace mlir {
namespace test {
void registerTestLinalgGreedyFusion() {
  PassRegistration<TestLinalgGreedyFusion>();
}

} // namespace test
} // namespace mlir