//===- Bufferize.cpp - Bufferization of linalg ops ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Transforms/Bufferize.h"
#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Dialect/StandardOps/Utils/Utils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"

using namespace ::mlir;
using namespace ::mlir::linalg;

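/// Allocate a buffer with the same type and dynamic sizes as `memref` and copy
/// the contents of `memref` into it. Deallocation of the new buffer is left to
/// a later buffer placement/deallocation pass.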
static Value cloneMemref(Location loc, Value memref, OpBuilder &b) {
  auto memrefType = memref.getType().cast<MemRefType>();
  auto alloc =
      b.create<AllocOp>(loc, memrefType, getDynOperands(loc, memref, b));
  b.create<linalg::CopyOp>(loc, memref, alloc);
  return alloc;
}

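/// Allocate one buffer per tensor result of `linalgOp` and append it to
/// `resultBuffers`: output buffers whose value is read by the payload are
/// cloned, buffers that already stem from an allocation are reused, and all
/// other results get a fresh (static or dynamic) allocation. Fails on results
/// that are not ranked tensors.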
static LogicalResult
allocateBuffersForResults(Location loc, LinalgOp linalgOp,
                          linalg::GenericOpAdaptor &adaptor,
                          SmallVectorImpl<Value> &resultBuffers, OpBuilder &b) {
  // Allocate a buffer for every tensor result.
  assert(linalgOp.getNumOutputs() == linalgOp->getNumResults() &&
         "expected one tensor result per output operand");
  for (auto en : llvm::enumerate(linalgOp->getResultTypes())) {
    size_t resultIndex = en.index();
    Type resultType = en.value();

    auto tensorType = resultType.dyn_cast<RankedTensorType>();
    if (!tensorType) {
      linalgOp.emitOpError()
          << "tensor to buffer conversion expects ranked tensor results";
      return failure();
    }
    auto tensorShape = tensorType.getShape();
    auto memrefType = MemRefType::get(tensorShape, tensorType.getElementType());
    Value resultTensor = adaptor.outputs()[resultIndex];

    // Clone output buffers whose value is actually read by the payload, so
    // that the original contents remain available without mutating the source.
    if (linalgOp.payloadUsesValueFromOutputOperandIndex(resultIndex)) {
      resultBuffers.push_back(cloneMemref(loc, resultTensor, b));
      continue;
    }

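    // If the output tensor is already backed by an allocation (e.g. a
    // bufferized init_tensor) and its value is not read, write into it
    // directly instead of allocating a new buffer.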
    if (resultTensor.getDefiningOp<AllocOp>()) {
      resultBuffers.push_back(resultTensor);
      continue;
    }
    // Allocate buffers for statically-shaped results.
    if (memrefType.hasStaticShape()) {
      resultBuffers.push_back(b.create<AllocOp>(loc, memrefType));
      continue;
    }

    resultBuffers.push_back(b.create<AllocOp>(
        loc, memrefType, getDynOperands(loc, resultTensor, b)));
  }
  return success();
}

/// Specialization for `linalg::GenericOp` and `linalg::IndexedGenericOp`: a
/// pattern that converts generic Linalg operations working on tensors into
/// operations working on buffers. The BufferPlacement pass should be run
/// afterwards to move the Alloc operations to the correct positions and to
/// insert the missing Dealloc operations.
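///
/// Schematically (illustrative IR; indexing maps, iterator types and the
/// payload region are abbreviated), a tensor-based op such as
/// ```
///   %res = linalg.generic ... ins(%in : tensor<4xf32>)
///            outs(%init : tensor<4xf32>) { ... } -> tensor<4xf32>
/// ```
/// is rewritten to work on the allocated output buffers:
/// ```
///   linalg.generic ... ins(%in : memref<4xf32>)
///     outs(%buf : memref<4xf32>) { ... }
/// ```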
template <typename GenericOpTy>
static void
finalizeBufferAllocationForGenericOp(ConversionPatternRewriter &rewriter,
                                     GenericOpTy genericOp, ValueRange inputs,
                                     ValueRange outputs) {
  // Generate a new linalg operation that works on buffers.
  auto newGenericOp = rewriter.create<GenericOpTy>(
      genericOp.getLoc(),
      /*resultTensorTypes=*/llvm::None,
      /*inputs=*/inputs,
      /*outputs=*/outputs, genericOp.indexing_maps(),
      genericOp.iterator_types(), genericOp.docAttr(),
      genericOp.library_callAttr(), genericOp.sparseAttr());

  // Create a new block in the region of the new Generic Op.
  Block *oldBlock = genericOp.getBody();
  Region &newRegion = newGenericOp.region();
  Block *newBlock = rewriter.createBlock(&newRegion, newRegion.begin(),
                                         oldBlock->getArgumentTypes());

  // Clone the body of the old block to the new block.
  BlockAndValueMapping mapping;
  mapping.map(oldBlock->getArguments(), newBlock->getArguments());

  OpBuilder::InsertionGuard guard(rewriter);
  rewriter.setInsertionPointToEnd(newBlock);
  for (auto &op : oldBlock->getOperations()) {
    Operation *clonedOp = rewriter.clone(op, mapping);
    mapping.map(op.getResults(), clonedOp->getResults());
  }

  // Replace the results of the old op with the new output buffers.
  rewriter.replaceOp(genericOp, outputs);
}

/// Specialization for all other `linalg::LinalgOp`.
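/// Named structured ops are simply cloned onto the converted input and output
/// buffers, with no result types; any remaining non-shaped operands (e.g.
/// scalars) are forwarded unchanged.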
static void finalizeBufferAllocation(ConversionPatternRewriter &rewriter,
                                     linalg::LinalgOp linalgOp,
                                     ValueRange inputs, ValueRange outputs) {
  assert(!isa<linalg::GenericOp>(linalgOp.getOperation()));
  assert(!isa<linalg::IndexedGenericOp>(linalgOp.getOperation()));
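  // Rebuild the operand list in the expected order: inputs, then the new
  // output buffers, then the remaining non-shaped operands.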
  SmallVector<Value, 8> newOperands = inputs;
  newOperands.append(outputs.begin(), outputs.end());
  auto otherOperands = linalgOp.getAssumedNonShapedOperands();
  newOperands.append(otherOperands.begin(), otherOperands.end());
  linalgOp.clone(rewriter, linalgOp.getLoc(),
                 /*resultTypes=*/ArrayRef<Type>{}, newOperands);
  // Replace the results of the old op with the new output buffers.
  rewriter.replaceOp(linalgOp, outputs);
}

//===----------------------------------------------------------------------===//
// Bufferization patterns.
//===----------------------------------------------------------------------===//

namespace {

/// Convert `linalg.init_tensor` to an `alloc` of the converted memref type,
/// forwarding the dynamic size operands.
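/// For example (illustrative IR):
/// ```
///   %0 = linalg.init_tensor [%d, 42] : tensor<?x42xf32>
/// ```
/// becomes
/// ```
///   %0 = alloc(%d) : memref<?x42xf32>
/// ```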
class BufferizeInitTensorOp : public OpConversionPattern<InitTensorOp> {
public:
  using OpConversionPattern<InitTensorOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(InitTensorOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    linalg::InitTensorOpAdaptor adaptor(operands, op->getAttrDictionary());
    rewriter.replaceOpWithNewOp<AllocOp>(
        op, getTypeConverter()->convertType(op.getType()).cast<MemRefType>(),
        adaptor.sizes());
    return success();
  }
};

/// Generic conversion pattern that matches any LinalgOp. This avoids
/// instantiating one pattern per LinalgOp.
class BufferizeAnyLinalgOp : public ConversionPattern {
public:
  BufferizeAnyLinalgOp(TypeConverter &typeConverter)
      : ConversionPattern(/*benefit=*/1, typeConverter, MatchAnyOpTypeTag()) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    LinalgOp linalgOp = dyn_cast<linalg::LinalgOp>(op);
    if (!linalgOp)
      return failure();

    // GenericOpAdaptor is (ab)used here even for non-generic ops: it exposes
    // the `inputs` and `outputs` operand groups shared by all structured ops.
    // TODO: Manually create an adaptor that captures inputs and outputs for
    // all linalg::LinalgOp interface ops.
    linalg::GenericOpAdaptor adaptor(operands, op->getAttrDictionary());

    Location loc = linalgOp.getLoc();
    SmallVector<Value, 2> newOutputBuffers;

    if (failed(allocateBuffersForResults(loc, linalgOp, adaptor,
                                         newOutputBuffers, rewriter))) {
      linalgOp.emitOpError()
          << "failed to allocate buffers for tensor results";
      return failure();
    }

    // Delegate to the linalg generic pattern.
    if (auto genericOp = dyn_cast<linalg::GenericOp>(op)) {
      finalizeBufferAllocationForGenericOp<GenericOp>(
          rewriter, genericOp, adaptor.inputs(), newOutputBuffers);
      return success();
    }

    // Delegate to the linalg indexed generic pattern.
    if (auto genericOp = dyn_cast<linalg::IndexedGenericOp>(op)) {
      finalizeBufferAllocationForGenericOp<IndexedGenericOp>(
          rewriter, genericOp, adaptor.inputs(), newOutputBuffers);
      return success();
    }

    finalizeBufferAllocation(rewriter, linalgOp, adaptor.inputs(),
                             newOutputBuffers);
    return success();
  }
};

/// Convert `subtensor %t [offsets][sizes][strides] -> %st` to an alloc + copy
/// pattern.
/// ```
///   %a = alloc(sizes)
///   %sv = subview %source [offsets][sizes][strides]
///   linalg_copy(%sv, %a)
/// ```
///
/// This pattern is arguably a std pattern once linalg::CopyOp becomes
/// std::CopyOp.
class SubTensorOpConverter : public OpConversionPattern<SubTensorOp> {
public:
  using OpConversionPattern<SubTensorOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(SubTensorOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    SubTensorOpAdaptor adaptor(operands, op->getAttrDictionary());
    Value sourceMemref = adaptor.source();
    assert(sourceMemref.getType().isa<MemRefType>());

    MemRefType subviewMemRefType =
        getTypeConverter()->convertType(op.getType()).cast<MemRefType>();
    // op.sizes() captures exactly the dynamic alloc operands matching
    // subviewMemRefType, thanks to subview/subtensor canonicalization and
    // verification.
    Value alloc =
        rewriter.create<AllocOp>(op.getLoc(), subviewMemRefType, op.sizes());
    Value subView = rewriter.create<SubViewOp>(
        op.getLoc(), sourceMemref, op.getMixedOffsets(), op.getMixedSizes(),
        op.getMixedStrides());
    rewriter.create<linalg::CopyOp>(op.getLoc(), subView, alloc);
    rewriter.replaceOp(op, alloc);
    return success();
  }
};

/// Convert `subtensor_insert %source into %dest [offsets][sizes][strides] ->
/// %t` to a tensor_to_memref + subview + copy + tensor_load pattern.
/// tensor_to_memref and tensor_load are inserted automatically by the
/// conversion infra:
/// ```
///   %sv = subview %dest [offsets][sizes][strides]
///   linalg_copy(%source, %sv)
///   // replace with %dest
/// ```
///
/// This pattern is arguably a std pattern once linalg::CopyOp becomes
/// std::CopyOp.
class SubTensorInsertOpConverter
    : public OpConversionPattern<SubTensorInsertOp> {
public:
  using OpConversionPattern<SubTensorInsertOp>::OpConversionPattern;

  LogicalResult
  matchAndRewrite(SubTensorInsertOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    SubTensorInsertOpAdaptor adaptor(operands, op->getAttrDictionary());
    Value sourceMemRef = adaptor.source();
    assert(sourceMemRef.getType().isa<MemRefType>());

    // For now, be conservative and copy the converted dest memref. In general,
    // it could be aliased or point into constant memory, so mutating it in
    // place would lead to miscompilations.
    Value destMemRef = cloneMemref(op.getLoc(), adaptor.dest(), rewriter);
    assert(destMemRef.getType().isa<MemRefType>());

    // Take a subview to copy the small memref.
    Value subview = rewriter.create<SubViewOp>(
        op.getLoc(), destMemRef, op.getMixedOffsets(), op.getMixedSizes(),
        op.getMixedStrides());
    // Copy the small memref.
    rewriter.create<linalg::CopyOp>(op.getLoc(), sourceMemRef, subview);
    rewriter.replaceOp(op, destMemRef);
    return success();
  }
};
} // namespace

namespace {
/// Converts Linalg operations that work on tensor-type operands or results to
/// work on buffers.
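///
/// This pass is typically run as one step of a larger bufferization pipeline
/// (e.g. alongside -std-bufferize and -func-bufferize); it only rewrites
/// Linalg ops and the subtensor/subtensor_insert/init_tensor ops handled by
/// the patterns above.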
struct LinalgBufferizePass : public LinalgBufferizeBase<LinalgBufferizePass> {
  void runOnOperation() override {
    MLIRContext &context = getContext();
    ConversionTarget target(context);
    BufferizeTypeConverter typeConverter;

    // Mark all Standard and Affine operations legal.
    target.addLegalDialect<AffineDialect, StandardOpsDialect>();
    // Mark the tensor ops handled by the patterns in this file illegal so that
    // they are always converted.
    target.addIllegalOp<InitTensorOp, SubTensorOp, SubTensorInsertOp>();

    // Mark all Linalg operations illegal as long as they work on tensors.
    auto isLegalOperation = [&](Operation *op) {
      return typeConverter.isLegal(op);
    };
    target.addDynamicallyLegalDialect<linalg::LinalgDialect>(isLegalOperation);
    target.addDynamicallyLegalOp<ConstantOp>(isLegalOperation);

    OwningRewritePatternList patterns;
    populateLinalgBufferizePatterns(&context, typeConverter, patterns);
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};
} // end anonymous namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgBufferizePass() {
  return std::make_unique<LinalgBufferizePass>();
}

void mlir::linalg::populateLinalgBufferizePatterns(
    MLIRContext *context, BufferizeTypeConverter &typeConverter,
    OwningRewritePatternList &patterns) {
  patterns.insert<BufferizeAnyLinalgOp>(typeConverter);
  // TODO: Drop this once tensor constants work in standard.
  // clang-format off
  patterns.insert<
      BufferizeInitTensorOp,
      SubTensorOpConverter,
      SubTensorInsertOpConverter
    >(typeConverter, context);
  // clang-format on
}