//===- Promotion.cpp - Implementation of linalg Promotion ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/FoldedIntrinsics.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/FoldUtils.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/Support/CommandLine.h"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

using folded_affine_min = FoldedValueBuilder<AffineMinOp>;
using folded_linalg_range = FoldedValueBuilder<linalg::RangeOp>;
using folded_std_dim = FoldedValueBuilder<DimOp>;
using folded_std_subview = FoldedValueBuilder<SubViewOp>;
using folded_std_view = FoldedValueBuilder<ViewOp>;

#define DEBUG_TYPE "linalg-promotion"

/// If `size` comes from an AffineMinOp and one of the results of the
/// AffineMinOp's affine map is a constant, return a new Value set to the
/// smallest such constant. If `size` is already a non-negative constant
/// index, return an equivalent constant. Otherwise return `size`.
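///
/// For example (schematic, purely illustrative IR), if `size` is defined by
///   %size = affine.min affine_map<(d0) -> (4, d0)>(%i)
/// the smallest constant result of the map is 4, so a fresh
///   %c4 = constant 4 : index
/// is created and returned instead of %size.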
static Value extractSmallestConstantBoundingSize(OpBuilder &b, Location loc,
                                                 Value size) {
  Optional<int64_t> boundingConst = {};
  if (auto affineMinOp = size.getDefiningOp<AffineMinOp>()) {
    for (auto e : affineMinOp.getAffineMap().getResults())
      if (auto cst = e.dyn_cast<AffineConstantExpr>())
        boundingConst = boundingConst
                            ? std::min(boundingConst.getValue(), cst.getValue())
                            : cst.getValue();
  } else if (auto constIndexOp = size.getDefiningOp<ConstantOp>()) {
    if (constIndexOp.getType().isa<IndexType>())
      boundingConst = constIndexOp.value().cast<IntegerAttr>().getInt();
  }
  return boundingConst && *boundingConst >= 0
             ? b.create<ConstantIndexOp>(loc, *boundingConst)
             : size;
}

/// Allocate a new buffer of `size`. If `dynamicBuffers` is true, allocate
/// exactly the size needed; otherwise try to allocate a static bounding box.
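///
/// For instance (schematic, illustrative IR), with f32 elements (4 bytes) and
/// a constant `size` of 16, the static path boils down to
///   %buf = alloc() : memref<64xi8>
/// while the dynamic path scales `size` by the element width first:
///   %c4 = constant 4 : index
///   %bytes = muli %c4, %size : index
///   %buf = alloc(%bytes) : memref<?xi8>
/// (alloca is emitted instead of alloc when `options.useAlloca` is set.)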
static Value allocBuffer(const LinalgPromotionOptions &options,
                         Type elementType, Value size, bool dynamicBuffers,
                         OperationFolder *folder,
                         Optional<unsigned> alignment = None) {
  auto *ctx = size.getContext();
  auto width = llvm::divideCeil(elementType.getIntOrFloatBitWidth(), 8);
  IntegerAttr alignmentAttr;
  if (alignment.hasValue())
    alignmentAttr =
        IntegerAttr::get(IntegerType::get(ctx, 64), alignment.getValue());
  if (!dynamicBuffers)
    if (auto cst = size.getDefiningOp<ConstantIndexOp>())
      return options.useAlloca
                 ? std_alloca(MemRefType::get(width * cst.getValue(),
                                              IntegerType::get(ctx, 8)),
                              ValueRange{}, alignmentAttr)
                       .value
                 : std_alloc(MemRefType::get(width * cst.getValue(),
                                             IntegerType::get(ctx, 8)),
                             ValueRange{}, alignmentAttr)
                       .value;
  Value mul =
      folded_std_muli(folder, folded_std_constant_index(folder, width), size);
  return options.useAlloca
             ? std_alloca(MemRefType::get(-1, IntegerType::get(ctx, 8)), mul,
                          alignmentAttr)
                   .value
             : std_alloc(MemRefType::get(-1, IntegerType::get(ctx, 8)), mul,
                         alignmentAttr)
                   .value;
}

/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a
/// memref<..xi8> buffer and return a view of it with a memref type of shape
/// `boundingSubViewSize`.
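///
/// Schematically, for a 2-d f32 subview with bounding sizes %d0 and %d1, this
/// produces IR along the lines of (illustrative only):
///   %sz   = muli %d0, %d1 : index
///   %buf  = ... i8 buffer obtained from allocBuffer using %sz ...
///   %view = view %buf[%c0][%d0, %d1] : memref<?xi8> to memref<?x?xf32>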
static Optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    SubViewOp subView, ArrayRef<Value> boundingSubViewSize, bool dynamicBuffers,
    Optional<unsigned> alignment, OperationFolder *folder) {
  ShapedType viewType = subView.getType();
  int64_t rank = viewType.getRank();
  (void)rank;
  assert(rank > 0 && boundingSubViewSize.size() == static_cast<size_t>(rank) &&
         "expected bounding sizes to match the rank of the subview");
  auto zero = folded_std_constant_index(folder, 0);
  auto one = folded_std_constant_index(folder, 1);

  Value allocSize = one;
  for (Value size : boundingSubViewSize)
    allocSize = folded_std_muli(folder, allocSize, size);
  Value buffer = allocBuffer(options, viewType.getElementType(), allocSize,
                             dynamicBuffers, folder, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamicSize);
  Value view = folded_std_view(
      folder, MemRefType::get(dynSizes, viewType.getElementType()), buffer,
      zero, boundingSubViewSize);
  return view;
}

/// Default implementation of the deallocation of the buffer used for
/// promotion. It expects to get the same value that the default allocation
/// method returned, i.e. the result of a ViewOp.
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  auto viewOp = fullLocalView.getDefiningOp<ViewOp>();
  assert(viewOp && "expected full local view to be a ViewOp");
  if (!options.useAlloca)
    std_dealloc(viewOp.source());
  return success();
}

namespace {

/// Helper struct that captures the information required to apply the
/// transformation to each op. This bridges the abstraction gap with the
/// user-facing API, which exposes positional arguments to control which
/// operands are promoted.
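///
/// For illustration, a caller promoting only the first two operands might
/// configure the user-facing options roughly as follows (setter spellings not
/// appearing elsewhere in this file, such as setOperandsToPromote, are an
/// assumption of this sketch):
///   auto opts = LinalgPromotionOptions()
///                   .setOperandsToPromote({0, 1})
///                   .setDynamicBuffers(false)
///                   .setUseAlloca(false);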
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<unsigned, Value> subViews;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocating and deallocating the promoted buffers,
  /// as well as for copying data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Allow the use of dynamically-sized buffers.
  bool dynamicBuffers;
  /// Alignment of promoted buffer.
  Optional<unsigned> alignment;
};
} // namespace

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), dynamicBuffers(options.dynamicBuffers),
      alignment(options.alignment) {
  assert(linalgOp.hasBufferSemantics() && "revisit usage of shaped operand");
  unsigned nBuffers = linalgOp.getNumShapedOperands();
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.getValueOr(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(nBuffers, options.useFullTileBuffersDefault);

  for (unsigned idx = 0; idx != nBuffers; ++idx) {
    if (options.operandsToPromote && !options.operandsToPromote->count(idx))
      continue;
    auto *op = linalgOp.getShapedOperand(idx).getDefiningOp();
    if (auto sv = dyn_cast_or_null<SubViewOp>(op)) {
      subViews[idx] = sv;
      useFullTileBuffers[sv] = vUseFullTileBuffers[idx];
    }
  }

  allocationFn =
      (options.allocationFn ? *(options.allocationFn)
                            : [&](OpBuilder &builder, SubViewOp subViewOp,
                                  ArrayRef<Value> boundingSubViewSize,
                                  OperationFolder *folder) -> Optional<Value> {
        return defaultAllocBufferCallBack(options, builder, subViewOp,
                                          boundingSubViewSize, dynamicBuffers,
                                          alignment, folder);
      });
  deallocationFn =
      (options.deallocationFn
           ? *(options.deallocationFn)
           : [&](OpBuilder &b, Value buffer) {
               return defaultDeallocBufferCallBack(options, b, buffer);
             });
  auto defaultCopyCallBack = [&](OpBuilder &builder, Value src,
                                 Value dst) -> LogicalResult {
    linalg_copy(src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView`, accounting for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
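//
// As a schematic example (illustrative IR; sizes and names are made up), an
// f32 subview with constant bounding sizes 4 and 8 and dynamic partial sizes
// %p0 and %p1 is promoted roughly as:
//   %buffer  = alloc() : memref<128xi8>
//   %full    = view %buffer[%c0][%c4, %c8]
//                : memref<128xi8> to memref<?x?xf32>
//   %partial = subview %full[%c0, %c0] [%p0, %p1] [%c1, %c1]
//                : memref<?x?xf32> to memref<?x?xf32, #map>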
Optional<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, SubViewOp subView,
    AllocBufferCallbackFn allocationFn, OperationFolder *folder) {
  ScopedContext scopedContext(b, loc);
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes, partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  for (auto en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    auto rangeValue = en.value();
    // Try to extract a tight constant.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size = extractSmallestConstantBoundingSize(b, loc, rangeValue.size);
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(folded_std_dim(folder, subView, en.index()));
  }
  // Allocate the promoted buffer through the allocation callback; callers that
  // do not specify a callback get the default implementation.
  Optional<Value> fullLocalView = allocationFn(b, subView, fullSizes, folder);
  if (!fullLocalView)
    return {};
  auto zero = folded_std_constant_index(folder, 0);
  auto one = folded_std_constant_index(folder, 1);
  SmallVector<Value, 4> zeros(fullSizes.size(), zero);
  SmallVector<Value, 4> ones(fullSizes.size(), one);
  auto partialLocalView =
      folded_std_subview(folder, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}

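/// Allocates a promoted buffer for each subview in `options.subViews`, fills
/// the full tile with zero when the full view is used, and copies the original
/// data into the partial view through `options.copyInFn`. Returns the
/// per-operand PromotionInfo, or llvm::None if there is nothing to promote or
/// if any step fails.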
static Optional<MapVector<unsigned, PromotionInfo>>
promoteSubViews(OpBuilder &b, Location loc,
                LinalgOpInstancePromotionOptions options,
                OperationFolder *folder) {
  if (options.subViews.empty())
    return {};

  ScopedContext scope(b, loc);
  MapVector<unsigned, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    SubViewOp subView = cast<SubViewOp>(v.second.getDefiningOp());
    Optional<PromotionInfo> promotionInfo = promoteSubviewAsNewBuffer(
        b, loc, subView, options.allocationFn, folder);
    if (!promotionInfo)
      return {};
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Value fillVal;
    if (auto t = subView.getType().getElementType().dyn_cast<FloatType>())
      fillVal = folded_std_constant(folder, FloatAttr::get(t, 0.0));
    else if (auto t =
                 subView.getType().getElementType().dyn_cast<IntegerType>())
      fillVal = folded_std_constant_int(folder, 0, t);
    linalg_fill(promotionInfo->fullLocalView, fillVal);
  }

  // Copy data into the promoted buffers using the copy-in callback.
  for (auto v : options.subViews) {
    auto info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (failed(options.copyInFn(b, cast<SubViewOp>(v.second.getDefiningOp()),
                                info->second.partialLocalView)))
      return {};
  }
  return promotionInfoMap;
}

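/// Applies promotion around `op`: promotes the requested subviews, redirects
/// the op's shaped operands to the promoted (full or partial) local views,
/// emits write-back copies for the promoted output views, and deallocates the
/// local buffers. Returns llvm::None on failure.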
static Optional<LinalgOp>
promoteSubViews(OpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options,
                OperationFolder *folder) {
  assert(op.hasBufferSemantics() && "expected linalg op with buffer semantics");

  if (auto convOp = dyn_cast<linalg::ConvOp>(op.getOperation())) {
    // TODO: add a level of indirection to linalg.generic.
    if (convOp.padding())
      return {};
  }

  // 1. Promote the specified views and use them in the new op.
  auto loc = op.getLoc();
  auto promotedBuffersAndViews = promoteSubViews(b, loc, options, folder);
  if (!promotedBuffersAndViews ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return {};

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This is to support cases such as FillOp taking
  // extra scalars, etc. Keep a reference to the output buffers for write-back.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op.getNumShapedOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (auto view : llvm::enumerate(op.getShapedOperands())) {
    if (options.subViews.count(view.index()) != 0) {
      if (options.useFullTileBuffers[view.value()])
        opViews.push_back(
            (*promotedBuffersAndViews)[view.index()].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[view.index()].partialLocalView);
      if (view.index() >= op.getNumInputs())
        writebackViews.emplace_back(
            view.value(),
            (*promotedBuffersAndViews)[view.index()].partialLocalView);
    } else {
      opViews.push_back(view.value());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  ScopedContext scope(b, loc);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return {};
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}

LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linOp || !linOp.hasBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (auto en : llvm::enumerate(linOp.getShapedOperands())) {
    if (isa_and_nonnull<SubViewOp>(en.value().getDefiningOp())) {
      if (!options.operandsToPromote.hasValue() ||
          options.operandsToPromote->count(en.index()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}

Optional<LinalgOp> mlir::linalg::promoteSubViews(OpBuilder &b,
                                                 LinalgOp linalgOp,
                                                 LinalgPromotionOptions options,
                                                 OperationFolder *folder) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  return ::promoteSubViews(b, linalgOp, linalgOptions, folder);
}

namespace {
struct LinalgPromotionPass : public LinalgPromotionBase<LinalgPromotionPass> {
  LinalgPromotionPass() = default;
  LinalgPromotionPass(bool dynamicBuffers, bool useAlloca) {
    this->dynamicBuffers = dynamicBuffers;
    this->useAlloca = useAlloca;
  }

  void runOnFunction() override {
    OperationFolder folder(&getContext());
    getFunction().walk([this, &folder](LinalgOp op) {
      auto options = LinalgPromotionOptions()
                         .setDynamicBuffers(dynamicBuffers)
                         .setUseAlloca(useAlloca);
      if (failed(promoteSubviewsPrecondition(op, options)))
        return;
      LLVM_DEBUG(llvm::dbgs() << "Promote: " << *(op.getOperation()) << "\n");
      OpBuilder b(op);
      promoteSubViews(b, op, options, &folder);
    });
  }
};
} // namespace

// TODO: support more transformation options in the pass.
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgPromotionPass(bool dynamicBuffers, bool useAlloca) {
  return std::make_unique<LinalgPromotionPass>(dynamicBuffers, useAlloca);
}
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgPromotionPass() {
  return std::make_unique<LinalgPromotionPass>();
}
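
// A minimal sketch of adding the pass programmatically, assuming an existing
// `mlir::PassManager pm` (the pass operates on FuncOp, hence the nested pass):
//   pm.addNestedPass<FuncOp>(
//       createLinalgPromotionPass(/*dynamicBuffers=*/false,
//                                 /*useAlloca=*/false));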