1 //===- LoopTiling.cpp --- Loop tiling pass ------------------------------*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements a pass to tile loop nests.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PassDetail.h"
14 #include "mlir/Analysis/AffineAnalysis.h"
15 #include "mlir/Analysis/AffineStructures.h"
16 #include "mlir/Analysis/LoopAnalysis.h"
17 #include "mlir/Analysis/Utils.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/IR/AffineValueMap.h"
20 #include "mlir/Dialect/Affine/Passes.h"
21 #include "mlir/IR/BlockAndValueMapping.h"
22 #include "mlir/IR/Builders.h"
23 #include "mlir/Transforms/LoopUtils.h"
24 #include "mlir/Transforms/Utils.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Support/Debug.h"
27 using namespace mlir;
28 
29 #define DEBUG_TYPE "affine-loop-tile"
30 
31 namespace {
32 
33 /// A pass to perform loop tiling on all suitable loop nests of a Function.
/// A pass to perform loop tiling on all suitable loop nests of a Function.
struct LoopTiling : public AffineLoopTilingBase<LoopTiling> {
  LoopTiling() = default;
  /// Constructs the pass with an explicit cache size in bytes; the base-class
  /// option stores it in KiB (integer division truncates sub-KiB values).
  /// `avoidMaxMinBounds` controls whether tile sizes are later shrunk to
  /// divisors of known trip counts so tiled bounds avoid min/max expressions.
  explicit LoopTiling(uint64_t cacheSizeBytes, bool avoidMaxMinBounds = true)
      : avoidMaxMinBounds(avoidMaxMinBounds) {
    this->cacheSizeInKiB = cacheSizeBytes / 1024;
  }

  void runOnFunction() override;
  /// Computes the tile sizes to use for `band`, writing one size per loop
  /// into `tileSizes` (from pass options if set, otherwise a footprint model).
  void getTileSizes(ArrayRef<AffineForOp> band,
                    SmallVectorImpl<unsigned> *tileSizes);

  // Default tile size if nothing is provided.
  constexpr static unsigned kDefaultTileSize = 4;

  // If true, tile sizes are set to avoid max/min in bounds if possible.
  bool avoidMaxMinBounds = true;
};
51 
52 } // end anonymous namespace
53 
54 /// Creates a pass to perform loop tiling on all suitable loop nests of a
55 /// Function.
56 std::unique_ptr<OperationPass<FuncOp>>
57 mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
58   return std::make_unique<LoopTiling>(cacheSizeBytes);
59 }
60 std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
61   return std::make_unique<LoopTiling>();
62 }
63 
64 // Move the loop body of AffineForOp 'src' from 'src' into the specified
65 // location in destination's body, ignoring the terminator.
66 static inline void moveLoopBody(AffineForOp src, AffineForOp dest,
67                                 Block::iterator loc) {
68   auto &insts = src.getBody()->getOperations();
69   dest.getBody()->getOperations().splice(loc, insts, insts.begin(),
70                                          std::prev(insts.end()));
71 }
72 
73 // Move the loop body of AffineForOp 'src' from 'src' to the start of dest's
74 // body.
75 static inline void moveLoopBody(AffineForOp src, AffineForOp dest) {
76   moveLoopBody(src, dest, dest.getBody()->begin());
77 }
78 
79 /// Constructs and sets new loop bounds after tiling for the case of
80 /// hyper-rectangular index sets, where the bounds of one dimension do not
81 /// depend on other dimensions. Bounds of each dimension can thus be treated
82 /// independently, and deriving the new bounds is much simpler and faster
83 /// than for the case of tiling arbitrary polyhedral shapes.
// Sets the bounds of the tile-space loops (newLoops[0..width-1]) and the
// intra-tile loops (newLoops[width..2*width-1]) derived from `origLoops`,
// assuming a hyper-rectangular index set so each dimension is independent.
static void
constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
                                MutableArrayRef<AffineForOp> newLoops,
                                ArrayRef<unsigned> tileSizes) {
  assert(!origLoops.empty());
  assert(origLoops.size() == tileSizes.size());

  OpBuilder b(origLoops[0].getOperation());
  unsigned width = origLoops.size();

  // Bounds for tile space loops.
  for (unsigned i = 0; i < width; i++) {
    // Tile-space loop i spans the same range as original loop i, but steps by
    // the tile size so each iteration selects one tile.
    OperandRange newLbOperands = origLoops[i].getLowerBoundOperands();
    OperandRange newUbOperands = origLoops[i].getUpperBoundOperands();
    newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap());
    newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap());
    newLoops[i].setStep(tileSizes[i]);
  }
  // Bounds for intra-tile loops.
  for (unsigned i = 0; i < width; i++) {
    int64_t largestDiv = getLargestDivisorOfTripCount(origLoops[i]);
    auto mayBeConstantCount = getConstantTripCount(origLoops[i]);
    // The lower bound is just the tile-space loop.
    AffineMap lbMap = b.getDimIdentityMap();
    newLoops[width + i].setLowerBound(
        /*operands=*/newLoops[i].getInductionVar(), lbMap);

    // Set the upper bound.
    if (mayBeConstantCount && mayBeConstantCount.getValue() < tileSizes[i]) {
      // Trip count is less than tile size; upper bound is the trip count.
      auto ubMap = b.getConstantAffineMap(mayBeConstantCount.getValue());
      newLoops[width + i].setUpperBoundMap(ubMap);
    } else if (largestDiv % tileSizes[i] != 0) {
      // The tile size may not divide the trip count, so the last tile can be
      // partial: intra-tile loop ii goes from i to min(i + tileSize, ub_i).
      // Construct the upper bound map; the operands are the original operands
      // with 'i' (tile-space loop) appended to it. The new upper bound map is
      // the original one with an additional expression i + tileSize appended.

      // Add dim operands from original upper bound.
      SmallVector<Value, 4> ubOperands;
      auto ub = origLoops[i].getUpperBound();
      ubOperands.reserve(ub.getNumOperands() + 1);
      auto origUbMap = ub.getMap();
      for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(j));

      // Add dim operand for new loop upper bound.
      ubOperands.push_back(newLoops[i].getInductionVar());

      // Add symbol operands from original upper bound.
      // (Operand order is all dims first, then all symbols.)
      for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j)
        ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));

      SmallVector<AffineExpr, 4> boundExprs;
      boundExprs.reserve(1 + origUbMap.getNumResults());
      // The appended dim position refers to the tile-space IV added above.
      auto dim = b.getAffineDimExpr(origUbMap.getNumDims());
      // The new upper bound map is the original one with an additional
      // expression i + tileSize appended.
      boundExprs.push_back(dim + tileSizes[i]);
      boundExprs.append(origUbMap.getResults().begin(),
                        origUbMap.getResults().end());
      auto ubMap =
          AffineMap::get(origUbMap.getNumDims() + 1, origUbMap.getNumSymbols(),
                         boundExprs, b.getContext());
      newLoops[width + i].setUpperBound(/*operands=*/ubOperands, ubMap);
    } else {
      // No need of the min expression: the tile size evenly divides the trip
      // count, so i + tileSize never exceeds the original upper bound.
      auto dim = b.getAffineDimExpr(0);
      auto ubMap = AffineMap::get(1, 0, dim + tileSizes[i]);
      newLoops[width + i].setUpperBound(newLoops[i].getInductionVar(), ubMap);
    }
  }
}
157 
158 /// Tiles the specified band of perfectly nested loops creating tile-space loops
159 /// and intra-tile loops. A band is a contiguous set of loops.
160 //  TODO(bondhugula): handle non hyper-rectangular spaces.
161 LogicalResult
162 mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
163                           ArrayRef<unsigned> tileSizes,
164                           SmallVectorImpl<AffineForOp> *tiledNest) {
165   // Check if the supplied for op's are all successively nested.
166   assert(!input.empty() && "no loops in input band");
167   assert(input.size() == tileSizes.size() && "Too few/many tile sizes");
168 
169   assert(isPerfectlyNested(input) && "input loops not perfectly nested");
170 
171   auto origLoops = input;
172 
173   AffineForOp rootAffineForOp = origLoops[0];
174   auto loc = rootAffineForOp.getLoc();
175   // Note that width is at least one since band isn't empty.
176   unsigned width = input.size();
177 
178   SmallVector<AffineForOp, 6> tiledLoops(2 * width);
179 
180   // The outermost among the loops as we add more..
181   auto *topLoop = rootAffineForOp.getOperation();
182   AffineForOp innermostPointLoop;
183 
184   // Add intra-tile (or point) loops.
185   for (unsigned i = 0; i < width; i++) {
186     OpBuilder b(topLoop);
187     // Loop bounds will be set later.
188     auto pointLoop = b.create<AffineForOp>(loc, 0, 0);
189     pointLoop.getBody()->getOperations().splice(
190         pointLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
191         topLoop);
192     tiledLoops[2 * width - 1 - i] = pointLoop;
193     topLoop = pointLoop.getOperation();
194     if (i == 0)
195       innermostPointLoop = pointLoop;
196   }
197 
198   // Add tile space loops;
199   for (unsigned i = width; i < 2 * width; i++) {
200     OpBuilder b(topLoop);
201     // Loop bounds will be set later.
202     auto tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
203     tileSpaceLoop.getBody()->getOperations().splice(
204         tileSpaceLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
205         topLoop);
206     tiledLoops[2 * width - i - 1] = tileSpaceLoop;
207     topLoop = tileSpaceLoop.getOperation();
208   }
209 
210   // Move the loop body of the original nest to the new one.
211   moveLoopBody(origLoops.back(), innermostPointLoop);
212 
213   SmallVector<Value, 8> origLoopIVs;
214   extractForInductionVars(input, &origLoopIVs);
215 
216   FlatAffineConstraints cst;
217   getIndexSet(input, &cst);
218   if (!cst.isHyperRectangular(0, width)) {
219     llvm::dbgs() << "tiled code generation unimplemented for the "
220                     "non-hyperrectangular case, op:"
221                  << *rootAffineForOp << "\n";
222     return failure();
223   }
224 
225   constructTiledIndexSetHyperRect(origLoops, tiledLoops, tileSizes);
226 
227   // Replace original IVs with intra-tile loop IVs.
228   for (unsigned i = 0; i < width; i++)
229     origLoopIVs[i].replaceAllUsesWith(tiledLoops[i + width].getInductionVar());
230 
231   // Erase the old loop nest.
232   rootAffineForOp.erase();
233 
234   if (tiledNest)
235     *tiledNest = std::move(tiledLoops);
236 
237   return success();
238 }
239 
240 // Identify valid and profitable bands of loops to tile. This is currently just
241 // a temporary placeholder to test the mechanics of tiled code generation.
242 // Returns all maximal outermost perfect loop nests to tile.
243 static void getTileableBands(FuncOp f,
244                              std::vector<SmallVector<AffineForOp, 6>> *bands) {
245   // Get maximal perfect nest of 'affine.for' insts starting from root
246   // (inclusive).
247   auto getMaximalPerfectLoopNest = [&](AffineForOp root) {
248     SmallVector<AffineForOp, 6> band;
249     getPerfectlyNestedLoops(band, root);
250     bands->push_back(band);
251   };
252 
253   for (auto &block : f)
254     for (auto &op : block)
255       if (auto forOp = dyn_cast<AffineForOp>(op))
256         getMaximalPerfectLoopNest(forOp);
257 }
258 
259 /// Reduces each tile size to the largest divisor of the corresponding trip
260 /// count (if the trip count is known).
261 static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
262                                          SmallVectorImpl<unsigned> *tileSizes) {
263   assert(band.size() == tileSizes->size() && "invalid tile size count");
264   for (unsigned i = 0, e = band.size(); i < e; i++) {
265     unsigned &tSizeAdjusted = (*tileSizes)[i];
266     auto mayConst = getConstantTripCount(band[i]);
267     if (!mayConst)
268       continue;
269     // Adjust the tile size to largest factor of the trip count less than
270     // tSize.
271     uint64_t constTripCount = mayConst.getValue();
272     if (constTripCount > 1 && tSizeAdjusted > constTripCount / 2)
273       tSizeAdjusted = constTripCount / 2;
274     while (constTripCount % tSizeAdjusted != 0)
275       tSizeAdjusted--;
276   }
277 }
278 
// Returns tile sizes to use. Checks CL options; if none are specified, sets it
// based on a simple model that looks at the memory footprint and determines
// tile sizes assuming identity accesses / 1:1 tile size proportional footprint
// along each of the dimensions being tiled.
// TODO(mlir-team): evolve this model. Tile size determination is a large area
// to play with in general.
void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
                              SmallVectorImpl<unsigned> *tileSizes) {
  if (band.empty())
    return;

  // Use command-line tileSize for all loops if specified.
  if (tileSize) {
    tileSizes->assign(band.size(), tileSize);
    return;
  }

  // Use tileSizes and fill them with default tile size if it's short.
  // (`this->tileSizes` is the per-loop list pass option; `*tileSizes` the
  // output.)
  if (!this->tileSizes.empty()) {
    tileSizes->assign(this->tileSizes.begin(), this->tileSizes.end());
    tileSizes->resize(band.size(), kDefaultTileSize);
    return;
  }
  tileSizes->resize(band.size());

  // The first loop in the band.
  auto rootForOp = band[0];
  (void)rootForOp;  // Only used inside LLVM_DEBUG below; silence -Wunused.

  // Obtain memory footprint and set tile sizes so that a tile fits in
  // the cache size. This is an approximation with the assumption that the
  // footprint increases with the tile size linearly in that dimension (i.e.,
  // assumes one-to-one access function).
  auto fp = getMemoryFootprintBytes(band[0], 0);
  if (!fp) {
    // Fill with default tile sizes if footprint is unknown.
    std::fill(tileSizes->begin(), tileSizes->end(),
              LoopTiling::kDefaultTileSize);
    if (avoidMaxMinBounds)
      adjustToDivisorsOfTripCounts(band, tileSizes);
    LLVM_DEBUG(
        rootForOp.emitWarning("memory footprint unknown: using default tile "
                              "sizes adjusted to trip count divisors"));
    return;
  }

  // Check how many times larger the cache size is when compared to footprint.
  uint64_t cacheSizeBytes = cacheSizeInKiB * 1024;
  uint64_t excessFactor = llvm::divideCeil(fp.getValue(), cacheSizeBytes);
  if (excessFactor <= 1) {
    // No need of any tiling - set tile size to 1.
    std::fill(tileSizes->begin(), tileSizes->end(), 1);
    return;
  }

  // Divide all loops equally in an attempt to reduce footprint.
  // TODO(bondhugula): this is approximate. Ideally, obtain reuse factor /
  // profitability along each dimension and weight tile sizes based on that as
  // one possible approach. Or compute a polynomial in tile sizes and solve for
  // it.

  // For an n-d tileable band, compute the n^th root of the excess.
  unsigned tSize =
      static_cast<unsigned>(floorl(std::pow(excessFactor, 1.0 / band.size())));
  // We'll keep a running product to determine the last tile size better.
  unsigned cumulProductOfTileSizes = 1;
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    if (i < e - 1)
      (*tileSizes)[i] = tSize;
    else
      // Set last tile size to cover the balance.
      (*tileSizes)[i] = std::max(
          1U, static_cast<unsigned>(excessFactor / cumulProductOfTileSizes));
    cumulProductOfTileSizes *= (*tileSizes)[i];
  }
  if (avoidMaxMinBounds)
    adjustToDivisorsOfTripCounts(band, tileSizes);
}
357 
358 void LoopTiling::runOnFunction() {
359   // Bands of loops to tile.
360   std::vector<SmallVector<AffineForOp, 6>> bands;
361   getTileableBands(getFunction(), &bands);
362 
363   // Tile each band.
364   for (auto &band : bands) {
365     // Set up tile sizes; fill missing tile sizes at the end with default tile
366     // size or tileSize if one was provided.
367     SmallVector<unsigned, 6> tileSizes;
368     getTileSizes(band, &tileSizes);
369     if (llvm::DebugFlag) {
370       auto diag = band[0].emitRemark("using tile sizes [");
371       for (auto tSize : tileSizes)
372         diag << tSize << ' ';
373       diag << "]\n";
374     }
375     SmallVector<AffineForOp, 6> tiledNest;
376     if (failed(tilePerfectlyNested(band, tileSizes, &tiledNest)))
377       return signalPassFailure();
378 
379     // Separate full and partial tiles.
380     if (separate) {
381       auto intraTileLoops =
382           MutableArrayRef<AffineForOp>(tiledNest).drop_front(band.size());
383       separateFullTiles(intraTileLoops);
384     }
385   }
386 }
387 
// Out-of-class definition: required so the constant can be ODR-used (e.g.
// referenced by address) prior to C++17 inline-variable semantics.
constexpr unsigned LoopTiling::kDefaultTileSize;
389