//===- LoopTiling.cpp --- Loop tiling pass ------------------------------*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to tile loop nests.
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <cmath>

using namespace mlir;

#define DEBUG_TYPE "affine-loop-tile"

static llvm::cl::OptionCategory clOptionsCategory(DEBUG_TYPE " options");

static llvm::cl::opt<unsigned long long>
    clCacheSizeKiB("affine-tile-cache-size",
                   llvm::cl::desc("Set size of cache to tile for in KiB"),
                   llvm::cl::cat(clOptionsCategory));

// Separate full and partial tiles.
static llvm::cl::opt<bool>
    clSeparate("affine-tile-separate",
               llvm::cl::desc("Separate full and partial tiles"),
               llvm::cl::cat(clOptionsCategory));

// Tile size to use for all loops (overrides -affine-tile-sizes if provided).
static llvm::cl::opt<unsigned>
    clTileSize("affine-tile-size",
               llvm::cl::desc("Use this tile size for all loops"),
               llvm::cl::cat(clOptionsCategory));

// List of tile sizes. If fewer are provided than the number of loops in a
// band, the remaining ones are filled with clTileSize / kDefaultTileSize.
static llvm::cl::list<unsigned> clTileSizes(
    "affine-tile-sizes",
    llvm::cl::desc("List of tile sizes for each perfect nest (overridden by "
                   "-affine-tile-size)"),
    llvm::cl::ZeroOrMore, llvm::cl::cat(clOptionsCategory));
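
// As an illustrative example (assuming the pass is registered under
// -affine-loop-tile), typical invocations could look like:
//   mlir-opt -affine-loop-tile -affine-tile-size=32 input.mlir
//   mlir-opt -affine-loop-tile -affine-tile-sizes=32 -affine-tile-sizes=16 \
//     input.mlir
//   mlir-opt -affine-loop-tile -affine-tile-cache-size=256 input.mlir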

namespace {

/// A pass to perform loop tiling on all suitable loop nests of a Function.
struct LoopTiling : public PassWrapper<LoopTiling, FunctionPass> {
/// Include the generated pass utilities.
#define GEN_PASS_AffineLoopTiling
#include "mlir/Dialect/Affine/Passes.h.inc"

  explicit LoopTiling(uint64_t cacheSizeBytes = kDefaultCacheMemCapacity,
                      bool avoidMaxMinBounds = true)
      : cacheSizeBytes(cacheSizeBytes), avoidMaxMinBounds(avoidMaxMinBounds) {}

  void runOnFunction() override;
  void getTileSizes(ArrayRef<AffineForOp> band,
                    SmallVectorImpl<unsigned> *tileSizes);

  // Default tile size if nothing is provided.
  constexpr static unsigned kDefaultTileSize = 4;
  constexpr static uint64_t kDefaultCacheMemCapacity = 512 * 1024UL;

  // Capacity of the cache to tile for.
  uint64_t cacheSizeBytes;
  // If true, tile sizes are set to avoid max/min in bounds if possible.
  bool avoidMaxMinBounds;
};

} // end anonymous namespace

/// Creates a pass to perform loop tiling on all suitable loop nests of a
/// Function.
std::unique_ptr<OperationPass<FuncOp>>
mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
  return std::make_unique<LoopTiling>(cacheSizeBytes);
}
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
  return std::make_unique<LoopTiling>();
}
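
// Illustrative usage from C++ (a sketch, not part of this pass; the pass
// manager setup below is an assumption about the surrounding driver code):
//   PassManager pm(module.getContext());
//   pm.addNestedPass<FuncOp>(
//       createLoopTilingPass(/*cacheSizeBytes=*/256 * 1024));
//   (void)pm.run(module);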

// Moves the loop body of AffineForOp 'src' to the specified location in
// 'dest's body, ignoring the terminator.
static inline void moveLoopBody(AffineForOp src, AffineForOp dest,
                                Block::iterator loc) {
  auto &insts = src.getBody()->getOperations();
  dest.getBody()->getOperations().splice(loc, insts, insts.begin(),
                                         std::prev(insts.end()));
}

// Moves the loop body of AffineForOp 'src' to the start of 'dest's body.
static inline void moveLoopBody(AffineForOp src, AffineForOp dest) {
  moveLoopBody(src, dest, dest.getBody()->begin());
}

/// Constructs and sets new loop bounds after tiling for the case of
/// hyper-rectangular index sets, where the bounds of one dimension do not
/// depend on other dimensions. Bounds of each dimension can thus be treated
/// independently, and deriving the new bounds is much simpler and faster
/// than for the case of tiling arbitrary polyhedral shapes.
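///
/// As an illustrative sketch (SSA value names are hypothetical): tiling
///   affine.for %i = 0 to %N
/// with tile size 32 yields a tile-space loop with the original bounds but
/// step 32, and an intra-tile loop running from the tile-space IV %it to
/// min(%it + 32, %N), or simply to %it + 32 when 32 is known to divide the
/// trip count.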
static void
constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
                                MutableArrayRef<AffineForOp> newLoops,
                                ArrayRef<unsigned> tileSizes) {
  assert(!origLoops.empty());
  assert(origLoops.size() == tileSizes.size());

  OpBuilder b(origLoops[0].getOperation());
  unsigned width = origLoops.size();

  // Bounds for tile space loops.
  for (unsigned i = 0; i < width; i++) {
    auto lbOperands = origLoops[i].getLowerBoundOperands();
    auto ubOperands = origLoops[i].getUpperBoundOperands();
    SmallVector<Value, 4> newLbOperands(lbOperands);
    SmallVector<Value, 4> newUbOperands(ubOperands);
    newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap());
    newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap());
    newLoops[i].setStep(tileSizes[i]);
  }
  // Bounds for intra-tile loops.
  for (unsigned i = 0; i < width; i++) {
    int64_t largestDiv = getLargestDivisorOfTripCount(origLoops[i]);
    auto mayBeConstantCount = getConstantTripCount(origLoops[i]);
    // The lower bound is just the tile-space loop's induction variable.
    AffineMap lbMap = b.getDimIdentityMap();
    newLoops[width + i].setLowerBound(
        /*operands=*/newLoops[i].getInductionVar(), lbMap);

    // Set the upper bound.
    if (mayBeConstantCount.hasValue() &&
        mayBeConstantCount.getValue() < tileSizes[i]) {
      // Trip count is less than tile size; upper bound is the trip count.
      auto ubMap = b.getConstantAffineMap(mayBeConstantCount.getValue());
      newLoops[width + i].setUpperBoundMap(ubMap);
    } else if (largestDiv % tileSizes[i] != 0) {
      // Intra-tile loop ii goes from i to min(i + tileSize, ub_i).
      // Construct the upper bound map; the operands are the original operands
      // with 'i' (the tile-space loop induction variable) appended to them.
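      // As an illustrative example (value names are hypothetical): with tile
      // size 32 and an original upper bound map ()[s0] -> (s0) (i.e., an upper
      // bound of %N), the new map is (d0)[s0] -> (d0 + 32, s0) applied to
      // operands (%it)[%N], which is interpreted as min(%it + 32, %N).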
      auto ub = origLoops[i].getUpperBound();
      SmallVector<Value, 4> ubOperands;
      ubOperands.reserve(ub.getNumOperands() + 1);
      auto origUbMap = ub.getMap();
      // Add dim operands from original upper bound.
      for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j) {
        ubOperands.push_back(ub.getOperand(j));
      }
      // Add dim operand for new loop upper bound.
      ubOperands.push_back(newLoops[i].getInductionVar());
      // Add symbol operands from original upper bound.
      for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j) {
        ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));
      }
      SmallVector<AffineExpr, 4> boundExprs;
      boundExprs.reserve(1 + origUbMap.getNumResults());
      auto dim = b.getAffineDimExpr(origUbMap.getNumDims());
      // The new upper bound map is the original one with an additional
      // expression i + tileSize appended.
      boundExprs.push_back(dim + tileSizes[i]);
      boundExprs.append(origUbMap.getResults().begin(),
                        origUbMap.getResults().end());
      auto ubMap = AffineMap::get(origUbMap.getNumDims() + 1,
                                  origUbMap.getNumSymbols(), boundExprs);
      newLoops[width + i].setUpperBound(/*operands=*/ubOperands, ubMap);
    } else {
      // No need for a min expression: the tile size divides the trip count.
      auto dim = b.getAffineDimExpr(0);
      auto ubMap = AffineMap::get(1, 0, dim + tileSizes[i]);
      newLoops[width + i].setUpperBound(newLoops[i].getInductionVar(), ubMap);
    }
  }
}

/// Tiles the specified band of perfectly nested loops, creating tile-space
/// loops and intra-tile loops. A band is a contiguous set of loops.
//  TODO(bondhugula): handle non hyper-rectangular spaces.
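//
// As an illustrative sketch (SSA value names are hypothetical), tiling the
// 2-d band
//   affine.for %i = 0 to 256 {
//     affine.for %j = 0 to 512 { ... }
//   }
// with tile sizes {32, 64} produces, schematically, the 4-d nest
//   affine.for %it = 0 to 256 step 32 {
//     affine.for %jt = 0 to 512 step 64 {
//       affine.for %ii = %it to %it + 32 {
//         affine.for %jj = %jt to %jt + 64 { ... body with %ii, %jj ... }
//       }
//     }
//   }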
LogicalResult mlir::tileCodeGen(MutableArrayRef<AffineForOp> band,
                                ArrayRef<unsigned> tileSizes,
                                SmallVectorImpl<AffineForOp> *tiledNest) {
  // Check that the supplied for ops are all successively nested.
  assert(!band.empty() && "no loops in band");
  assert(band.size() == tileSizes.size() && "Too few/many tile sizes");

  for (unsigned i = 1, e = band.size(); i < e; i++)
    assert(band[i].getParentOp() == band[i - 1] && "not a perfect nest / band");

  auto origLoops = band;

  AffineForOp rootAffineForOp = origLoops[0];
  auto loc = rootAffineForOp.getLoc();
  // Note that width is at least one since band isn't empty.
  unsigned width = band.size();

  SmallVector<AffineForOp, 6> tiledLoops(2 * width);

  // The outermost loop so far; updated as more loops are added around it.
  auto *topLoop = rootAffineForOp.getOperation();
  AffineForOp innermostPointLoop;

  // Add intra-tile (or point) loops.
  for (unsigned i = 0; i < width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto pointLoop = b.create<AffineForOp>(loc, 0, 0);
    pointLoop.getBody()->getOperations().splice(
        pointLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - 1 - i] = pointLoop;
    topLoop = pointLoop.getOperation();
    if (i == 0)
      innermostPointLoop = pointLoop;
  }

  // Add tile-space loops.
  for (unsigned i = width; i < 2 * width; i++) {
    OpBuilder b(topLoop);
    // Loop bounds will be set later.
    auto tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
    tileSpaceLoop.getBody()->getOperations().splice(
        tileSpaceLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
        topLoop);
    tiledLoops[2 * width - i - 1] = tileSpaceLoop;
    topLoop = tileSpaceLoop.getOperation();
  }

  // Move the loop body of the original nest to the new one.
  moveLoopBody(origLoops[origLoops.size() - 1], innermostPointLoop);

  SmallVector<Value, 8> origLoopIVs;
  extractForInductionVars(band, &origLoopIVs);
  SmallVector<Optional<Value>, 6> ids(origLoopIVs.begin(), origLoopIVs.end());
  FlatAffineConstraints cst;
  getIndexSet(band, &cst);

  if (!cst.isHyperRectangular(0, width)) {
    LLVM_DEBUG(llvm::dbgs()
               << "tiled code generation unimplemented for the "
                  "non-hyperrectangular case, op:"
               << *rootAffineForOp << "\n");
    return failure();
  }

  constructTiledIndexSetHyperRect(origLoops, tiledLoops, tileSizes);

  // Replace original IVs with intra-tile loop IVs.
  for (unsigned i = 0; i < width; i++)
    origLoopIVs[i].replaceAllUsesWith(tiledLoops[i + width].getInductionVar());

  // Erase the old loop nest.
  rootAffineForOp.erase();

  if (tiledNest)
    *tiledNest = std::move(tiledLoops);

  return success();
}

// Identifies valid and profitable bands of loops to tile. This is currently
// just a temporary placeholder to test the mechanics of tiled code generation.
// Collects all maximal outermost perfect loop nests into 'bands'.
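//
// For example (an illustrative sketch), in a function whose body contains
//   affine.for %i { affine.for %j { ... } }
//   affine.for %k { ... }
// the collected bands are {%i, %j} and {%k}.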
static void getTileableBands(FuncOp f,
                             std::vector<SmallVector<AffineForOp, 6>> *bands) {
  // Get maximal perfect nest of 'affine.for' ops starting from root
  // (inclusive).
  auto getMaximalPerfectLoopNest = [&](AffineForOp root) {
    SmallVector<AffineForOp, 6> band;
    getPerfectlyNestedLoops(band, root);
    bands->push_back(band);
  };

  for (auto &block : f)
    for (auto &op : block)
      if (auto forOp = dyn_cast<AffineForOp>(op))
        getMaximalPerfectLoopNest(forOp);
}

// Reduces each tile size to the largest divisor of the corresponding trip
// count (if known) that does not exceed the requested size.
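// For example, a requested tile size of 40 for a loop with a constant trip
// count of 100 is reduced to 25, the largest divisor of 100 not exceeding 40.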
static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
                                         SmallVectorImpl<unsigned> *tileSizes) {
  assert(band.size() == tileSizes->size() && "invalid tile size count");
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    unsigned &tSizeAdjusted = (*tileSizes)[i];
    auto mayConst = getConstantTripCount(band[i]);
    if (!mayConst.hasValue())
      continue;
    // Adjust the tile size to the largest factor of the trip count that is no
    // larger than the current tile size.
    uint64_t constTripCount = mayConst.getValue();
    if (constTripCount > 1 && tSizeAdjusted > constTripCount / 2)
      tSizeAdjusted = constTripCount / 2;
    while (constTripCount % tSizeAdjusted != 0)
      tSizeAdjusted--;
  }
}

// Returns tile sizes to use. Checks CL options; if none are specified, sets
// tile sizes based on a simple model that looks at the memory footprint and
// assumes identity accesses, i.e., a footprint proportional to the tile size
// along each of the dimensions being tiled.
// TODO(mlir-team): evolve this model. Tile size determination is a large area
// to play with in general.
void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
                              SmallVectorImpl<unsigned> *tileSizes) {
  if (band.empty())
    return;

  tileSizes->resize(band.size());

  // Use clTileSize for all loops if specified.
  if (clTileSize.getNumOccurrences() > 0) {
    std::fill(tileSizes->begin(), tileSizes->end(), clTileSize);
    return;
  }

  // Use clTileSizes, padding with the default tile size if fewer sizes were
  // provided than there are loops in the band.
  if (!clTileSizes.empty()) {
    std::fill(tileSizes->begin(), tileSizes->end(),
              LoopTiling::kDefaultTileSize);
    std::copy(clTileSizes.begin(),
              clTileSizes.begin() + std::min(clTileSizes.size(), band.size()),
              tileSizes->begin());
    return;
  }

  // The first loop in the band.
  auto rootForOp = band[0];
  (void)rootForOp;

  // Obtain memory footprint and set tile sizes so that a tile fits in
  // the cache size. This is an approximation with the assumption that the
  // footprint increases with the tile size linearly in that dimension (i.e.,
  // assumes one-to-one access function).
  auto fp = getMemoryFootprintBytes(band[0], 0);
  if (!fp.hasValue()) {
    // Fill with default tile sizes if footprint is unknown.
    std::fill(tileSizes->begin(), tileSizes->end(),
              LoopTiling::kDefaultTileSize);
    if (avoidMaxMinBounds)
      adjustToDivisorsOfTripCounts(band, tileSizes);
    LLVM_DEBUG(
        rootForOp.emitWarning("memory footprint unknown: using default tile "
                              "sizes adjusted to trip count divisors"));
    return;
  }

  // Check how many times larger the footprint is compared to the cache size.
  uint64_t excessFactor = llvm::divideCeil(fp.getValue(), cacheSizeBytes);
  if (excessFactor <= 1) {
    // No need for any tiling - set all tile sizes to 1.
    std::fill(tileSizes->begin(), tileSizes->end(), 1);
    return;
  }

  // Divide all loops equally in an attempt to reduce footprint.
  // TODO(bondhugula): this is approximate. Ideally, obtain reuse factor /
  // profitability along each dimension and weight tile sizes based on that as
  // one possible approach. Or compute a polynomial in tile sizes and solve for
  // it.

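  // As a worked example (numbers purely illustrative): with a footprint of
  // 8 MiB and a 512 KiB cache, excessFactor = 16. For a 2-d band each tile
  // size becomes floor(16^(1/2)) = 4; for a 3-d band the first two sizes
  // become floor(16^(1/3)) = 2 and the last becomes 16 / (2 * 2) = 4.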
  // For an n-d tileable band, compute n^th root of the excess.
  unsigned tSize =
      static_cast<unsigned>(floorl(std::pow(excessFactor, 1.0 / band.size())));
  // We'll keep a running product to determine the last tile size better.
  unsigned cumulProductOfTileSizes = 1;
  for (unsigned i = 0, e = band.size(); i < e; i++) {
    if (i < e - 1)
      (*tileSizes)[i] = tSize;
    else
      // Set last tile size to cover the balance.
      (*tileSizes)[i] = std::max(
          1U, static_cast<unsigned>(excessFactor / cumulProductOfTileSizes));
    cumulProductOfTileSizes *= (*tileSizes)[i];
  }
  if (avoidMaxMinBounds)
    adjustToDivisorsOfTripCounts(band, tileSizes);
}

void LoopTiling::runOnFunction() {
  // Override cache size if provided on command line.
  if (clCacheSizeKiB.getNumOccurrences() > 0)
    cacheSizeBytes = clCacheSizeKiB * 1024;

  // Bands of loops to tile.
  std::vector<SmallVector<AffineForOp, 6>> bands;
  getTileableBands(getFunction(), &bands);

  // Tile each band.
  for (auto &band : bands) {
    // Set up tile sizes; fill missing tile sizes at the end with default tile
    // size or clTileSize if one was provided.
    SmallVector<unsigned, 6> tileSizes;
    getTileSizes(band, &tileSizes);
    if (llvm::DebugFlag) {
      auto diag = band[0].emitRemark("using tile sizes [");
      for (auto tSize : tileSizes)
        diag << tSize << ' ';
      diag << "]\n";
    }
    SmallVector<AffineForOp, 6> tiledNest;
    if (failed(tileCodeGen(band, tileSizes, &tiledNest)))
      return signalPassFailure();

    // Separate full and partial tiles.
    if (clSeparate) {
      auto intraTileLoops =
          MutableArrayRef<AffineForOp>(tiledNest).drop_front(band.size());
      separateFullTiles(intraTileLoops);
    }
  }
}

constexpr unsigned LoopTiling::kDefaultTileSize;
constexpr uint64_t LoopTiling::kDefaultCacheMemCapacity;