1 //===- LoopUtils.cpp ---- Misc utilities for loop transformation ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements miscellaneous loop transformation routines.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "mlir/Dialect/Affine/LoopUtils.h"
14 #include "mlir/Analysis/SliceAnalysis.h"
15 #include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
16 #include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
17 #include "mlir/Dialect/Affine/Analysis/Utils.h"
18 #include "mlir/Dialect/Affine/IR/AffineOps.h"
19 #include "mlir/Dialect/Affine/IR/AffineValueMap.h"
20 #include "mlir/Dialect/Affine/Utils.h"
21 #include "mlir/Dialect/Func/IR/FuncOps.h"
22 #include "mlir/Dialect/MemRef/IR/MemRef.h"
23 #include "mlir/Dialect/SCF/SCF.h"
24 #include "mlir/IR/BlockAndValueMapping.h"
25 #include "mlir/IR/IntegerSet.h"
26 #include "mlir/Support/MathExtras.h"
27 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
28 #include "mlir/Transforms/RegionUtils.h"
29 #include "llvm/ADT/MapVector.h"
30 #include "llvm/ADT/SmallPtrSet.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/raw_ostream.h"
33 
34 #define DEBUG_TYPE "LoopUtils"
35 
36 using namespace mlir;
37 using namespace presburger;
38 using llvm::SmallMapVector;
39 
40 namespace {
// This structure holds the lower bound, upper bound, and step of a loop so
// that sets of loop parameters can be passed and returned without confusing
// their order.
43 struct LoopParams {
44   Value lowerBound;
45   Value upperBound;
46   Value step;
47 };
48 } // namespace
49 
/// Computes the cleanup loop lower bound of the loop being unrolled with
/// the specified unroll factor; this bound will also be the upper bound of the
/// main part of the unrolled loop. Computes the bound as an AffineMap with its
/// operands, or sets a null map when the trip count can't be expressed as an
/// affine expression.
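/// For example, for `affine.for %i = 0 to 40` unrolled by a factor of 3, the
/// cleanup loop lower bound (and the new upper bound of the main unrolled
/// loop) is 0 + 40 - 40 mod 3 = 39, i.e., a constant map `() -> (39)` once
/// simplified.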
55 static void
56 getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor,
57                          AffineMap &cleanupLbMap,
58                          SmallVectorImpl<Value> &cleanupLbOperands) {
59   AffineMap tripCountMap;
60   SmallVector<Value, 4> tripCountOperands;
61   getTripCountMapAndOperands(forOp, &tripCountMap, &tripCountOperands);
62   // Trip count can't be computed.
63   if (!tripCountMap) {
64     cleanupLbMap = AffineMap();
65     return;
66   }
67 
68   OpBuilder b(forOp);
69   auto lbMap = forOp.getLowerBoundMap();
70   auto lb = b.create<AffineApplyOp>(forOp.getLoc(), lbMap,
71                                     forOp.getLowerBoundOperands());
72 
  // For each upper bound expression, get the range.
  // E.g., for affine.for %i = lb to min (ub1, ub2),
  // where the trip count exprs yield (tr1, tr2), we create affine.apply's:
  // lb + tr1 - tr1 % ufactor and lb + tr2 - tr2 % ufactor; the results of all
  // these affine.apply's make up the cleanup loop lower bound.
78   SmallVector<AffineExpr, 4> bumpExprs(tripCountMap.getNumResults());
79   SmallVector<Value, 4> bumpValues(tripCountMap.getNumResults());
80   int64_t step = forOp.getStep();
81   for (unsigned i = 0, e = tripCountMap.getNumResults(); i < e; i++) {
82     auto tripCountExpr = tripCountMap.getResult(i);
83     bumpExprs[i] = (tripCountExpr - tripCountExpr % unrollFactor) * step;
84     auto bumpMap = AffineMap::get(tripCountMap.getNumDims(),
85                                   tripCountMap.getNumSymbols(), bumpExprs[i]);
86     bumpValues[i] =
87         b.create<AffineApplyOp>(forOp.getLoc(), bumpMap, tripCountOperands);
88   }
89 
90   SmallVector<AffineExpr, 4> newUbExprs(tripCountMap.getNumResults());
91   for (unsigned i = 0, e = bumpExprs.size(); i < e; i++)
92     newUbExprs[i] = b.getAffineDimExpr(0) + b.getAffineDimExpr(i + 1);
93 
94   cleanupLbOperands.clear();
95   cleanupLbOperands.push_back(lb);
96   cleanupLbOperands.append(bumpValues.begin(), bumpValues.end());
97   cleanupLbMap = AffineMap::get(1 + tripCountMap.getNumResults(), 0, newUbExprs,
98                                 b.getContext());
99   // Simplify the cleanupLbMap + cleanupLbOperands.
100   fullyComposeAffineMapAndOperands(&cleanupLbMap, &cleanupLbOperands);
101   cleanupLbMap = simplifyAffineMap(cleanupLbMap);
102   canonicalizeMapAndOperands(&cleanupLbMap, &cleanupLbOperands);
103   // Remove any affine.apply's that became dead from the simplification above.
104   for (auto v : bumpValues)
105     if (v.use_empty())
106       v.getDefiningOp()->erase();
107 
108   if (lb.use_empty())
109     lb.erase();
110 }
111 
112 /// Helper to replace uses of loop carried values (iter_args) and loop
113 /// yield values while promoting single iteration affine.for ops.
114 static void replaceIterArgsAndYieldResults(AffineForOp forOp) {
115   // Replace uses of iter arguments with iter operands (initial values).
116   auto iterOperands = forOp.getIterOperands();
117   auto iterArgs = forOp.getRegionIterArgs();
118   for (auto e : llvm::zip(iterOperands, iterArgs))
119     std::get<1>(e).replaceAllUsesWith(std::get<0>(e));
120 
121   // Replace uses of loop results with the values yielded by the loop.
122   auto outerResults = forOp.getResults();
123   auto innerResults = forOp.getBody()->getTerminator()->getOperands();
124   for (auto e : llvm::zip(outerResults, innerResults))
125     std::get<0>(e).replaceAllUsesWith(std::get<1>(e));
126 }
127 
/// Promotes the loop body of a forOp to its containing block if the forOp
/// is known to have a single iteration.
130 // TODO: extend this for arbitrary affine bounds.
131 LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
132   Optional<uint64_t> tripCount = getConstantTripCount(forOp);
133   if (!tripCount || tripCount.getValue() != 1)
134     return failure();
135 
136   if (forOp.getLowerBoundMap().getNumResults() != 1)
137     return failure();
138 
  // Replace all uses of the IV with its single-iteration value.
140   auto iv = forOp.getInductionVar();
141   auto *parentBlock = forOp->getBlock();
142   if (!iv.use_empty()) {
143     if (forOp.hasConstantLowerBound()) {
144       OpBuilder topBuilder(forOp->getParentOfType<func::FuncOp>().getBody());
145       auto constOp = topBuilder.create<arith::ConstantIndexOp>(
146           forOp.getLoc(), forOp.getConstantLowerBound());
147       iv.replaceAllUsesWith(constOp);
148     } else {
149       auto lbOperands = forOp.getLowerBoundOperands();
150       auto lbMap = forOp.getLowerBoundMap();
151       OpBuilder builder(forOp);
152       if (lbMap == builder.getDimIdentityMap()) {
        // No need to generate an affine.apply.
154         iv.replaceAllUsesWith(lbOperands[0]);
155       } else {
156         auto affineApplyOp =
157             builder.create<AffineApplyOp>(forOp.getLoc(), lbMap, lbOperands);
158         iv.replaceAllUsesWith(affineApplyOp);
159       }
160     }
161   }
162 
163   replaceIterArgsAndYieldResults(forOp);
164 
165   // Move the loop body operations, except for its terminator, to the loop's
166   // containing block.
167   forOp.getBody()->back().erase();
168   parentBlock->getOperations().splice(Block::iterator(forOp),
169                                       forOp.getBody()->getOperations());
170   forOp.erase();
171   return success();
172 }
173 
174 /// Generates an affine.for op with the specified lower and upper bounds
175 /// while generating the right IV remappings to realize shifts for operations in
176 /// its body. The operations that go into the loop body are specified in
177 /// opGroupQueue starting from the specified offset, and in that order. The
178 /// first element of the pair specifies the shift applied to that group of
179 /// operations; the shift is multiplied by the loop step before being applied.
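/// For example, with a loop step of 2 and a group shift of 3, uses of the
/// source IV in that group are remapped to the new IV minus 6, materialized
/// via an affine.apply on the new loop's IV.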
180 /// Returns nullptr if the generated loop simplifies to a single iteration one.
181 static AffineForOp generateShiftedLoop(
182     AffineMap lbMap, AffineMap ubMap,
183     const std::vector<std::pair<uint64_t, ArrayRef<Operation *>>> &opGroupQueue,
184     unsigned offset, AffineForOp srcForOp, OpBuilder b) {
185   auto lbOperands = srcForOp.getLowerBoundOperands();
186   auto ubOperands = srcForOp.getUpperBoundOperands();
187 
188   assert(lbMap.getNumInputs() == lbOperands.size());
189   assert(ubMap.getNumInputs() == ubOperands.size());
190 
191   auto loopChunk = b.create<AffineForOp>(srcForOp.getLoc(), lbOperands, lbMap,
192                                          ubOperands, ubMap, srcForOp.getStep());
193   auto loopChunkIV = loopChunk.getInductionVar();
194   auto srcIV = srcForOp.getInductionVar();
195 
196   BlockAndValueMapping operandMap;
197 
198   auto bodyBuilder = OpBuilder::atBlockTerminator(loopChunk.getBody());
199   for (auto it = opGroupQueue.begin() + offset, e = opGroupQueue.end(); it != e;
200        ++it) {
201     uint64_t shift = it->first;
202     auto ops = it->second;
    // All 'same shift' operations get added with their operands remapped to
    // the results of cloned operations, and their uses of the IV remapped.
    // Generate the remapping if the shift is not zero: remappedIV = newIV -
    // shift.
207     if (!srcIV.use_empty() && shift != 0) {
208       auto ivRemap = bodyBuilder.create<AffineApplyOp>(
209           srcForOp.getLoc(),
210           bodyBuilder.getSingleDimShiftAffineMap(
211               -static_cast<int64_t>(srcForOp.getStep() * shift)),
212           loopChunkIV);
213       operandMap.map(srcIV, ivRemap);
214     } else {
215       operandMap.map(srcIV, loopChunkIV);
216     }
217     for (auto *op : ops)
218       bodyBuilder.clone(*op, operandMap);
219   };
220   if (succeeded(promoteIfSingleIteration(loopChunk)))
221     return AffineForOp();
222   return loopChunk;
223 }
224 
// The skewing of operations with respect to one another can be used for
// example to allow overlap of asynchronous operations (such as DMA
// communication) with computation, or just relative shifting of operations
// for better register reuse, locality, or parallelism. As such, the shifts
// are typically expected to be at most of the order of the number of
// operations. This method should not be used as a substitute for loop
// distribution/fission. It uses an algorithm that runs in time linear in the
// number of operations in the body of the for loop (using the 'sweep line'
// paradigm). This method requires that SSA dominance be preserved; a check
// for that, as well as for memory-based dependence preservation, rests with
// the users of this method.
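// For instance, with a two-operation body and shifts {0, 1}, this produces a
// one-iteration prologue running only the first operation, a steady-state
// loop running both operations (with the second operation's IV uses shifted
// back by one step), and a one-iteration epilogue running only the second
// operation.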
236 LogicalResult mlir::affineForOpBodySkew(AffineForOp forOp,
237                                         ArrayRef<uint64_t> shifts,
238                                         bool unrollPrologueEpilogue) {
239   assert(forOp.getBody()->getOperations().size() == shifts.size() &&
240          "too few/many shifts");
241   if (forOp.getBody()->begin() == std::prev(forOp.getBody()->end()))
242     return success();
243 
244   // If the trip counts aren't constant, we would need versioning and
245   // conditional guards (or context information to prevent such versioning). The
246   // better way to pipeline for such loops is to first tile them and extract
247   // constant trip count "full tiles" before applying this.
248   auto mayBeConstTripCount = getConstantTripCount(forOp);
249   if (!mayBeConstTripCount.hasValue()) {
250     LLVM_DEBUG(forOp.emitRemark("non-constant trip count loop not handled"));
251     return success();
252   }
253   uint64_t tripCount = mayBeConstTripCount.getValue();
254 
255   assert(isOpwiseShiftValid(forOp, shifts) &&
256          "shifts will lead to an invalid transformation\n");
257 
258   int64_t step = forOp.getStep();
259 
260   unsigned numChildOps = shifts.size();
261 
262   // Do a linear time (counting) sort for the shifts.
263   uint64_t maxShift = *std::max_element(shifts.begin(), shifts.end());
264   if (maxShift >= numChildOps) {
265     // Large shifts are not the typical use case.
266     forOp.emitWarning("not shifting because shifts are unrealistically large");
267     return success();
268   }
269 
270   // An array of operation groups sorted by shift amount; each group has all
271   // operations with the same shift in the order in which they appear in the
272   // body of the 'affine.for' op.
273   std::vector<std::vector<Operation *>> sortedOpGroups(maxShift + 1);
274   unsigned pos = 0;
275   for (auto &op : forOp.getBody()->without_terminator()) {
276     auto shift = shifts[pos++];
277     sortedOpGroups[shift].push_back(&op);
278   }
279 
280   // Unless the shifts have a specific pattern (which actually would be the
281   // common use case), prologue and epilogue are not meaningfully defined.
282   // Nevertheless, if 'unrollPrologueEpilogue' is set, we will treat the first
283   // loop generated as the prologue and the last as epilogue and unroll these
284   // fully.
285   AffineForOp prologue, epilogue;
286 
287   // Do a sweep over the sorted shifts while storing open groups in a
288   // vector, and generating loop portions as necessary during the sweep. A block
289   // of operations is paired with its shift.
290   std::vector<std::pair<uint64_t, ArrayRef<Operation *>>> opGroupQueue;
291 
292   auto origLbMap = forOp.getLowerBoundMap();
293   uint64_t lbShift = 0;
294   OpBuilder b(forOp);
295   for (uint64_t d = 0, e = sortedOpGroups.size(); d < e; ++d) {
296     // If nothing is shifted by d, continue.
297     if (sortedOpGroups[d].empty())
298       continue;
299     if (!opGroupQueue.empty()) {
300       assert(d > 0 &&
301              "Queue expected to be empty when the first block is found");
302       // The interval for which the loop needs to be generated here is:
303       // [lbShift, min(lbShift + tripCount, d)) and the body of the
304       // loop needs to have all operations in opQueue in that order.
305       AffineForOp res;
306       if (lbShift + tripCount * step < d * step) {
307         res = generateShiftedLoop(
308             b.getShiftedAffineMap(origLbMap, lbShift),
309             b.getShiftedAffineMap(origLbMap, lbShift + tripCount * step),
310             opGroupQueue, /*offset=*/0, forOp, b);
311         // Entire loop for the queued op groups generated, empty it.
312         opGroupQueue.clear();
313         lbShift += tripCount * step;
314       } else {
315         res = generateShiftedLoop(b.getShiftedAffineMap(origLbMap, lbShift),
316                                   b.getShiftedAffineMap(origLbMap, d),
317                                   opGroupQueue, /*offset=*/0, forOp, b);
318         lbShift = d * step;
319       }
320 
321       if (res) {
322         // Simplify/canonicalize the affine.for.
323         RewritePatternSet patterns(res.getContext());
324         AffineForOp::getCanonicalizationPatterns(patterns, res.getContext());
325         bool erased;
326         (void)applyOpPatternsAndFold(res, std::move(patterns), &erased);
327 
328         if (!erased && !prologue)
329           prologue = res;
330         if (!erased)
331           epilogue = res;
332       }
333     } else {
334       // Start of first interval.
335       lbShift = d * step;
336     }
337     // Augment the list of operations that get into the current open interval.
338     opGroupQueue.emplace_back(d, sortedOpGroups[d]);
339   }
340 
  // Those operation groups left in the queue now need to be processed (FIFO)
  // and their loops completed.
343   for (unsigned i = 0, e = opGroupQueue.size(); i < e; ++i) {
344     uint64_t ubShift = (opGroupQueue[i].first + tripCount) * step;
345     epilogue = generateShiftedLoop(b.getShiftedAffineMap(origLbMap, lbShift),
346                                    b.getShiftedAffineMap(origLbMap, ubShift),
347                                    opGroupQueue, /*offset=*/i, forOp, b);
348     lbShift = ubShift;
349     if (!prologue)
350       prologue = epilogue;
351   }
352 
353   // Erase the original for op.
354   forOp.erase();
355 
356   if (unrollPrologueEpilogue && prologue)
357     (void)loopUnrollFull(prologue);
  if (unrollPrologueEpilogue && epilogue && epilogue != prologue)
359     (void)loopUnrollFull(epilogue);
360 
361   return success();
362 }
363 
364 /// Checks the legality of tiling of a hyper-rectangular loop nest by simply
365 /// checking if there is a 'negative' dependence in the memrefs present in
366 /// the loop nest. If yes then tiling is invalid.
367 static bool
368 checkTilingLegalityImpl(MutableArrayRef<mlir::AffineForOp> origLoops) {
369   assert(!origLoops.empty() && "no original loops provided");
370 
371   // We first find out all dependences we intend to check.
372   SmallVector<Operation *, 8> loadAndStoreOps;
373   origLoops[0]->walk([&](Operation *op) {
374     if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
375       loadAndStoreOps.push_back(op);
376   });
377 
378   unsigned numOps = loadAndStoreOps.size();
379   unsigned numLoops = origLoops.size();
380   FlatAffineValueConstraints dependenceConstraints;
381   for (unsigned d = 1; d <= numLoops + 1; ++d) {
382     for (unsigned i = 0; i < numOps; ++i) {
383       Operation *srcOp = loadAndStoreOps[i];
384       MemRefAccess srcAccess(srcOp);
385       for (unsigned j = 0; j < numOps; ++j) {
386         Operation *dstOp = loadAndStoreOps[j];
387         MemRefAccess dstAccess(dstOp);
388 
389         SmallVector<DependenceComponent, 2> depComps;
390         dependenceConstraints.reset();
391         DependenceResult result = checkMemrefAccessDependence(
392             srcAccess, dstAccess, d, &dependenceConstraints, &depComps);
393 
394         // Skip if there is no dependence in this case.
395         if (!hasDependence(result))
396           continue;
397 
398         // Check whether there is any negative direction vector in the
399         // dependence components found above, which means that dependence is
400         // violated by the default hyper-rect tiling method.
401         LLVM_DEBUG(llvm::dbgs() << "Checking whether tiling legality violated "
402                                    "for dependence at depth: "
403                                 << Twine(d) << " between:\n";);
404         LLVM_DEBUG(srcAccess.opInst->dump(););
405         LLVM_DEBUG(dstAccess.opInst->dump(););
406         for (unsigned k = 0, e = depComps.size(); k < e; k++) {
407           DependenceComponent depComp = depComps[k];
408           if (depComp.lb.hasValue() && depComp.ub.hasValue() &&
409               depComp.lb.getValue() < depComp.ub.getValue() &&
410               depComp.ub.getValue() < 0) {
411             LLVM_DEBUG(llvm::dbgs()
412                        << "Dependence component lb = "
413                        << Twine(depComp.lb.getValue())
414                        << " ub = " << Twine(depComp.ub.getValue())
415                        << " is negative  at depth: " << Twine(d)
416                        << " and thus violates the legality rule.\n");
417             return false;
418           }
419         }
420       }
421     }
422   }
423 
424   return true;
425 }
426 
427 /// Checks whether hyper-rectangular loop tiling of the nest
428 /// represented by `origLoops` is valid. The validity condition is from Irigoin
429 /// and Triolet, which states that two tiles cannot depend on each other. We
430 /// simplify such condition to just checking whether there is any negative
431 /// dependence direction, since we have the prior knowledge that the tiling
432 /// results will be hyper-rectangles, which are scheduled in the
433 /// lexicographically increasing order on the vector of loop indices. This
434 /// function will return failure when any dependence component is negative along
435 /// any of `origLoops`.
436 LogicalResult
437 checkTilingLegality(MutableArrayRef<mlir::AffineForOp> origLoops) {
438   return success(checkTilingLegalityImpl(origLoops));
439 }
440 
441 /// Checks whether a loop nest is hyper-rectangular or not.
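/// For example, a nest such as
/// `affine.for %i = 0 to 10 { affine.for %j = 0 to %i { ... } }` is not
/// hyper-rectangular, since the inner loop's upper bound depends on the outer
/// IV.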
442 LogicalResult checkIfHyperRectangular(MutableArrayRef<AffineForOp> input) {
443   FlatAffineValueConstraints cst;
444   SmallVector<Operation *, 8> ops(input.begin(), input.end());
445   // 0-d or 1-d is trivially hyper-rectangular.
446   if (input.size() <= 1)
447     return success();
448   if (failed(getIndexSet(ops, &cst))) {
449     LLVM_DEBUG(llvm::dbgs() << "Index set computation failed!\n");
450     return failure();
451   }
452   if (!cst.isHyperRectangular(0, input.size())) {
453     LLVM_DEBUG(llvm::dbgs()
454                << "Non-hyperrectangular nests not supported for tiling!\n");
455     return failure();
456   }
457   return success();
458 }
459 
460 /// Check if the input nest is supported for tiling and whether tiling would be
461 /// legal or not.
462 template <typename t>
463 LogicalResult performPreTilingChecks(MutableArrayRef<AffineForOp> input,
464                                      ArrayRef<t> tileSizes) {
465   assert(input.size() == tileSizes.size() && "Too few/many tile sizes");
466 
467   if (llvm::any_of(input,
468                    [](AffineForOp op) { return op.getNumResults() > 0; })) {
469     LLVM_DEBUG(llvm::dbgs()
470                << "Cannot tile nest where a loop has yield values\n");
471     return failure();
472   }
473 
474   // Check if the supplied `for` ops are all successively nested.
475   if (!isPerfectlyNested(input)) {
476     LLVM_DEBUG(llvm::dbgs() << "input loops not perfectly nested");
477     return failure();
478   }
479 
480   if (failed(checkIfHyperRectangular(input)))
481     return failure();
482 
483   // Check if tiling is legal.
484   if (failed(checkTilingLegality(input))) {
485     input[0].emitRemark("tiling code is illegal due to dependences");
486     return failure();
487   }
488 
489   return success();
490 }
491 
492 /// Move the loop body of AffineForOp 'src' from 'src' into the specified
493 /// location in destination's body, ignoring the terminator.
494 static void moveLoopBodyImpl(AffineForOp src, AffineForOp dest,
495                              Block::iterator loc) {
496   auto &ops = src.getBody()->getOperations();
497   dest.getBody()->getOperations().splice(loc, ops, ops.begin(),
498                                          std::prev(ops.end()));
499 }
500 
501 /// Move the loop body of AffineForOp 'src' from 'src' to the start of dest
502 /// body.
503 void moveLoopBody(AffineForOp src, AffineForOp dest) {
504   moveLoopBodyImpl(src, dest, dest.getBody()->begin());
505 }
506 
/// Constructs a tiled loop nest, without setting the loop bounds, and moves
/// the body of the original loop nest to the tiled loop nest.
509 void constructTiledLoopNest(MutableArrayRef<AffineForOp> origLoops,
510                             AffineForOp rootAffineForOp, unsigned width,
511                             MutableArrayRef<AffineForOp> tiledLoops) {
512   Location loc = rootAffineForOp.getLoc();
513 
  // The outermost among the loops as we add more.
515   Operation *topLoop = rootAffineForOp.getOperation();
516   AffineForOp innermostPointLoop;
517 
518   // Add intra-tile (or point) loops.
519   for (unsigned i = 0; i < width; i++) {
520     OpBuilder b(topLoop);
521     // Loop bounds will be set later.
522     AffineForOp pointLoop = b.create<AffineForOp>(loc, 0, 0);
523     pointLoop.getBody()->getOperations().splice(
524         pointLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
525         topLoop);
526     tiledLoops[2 * width - 1 - i] = pointLoop;
527     topLoop = pointLoop.getOperation();
528     if (i == 0)
529       innermostPointLoop = pointLoop;
530   }
531 
  // Add tile space loops.
533   for (unsigned i = width; i < 2 * width; i++) {
534     OpBuilder b(topLoop);
535     // Loop bounds will be set later.
536     AffineForOp tileSpaceLoop = b.create<AffineForOp>(loc, 0, 0);
537     tileSpaceLoop.getBody()->getOperations().splice(
538         tileSpaceLoop.getBody()->begin(), topLoop->getBlock()->getOperations(),
539         topLoop);
540     tiledLoops[2 * width - i - 1] = tileSpaceLoop;
541     topLoop = tileSpaceLoop.getOperation();
542   }
543 
544   // Move the loop body of the original nest to the new one.
545   moveLoopBody(origLoops.back(), innermostPointLoop);
546 }
547 
548 /// Set lower and upper bounds of intra-tile loops for parametric tiling.
549 //  TODO: Handle non-constant lower bounds.
550 static void setIntraTileBoundsParametric(OpBuilder &b, AffineForOp origLoop,
551                                          AffineForOp newInterTileLoop,
552                                          AffineForOp newIntraTileLoop,
553                                          Value tileSize) {
  // The lower bound for the intra-tile loop is represented by an affine map
  // as (%i, %t0)->((%i - %origlb) * %t0 + %origlb). Similarly, the upper
  // bound for the intra-tile loop is represented by an affine map as
  // (%i, %t0)->((%i - %origlb) * %t0 + (%t0 * %origLoopStep) + %origlb),
  // where %i is the loop IV of the corresponding inter-tile loop, %t0 is the
  // corresponding tiling parameter, %origlb is the lower bound, and
  // %origLoopStep is the loop step of the corresponding inter-tile loop.
561 
562   assert(origLoop.hasConstantLowerBound() &&
563          "expected input loops to have constant lower bound.");
564 
565   // Get lower bound of original loop as an affine expression.
566   AffineExpr origLowerBoundExpr;
567   origLowerBoundExpr =
568       b.getAffineConstantExpr(origLoop.getConstantLowerBound());
569 
570   // Add dim operands from original lower/upper bound.
571   SmallVector<Value, 4> lbOperands, ubOperands;
572   AffineBound lb = origLoop.getLowerBound();
573   AffineBound ub = origLoop.getUpperBound();
574   lbOperands.reserve(lb.getNumOperands() + 2);
575   ubOperands.reserve(ub.getNumOperands() + 2);
576   AffineMap origLbMap = lb.getMap();
577   AffineMap origUbMap = ub.getMap();
578   for (unsigned j = 0, e = origLbMap.getNumDims(); j < e; ++j)
579     lbOperands.push_back(lb.getOperand(j));
580   for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j)
581     ubOperands.push_back(ub.getOperand(j));
582 
583   // Add a new dim operand in lb/ubOperands corresponding to the origLoop
584   // IV.
585   lbOperands.push_back(newInterTileLoop.getInductionVar());
586   ubOperands.push_back(newInterTileLoop.getInductionVar());
587 
  // Get the loop IV as an affine expression for the lower/upper bound. The
  // size of lb/ubOperands is guaranteed to be at least one.
590   AffineExpr lbLoopIvExpr = b.getAffineDimExpr(lbOperands.size() - 1);
591   AffineExpr ubLoopIvExpr = b.getAffineDimExpr(ubOperands.size() - 1);
592 
593   // Add symbol operands from original lower/upper bound.
594   for (unsigned j = 0, e = origLbMap.getNumSymbols(); j < e; ++j)
595     lbOperands.push_back(lb.getOperand(origLbMap.getNumDims() + j));
596   for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j)
597     ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));
598 
599   // Add a new symbol operand which is the tile size for this loop.
600   lbOperands.push_back(tileSize);
601   ubOperands.push_back(tileSize);
602 
603   SmallVector<AffineExpr, 4> lbBoundExprs;
604   SmallVector<AffineExpr, 4> ubBoundExprs;
605   lbBoundExprs.reserve(origLbMap.getNumResults());
606   ubBoundExprs.reserve(origUbMap.getNumResults());
607 
608   // Get tiling parameter as an affine expression for lb/ub.
609   AffineExpr lbTileParameter = b.getAffineSymbolExpr(origLbMap.getNumSymbols());
610   AffineExpr ubTileParameter = b.getAffineSymbolExpr(origUbMap.getNumSymbols());
611 
  // Insert lb as ((inter-tile loop IV - origlb) * tilingParameter) + origlb.
613   lbBoundExprs.push_back(
614       ((lbLoopIvExpr - origLowerBoundExpr) * lbTileParameter) +
615       origLowerBoundExpr);
616 
617   // Get the origLoopStep as an affine expression.
618   AffineExpr origLoopStep = b.getAffineConstantExpr(origLoop.getStep());
619 
  // Insert ub as ((inter-tile loop IV - origlb) * tilingParameter) +
  // (tilingParameter * origLoopStep) + origlb.
622   ubBoundExprs.push_back(
623       ((ubLoopIvExpr - origLowerBoundExpr) * ubTileParameter) +
624       (ubTileParameter * origLoopStep) + origLowerBoundExpr);
625 
626   ubBoundExprs.append(origUbMap.getResults().begin(),
627                       origUbMap.getResults().end());
628 
629   AffineMap lbMap =
630       AffineMap::get(origLbMap.getNumDims() + 1, origLbMap.getNumSymbols() + 1,
631                      lbBoundExprs, b.getContext());
632   newIntraTileLoop.setLowerBound(lbOperands, lbMap);
633 
634   AffineMap ubMap =
635       AffineMap::get(origUbMap.getNumDims() + 1, origUbMap.getNumSymbols() + 1,
636                      ubBoundExprs, b.getContext());
637   newIntraTileLoop.setUpperBound(ubOperands, ubMap);
638 
639   // Original loop step must be preserved.
640   newIntraTileLoop.setStep(origLoop.getStep());
641 }
642 
643 /// Set lower and upper bounds of inter-tile loops for parametric tiling.
644 //  TODO: Handle non-constant lower bounds.
645 static void setInterTileBoundsParametric(OpBuilder &b, AffineForOp origLoop,
646                                          AffineForOp newLoop, Value tileSize) {
647   OperandRange newLbOperands = origLoop.getLowerBoundOperands();
648 
  // The lower bounds for inter-tile loops are the same as the corresponding
  // lower bounds of the original loops.
651   newLoop.setLowerBound(newLbOperands, origLoop.getLowerBoundMap());
652 
  // The new upper bound map for inter-tile loops, assuming constant lower
  // bounds, is now originalLowerBound + ceildiv((originalUpperBound -
  // originalLowerBound), tiling parameter), where the tiling parameter is the
  // respective tile size for that loop. E.g., if the original ubMap was
  // ()->(1024), the new map will be
  // ()[s0]->(lb + (1024 - lb) ceildiv s0), where s0 is the tiling parameter.
  // Therefore a new symbol operand is inserted in the map and the result
  // expression is overwritten.
661 
662   assert(origLoop.hasConstantLowerBound() &&
663          "expected input loops to have constant lower bound.");
664 
665   // Get lower bound of original loop as an affine expression.
666   AffineExpr origLowerBoundExpr;
667   origLowerBoundExpr =
668       b.getAffineConstantExpr(origLoop.getConstantLowerBound());
669 
670   // Add dim operands from original upper bound.
671   SmallVector<Value, 4> ubOperands;
672   AffineBound ub = origLoop.getUpperBound();
673   ubOperands.reserve(ub.getNumOperands() + 1);
674   AffineMap origUbMap = ub.getMap();
675   for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j)
676     ubOperands.push_back(ub.getOperand(j));
677 
678   // Add symbol operands from original upper bound.
679   for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j)
680     ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));
681 
682   // Add a new symbol operand which is the tile size for this loop.
683   ubOperands.push_back(tileSize);
684 
685   // Get tiling parameter as an affine expression.
686   AffineExpr tileParameter = b.getAffineSymbolExpr(origUbMap.getNumSymbols());
687 
688   SmallVector<AffineExpr, 4> boundExprs;
689   boundExprs.reserve(origUbMap.getNumResults());
690   int64_t origUpperBound;
691   AffineExpr origUpperBoundExpr;
692 
693   // If upper bound for the original loop is constant, then the constant can
694   // be obtained as an affine expression straight away.
695   if (origLoop.hasConstantUpperBound()) {
696     origUpperBound = origLoop.getConstantUpperBound();
697 
698     // Get original constant upper bound as an affine expression.
699     origUpperBoundExpr = b.getAffineConstantExpr(origUpperBound);
700 
    // Insert the bound as originalLowerBound + ceildiv((originalUpperBound -
    // originalLowerBound), tilingParameter).
703     boundExprs.push_back(
704         origLowerBoundExpr +
705         (origUpperBoundExpr - origLowerBoundExpr).ceilDiv(tileParameter));
706   } else {
    // If the upper bound for the original loop is not constant, then two
    // cases are possible, although their handling is the same: 1.) The ubMap
    // has only one result expression. E.g.
    //    affine.for %i = 5 to %ub
    //
    // A symbol operand is added which represents the tiling parameter. The
    // new loop bounds here will be like ()[s0, s1] -> ((s0 - 5) ceildiv s1 + 5)
    // where 's0' is the original upper bound and 's1' is the tiling
    // parameter. 2.) When the ubMap has more than one result expression. E.g.
    //    #map0 = affine_map<()[s0, s1] -> (s0, s1)>
    //    affine.for %i = 5 to min #map0()[%s0, %s1]
    //
    // A symbol operand is added which represents the tiling parameter. The
    // new loop bounds will be like ()[s0, s1, s2] -> ((s0 - 5) ceildiv s2 + 5,
    // (s1 - 5) ceildiv s2 + 5), where s2 is the tiling parameter.
722 
723     // Insert the bounds as originalLowerBound + ceildiv((originalUpperBound -
724     // originalLowerBound), tilingParameter).
725     for (AffineExpr origUpperBoundExpr : origUbMap.getResults())
726       boundExprs.push_back(
727           origLowerBoundExpr +
728           (origUpperBoundExpr - origLowerBoundExpr).ceilDiv(tileParameter));
729   }
730 
731   AffineMap ubMap =
732       AffineMap::get(origUbMap.getNumDims(), origUbMap.getNumSymbols() + 1,
733                      boundExprs, b.getContext());
734   newLoop.setUpperBound(ubOperands, ubMap);
735 
736   // Original loop step must be preserved.
737   newLoop.setStep(origLoop.getStep());
738 }
739 
740 /// Constructs and sets new loop bounds after tiling for the case of
741 /// hyper-rectangular index sets, where the bounds of one dimension do not
742 /// depend on other dimensions and tiling parameters are captured from SSA
743 /// values. Bounds of each dimension can thus be treated independently,
744 /// and deriving the new bounds is much simpler and faster than for the case of
745 /// tiling arbitrary polyhedral shapes.
746 static void constructParametricallyTiledIndexSetHyperRect(
747     MutableArrayRef<AffineForOp> origLoops,
748     MutableArrayRef<AffineForOp> newLoops, ArrayRef<Value> tileSizes) {
  assert(!origLoops.empty() && "expected at least one loop in band");
750   assert(origLoops.size() == tileSizes.size() &&
751          "expected tiling parameter for each loop in band.");
752 
753   OpBuilder b(origLoops[0].getOperation());
754   unsigned width = origLoops.size();
755 
756   // Set bounds for tile space loops.
757   for (unsigned i = 0; i < width; ++i) {
758     setInterTileBoundsParametric(b, origLoops[i], newLoops[i], tileSizes[i]);
759   }
760 
761   // Set bounds for intra-tile loops.
762   for (unsigned i = 0; i < width; ++i) {
763     setIntraTileBoundsParametric(b, origLoops[i], newLoops[i],
764                                  newLoops[i + width], tileSizes[i]);
765   }
766 }
767 
768 /// Constructs and sets new loop bounds after tiling for the case of
769 /// hyper-rectangular index sets, where the bounds of one dimension do not
770 /// depend on other dimensions. Bounds of each dimension can thus be treated
771 /// independently, and deriving the new bounds is much simpler and faster
772 /// than for the case of tiling arbitrary polyhedral shapes.
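/// For example, tiling `affine.for %i = 0 to 256` with a tile size of 32
/// produces a tile-space loop `affine.for %i = 0 to 256 step 32` and an
/// intra-tile loop running from %i to %i + 32 with the original step.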
773 static void
774 constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
775                                 MutableArrayRef<AffineForOp> newLoops,
776                                 ArrayRef<unsigned> tileSizes) {
777   assert(!origLoops.empty());
778   assert(origLoops.size() == tileSizes.size());
779 
780   OpBuilder b(origLoops[0].getOperation());
781   unsigned width = origLoops.size();
782 
783   // Bounds for tile space loops.
784   for (unsigned i = 0; i < width; i++) {
785     OperandRange newLbOperands = origLoops[i].getLowerBoundOperands();
786     OperandRange newUbOperands = origLoops[i].getUpperBoundOperands();
787     newLoops[i].setLowerBound(newLbOperands, origLoops[i].getLowerBoundMap());
788     newLoops[i].setUpperBound(newUbOperands, origLoops[i].getUpperBoundMap());
    // If the step size of the original loop is x and the tile size is y, then
    // after tiling, the tile space loop's step size becomes x*y.
791     newLoops[i].setStep(tileSizes[i] * origLoops[i].getStep());
792   }
793   // Bounds for intra-tile loops.
794   for (unsigned i = 0; i < width; i++) {
795     int64_t largestDiv = getLargestDivisorOfTripCount(origLoops[i]);
796     Optional<uint64_t> mayBeConstantCount = getConstantTripCount(origLoops[i]);
797     // The lower bound is just the tile-space loop.
798     AffineMap lbMap = b.getDimIdentityMap();
799     newLoops[width + i].setLowerBound(
800         /*operands=*/newLoops[i].getInductionVar(), lbMap);
    // The step size of intra-tile loops is just the original loops' step size.
802     newLoops[width + i].setStep(origLoops[i].getStep());
803 
804     // Set the upper bound.
805     if (mayBeConstantCount && mayBeConstantCount.getValue() < tileSizes[i]) {
806       // Trip count is less than the tile size: upper bound is lower bound +
807       // trip count * stepSize.
808       AffineMap ubMap = b.getSingleDimShiftAffineMap(
809           mayBeConstantCount.getValue() * origLoops[i].getStep());
810       newLoops[width + i].setUpperBound(
811           /*operands=*/newLoops[i].getInductionVar(), ubMap);
812     } else if (largestDiv % tileSizes[i] != 0) {
813       // Intra-tile loop ii goes from i to min(i + tileSize * stepSize, ub_i).
814       // Construct the upper bound map; the operands are the original operands
815       // with 'i' (tile-space loop) appended to it. The new upper bound map is
816       // the original one with an additional expression i + tileSize * stepSize
817       // appended.
818 
819       // Add dim operands from original upper bound.
820       SmallVector<Value, 4> ubOperands;
821       AffineBound ub = origLoops[i].getUpperBound();
822       ubOperands.reserve(ub.getNumOperands() + 1);
823       AffineMap origUbMap = ub.getMap();
824       for (unsigned j = 0, e = origUbMap.getNumDims(); j < e; ++j)
825         ubOperands.push_back(ub.getOperand(j));
826 
827       // Add dim operand for new loop upper bound.
828       ubOperands.push_back(newLoops[i].getInductionVar());
829 
830       // Add symbol operands from original upper bound.
831       for (unsigned j = 0, e = origUbMap.getNumSymbols(); j < e; ++j)
832         ubOperands.push_back(ub.getOperand(origUbMap.getNumDims() + j));
833 
834       SmallVector<AffineExpr, 4> boundExprs;
835       boundExprs.reserve(1 + origUbMap.getNumResults());
836       AffineExpr dim = b.getAffineDimExpr(origUbMap.getNumDims());
837       // The new upper bound map is the original one with an additional
838       // expression i + tileSize * stepSize (of original loop) appended.
839       boundExprs.push_back(dim + tileSizes[i] * origLoops[i].getStep());
840       boundExprs.append(origUbMap.getResults().begin(),
841                         origUbMap.getResults().end());
842       AffineMap ubMap =
843           AffineMap::get(origUbMap.getNumDims() + 1, origUbMap.getNumSymbols(),
844                          boundExprs, b.getContext());
845       newLoops[width + i].setUpperBound(/*operands=*/ubOperands, ubMap);
846     } else {
847       // No need of the min expression.
848       AffineExpr dim = b.getAffineDimExpr(0);
849       AffineMap ubMap =
850           AffineMap::get(1, 0, dim + tileSizes[i] * origLoops[i].getStep());
851       newLoops[width + i].setUpperBound(newLoops[i].getInductionVar(), ubMap);
852     }
853   }
854 }
855 
856 /// Tiles the specified band of perfectly nested loops creating tile-space loops
857 /// and intra-tile loops. A band is a contiguous set of loops.
858 //  TODO: handle non hyper-rectangular spaces.
859 LogicalResult
860 mlir::tilePerfectlyNested(MutableArrayRef<AffineForOp> input,
861                           ArrayRef<unsigned> tileSizes,
862                           SmallVectorImpl<AffineForOp> *tiledNest) {
863   if (input.empty())
864     return success();
865 
866   if (failed(performPreTilingChecks(input, tileSizes)))
867     return failure();
868 
869   MutableArrayRef<AffineForOp> origLoops = input;
870   AffineForOp rootAffineForOp = origLoops[0];
871 
872   // Note that width is at least one since the band isn't empty.
873   unsigned width = input.size();
874   SmallVector<AffineForOp, 6> tiledLoops(2 * width);
875 
876   // Construct a tiled loop nest without setting their bounds. Bounds are
877   // set later.
878   constructTiledLoopNest(origLoops, rootAffineForOp, width, tiledLoops);
879 
880   SmallVector<Value, 8> origLoopIVs;
881   extractForInductionVars(input, &origLoopIVs);
882 
883   // Set loop bounds for the tiled loop nest.
884   constructTiledIndexSetHyperRect(origLoops, tiledLoops, tileSizes);
885 
886   // Replace original IVs with intra-tile loop IVs.
887   for (unsigned i = 0; i < width; i++)
888     origLoopIVs[i].replaceAllUsesWith(tiledLoops[i + width].getInductionVar());
889 
890   // Erase the old loop nest.
891   rootAffineForOp.erase();
892 
893   if (tiledNest)
894     *tiledNest = std::move(tiledLoops);
895 
896   return success();
897 }
898 
899 /// Tiles the specified band of perfectly nested loops creating tile-space
900 /// loops and intra-tile loops, using SSA values as tiling parameters. A band
901 /// is a contiguous set of loops.
902 //  TODO: handle non hyper-rectangular spaces.
903 LogicalResult
904 mlir::tilePerfectlyNestedParametric(MutableArrayRef<AffineForOp> input,
905                                     ArrayRef<Value> tileSizes,
906                                     SmallVectorImpl<AffineForOp> *tiledNest) {
907   if (input.empty())
908     return success();
909 
910   if (failed(performPreTilingChecks(input, tileSizes)))
911     return failure();
912 
913   MutableArrayRef<AffineForOp> origLoops = input;
914   AffineForOp rootAffineForOp = origLoops[0];
915   unsigned width = input.size();
916   SmallVector<AffineForOp, 6> tiledLoops(2 * width);
917 
918   // Construct a tiled loop nest without setting their bounds. Bounds are
919   // set later.
920   constructTiledLoopNest(origLoops, rootAffineForOp, width, tiledLoops);
921 
922   SmallVector<Value, 8> origLoopIVs;
923   extractForInductionVars(input, &origLoopIVs);
924 
925   // Set loop bounds for the tiled loop nest.
926   constructParametricallyTiledIndexSetHyperRect(origLoops, tiledLoops,
927                                                 tileSizes);
928 
929   // Replace original IVs with intra-tile loop IVs.
930   for (unsigned i = 0; i < width; i++)
931     origLoopIVs[i].replaceAllUsesWith(tiledLoops[i + width].getInductionVar());
932 
933   // Erase the old loop nest.
934   rootAffineForOp.erase();
935 
936   if (tiledNest)
937     *tiledNest = std::move(tiledLoops);
938 
939   return success();
940 }
941 
/// Collects the perfectly nested sequence of loops starting at `root`. The
/// nest is extended as long as a loop's body contains exactly one op besides
/// the terminator and that op is another AffineForOp.
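/// For example, for
/// `affine.for %i ... { affine.for %j ... { "work"(%i, %j) } }`, both loops
/// are collected; if the outer body contained any other op besides the inner
/// loop, only the outer loop would be collected.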
946 void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
947                                    AffineForOp root) {
948   for (unsigned i = 0; i < std::numeric_limits<unsigned>::max(); ++i) {
949     nestedLoops.push_back(root);
950     Block &body = root.getRegion().front();
951     if (body.begin() != std::prev(body.end(), 2))
952       return;
953 
954     root = dyn_cast<AffineForOp>(&body.front());
955     if (!root)
956       return;
957   }
958 }
959 
960 /// Identify valid and profitable bands of loops to tile. This is currently just
961 /// a temporary placeholder to test the mechanics of tiled code generation.
962 /// Returns all maximal outermost perfect loop nests to tile.
963 void mlir::getTileableBands(func::FuncOp f,
964                             std::vector<SmallVector<AffineForOp, 6>> *bands) {
  // Get the maximal perfect nest of 'affine.for' ops starting from the root
  // (inclusive).
967   for (AffineForOp forOp : f.getOps<AffineForOp>()) {
968     SmallVector<AffineForOp, 6> band;
969     getPerfectlyNestedLoops(band, forOp);
970     bands->push_back(band);
971   }
972 }
973 
974 /// Unrolls this loop completely.
975 LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
976   Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
977   if (mayBeConstantTripCount.hasValue()) {
978     uint64_t tripCount = mayBeConstantTripCount.getValue();
979     if (tripCount == 0)
980       return success();
981     if (tripCount == 1)
982       return promoteIfSingleIteration(forOp);
983     return loopUnrollByFactor(forOp, tripCount);
984   }
985   return failure();
986 }
987 
/// Unrolls this loop by the specified factor or by the trip count (if
/// constant), whichever is lower.
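/// For example, with a constant trip count of 6 and an `unrollFactor` of 8,
/// the loop is unrolled by 6 (i.e., fully).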
990 LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
991                                          uint64_t unrollFactor) {
992   Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
993   if (mayBeConstantTripCount.hasValue() &&
994       mayBeConstantTripCount.getValue() < unrollFactor)
995     return loopUnrollByFactor(forOp, mayBeConstantTripCount.getValue());
996   return loopUnrollByFactor(forOp, unrollFactor);
997 }
998 
/// Generates unrolled copies of the body of 'loopBodyBlock', whose associated
/// induction variable is 'forOpIV', by 'unrollFactor', calling 'ivRemapFn' to
/// remap 'forOpIV' for each unrolled body. If specified, annotates the ops in
/// each unrolled iteration using 'annotateFn'.
1003 static void generateUnrolledLoop(
1004     Block *loopBodyBlock, Value forOpIV, uint64_t unrollFactor,
1005     function_ref<Value(unsigned, Value, OpBuilder)> ivRemapFn,
1006     function_ref<void(unsigned, Operation *, OpBuilder)> annotateFn,
1007     ValueRange iterArgs, ValueRange yieldedValues) {
1008   // Builder to insert unrolled bodies just before the terminator of the body of
1009   // 'forOp'.
1010   auto builder = OpBuilder::atBlockTerminator(loopBodyBlock);
1011 
1012   if (!annotateFn)
1013     annotateFn = [](unsigned, Operation *, OpBuilder) {};
1014 
1015   // Keep a pointer to the last non-terminator operation in the original block
1016   // so that we know what to clone (since we are doing this in-place).
1017   Block::iterator srcBlockEnd = std::prev(loopBodyBlock->end(), 2);
1018 
1019   // Unroll the contents of 'forOp' (append unrollFactor - 1 additional copies).
1020   SmallVector<Value, 4> lastYielded(yieldedValues);
1021 
1022   for (unsigned i = 1; i < unrollFactor; i++) {
1023     BlockAndValueMapping operandMap;
1024 
1025     // Prepare operand map.
1026     operandMap.map(iterArgs, lastYielded);
1027 
1028     // If the induction variable is used, create a remapping to the value for
1029     // this unrolled instance.
1030     if (!forOpIV.use_empty()) {
1031       Value ivUnroll = ivRemapFn(i, forOpIV, builder);
1032       operandMap.map(forOpIV, ivUnroll);
1033     }
1034 
1035     // Clone the original body of 'forOp'.
1036     for (auto it = loopBodyBlock->begin(); it != std::next(srcBlockEnd); it++) {
1037       Operation *clonedOp = builder.clone(*it, operandMap);
1038       annotateFn(i, clonedOp, builder);
1039     }
1040 
1041     // Update yielded values.
1042     for (unsigned i = 0, e = lastYielded.size(); i < e; i++)
1043       lastYielded[i] = operandMap.lookup(yieldedValues[i]);
1044   }
1045 
1046   // Make sure we annotate the Ops in the original body. We do this last so that
1047   // any annotations are not copied into the cloned Ops above.
1048   for (auto it = loopBodyBlock->begin(); it != std::next(srcBlockEnd); it++)
1049     annotateFn(0, &*it, builder);
1050 
1051   // Update operands of the yield statement.
1052   loopBodyBlock->getTerminator()->setOperands(lastYielded);
1053 }
1054 
1055 /// Helper to generate cleanup loop for unroll or unroll-and-jam when the trip
1056 /// count is not a multiple of `unrollFactor`.
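/// For example, when `affine.for %i = 0 to 10` is unrolled by 4, the original
/// loop's upper bound is adjusted to 8 (= 10 - 10 mod 4) and the cleanup loop
/// covers the remaining iterations [8, 10).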
1057 static LogicalResult generateCleanupLoopForUnroll(AffineForOp forOp,
1058                                                   uint64_t unrollFactor) {
1059   // Insert the cleanup loop right after 'forOp'.
1060   OpBuilder builder(forOp->getBlock(), std::next(Block::iterator(forOp)));
1061   auto cleanupForOp = cast<AffineForOp>(builder.clone(*forOp));
1062 
  // Update uses of `forOp` results: `cleanupForOp` should use the `forOp`
  // results as its iter operands and produce results for the original users
  // of the `forOp` results.
1065   auto results = forOp.getResults();
1066   auto cleanupResults = cleanupForOp.getResults();
1067   auto cleanupIterOperands = cleanupForOp.getIterOperands();
1068 
1069   for (auto e : llvm::zip(results, cleanupResults, cleanupIterOperands)) {
1070     std::get<0>(e).replaceAllUsesWith(std::get<1>(e));
1071     cleanupForOp->replaceUsesOfWith(std::get<2>(e), std::get<0>(e));
1072   }
1073 
1074   AffineMap cleanupMap;
1075   SmallVector<Value, 4> cleanupOperands;
1076   getCleanupLoopLowerBound(forOp, unrollFactor, cleanupMap, cleanupOperands);
1077   if (!cleanupMap)
1078     return failure();
1079 
1080   cleanupForOp.setLowerBound(cleanupOperands, cleanupMap);
1081   // Promote the loop body up if this has turned into a single iteration loop.
1082   (void)promoteIfSingleIteration(cleanupForOp);
1083 
1084   // Adjust upper bound of the original loop; this is the same as the lower
1085   // bound of the cleanup loop.
1086   forOp.setUpperBound(cleanupOperands, cleanupMap);
1087   return success();
1088 }
1089 
1090 /// Unrolls this loop by the specified factor. Returns success if the loop
1091 /// is successfully unrolled.
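/// For example, unrolling `affine.for %i = 0 to 32 { "use"(%i) }` by 4 scales
/// the step to 4, and the unrolled body contains "use" of %i, %i + 1, %i + 2,
/// and %i + 3, with the shifted IVs materialized via affine.apply ops.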
1092 LogicalResult mlir::loopUnrollByFactor(
1093     AffineForOp forOp, uint64_t unrollFactor,
1094     function_ref<void(unsigned, Operation *, OpBuilder)> annotateFn) {
1095   assert(unrollFactor > 0 && "unroll factor should be positive");
1096 
1097   Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
1098   if (unrollFactor == 1) {
1099     if (mayBeConstantTripCount.hasValue() &&
1100         mayBeConstantTripCount.getValue() == 1 &&
1101         failed(promoteIfSingleIteration(forOp)))
1102       return failure();
1103     return success();
1104   }
1105 
1106   // Nothing in the loop body other than the terminator.
1107   if (llvm::hasSingleElement(forOp.getBody()->getOperations()))
1108     return success();
1109 
1110   // If the trip count is lower than the unroll factor, no unrolled body.
1111   // TODO: option to specify cleanup loop unrolling.
1112   if (mayBeConstantTripCount.hasValue() &&
1113       mayBeConstantTripCount.getValue() < unrollFactor)
1114     return failure();
1115 
1116   // Generate the cleanup loop if trip count isn't a multiple of unrollFactor.
1117   if (getLargestDivisorOfTripCount(forOp) % unrollFactor != 0) {
    // Loops where the lower bound is a max expression or the upper bound is
    // a min expression and the trip count isn't a multiple of the unroll
    // factor can't be unrolled since the lower bound of the cleanup loop in
    // such cases cannot be expressed as an affine function or a max over
    // affine functions.
1122     if (forOp.getLowerBoundMap().getNumResults() != 1 ||
1123         forOp.getUpperBoundMap().getNumResults() != 1)
1124       return failure();
1125     if (failed(generateCleanupLoopForUnroll(forOp, unrollFactor)))
1126       assert(false && "cleanup loop lower bound map for single result lower "
1127                       "and upper bound maps can always be determined");
1128   }
1129 
1130   ValueRange iterArgs(forOp.getRegionIterArgs());
1131   auto yieldedValues = forOp.getBody()->getTerminator()->getOperands();
1132 
  // Scale the step of the loop being unrolled by the unroll factor.
1134   int64_t step = forOp.getStep();
1135   forOp.setStep(step * unrollFactor);
1136   generateUnrolledLoop(
1137       forOp.getBody(), forOp.getInductionVar(), unrollFactor,
1138       [&](unsigned i, Value iv, OpBuilder b) {
1139         // iv' = iv + i * step
1140         auto d0 = b.getAffineDimExpr(0);
1141         auto bumpMap = AffineMap::get(1, 0, d0 + i * step);
1142         return b.create<AffineApplyOp>(forOp.getLoc(), bumpMap, iv);
1143       },
1144       /*annotateFn=*/annotateFn,
1145       /*iterArgs=*/iterArgs, /*yieldedValues=*/yieldedValues);
1146 
1147   // Promote the loop body up if this has turned into a single iteration loop.
1148   (void)promoteIfSingleIteration(forOp);
1149   return success();
1150 }
1151 
1152 LogicalResult mlir::loopUnrollJamUpToFactor(AffineForOp forOp,
1153                                             uint64_t unrollJamFactor) {
1154   Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
1155   if (mayBeConstantTripCount.hasValue() &&
1156       mayBeConstantTripCount.getValue() < unrollJamFactor)
1157     return loopUnrollJamByFactor(forOp, mayBeConstantTripCount.getValue());
1158   return loopUnrollJamByFactor(forOp, unrollJamFactor);
1159 }
1160 
/// Checks if all control operands of all loops nested within `forOp` are
/// defined outside of `forOp`, and returns false if not.
1163 static bool areInnerBoundsInvariant(AffineForOp forOp) {
1164   auto walkResult = forOp.walk([&](AffineForOp aForOp) {
1165     for (auto controlOperand : aForOp.getControlOperands()) {
1166       if (!forOp.isDefinedOutsideOfLoop(controlOperand))
1167         return WalkResult::interrupt();
1168     }
1169     return WalkResult::advance();
1170   });
1171   return !walkResult.wasInterrupted();
1172 }
1173 
// Gathers all maximal sub-blocks of operations that do not themselves
// include a for op (an operation could have a descendant for op though
// in its tree). Ignores the block terminators.
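// For example, for a block containing {op0, op1, affine.for, op2} followed by
// the terminator, the sub-blocks gathered are [op0, op1] and [op2].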
1177 struct JamBlockGatherer {
1178   // Store iterators to the first and last op of each sub-block found.
1179   std::vector<std::pair<Block::iterator, Block::iterator>> subBlocks;
1180 
1181   // This is a linear time walk.
1182   void walk(Operation *op) {
1183     for (auto &region : op->getRegions())
1184       for (auto &block : region)
1185         walk(block);
1186   }
1187 
1188   void walk(Block &block) {
1189     for (auto it = block.begin(), e = std::prev(block.end()); it != e;) {
1190       auto subBlockStart = it;
1191       while (it != e && !isa<AffineForOp>(&*it))
1192         ++it;
1193       if (it != subBlockStart)
1194         subBlocks.emplace_back(subBlockStart, std::prev(it));
1195       // Process all for ops that appear next.
1196       while (it != e && isa<AffineForOp>(&*it))
1197         walk(&*it++);
1198     }
1199   }
1200 };
1201 
1202 /// Unrolls and jams this loop by the specified factor.
1203 LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
1204                                           uint64_t unrollJamFactor) {
1205   assert(unrollJamFactor > 0 && "unroll jam factor should be positive");
1206 
1207   Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
1208   if (unrollJamFactor == 1) {
1209     if (mayBeConstantTripCount.hasValue() &&
1210         mayBeConstantTripCount.getValue() == 1 &&
1211         failed(promoteIfSingleIteration(forOp)))
1212       return failure();
1213     return success();
1214   }
1215 
1216   // Nothing in the loop body other than the terminator.
1217   if (llvm::hasSingleElement(forOp.getBody()->getOperations()))
1218     return success();
1219 
1220   // If the trip count is lower than the unroll jam factor, no unroll jam.
1221   if (mayBeConstantTripCount.hasValue() &&
1222       mayBeConstantTripCount.getValue() < unrollJamFactor) {
1223     LLVM_DEBUG(llvm::dbgs() << "[failed] trip count < unroll-jam factor\n");
1224     return failure();
1225   }
1226 
1227   // If any control operand of any inner loop of `forOp` is defined within
1228   // `forOp`, no unroll jam.
1229   if (!areInnerBoundsInvariant(forOp))
1230     return failure();
1231 
1232   // Gather all sub-blocks to jam upon the loop being unrolled.
1233   JamBlockGatherer jbg;
1234   jbg.walk(forOp);
1235   auto &subBlocks = jbg.subBlocks;
1236 
1237   // Collect loops with iter_args.
1238   SmallVector<AffineForOp, 4> loopsWithIterArgs;
1239   forOp.walk([&](AffineForOp aForOp) {
1240     if (aForOp.getNumIterOperands() > 0)
1241       loopsWithIterArgs.push_back(aForOp);
1242   });
1243 
1244   // Get supported reductions to be used for creating reduction ops at the end.
1245   SmallVector<LoopReduction> reductions;
1246   if (forOp.getNumIterOperands() > 0)
1247     getSupportedReductions(forOp, reductions);
1248 
1249   // Generate the cleanup loop if trip count isn't a multiple of
1250   // unrollJamFactor.
1251   if (getLargestDivisorOfTripCount(forOp) % unrollJamFactor != 0) {
    // Loops where the lower bound is a max expression or the upper bound is
    // a min expression, and whose trip count isn't a multiple of the
    // unroll-jam factor, can't be unroll-jammed: the lower bound of the
    // cleanup loop in such cases cannot be expressed as an affine function or
    // a max over affine functions.
1256     if (forOp.getLowerBoundMap().getNumResults() != 1 ||
1257         forOp.getUpperBoundMap().getNumResults() != 1)
1258       return failure();
1259     if (failed(generateCleanupLoopForUnroll(forOp, unrollJamFactor)))
1260       assert(false && "cleanup loop lower bound map for single result lower "
1261                       "and upper bound maps can always be determined");
1262   }
1263 
1264   // `operandMaps[i - 1]` carries old->new operand mapping for the ith unrolled
1265   // iteration. There are (`unrollJamFactor` - 1) iterations.
1266   SmallVector<BlockAndValueMapping, 4> operandMaps(unrollJamFactor - 1);
1267 
1268   // For any loop with iter_args, replace it with a new loop that has
1269   // `unrollJamFactor` copies of its iterOperands, iter_args and yield
1270   // operands.
1271   SmallVector<AffineForOp, 4> newLoopsWithIterArgs;
1272   OpBuilder builder(forOp.getContext());
1273   for (AffineForOp oldForOp : loopsWithIterArgs) {
1274     SmallVector<Value, 4> dupIterOperands, dupIterArgs, dupYieldOperands;
1275     ValueRange oldIterOperands = oldForOp.getIterOperands();
1276     ValueRange oldIterArgs = oldForOp.getRegionIterArgs();
1277     ValueRange oldYieldOperands =
1278         cast<AffineYieldOp>(oldForOp.getBody()->getTerminator()).getOperands();
1279     // Get additional iterOperands, iterArgs, and yield operands. We will
1280     // fix iterOperands and yield operands after cloning of sub-blocks.
1281     for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
1282       dupIterOperands.append(oldIterOperands.begin(), oldIterOperands.end());
1283       dupIterArgs.append(oldIterArgs.begin(), oldIterArgs.end());
1284       dupYieldOperands.append(oldYieldOperands.begin(), oldYieldOperands.end());
1285     }
1286     // Create a new loop with additional iterOperands, iter_args and yield
1287     // operands. This new loop will take the loop body of the original loop.
1288     AffineForOp newForOp = mlir::replaceForOpWithNewYields(
1289         builder, oldForOp, dupIterOperands, dupYieldOperands, dupIterArgs);
1290     newLoopsWithIterArgs.push_back(newForOp);
1291     // `forOp` has been replaced with a new loop.
1292     if (oldForOp == forOp)
1293       forOp = newForOp;
1294     assert(oldForOp.use_empty() && "old for op should not have any user");
1295     oldForOp.erase();
1296     // Update `operandMaps` for `newForOp` iterArgs and results.
1297     ValueRange newIterArgs = newForOp.getRegionIterArgs();
1298     unsigned oldNumIterArgs = oldIterArgs.size();
1299     ValueRange newResults = newForOp.getResults();
1300     unsigned oldNumResults = newResults.size() / unrollJamFactor;
1301     assert(oldNumIterArgs == oldNumResults &&
1302            "oldNumIterArgs must be the same as oldNumResults");
1303     for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
1304       for (unsigned j = 0; j < oldNumIterArgs; ++j) {
1305         // `newForOp` has `unrollJamFactor` - 1 new sets of iterArgs and
1306         // results. Update `operandMaps[i - 1]` to map old iterArgs and results
1307         // to those in the `i`th new set.
1308         operandMaps[i - 1].map(newIterArgs[j],
1309                                newIterArgs[i * oldNumIterArgs + j]);
1310         operandMaps[i - 1].map(newResults[j],
1311                                newResults[i * oldNumResults + j]);
1312       }
1313     }
1314   }
1315 
  // Scale the step of the loop being unroll-jammed by the unroll-jam factor.
1317   int64_t step = forOp.getStep();
1318   forOp.setStep(step * unrollJamFactor);
1319 
1320   auto forOpIV = forOp.getInductionVar();
1321   // Unroll and jam (appends unrollJamFactor - 1 additional copies).
1322   for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
1323     for (auto &subBlock : subBlocks) {
1324       // Builder to insert unroll-jammed bodies. Insert right at the end of
1325       // sub-block.
1326       OpBuilder builder(subBlock.first->getBlock(), std::next(subBlock.second));
1327 
1328       // If the induction variable is used, create a remapping to the value for
1329       // this unrolled instance.
1330       if (!forOpIV.use_empty()) {
1331         // iv' = iv + i * step, i = 1 to unrollJamFactor-1.
1332         auto d0 = builder.getAffineDimExpr(0);
1333         auto bumpMap = AffineMap::get(1, 0, d0 + i * step);
1334         auto ivUnroll =
1335             builder.create<AffineApplyOp>(forOp.getLoc(), bumpMap, forOpIV);
1336         operandMaps[i - 1].map(forOpIV, ivUnroll);
1337       }
1338       // Clone the sub-block being unroll-jammed.
1339       for (auto it = subBlock.first; it != std::next(subBlock.second); ++it)
1340         builder.clone(*it, operandMaps[i - 1]);
1341     }
1342     // Fix iterOperands and yield op operands of newly created loops.
1343     for (auto newForOp : newLoopsWithIterArgs) {
1344       unsigned oldNumIterOperands =
1345           newForOp.getNumIterOperands() / unrollJamFactor;
1346       unsigned numControlOperands = newForOp.getNumControlOperands();
1347       auto yieldOp = cast<AffineYieldOp>(newForOp.getBody()->getTerminator());
1348       unsigned oldNumYieldOperands = yieldOp.getNumOperands() / unrollJamFactor;
1349       assert(oldNumIterOperands == oldNumYieldOperands &&
1350              "oldNumIterOperands must be the same as oldNumYieldOperands");
1351       for (unsigned j = 0; j < oldNumIterOperands; ++j) {
1352         // The `i`th duplication of an old iterOperand or yield op operand
1353         // needs to be replaced with a mapped value from `operandMaps[i - 1]`
1354         // if such mapped value exists.
1355         newForOp.setOperand(numControlOperands + i * oldNumIterOperands + j,
1356                             operandMaps[i - 1].lookupOrDefault(
1357                                 newForOp.getOperand(numControlOperands + j)));
1358         yieldOp.setOperand(
1359             i * oldNumYieldOperands + j,
1360             operandMaps[i - 1].lookupOrDefault(yieldOp.getOperand(j)));
1361       }
1362     }
1363   }
1364   if (forOp.getNumResults() > 0) {
1365     // Create reduction ops to combine every `unrollJamFactor` related results
1366     // into one value. For example, for %0:2 = affine.for ... and addf, we add
1367     // %1 = arith.addf %0#0, %0#1, and replace the following uses of %0#0 with
1368     // %1.
1369     builder.setInsertionPointAfter(forOp);
1370     auto loc = forOp.getLoc();
1371     unsigned oldNumResults = forOp.getNumResults() / unrollJamFactor;
1372     for (LoopReduction &reduction : reductions) {
1373       unsigned pos = reduction.iterArgPosition;
1374       Value lhs = forOp.getResult(pos);
1375       Value rhs;
1376       SmallPtrSet<Operation *, 4> newOps;
1377       for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
1378         rhs = forOp.getResult(i * oldNumResults + pos);
1379         // Create ops based on reduction type.
1380         lhs = arith::getReductionOp(reduction.kind, builder, loc, lhs, rhs);
1381         if (!lhs)
1382           return failure();
1383         Operation *op = lhs.getDefiningOp();
1384         assert(op && "Reduction op should have been created");
1385         newOps.insert(op);
1386       }
1387       // Replace all uses except those in newly created reduction ops.
1388       forOp.getResult(pos).replaceAllUsesExcept(lhs, newOps);
1389     }
1390   }
1391 
1392   // Promote the loop body up if this has turned into a single iteration loop.
1393   (void)promoteIfSingleIteration(forOp);
1394   return success();
1395 }
1396 
1397 /// Performs loop interchange on 'forOpA' and 'forOpB', where 'forOpB' is
1398 /// nested within 'forOpA' as the only non-terminator operation in its block.
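///
/// As an illustrative sketch, interchanging:
///
/// ```mlir
///   affine.for %i = 0 to 10 {    // forOpA
///     affine.for %j = 0 to 20 {  // forOpB
///       "use"(%i, %j) : (index, index) -> ()
///     }
///   }
/// ```
///
/// produces:
///
/// ```mlir
///   affine.for %j = 0 to 20 {
///     affine.for %i = 0 to 10 {
///       "use"(%i, %j) : (index, index) -> ()
///     }
///   }
/// ```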
1399 void mlir::interchangeLoops(AffineForOp forOpA, AffineForOp forOpB) {
1400   assert(&*forOpA.getBody()->begin() == forOpB.getOperation());
1401   auto &forOpABody = forOpA.getBody()->getOperations();
1402   auto &forOpBBody = forOpB.getBody()->getOperations();
1403 
  // 1) Splice forOpA's non-terminator operations (which is just forOpB) just
  // before forOpA (in forOpA's parent's block); this leaves forOpA's body
  // containing only the terminator.
1407   forOpA->getBlock()->getOperations().splice(Block::iterator(forOpA),
1408                                              forOpABody, forOpABody.begin(),
1409                                              std::prev(forOpABody.end()));
1410   // 2) Splice forOpB's non-terminator operations into the beginning of forOpA's
1411   // body (this leaves forOpB's body containing only the terminator).
1412   forOpABody.splice(forOpABody.begin(), forOpBBody, forOpBBody.begin(),
1413                     std::prev(forOpBBody.end()));
1414   // 3) Splice forOpA into the beginning of forOpB's body.
1415   forOpBBody.splice(forOpBBody.begin(), forOpA->getBlock()->getOperations(),
1416                     Block::iterator(forOpA));
1417 }
1418 
1419 // Checks each dependence component against the permutation to see if the
1420 // desired loop interchange would violate dependences by making the
1421 // dependence component lexicographically negative.
1422 static bool checkLoopInterchangeDependences(
1423     const std::vector<SmallVector<DependenceComponent, 2>> &depCompsVec,
1424     ArrayRef<AffineForOp> loops, ArrayRef<unsigned> loopPermMap) {
1425   // Invert permutation map.
1426   unsigned maxLoopDepth = loops.size();
1427   SmallVector<unsigned, 4> loopPermMapInv;
1428   loopPermMapInv.resize(maxLoopDepth);
1429   for (unsigned i = 0; i < maxLoopDepth; ++i)
1430     loopPermMapInv[loopPermMap[i]] = i;
1431 
1432   // Check each dependence component against the permutation to see if the
1433   // desired loop interchange permutation would make the dependence vectors
1434   // lexicographically negative.
1435   // Example 1: [-1, 1][0, 0]
1436   // Example 2: [0, 0][-1, 1]
1437   for (const auto &depComps : depCompsVec) {
1438     assert(depComps.size() >= maxLoopDepth);
1439     // Check if the first non-zero dependence component is positive.
1440     // This iterates through loops in the desired order.
1441     for (unsigned j = 0; j < maxLoopDepth; ++j) {
1442       unsigned permIndex = loopPermMapInv[j];
1443       assert(depComps[permIndex].lb.hasValue());
1444       int64_t depCompLb = depComps[permIndex].lb.getValue();
1445       if (depCompLb > 0)
1446         break;
1447       if (depCompLb < 0)
1448         return false;
1449     }
1450   }
1451   return true;
1452 }
1453 
1454 /// Checks if the loop interchange permutation 'loopPermMap' of the perfectly
1455 /// nested sequence of loops in 'loops' would violate dependences.
1456 bool mlir::isValidLoopInterchangePermutation(ArrayRef<AffineForOp> loops,
1457                                              ArrayRef<unsigned> loopPermMap) {
1458   // Gather dependence components for dependences between all ops in loop nest
1459   // rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
1460   assert(loopPermMap.size() == loops.size());
1461   unsigned maxLoopDepth = loops.size();
1462   std::vector<SmallVector<DependenceComponent, 2>> depCompsVec;
1463   getDependenceComponents(loops[0], maxLoopDepth, &depCompsVec);
1464   return checkLoopInterchangeDependences(depCompsVec, loops, loopPermMap);
1465 }
1466 
1467 /// Returns true if `loops` is a perfectly nested loop nest, where loops appear
1468 /// in it from outermost to innermost.
1469 bool LLVM_ATTRIBUTE_UNUSED
1470 mlir::isPerfectlyNested(ArrayRef<AffineForOp> loops) {
1471   assert(!loops.empty() && "no loops provided");
1472 
1473   // We already know that the block can't be empty.
1474   auto hasTwoElements = [](Block *block) {
1475     auto secondOpIt = std::next(block->begin());
1476     return secondOpIt != block->end() && &*secondOpIt == &block->back();
1477   };
1478 
1479   auto enclosingLoop = loops.front();
1480   for (auto loop : loops.drop_front()) {
1481     auto parentForOp = dyn_cast<AffineForOp>(loop->getParentOp());
1482     // parentForOp's body should be just this loop and the terminator.
1483     if (parentForOp != enclosingLoop || !hasTwoElements(parentForOp.getBody()))
1484       return false;
1485     enclosingLoop = loop;
1486   }
1487   return true;
1488 }
1489 
1490 // input[i] should move from position i -> permMap[i]. Returns the position in
1491 // `input` that becomes the new outermost loop.
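//
// For example (illustrative), with input = {%i, %j, %k} listed outermost to
// innermost and permMap = {2, 0, 1}, loop %i sinks to the innermost
// position, %j becomes outermost, and %k ends up in the middle, i.e.
// %j { %k { %i { ... } } }; the function returns 1 since input[1] (%j) is
// the new outermost loop.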
1492 unsigned mlir::permuteLoops(MutableArrayRef<AffineForOp> input,
1493                             ArrayRef<unsigned> permMap) {
1494   assert(input.size() == permMap.size() && "invalid permutation map size");
1495   // Check whether the permutation spec is valid. This is a small vector - we'll
1496   // just sort and check if it's iota.
1497   SmallVector<unsigned, 4> checkPermMap(permMap.begin(), permMap.end());
1498   llvm::sort(checkPermMap);
1499   if (llvm::any_of(llvm::enumerate(checkPermMap),
1500                    [](const auto &en) { return en.value() != en.index(); }))
1501     assert(false && "invalid permutation map");
1502 
1503   // Nothing to do.
1504   if (input.size() < 2)
1505     return 0;
1506 
1507   assert(isPerfectlyNested(input) && "input not perfectly nested");
1508 
1509   // Compute the inverse mapping, invPermMap: since input[i] goes to position
1510   // permMap[i], position i of the permuted nest is at input[invPermMap[i]].
1511   SmallVector<std::pair<unsigned, unsigned>, 4> invPermMap;
1512   for (unsigned i = 0, e = input.size(); i < e; ++i)
1513     invPermMap.push_back({permMap[i], i});
1514   llvm::sort(invPermMap);
1515 
1516   // Move the innermost loop body to the loop that would be the innermost in the
1517   // permuted nest (only if the innermost loop is going to change).
1518   if (permMap.back() != input.size() - 1) {
1519     auto *destBody = input[invPermMap.back().second].getBody();
1520     auto *srcBody = input.back().getBody();
1521     destBody->getOperations().splice(destBody->begin(),
1522                                      srcBody->getOperations(), srcBody->begin(),
1523                                      std::prev(srcBody->end()));
1524   }
1525 
1526   // We'll move each loop in `input` in the reverse order so that its body is
1527   // empty when we are moving it; this incurs zero copies and no erasing.
1528   for (int i = input.size() - 1; i >= 0; --i) {
1529     // If this has to become the outermost loop after permutation, add it to the
1530     // parent block of the original root.
1531     if (permMap[i] == 0) {
1532       // If the root remains the same, nothing to do.
1533       if (i == 0)
1534         continue;
1535       // Make input[i] the new outermost loop moving it into parentBlock.
1536       auto *parentBlock = input[0]->getBlock();
1537       parentBlock->getOperations().splice(Block::iterator(input[0]),
1538                                           input[i]->getBlock()->getOperations(),
1539                                           Block::iterator(input[i]));
1540       continue;
1541     }
1542 
1543     // If the parent in the permuted order is the same as in the original,
1544     // nothing to do.
1545     unsigned parentPosInInput = invPermMap[permMap[i] - 1].second;
1546     if (i > 0 && static_cast<unsigned>(i - 1) == parentPosInInput)
1547       continue;
1548 
1549     // Move input[i] to its surrounding loop in the transformed nest.
1550     auto *destBody = input[parentPosInInput].getBody();
1551     destBody->getOperations().splice(destBody->begin(),
1552                                      input[i]->getBlock()->getOperations(),
1553                                      Block::iterator(input[i]));
1554   }
1555 
1556   return invPermMap[0].second;
1557 }
1558 
1559 // Sinks all sequential loops to the innermost levels (while preserving
// relative order among them) and moves all parallel loops to the
// outermost levels (while again preserving relative order among them).
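//
// For example (illustrative), for a 2-d nest (%i, %j) where only %i carries
// a dependence, the permutation computed below is {1, 0}: the parallel loop
// %j is moved outermost and the sequential loop %i is sunk innermost,
// provided the dependence check passes.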
1562 AffineForOp mlir::sinkSequentialLoops(AffineForOp forOp) {
1563   SmallVector<AffineForOp, 4> loops;
1564   getPerfectlyNestedLoops(loops, forOp);
1565   if (loops.size() < 2)
1566     return forOp;
1567 
1568   // Gather dependence components for dependences between all ops in loop nest
1569   // rooted at 'loops[0]', at loop depths in range [1, maxLoopDepth].
1570   unsigned maxLoopDepth = loops.size();
1571   std::vector<SmallVector<DependenceComponent, 2>> depCompsVec;
1572   getDependenceComponents(loops[0], maxLoopDepth, &depCompsVec);
1573 
1574   // Mark loops as either parallel or sequential.
1575   SmallVector<bool, 8> isParallelLoop(maxLoopDepth, true);
1576   for (auto &depComps : depCompsVec) {
1577     assert(depComps.size() >= maxLoopDepth);
1578     for (unsigned j = 0; j < maxLoopDepth; ++j) {
1579       DependenceComponent &depComp = depComps[j];
1580       assert(depComp.lb.hasValue() && depComp.ub.hasValue());
1581       if (depComp.lb.getValue() != 0 || depComp.ub.getValue() != 0)
1582         isParallelLoop[j] = false;
1583     }
1584   }
1585 
1586   // Count the number of parallel loops.
1587   unsigned numParallelLoops = 0;
1588   for (unsigned i = 0, e = isParallelLoop.size(); i < e; ++i)
1589     if (isParallelLoop[i])
1590       ++numParallelLoops;
1591 
1592   // Compute permutation of loops that sinks sequential loops (and thus raises
1593   // parallel loops) while preserving relative order.
1594   SmallVector<unsigned, 4> loopPermMap(maxLoopDepth);
1595   unsigned nextSequentialLoop = numParallelLoops;
1596   unsigned nextParallelLoop = 0;
1597   for (unsigned i = 0; i < maxLoopDepth; ++i) {
1598     if (isParallelLoop[i]) {
1599       loopPermMap[i] = nextParallelLoop++;
1600     } else {
1601       loopPermMap[i] = nextSequentialLoop++;
1602     }
1603   }
1604 
1605   // Check if permutation 'loopPermMap' would violate dependences.
1606   if (!checkLoopInterchangeDependences(depCompsVec, loops, loopPermMap))
1607     return forOp;
1608   // Perform loop interchange according to permutation 'loopPermMap'.
1609   unsigned loopNestRootIndex = permuteLoops(loops, loopPermMap);
1610   return loops[loopNestRootIndex];
1611 }
1612 
1613 // Factors out common behavior to add a new `iv` (resp. `iv` + `offset`) to the
1614 // lower (resp. upper) loop bound. When called for both the lower and upper
1615 // bounds, the resulting IR resembles:
1616 //
1617 // ```mlir
//    affine.for %i = max(%iv, ...) to min(%iv + offset, ...) {
1619 //      ...
1620 //    }
1621 // ```
1622 static void augmentMapAndBounds(OpBuilder &b, Value iv, AffineMap *map,
1623                                 SmallVector<Value, 4> *operands,
1624                                 int64_t offset = 0) {
1625   auto bounds = llvm::to_vector<4>(map->getResults());
1626   bounds.push_back(b.getAffineDimExpr(map->getNumDims()) + offset);
1627   operands->insert(operands->begin() + map->getNumDims(), iv);
1628   *map = AffineMap::get(map->getNumDims() + 1, map->getNumSymbols(), bounds,
1629                         b.getContext());
1630   canonicalizeMapAndOperands(map, operands);
1631 }
1632 
1633 // Stripmines `forOp` by `factor` and sinks it under each of the `targets`.
1634 // Stripmine-sink is a primitive building block for generalized tiling of
1635 // imperfectly nested loops.
1636 // This transformation is purely mechanical and does not check legality,
1637 // profitability or even structural correctness. It is the user's
1638 // responsibility to specify `targets` that are dominated by `forOp`.
1639 // Returns the new AffineForOps, one per `targets`, nested immediately under
1640 // each of the `targets`.
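//
// As an illustrative sketch, stripmine-sinking %i by a factor of 32 under a
// single target %j:
//
// ```mlir
//   affine.for %i = 0 to 1024 {    // forOp
//     affine.for %j = 0 to 128 {   // target
//       "use"(%i, %j) : (index, index) -> ()
//     }
//   }
// ```
//
// roughly becomes (pseudo-IR; the actual bounds are emitted as max/min maps):
//
// ```mlir
//   affine.for %i = 0 to 1024 step 32 {
//     affine.for %j = 0 to 128 {
//       affine.for %ii = max(0, %i) to min(1024, %i + 32) {
//         "use"(%ii, %j) : (index, index) -> ()
//       }
//     }
//   }
// ```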
1641 static SmallVector<AffineForOp, 8>
1642 stripmineSink(AffineForOp forOp, uint64_t factor,
1643               ArrayRef<AffineForOp> targets) {
1644   auto originalStep = forOp.getStep();
1645   auto scaledStep = originalStep * factor;
1646   forOp.setStep(scaledStep);
1647 
1648   OpBuilder b(forOp->getBlock(), std::next(Block::iterator(forOp)));
1649 
1650   // Lower-bound map creation.
1651   auto lbMap = forOp.getLowerBoundMap();
1652   SmallVector<Value, 4> lbOperands(forOp.getLowerBoundOperands());
1653   augmentMapAndBounds(b, forOp.getInductionVar(), &lbMap, &lbOperands);
1654 
1655   // Upper-bound map creation.
1656   auto ubMap = forOp.getUpperBoundMap();
1657   SmallVector<Value, 4> ubOperands(forOp.getUpperBoundOperands());
1658   augmentMapAndBounds(b, forOp.getInductionVar(), &ubMap, &ubOperands,
1659                       /*offset=*/scaledStep);
1660 
1661   auto iv = forOp.getInductionVar();
1662   SmallVector<AffineForOp, 8> innerLoops;
1663   for (auto t : targets) {
1664     // Insert newForOp before the terminator of `t`.
1665     auto b = OpBuilder::atBlockTerminator(t.getBody());
1666     auto newForOp = b.create<AffineForOp>(t.getLoc(), lbOperands, lbMap,
1667                                           ubOperands, ubMap, originalStep);
1668     auto begin = t.getBody()->begin();
1669     // Skip terminator and `newForOp` which is just before the terminator.
1670     auto nOps = t.getBody()->getOperations().size() - 2;
1671     newForOp.getBody()->getOperations().splice(
1672         newForOp.getBody()->getOperations().begin(),
1673         t.getBody()->getOperations(), begin, std::next(begin, nOps));
1674     replaceAllUsesInRegionWith(iv, newForOp.getInductionVar(),
1675                                newForOp.region());
1676     innerLoops.push_back(newForOp);
1677   }
1678 
1679   return innerLoops;
1680 }
1681 
1682 // Stripmines a `forOp` by `factor` and sinks it under a single `target`.
// Returns the new AffineForOp, nested immediately under `target`.
1684 template <typename SizeType>
1685 static AffineForOp stripmineSink(AffineForOp forOp, SizeType factor,
1686                                  AffineForOp target) {
1687   // TODO: Use cheap structural assertions that targets are nested under
1688   // forOp and that targets are not nested under each other when DominanceInfo
1689   // exposes the capability. It seems overkill to construct a whole function
1690   // dominance tree at this point.
1691   auto res = stripmineSink(forOp, factor, ArrayRef<AffineForOp>(target));
1692   assert(res.size() == 1 && "Expected 1 inner forOp");
1693   return res[0];
1694 }
1695 
1696 SmallVector<SmallVector<AffineForOp, 8>, 8>
1697 mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
1698            ArrayRef<AffineForOp> targets) {
1699   SmallVector<SmallVector<AffineForOp, 8>, 8> res;
1700   SmallVector<AffineForOp, 8> currentTargets(targets.begin(), targets.end());
1701   for (auto it : llvm::zip(forOps, sizes)) {
1702     auto step = stripmineSink(std::get<0>(it), std::get<1>(it), currentTargets);
1703     res.push_back(step);
1704     currentTargets = step;
1705   }
1706   return res;
1707 }
1708 
1709 SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
1710                                        ArrayRef<uint64_t> sizes,
1711                                        AffineForOp target) {
1712   SmallVector<AffineForOp, 8> res;
1713   for (auto loops : tile(forOps, sizes, ArrayRef<AffineForOp>(target))) {
1714     assert(loops.size() == 1);
1715     res.push_back(loops[0]);
1716   }
1717   return res;
1718 }
1719 
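/// Coalesces the perfect nest of normalized loops in `loops` (outermost to
/// innermost, each with step 1 and lower bound 0) into its outermost loop,
/// whose trip count becomes the product of the individual trip counts; the
/// original induction variables are recovered via floordiv/mod of the
/// linearized one. As an illustrative sketch (constants folded for brevity):
///
/// ```mlir
///   affine.for %i = 0 to 32 {
///     affine.for %j = 0 to 8 {
///       "use"(%i, %j) : (index, index) -> ()
///     }
///   }
/// ```
///
/// roughly becomes:
///
/// ```mlir
///   affine.for %iv = 0 to 256 {
///     %j = affine.apply affine_map<(d0) -> (d0 mod 8)>(%iv)
///     %i = affine.apply affine_map<(d0) -> (d0 floordiv 8)>(%iv)
///     "use"(%i, %j) : (index, index) -> ()
///   }
/// ```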
1720 LogicalResult mlir::coalesceLoops(MutableArrayRef<AffineForOp> loops) {
1721   if (loops.size() < 2)
1722     return success();
1723 
1724   AffineForOp innermost = loops.back();
1725   AffineForOp outermost = loops.front();
1726   AffineBound ub = outermost.getUpperBound();
1727   AffineMap origUbMap = ub.getMap();
1728   Location loc = outermost.getLoc();
1729   OpBuilder builder(outermost);
1730   for (AffineForOp loop : loops) {
1731     // We only work on normalized loops.
1732     if (loop.getStep() != 1 || !loop.hasConstantLowerBound() ||
1733         loop.getConstantLowerBound() != 0)
1734       return failure();
1735   }
1736   SmallVector<Value, 4> upperBoundSymbols;
1737   SmallVector<Value, 4> ubOperands(ub.getOperands().begin(),
1738                                    ub.getOperands().end());
1739 
1740   // 1. Store the upper bound of the outermost loop in a variable.
1741   Value prev;
1742   if (!llvm::hasSingleElement(origUbMap.getResults()))
1743     prev = builder.create<AffineMinOp>(loc, origUbMap, ubOperands);
1744   else
1745     prev = builder.create<AffineApplyOp>(loc, origUbMap, ubOperands);
1746   upperBoundSymbols.push_back(prev);
1747 
1748   // 2. Emit code computing the upper bound of the coalesced loop as product of
1749   // the number of iterations of all loops.
1750   for (AffineForOp loop : loops.drop_front()) {
1751     ub = loop.getUpperBound();
1752     origUbMap = ub.getMap();
1753     ubOperands = ub.getOperands();
1754     Value upperBound;
1755     // If upper bound map has more than one result, take their minimum.
1756     if (!llvm::hasSingleElement(origUbMap.getResults()))
1757       upperBound = builder.create<AffineMinOp>(loc, origUbMap, ubOperands);
1758     else
1759       upperBound = builder.create<AffineApplyOp>(loc, origUbMap, ubOperands);
1760     upperBoundSymbols.push_back(upperBound);
1761     SmallVector<Value, 4> operands;
1762     operands.push_back(prev);
1763     operands.push_back(upperBound);
1764     // Maintain running product of loop upper bounds.
1765     prev = builder.create<AffineApplyOp>(
1766         loc,
1767         AffineMap::get(/*numDims=*/1,
1768                        /*numSymbols=*/1,
1769                        builder.getAffineDimExpr(0) *
1770                            builder.getAffineSymbolExpr(0)),
1771         operands);
1772   }
1773   // Set upper bound of the coalesced loop.
1774   AffineMap newUbMap = AffineMap::get(
1775       /*numDims=*/0,
1776       /*numSymbols=*/1, builder.getAffineSymbolExpr(0), builder.getContext());
1777   outermost.setUpperBound(prev, newUbMap);
1778 
1779   builder.setInsertionPointToStart(outermost.getBody());
1780 
1781   // 3. Remap induction variables. For each original loop, the value of the
1782   // induction variable can be obtained by dividing the induction variable of
1783   // the linearized loop by the total number of iterations of the loops nested
1784   // in it modulo the number of iterations in this loop (remove the values
1785   // related to the outer loops):
1786   //   iv_i = floordiv(iv_linear, product-of-loop-ranges-until-i) mod range_i.
1787   // Compute these iteratively from the innermost loop by creating a "running
1788   // quotient" of division by the range.
1789   Value previous = outermost.getInductionVar();
1790   for (unsigned idx = loops.size(); idx > 0; --idx) {
1791     if (idx != loops.size()) {
1792       SmallVector<Value, 4> operands;
1793       operands.push_back(previous);
1794       operands.push_back(upperBoundSymbols[idx]);
1795       previous = builder.create<AffineApplyOp>(
1796           loc,
1797           AffineMap::get(
1798               /*numDims=*/1, /*numSymbols=*/1,
1799               builder.getAffineDimExpr(0).floorDiv(
1800                   builder.getAffineSymbolExpr(0))),
1801           operands);
1802     }
1803     // Modified value of the induction variables of the nested loops after
1804     // coalescing.
1805     Value inductionVariable;
1806     if (idx == 1) {
1807       inductionVariable = previous;
1808     } else {
1809       SmallVector<Value, 4> applyOperands;
1810       applyOperands.push_back(previous);
1811       applyOperands.push_back(upperBoundSymbols[idx - 1]);
1812       inductionVariable = builder.create<AffineApplyOp>(
1813           loc,
1814           AffineMap::get(
1815               /*numDims=*/1, /*numSymbols=*/1,
1816               builder.getAffineDimExpr(0) % builder.getAffineSymbolExpr(0)),
1817           applyOperands);
1818     }
1819     replaceAllUsesInRegionWith(loops[idx - 1].getInductionVar(),
1820                                inductionVariable, loops.back().region());
1821   }
1822 
  // 4. Move the operations from the innermost loop's body to just above the
  // second-outermost loop, deleting the now-redundant terminator and the
  // second-outermost loop itself.
1825   AffineForOp secondOutermostLoop = loops[1];
1826   innermost.getBody()->back().erase();
1827   outermost.getBody()->getOperations().splice(
1828       Block::iterator(secondOutermostLoop.getOperation()),
1829       innermost.getBody()->getOperations());
1830   secondOutermostLoop.erase();
1831   return success();
1832 }
1833 
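/// Maps `forOp` for execution on a grid of processors: the processor ids are
/// linearized into a single index, the loop's lower bound is offset by
/// linearIndex * step so each processor starts at its own iteration, and the
/// step is scaled by the total number of processors so iterations are
/// distributed cyclically. For example (illustrative), with a single
/// processor dimension (%pid, %nprocs), `for %i = %lb to %ub step %s`
/// becomes `for %i = %lb + %pid * %s to %ub step %s * %nprocs`.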
1834 void mlir::mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef<Value> processorId,
1835                                  ArrayRef<Value> numProcessors) {
1836   assert(processorId.size() == numProcessors.size());
1837   if (processorId.empty())
1838     return;
1839 
1840   OpBuilder b(forOp);
1841   Location loc(forOp.getLoc());
1842   AffineExpr lhs, rhs;
1843   bindSymbols(forOp.getContext(), lhs, rhs);
1844   auto mulMap = AffineMap::get(0, 2, lhs * rhs);
1845   auto addMap = AffineMap::get(0, 2, lhs + rhs);
1846 
1847   Value linearIndex = processorId.front();
1848   for (unsigned i = 1, e = processorId.size(); i < e; ++i) {
1849     auto mulApplyOp = b.create<AffineApplyOp>(
1850         loc, mulMap, ValueRange{linearIndex, numProcessors[i]});
1851     linearIndex = b.create<AffineApplyOp>(
1852         loc, addMap, ValueRange{mulApplyOp, processorId[i]});
1853   }
1854 
1855   auto mulApplyOp = b.create<AffineApplyOp>(
1856       loc, mulMap, ValueRange{linearIndex, forOp.getStep()});
1857   Value lb = b.create<AffineApplyOp>(
1858       loc, addMap, ValueRange{mulApplyOp, forOp.getLowerBound()});
1859   forOp.setLowerBound(lb);
1860 
1861   Value step = forOp.getStep();
1862   for (auto numProcs : numProcessors)
1863     step = b.create<AffineApplyOp>(loc, mulMap, ValueRange{numProcs, step});
1864   forOp.setStep(step);
1865 }
1866 
1867 /// Given a memref region, determine the lowest depth at which transfers can be
1868 /// placed for it, and return the corresponding block, start and end positions
1869 /// in the block for placing incoming (read) and outgoing (write) copies
1870 /// respectively. The lowest depth depends on whether the region being accessed
1871 /// is hoistable with respect to one or more immediately surrounding loops.
1872 static void
1873 findHighestBlockForPlacement(const MemRefRegion &region, Block &block,
1874                              Block::iterator &begin, Block::iterator &end,
1875                              Block **copyPlacementBlock,
1876                              Block::iterator *copyInPlacementStart,
1877                              Block::iterator *copyOutPlacementStart) {
1878   const auto *cst = region.getConstraints();
1879   SmallVector<Value, 4> symbols;
1880   cst->getValues(cst->getNumDimIds(), cst->getNumDimAndSymbolIds(), &symbols);
1881 
1882   SmallVector<AffineForOp, 4> enclosingFors;
1883   getLoopIVs(*block.begin(), &enclosingFors);
1884   // Walk up loop parents till we find an IV on which this region is
1885   // symbolic/variant.
1886   auto it = enclosingFors.rbegin();
1887   for (auto e = enclosingFors.rend(); it != e; ++it) {
1888     // TODO: also need to be checking this for regions symbols that
1889     // aren't loop IVs, whether we are within their resp. defs' dominance scope.
1890     if (llvm::is_contained(symbols, it->getInductionVar()))
1891       break;
1892   }
1893 
1894   if (it != enclosingFors.rbegin()) {
1895     auto lastInvariantIV = *std::prev(it);
1896     *copyInPlacementStart = Block::iterator(lastInvariantIV.getOperation());
1897     *copyOutPlacementStart = std::next(*copyInPlacementStart);
1898     *copyPlacementBlock = lastInvariantIV->getBlock();
1899   } else {
1900     *copyInPlacementStart = begin;
1901     *copyOutPlacementStart = end;
1902     *copyPlacementBlock = &block;
1903   }
1904 }
1905 
1906 // Info comprising stride and number of elements transferred every stride.
1907 struct StrideInfo {
1908   int64_t stride;
1909   int64_t numEltPerStride;
1910 };
1911 
1912 /// Returns striding information for a copy/transfer of this region with
1913 /// potentially multiple striding levels from outermost to innermost. For an
1914 /// n-dimensional region, there can be at most n-1 levels of striding
1915 /// successively nested.
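/// For example (illustrative), copying a 64x32 region of a 128x128 memref
/// needs one level of striding: a stride of 128 elements, with 32 contiguous
/// elements transferred per stride.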
1916 //  TODO: make this work with non-identity layout maps.
1917 static void getMultiLevelStrides(const MemRefRegion &region,
1918                                  ArrayRef<int64_t> bufferShape,
1919                                  SmallVectorImpl<StrideInfo> *strideInfos) {
1920   if (bufferShape.size() <= 1)
1921     return;
1922 
1923   int64_t numEltPerStride = 1;
1924   int64_t stride = 1;
1925   for (int d = bufferShape.size() - 1; d >= 1; d--) {
1926     int64_t dimSize = region.memref.getType().cast<MemRefType>().getDimSize(d);
1927     stride *= dimSize;
1928     numEltPerStride *= bufferShape[d];
1929     // A stride is needed only if the region has a shorter extent than the
1930     // memref along the dimension *and* has an extent greater than one along the
1931     // next major dimension.
1932     if (bufferShape[d] < dimSize && bufferShape[d - 1] > 1) {
1933       strideInfos->push_back({stride, numEltPerStride});
1934     }
1935   }
1936 }
1937 
1938 /// Generates a point-wise copy from/to `memref' to/from `fastMemRef' and
1939 /// returns the outermost AffineForOp of the copy loop nest. `lbMaps` and
1940 /// `ubMaps` along with `lbOperands` and `ubOperands` hold the lower and upper
1941 /// bound information for the copy loop nest. `fastBufOffsets` contain the
1942 /// expressions to be subtracted out from the respective copy loop iterators in
/// order to index the fast buffer. If `isCopyOut' is true, generates a
/// copy-out; otherwise a copy-in. Builder `b` should be set to the point the
/// copy nest is
1944 /// otherwise a copy-in. Builder `b` should be set to the point the copy nest is
1945 /// inserted.
///
1947 /// The copy-in nest is generated as follows as an example for a 2-d region:
1948 /// for x = ...
1949 ///   for y = ...
1950 ///     fast_buf[x - offset_x][y - offset_y] = memref[x][y]
1951 ///
1952 static AffineForOp
1953 generatePointWiseCopy(Location loc, Value memref, Value fastMemRef,
1954                       ArrayRef<AffineMap> lbMaps, ArrayRef<Value> lbOperands,
1955                       ArrayRef<AffineMap> ubMaps, ArrayRef<Value> ubOperands,
1956                       ArrayRef<AffineExpr> fastBufOffsets, bool isCopyOut,
1957                       OpBuilder b) {
1958   assert(llvm::all_of(lbMaps, [&](AffineMap lbMap) {
1959     return lbMap.getNumInputs() == lbOperands.size();
1960   }));
1961   assert(llvm::all_of(ubMaps, [&](AffineMap ubMap) {
1962     return ubMap.getNumInputs() == ubOperands.size();
1963   }));
1964 
1965   unsigned rank = memref.getType().cast<MemRefType>().getRank();
1966   assert(lbMaps.size() == rank && "wrong number of lb maps");
1967   assert(ubMaps.size() == rank && "wrong number of ub maps");
1968 
1969   SmallVector<Value, 4> memIndices;
1970   SmallVector<AffineExpr, 4> fastBufExprs;
1971   SmallVector<Value, 4> fastBufMapOperands;
1972   AffineForOp copyNestRoot;
1973   SmallVector<AffineApplyOp, 4> mayBeDeadApplys;
1974   for (unsigned d = 0; d < rank; ++d) {
1975     auto forOp = createCanonicalizedAffineForOp(b, loc, lbOperands, lbMaps[d],
1976                                                 ubOperands, ubMaps[d]);
1977     if (d == 0)
1978       copyNestRoot = forOp;
1979 
1980     b = OpBuilder::atBlockTerminator(forOp.getBody());
1981 
1982     auto fastBufOffsetMap =
1983         AffineMap::get(lbOperands.size(), 0, fastBufOffsets[d]);
1984     auto offset = b.create<AffineApplyOp>(loc, fastBufOffsetMap, lbOperands);
1985 
1986     // Construct the subscript for the fast memref being copied into/from:
1987     // x - offset_x.
1988     fastBufExprs.push_back(b.getAffineDimExpr(2 * d + 1) -
1989                            b.getAffineDimExpr(2 * d));
1990     fastBufMapOperands.push_back(offset);
1991     fastBufMapOperands.push_back(forOp.getInductionVar());
1992     mayBeDeadApplys.push_back(offset);
1993 
1994     // Subscript for the slow memref being copied.
1995     memIndices.push_back(forOp.getInductionVar());
1996   }
1997 
1998   auto fastBufMap =
1999       AffineMap::get(2 * rank, /*symbolCount=*/0, fastBufExprs, b.getContext());
2000   fullyComposeAffineMapAndOperands(&fastBufMap, &fastBufMapOperands);
2001   fastBufMap = simplifyAffineMap(fastBufMap);
2002   canonicalizeMapAndOperands(&fastBufMap, &fastBufMapOperands);
2003 
2004   // Drop any dead affine.applys.
2005   for (auto applyOp : mayBeDeadApplys)
2006     if (applyOp.use_empty())
2007       applyOp.erase();
2008 
2009   if (!isCopyOut) {
2010     // Copy in.
2011     auto load = b.create<AffineLoadOp>(loc, memref, memIndices);
2012     b.create<AffineStoreOp>(loc, load, fastMemRef, fastBufMap,
2013                             fastBufMapOperands);
2014     return copyNestRoot;
2015   }
2016 
2017   // Copy out.
2018   auto load =
2019       b.create<AffineLoadOp>(loc, fastMemRef, fastBufMap, fastBufMapOperands);
2020   b.create<AffineStoreOp>(loc, load, memref, memIndices);
2021   return copyNestRoot;
2022 }
2023 
2024 static InFlightDiagnostic LLVM_ATTRIBUTE_UNUSED
2025 emitRemarkForBlock(Block &block) {
2026   return block.getParentOp()->emitRemark();
2027 }
2028 
2029 /// Creates a buffer in the faster memory space for the specified memref region;
2030 /// generates a copy from the lower memory space to this one, and replaces all
/// loads/stores in the block range [`begin', `end') of `block' with
/// loads/stores from that buffer. Returns failure if copies could not be
/// generated due to
2033 /// yet unimplemented cases. `copyInPlacementStart` and `copyOutPlacementStart`
2034 /// in copyPlacementBlock specify the insertion points where the incoming copies
2035 /// and outgoing copies, respectively, should be inserted (the insertion happens
2036 /// right before the insertion point). Since `begin` can itself be invalidated
2037 /// due to the memref rewriting done from this method, the output argument
2038 /// `nBegin` is set to its replacement (set to `begin` if no invalidation
/// happens). Since outgoing copies could have been inserted at `end`, the
2040 /// output argument `nEnd` is set to the new end. `sizeInBytes` is set to the
2041 /// size of the fast buffer allocated.
2042 static LogicalResult generateCopy(
2043     const MemRefRegion &region, Block *block, Block::iterator begin,
2044     Block::iterator end, Block *copyPlacementBlock,
2045     Block::iterator copyInPlacementStart, Block::iterator copyOutPlacementStart,
2046     AffineCopyOptions copyOptions, DenseMap<Value, Value> &fastBufferMap,
2047     DenseSet<Operation *> &copyNests, uint64_t *sizeInBytes,
2048     Block::iterator *nBegin, Block::iterator *nEnd) {
2049   *nBegin = begin;
2050   *nEnd = end;
2051 
2052   func::FuncOp f = begin->getParentOfType<func::FuncOp>();
2053   OpBuilder topBuilder(f.getBody());
2054   Value zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);
2055 
2056   if (begin == end)
2057     return success();
2058 
  // Check whether the copy-out point is at the end of the block where we are
  // doing explicit copying.
2061   bool isCopyOutAtEndOfBlock = (end == copyOutPlacementStart);
2062 
2063   // Copies for read regions are going to be inserted at 'begin'.
2064   OpBuilder prologue(copyPlacementBlock, copyInPlacementStart);
2065   // Copies for write regions are going to be inserted at 'end'.
2066   OpBuilder epilogue(copyPlacementBlock, copyOutPlacementStart);
2067   OpBuilder &b = region.isWrite() ? epilogue : prologue;
2068 
2069   // Builder to create constants at the top level.
2070   auto func = copyPlacementBlock->getParent()->getParentOfType<func::FuncOp>();
2071   OpBuilder top(func.getBody());
2072 
2073   auto loc = region.loc;
2074   auto memref = region.memref;
2075   auto memRefType = memref.getType().cast<MemRefType>();
2076 
2077   if (!memRefType.getLayout().isIdentity()) {
2078     LLVM_DEBUG(llvm::dbgs() << "Non-identity layout map not yet supported\n");
2079     return failure();
2080   }
2081 
2082   // Indices to use for the copying.
2083   // Indices for the original memref being copied from/to.
2084   SmallVector<Value, 4> memIndices;
2085   // Indices for the faster buffer being copied into/from.
2086   SmallVector<Value, 4> bufIndices;
2087 
2088   unsigned rank = memRefType.getRank();
2089   SmallVector<int64_t, 4> fastBufferShape;
2090 
2091   // Compute the extents of the buffer.
2092   std::vector<SmallVector<int64_t, 4>> lbs;
2093   SmallVector<int64_t, 8> lbDivisors;
2094   lbs.reserve(rank);
2095   Optional<int64_t> numElements = region.getConstantBoundingSizeAndShape(
2096       &fastBufferShape, &lbs, &lbDivisors);
2097   if (!numElements.hasValue()) {
2098     LLVM_DEBUG(llvm::dbgs() << "Non-constant region size not supported\n");
2099     return failure();
2100   }
2101 
2102   if (numElements.getValue() == 0) {
2103     LLVM_DEBUG(llvm::dbgs() << "Nothing to copy\n");
2104     *sizeInBytes = 0;
2105     return success();
2106   }
2107 
2108   SmallVector<AffineMap, 4> lbMaps(rank), ubMaps(rank);
2109   for (unsigned i = 0; i < rank; ++i)
2110     region.getLowerAndUpperBound(i, lbMaps[i], ubMaps[i]);
2111 
2112   const FlatAffineValueConstraints *cst = region.getConstraints();
2113   // 'regionSymbols' hold values that this memory region is symbolic/parametric
2114   // on; these typically include loop IVs surrounding the level at which the
2115   // copy generation is being done or other valid symbols in MLIR.
2116   SmallVector<Value, 8> regionSymbols;
2117   cst->getValues(rank, cst->getNumIds(), &regionSymbols);
2118 
2119   // Construct the index expressions for the fast memory buffer. The index
2120   // expression for a particular dimension of the fast buffer is obtained by
2121   // subtracting out the lower bound on the original memref's data region
2122   // along the corresponding dimension.
2123 
2124   // Index start offsets for faster memory buffer relative to the original.
2125   SmallVector<AffineExpr, 4> fastBufOffsets;
2126   fastBufOffsets.reserve(rank);
2127   for (unsigned d = 0; d < rank; d++) {
2128     assert(lbs[d].size() == cst->getNumCols() - rank && "incorrect bound size");
2129 
2130     AffineExpr offset = top.getAffineConstantExpr(0);
2131     for (unsigned j = 0, e = cst->getNumCols() - rank - 1; j < e; j++)
2132       offset = offset + lbs[d][j] * top.getAffineDimExpr(j);
2133     assert(lbDivisors[d] > 0);
2134     offset =
2135         (offset + lbs[d][cst->getNumCols() - 1 - rank]).floorDiv(lbDivisors[d]);
2136 
2137     // Set copy start location for this dimension in the lower memory space
2138     // memref.
2139     if (auto caf = offset.dyn_cast<AffineConstantExpr>()) {
2140       auto indexVal = caf.getValue();
2141       if (indexVal == 0) {
2142         memIndices.push_back(zeroIndex);
2143       } else {
2144         memIndices.push_back(
2145             top.create<arith::ConstantIndexOp>(loc, indexVal).getResult());
2146       }
2147     } else {
2148       // The coordinate for the start location is just the lower bound along the
2149       // corresponding dimension on the memory region (stored in 'offset').
2150       auto map = AffineMap::get(
2151           cst->getNumDimIds() + cst->getNumSymbolIds() - rank, 0, offset);
2152       memIndices.push_back(b.create<AffineApplyOp>(loc, map, regionSymbols));
2153     }
2154     // The fast buffer is copied into at location zero; addressing is relative.
2155     bufIndices.push_back(zeroIndex);
2156 
2157     // Record the offsets since they are needed to remap the memory accesses of
2158     // the original memref further below.
2159     fastBufOffsets.push_back(offset);
2160   }
2161 
2162   // The faster memory space buffer.
2163   Value fastMemRef;
2164 
2165   // Check if a buffer was already created.
2166   bool existingBuf = fastBufferMap.count(memref) > 0;
2167   if (!existingBuf) {
2168     AffineMap fastBufferLayout = b.getMultiDimIdentityMap(rank);
2169     auto fastMemRefType =
2170         MemRefType::get(fastBufferShape, memRefType.getElementType(),
2171                         fastBufferLayout, copyOptions.fastMemorySpace);
2172 
2173     // Create the fast memory space buffer just before the 'affine.for'
2174     // operation.
2175     fastMemRef =
2176         prologue.create<memref::AllocOp>(loc, fastMemRefType).getResult();
2177     // Record it.
2178     fastBufferMap[memref] = fastMemRef;
2179     // fastMemRefType is a constant shaped memref.
2180     *sizeInBytes = getMemRefSizeInBytes(fastMemRefType).getValue();
2181     LLVM_DEBUG(emitRemarkForBlock(*block)
2182                << "Creating fast buffer of type " << fastMemRefType
2183                << " and size " << llvm::divideCeil(*sizeInBytes, 1024)
2184                << " KiB\n");
2185   } else {
2186     // Reuse the one already created.
2187     fastMemRef = fastBufferMap[memref];
2188     *sizeInBytes = 0;
2189   }
2190 
2191   auto numElementsSSA =
2192       top.create<arith::ConstantIndexOp>(loc, numElements.getValue());
2193 
2194   Value dmaStride = nullptr;
2195   Value numEltPerDmaStride = nullptr;
2196   if (copyOptions.generateDma) {
2197     SmallVector<StrideInfo, 4> dmaStrideInfos;
2198     getMultiLevelStrides(region, fastBufferShape, &dmaStrideInfos);
2199 
2200     // TODO: use all stride levels once DmaStartOp is extended for
2201     // multi-level strides.
2202     if (dmaStrideInfos.size() > 1) {
2203       LLVM_DEBUG(llvm::dbgs() << "Only up to one level of stride supported\n");
2204       return failure();
2205     }
2206 
2207     if (!dmaStrideInfos.empty()) {
2208       dmaStride =
2209           top.create<arith::ConstantIndexOp>(loc, dmaStrideInfos[0].stride);
2210       numEltPerDmaStride = top.create<arith::ConstantIndexOp>(
2211           loc, dmaStrideInfos[0].numEltPerStride);
2212     }
2213   }
2214 
2215   // Record the last operation where we want the memref replacement to end. We
2216   // later do the memref replacement only in [begin, postDomFilter] so
  // that the original memrefs used in the data movement code themselves don't
2218   // get replaced.
2219   auto postDomFilter = std::prev(end);
2220 
2221   // Create fully composed affine maps for each memref.
2222   auto memAffineMap = b.getMultiDimIdentityMap(memIndices.size());
2223   fullyComposeAffineMapAndOperands(&memAffineMap, &memIndices);
2224   auto bufAffineMap = b.getMultiDimIdentityMap(bufIndices.size());
2225   fullyComposeAffineMapAndOperands(&bufAffineMap, &bufIndices);
2226 
2227   if (!copyOptions.generateDma) {
2228     // Point-wise copy generation.
2229     auto copyNest =
2230         generatePointWiseCopy(loc, memref, fastMemRef, lbMaps,
2231                               /*lbOperands=*/regionSymbols, ubMaps,
2232                               /*ubOperands=*/regionSymbols, fastBufOffsets,
2233                               /*isCopyOut=*/region.isWrite(), b);
2234 
2235     // Record this so that we can skip it from yet another copy.
2236     copyNests.insert(copyNest);
2237 
2238     // Since new ops are being appended (for copy out's), adjust the end to
2239     // mark end of block range being processed if necessary.
2240     if (region.isWrite() && isCopyOutAtEndOfBlock)
2241       *nEnd = Block::iterator(copyNest.getOperation());
2242   } else {
2243     // DMA generation.
2244     // Create a tag (single element 1-d memref) for the DMA.
2245     auto tagMemRefType = MemRefType::get({1}, top.getIntegerType(32), {},
2246                                          copyOptions.tagMemorySpace);
2247     auto tagMemRef = prologue.create<memref::AllocOp>(loc, tagMemRefType);
2248 
2249     SmallVector<Value, 4> tagIndices({zeroIndex});
2250     auto tagAffineMap = b.getMultiDimIdentityMap(tagIndices.size());
2251     fullyComposeAffineMapAndOperands(&tagAffineMap, &tagIndices);
2252     if (!region.isWrite()) {
2253       // DMA non-blocking read from original buffer to fast buffer.
2254       b.create<AffineDmaStartOp>(loc, memref, memAffineMap, memIndices,
2255                                  fastMemRef, bufAffineMap, bufIndices,
2256                                  tagMemRef, tagAffineMap, tagIndices,
2257                                  numElementsSSA, dmaStride, numEltPerDmaStride);
2258     } else {
2259       // DMA non-blocking write from fast buffer to the original memref.
2260       auto op = b.create<AffineDmaStartOp>(
2261           loc, fastMemRef, bufAffineMap, bufIndices, memref, memAffineMap,
2262           memIndices, tagMemRef, tagAffineMap, tagIndices, numElementsSSA,
2263           dmaStride, numEltPerDmaStride);
2264       // Since new ops may be appended at 'end' (for outgoing DMAs), adjust the
2265       // end to mark end of block range being processed.
2266       if (isCopyOutAtEndOfBlock)
2267         *nEnd = Block::iterator(op.getOperation());
2268     }
2269 
2270     // Matching DMA wait to block on completion; tag always has a 0 index.
2271     b.create<AffineDmaWaitOp>(loc, tagMemRef, tagAffineMap, zeroIndex,
2272                               numElementsSSA);
2273 
2274     // Generate dealloc for the tag.
2275     auto tagDeallocOp = epilogue.create<memref::DeallocOp>(loc, tagMemRef);
2276     if (*nEnd == end && isCopyOutAtEndOfBlock)
2277       // Since new ops are being appended (for outgoing DMAs), adjust the end to
2278       // mark end of range of the original.
2279       *nEnd = Block::iterator(tagDeallocOp.getOperation());
2280   }
2281 
2282   // Generate dealloc for the buffer.
2283   if (!existingBuf) {
2284     auto bufDeallocOp = epilogue.create<memref::DeallocOp>(loc, fastMemRef);
2285     // When generating pointwise copies, `nEnd' has to be set to deallocOp on
2286     // the fast buffer (since it marks the new end insertion point).
2287     if (!copyOptions.generateDma && *nEnd == end && isCopyOutAtEndOfBlock)
2288       *nEnd = Block::iterator(bufDeallocOp.getOperation());
2289   }
2290 
2291   // Replace all uses of the old memref with the faster one while remapping
2292   // access indices (subtracting out lower bound offsets for each dimension).
2293   // Ex: to replace load %A[%i, %j] with load %Abuf[%i - %iT, %j - %jT],
2294   // index remap will be (%i, %j) -> (%i - %iT, %j - %jT),
2295   // i.e., affine.apply (d0, d1, d2, d3) -> (d2-d0, d3-d1) (%iT, %jT, %i, %j),
2296   // and (%iT, %jT) will be the 'extraOperands' for 'rep all memref uses with'.
2297   // d2, d3 correspond to the original indices (%i, %j).
2298   SmallVector<AffineExpr, 4> remapExprs;
2299   remapExprs.reserve(rank);
2300   for (unsigned i = 0; i < rank; i++) {
2301     // The starting operands of indexRemap will be regionSymbols (the symbols on
2302     // which the memref region is parametric); then those corresponding to
2303     // the memref's original indices follow.
2304     auto dimExpr = b.getAffineDimExpr(regionSymbols.size() + i);
2305     remapExprs.push_back(dimExpr - fastBufOffsets[i]);
2306   }
2307   auto indexRemap = AffineMap::get(regionSymbols.size() + rank, 0, remapExprs,
2308                                    b.getContext());
2309 
2310   // Record the begin since it may be invalidated by memref replacement.
2311   Block::iterator prevOfBegin;
2312   bool isBeginAtStartOfBlock = (begin == block->begin());
2313   if (!isBeginAtStartOfBlock)
2314     prevOfBegin = std::prev(begin);
2315 
2316   // *Only* those uses within the range [begin, end) of 'block' are replaced.
2317   (void)replaceAllMemRefUsesWith(memref, fastMemRef,
2318                                  /*extraIndices=*/{}, indexRemap,
2319                                  /*extraOperands=*/regionSymbols,
2320                                  /*symbolOperands=*/{},
2321                                  /*domOpFilter=*/&*begin,
2322                                  /*postDomOpFilter=*/&*postDomFilter);
2323 
2324   *nBegin = isBeginAtStartOfBlock ? block->begin() : std::next(prevOfBegin);
2325 
2326   return success();
2327 }
2328 
/// Constructs the memref region to just include the entire memref. Returns
/// false for dynamically shaped memrefs for now. `numParamLoopIVs` is the
/// number of
2331 /// enclosing loop IVs of `op` (starting from the outermost) that the region
2332 /// is parametric on.
2333 static bool getFullMemRefAsRegion(Operation *op, unsigned numParamLoopIVs,
2334                                   MemRefRegion *region) {
2335   unsigned rank;
2336   if (auto loadOp = dyn_cast<AffineLoadOp>(op)) {
2337     rank = loadOp.getMemRefType().getRank();
2338     region->memref = loadOp.getMemRef();
2339     region->setWrite(false);
2340   } else if (auto storeOp = dyn_cast<AffineStoreOp>(op)) {
2341     rank = storeOp.getMemRefType().getRank();
2342     region->memref = storeOp.getMemRef();
2343     region->setWrite(true);
2344   } else {
2345     assert(false && "expected load or store op");
2346     return false;
2347   }
2348   auto memRefType = region->memref.getType().cast<MemRefType>();
2349   if (!memRefType.hasStaticShape())
2350     return false;
2351 
2352   auto *regionCst = region->getConstraints();
2353 
2354   // Just get the first numSymbols IVs, which the memref region is parametric
2355   // on.
2356   SmallVector<AffineForOp, 4> ivs;
2357   getLoopIVs(*op, &ivs);
2358   ivs.resize(numParamLoopIVs);
2359   SmallVector<Value, 4> symbols;
2360   extractForInductionVars(ivs, &symbols);
2361   regionCst->reset(rank, numParamLoopIVs, 0);
2362   regionCst->setValues(rank, rank + numParamLoopIVs, symbols);
2363 
2364   // Memref dim sizes provide the bounds.
2365   for (unsigned d = 0; d < rank; d++) {
2366     auto dimSize = memRefType.getDimSize(d);
2367     assert(dimSize > 0 && "filtered dynamic shapes above");
2368     regionCst->addBound(IntegerPolyhedron::LB, d, 0);
2369     regionCst->addBound(IntegerPolyhedron::UB, d, dimSize - 1);
2370   }
2371   return true;
2372 }
2373 
2374 LogicalResult mlir::affineDataCopyGenerate(Block::iterator begin,
2375                                            Block::iterator end,
2376                                            const AffineCopyOptions &copyOptions,
2377                                            Optional<Value> filterMemRef,
2378                                            DenseSet<Operation *> &copyNests) {
2379   if (begin == end)
2380     return success();
2381 
2382   assert(begin->getBlock() == std::prev(end)->getBlock() &&
2383          "Inconsistent block begin/end args");
2384   assert(end != end->getBlock()->end() && "end can't be the block terminator");
2385 
2386   Block *block = begin->getBlock();
2387 
  // Copies will be generated for this depth, i.e., symbolic in all loops
  // surrounding this block range.
2390   unsigned copyDepth = getNestingDepth(&*begin);
2391 
2392   LLVM_DEBUG(llvm::dbgs() << "Generating copies at depth " << copyDepth
2393                           << "\n");
2394   LLVM_DEBUG(llvm::dbgs() << "from begin: " << *begin << "\n");
2395   LLVM_DEBUG(llvm::dbgs() << "to inclusive end: " << *std::prev(end) << "\n");
2396 
2397   // List of memory regions to copy for. We need a map vector to have a
2398   // guaranteed iteration order to write test cases. CHECK-DAG doesn't help here
  // since the allocs, for example, are identical except for the SSA id.
2400   SmallMapVector<Value, std::unique_ptr<MemRefRegion>, 4> readRegions;
2401   SmallMapVector<Value, std::unique_ptr<MemRefRegion>, 4> writeRegions;
2402 
2403   // Map from original memref's to the fast buffers that their accesses are
2404   // replaced with.
2405   DenseMap<Value, Value> fastBufferMap;
2406 
2407   // To check for errors when walking the block.
2408   bool error = false;
2409 
  // Walk this range of operations to gather all memory regions.
2411   block->walk(begin, end, [&](Operation *opInst) {
2412     // Gather regions to allocate to buffers in faster memory space.
2413     if (auto loadOp = dyn_cast<AffineLoadOp>(opInst)) {
2414       if ((filterMemRef.hasValue() && filterMemRef != loadOp.getMemRef()) ||
2415           (loadOp.getMemRefType().getMemorySpaceAsInt() !=
2416            copyOptions.slowMemorySpace))
2417         return;
2418     } else if (auto storeOp = dyn_cast<AffineStoreOp>(opInst)) {
2419       if ((filterMemRef.hasValue() && filterMemRef != storeOp.getMemRef()) ||
2420           storeOp.getMemRefType().getMemorySpaceAsInt() !=
2421               copyOptions.slowMemorySpace)
2422         return;
2423     } else {
      // Neither a load nor a store op.
2425       return;
2426     }
2427 
2428     // Compute the MemRefRegion accessed.
2429     auto region = std::make_unique<MemRefRegion>(opInst->getLoc());
2430     if (failed(region->compute(opInst, copyDepth, /*sliceState=*/nullptr,
2431                                /*addMemRefDimBounds=*/false))) {
2432       LLVM_DEBUG(llvm::dbgs()
2433                  << "Error obtaining memory region: semi-affine maps?\n");
2434       LLVM_DEBUG(llvm::dbgs() << "over-approximating to the entire memref\n");
2435       if (!getFullMemRefAsRegion(opInst, copyDepth, region.get())) {
2436         LLVM_DEBUG(
2437             opInst->emitError("non-constant memref sizes not yet supported"));
2438         error = true;
2439         return;
2440       }
2441     }
2442 
    // Each memref has a single buffer associated with it irrespective of how
    // many loads and stores happen on it.
2445     // TODO: in the future, when regions don't intersect and satisfy
2446     // other properties (based on load/store regions), we could consider
2447     // multiple buffers per memref.
2448 
2449     // Add to the appropriate region if it's not already in it, or take a
2450     // bounding box union with the existing one if it's already in there.
    // Note that a memref may have both read and write regions, so update the
    // region in the other list if one exists (write in the case of read, and
    // vice versa), since there is a single bounding box for a memref across
    // all reads and writes that happen on it.
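    // For instance, if a memref is read over rows [0, 63] and written over
    // rows [32, 95], both its read and write regions end up holding the
    // bounding box over rows [0, 95].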
2455 
2456     // Attempts to update; returns true if 'region' exists in targetRegions.
2457     auto updateRegion =
2458         [&](const SmallMapVector<Value, std::unique_ptr<MemRefRegion>, 4>
2459                 &targetRegions) {
2460           const auto *const it = targetRegions.find(region->memref);
2461           if (it == targetRegions.end())
2462             return false;
2463 
2464           // Perform a union with the existing region.
2465           if (failed(it->second->unionBoundingBox(*region))) {
2466             LLVM_DEBUG(llvm::dbgs()
2467                        << "Memory region bounding box failed; "
2468                           "over-approximating to the entire memref\n");
2469             // If the union fails, we will overapproximate.
2470             if (!getFullMemRefAsRegion(opInst, copyDepth, region.get())) {
2471               LLVM_DEBUG(opInst->emitError(
2472                   "non-constant memref sizes not yet supported"));
2473               error = true;
2474               return true;
2475             }
2476             it->second->getConstraints()->clearAndCopyFrom(
2477                 *region->getConstraints());
2478           } else {
2479             // Union was computed and stored in 'it->second': copy to 'region'.
2480             region->getConstraints()->clearAndCopyFrom(
2481                 *it->second->getConstraints());
2482           }
2483           return true;
2484         };
2485 
2486     bool existsInRead = updateRegion(readRegions);
2487     if (error)
2488       return;
2489     bool existsInWrite = updateRegion(writeRegions);
2490     if (error)
2491       return;
2492 
2493     // Finally add it to the region list.
2494     if (region->isWrite() && !existsInWrite) {
2495       writeRegions[region->memref] = std::move(region);
2496     } else if (!region->isWrite() && !existsInRead) {
2497       readRegions[region->memref] = std::move(region);
2498     }
2499   });
2500 
2501   if (error) {
    LLVM_DEBUG(begin->emitError(
        "copy generation failed for one or more memrefs in this block\n"));
2504     return failure();
2505   }
2506 
2507   uint64_t totalCopyBuffersSizeInBytes = 0;
2508   bool ret = true;
2509   auto processRegions =
2510       [&](const SmallMapVector<Value, std::unique_ptr<MemRefRegion>, 4>
2511               &regions) {
2512         for (const auto &regionEntry : regions) {
2513           // For each region, hoist copy in/out past all hoistable
2514           // 'affine.for's.
2515           Block::iterator copyInPlacementStart, copyOutPlacementStart;
2516           Block *copyPlacementBlock;
2517           findHighestBlockForPlacement(
2518               *regionEntry.second, *block, begin, end, &copyPlacementBlock,
2519               &copyInPlacementStart, &copyOutPlacementStart);
2520 
2521           uint64_t sizeInBytes;
2522           Block::iterator nBegin, nEnd;
2523           LogicalResult iRet = generateCopy(
2524               *regionEntry.second, block, begin, end, copyPlacementBlock,
2525               copyInPlacementStart, copyOutPlacementStart, copyOptions,
2526               fastBufferMap, copyNests, &sizeInBytes, &nBegin, &nEnd);
2527           if (succeeded(iRet)) {
            // begin/end could have been invalidated and need to be updated.
2529             begin = nBegin;
2530             end = nEnd;
2531             totalCopyBuffersSizeInBytes += sizeInBytes;
2532           }
          ret &= succeeded(iRet);
2534         }
2535       };
2536   processRegions(readRegions);
2537   processRegions(writeRegions);
2538 
2539   if (!ret) {
    LLVM_DEBUG(begin->emitError(
        "copy generation failed for one or more memrefs in this block\n"));
2542     return failure();
2543   }
2544 
  // If the range starts with an 'affine.for', emit a debug remark with the
  // total copy buffer size; for a general range of operations, a note will be
  // emitted at the caller.
2546   AffineForOp forOp;
2547   if (llvm::DebugFlag && (forOp = dyn_cast<AffineForOp>(&*begin))) {
2548     LLVM_DEBUG(forOp.emitRemark()
2549                << llvm::divideCeil(totalCopyBuffersSizeInBytes, 1024)
2550                << " KiB of copy buffers in fast memory space for this block\n");
2551   }
2552 
2553   if (totalCopyBuffersSizeInBytes > copyOptions.fastMemCapacityBytes) {
2554     StringRef str = "Total size of all copy buffers' for this block "
2555                     "exceeds fast memory capacity\n";
2556     block->getParentOp()->emitWarning(str);
2557   }
2558 
2559   return success();
2560 }
2561 
2562 // A convenience version of affineDataCopyGenerate for all ops in the body of
2563 // an AffineForOp.
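//
// For instance, a pass might invoke this overload roughly as follows (an
// illustrative sketch; `forOp` and the option values here are assumptions
// made up for this example):
//
//   AffineCopyOptions options{/*generateDma=*/false, /*slowMemorySpace=*/0,
//                             /*fastMemorySpace=*/1, /*tagMemorySpace=*/0,
//                             /*fastMemCapacityBytes=*/32 * 1024};
//   DenseSet<Operation *> copyNests;
//   if (failed(affineDataCopyGenerate(forOp, options,
//                                     /*filterMemRef=*/llvm::None,
//                                     copyNests)))
//     signalPassFailure();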
2564 LogicalResult mlir::affineDataCopyGenerate(AffineForOp forOp,
2565                                            const AffineCopyOptions &copyOptions,
2566                                            Optional<Value> filterMemRef,
2567                                            DenseSet<Operation *> &copyNests) {
2568   return affineDataCopyGenerate(forOp.getBody()->begin(),
2569                                 std::prev(forOp.getBody()->end()), copyOptions,
2570                                 filterMemRef, copyNests);
2571 }
2572 
2573 LogicalResult mlir::generateCopyForMemRegion(
2574     const MemRefRegion &memrefRegion, Operation *analyzedOp,
2575     const AffineCopyOptions &copyOptions, CopyGenerateResult &result) {
2576   Block *block = analyzedOp->getBlock();
2577   auto begin = analyzedOp->getIterator();
2578   auto end = std::next(begin);
2579   DenseMap<Value, Value> fastBufferMap;
2580   DenseSet<Operation *> copyNests;
2581 
2582   auto err = generateCopy(memrefRegion, block, begin, end, block, begin, end,
2583                           copyOptions, fastBufferMap, copyNests,
2584                           &result.sizeInBytes, &begin, &end);
2585   if (failed(err))
2586     return err;
2587 
2588   const auto &en = fastBufferMap.find(memrefRegion.memref);
2589   // In some cases (empty loops), no copy generation would have happened.
2590   if (en == fastBufferMap.end())
2591     return failure();
2592   result.alloc = en->second.getDefiningOp();
2593   assert(result.alloc && "fast buffer expected to be locally allocated");
2594   assert(copyNests.size() <= 1 && "At most one copy nest is expected.");
2595   result.copyNest = copyNests.empty() ? nullptr : *copyNests.begin();
2596   return success();
2597 }
2598 
2599 /// Gathers all AffineForOps in 'block' at 'currLoopDepth' in 'depthToLoops'.
2600 static void
2601 gatherLoopsInBlock(Block *block, unsigned currLoopDepth,
2602                    std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
  // Add a new empty level to the output if it doesn't already exist.
2604   assert(currLoopDepth <= depthToLoops.size() && "Unexpected currLoopDepth");
2605   if (currLoopDepth == depthToLoops.size())
2606     depthToLoops.emplace_back();
2607 
2608   for (auto &op : *block) {
2609     if (auto forOp = dyn_cast<AffineForOp>(op)) {
2610       depthToLoops[currLoopDepth].push_back(forOp);
2611       gatherLoopsInBlock(forOp.getBody(), currLoopDepth + 1, depthToLoops);
2612     }
2613   }
2614 }
2615 
2616 /// Gathers all AffineForOps in 'func.func' grouped by loop depth.
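/// For example, for a function body of the form
///   affine.for %i ... {
///     affine.for %j ... { ... }
///     affine.for %k ... { ... }
///   }
/// depthToLoops[0] holds the %i loop, while depthToLoops[1] holds the %j and
/// %k loops.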
2617 void mlir::gatherLoops(func::FuncOp func,
2618                        std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
2619   for (auto &block : func)
2620     gatherLoopsInBlock(&block, /*currLoopDepth=*/0, depthToLoops);
2621 
2622   // Remove last loop level from output since it's empty.
2623   if (!depthToLoops.empty()) {
2624     assert(depthToLoops.back().empty() && "Last loop level is not empty?");
2625     depthToLoops.pop_back();
2626   }
2627 }
2628 
2629 // TODO: if necessary, this can be extended to also compose in any
2630 // affine.applys, fold to constant if all result dimensions of the map are
2631 // constant (canonicalizeMapAndOperands below already does this for single
2632 // result bound maps), and use simplifyMap to perform algebraic simplification.
2633 AffineForOp mlir::createCanonicalizedAffineForOp(
2634     OpBuilder b, Location loc, ValueRange lbOperands, AffineMap lbMap,
2635     ValueRange ubOperands, AffineMap ubMap, int64_t step) {
2636   SmallVector<Value, 4> lowerOperands(lbOperands);
2637   SmallVector<Value, 4> upperOperands(ubOperands);
2638 
2639   fullyComposeAffineMapAndOperands(&lbMap, &lowerOperands);
2640   canonicalizeMapAndOperands(&lbMap, &lowerOperands);
2641   lbMap = removeDuplicateExprs(lbMap);
2642   fullyComposeAffineMapAndOperands(&ubMap, &upperOperands);
2643   canonicalizeMapAndOperands(&ubMap, &upperOperands);
2644   ubMap = removeDuplicateExprs(ubMap);
2645 
2646   return b.create<AffineForOp>(loc, lowerOperands, lbMap, upperOperands, ubMap,
2647                                step);
2648 }
2649 
2650 /// Creates an AffineIfOp that encodes the conditional to choose between
2651 /// the constant trip count version and an unknown trip count version of this
/// nest of loops. This is used to separate partial and full tiles when
/// `loops` consists of the intra-tile loops. The affine.if op is inserted at
/// the builder insertion point of `b`.
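///
/// For instance, for an intra-tile loop of the form
///   affine.for %i = %it to min(%it + 32, %N)
/// the guard captures the condition under which the constant-size tile is
/// complete, i.e., roughly %it + 32 <= %N, encoded as an affine.if on an
/// integer set such as
///   affine_set<(d0)[s0] : (s0 - d0 - 32 >= 0)>
/// applied to (%it)[%N] (an illustrative sketch; the exact dim/symbol split
/// is determined from the constraints).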
2655 static AffineIfOp createSeparationCondition(MutableArrayRef<AffineForOp> loops,
2656                                             OpBuilder b) {
2657   if (loops.empty())
2658     return nullptr;
2659 
2660   auto *context = loops[0].getContext();
2661 
2662   FlatAffineValueConstraints cst;
2663   SmallVector<Operation *, 8> ops;
2664   llvm::append_range(ops, loops);
2665   (void)getIndexSet(ops, &cst);
2666 
2667   // Remove constraints that are independent of these loop IVs.
2668   cst.removeIndependentConstraints(/*pos=*/0, /*num=*/loops.size());
2669 
2670   // Construct the constraint set representing the guard for full tiles. The
2671   // lower bound (and upper bound) corresponding to the full tile should be
2672   // larger (and resp. smaller) than any other lower (or upper bound).
2673   SmallVector<int64_t, 8> fullTileLb, fullTileUb;
2674   for (auto loop : loops) {
2675     (void)loop;
    // TODO: Generalizing this to a non-unit stride is straightforward.
2677     assert(loop.getStep() == 1 && "point loop step expected to be one");
    // Mark everything as symbols for the purpose of finding a constant diff
    // pair.
2679     cst.setDimSymbolSeparation(/*newSymbolCount=*/cst.getNumDimAndSymbolIds() -
2680                                1);
2681     unsigned fullTileLbPos, fullTileUbPos;
2682     if (!cst.getConstantBoundOnDimSize(0, /*lb=*/nullptr,
2683                                        /*boundFloorDivisor=*/nullptr,
2684                                        /*ub=*/nullptr, &fullTileLbPos,
2685                                        &fullTileUbPos)) {
2686       LLVM_DEBUG(llvm::dbgs() << "Can't get constant diff pair for a loop\n");
2687       return nullptr;
2688     }
2689 
2690     SmallVector<unsigned, 4> lbIndices, ubIndices;
2691     cst.getLowerAndUpperBoundIndices(/*pos=*/0, &lbIndices, &ubIndices);
2692 
2693     auto fLb = cst.getInequality(fullTileLbPos);
2694     auto fUb = cst.getInequality(fullTileUbPos);
2695     fullTileLb.assign(fLb.begin(), fLb.end());
2696     fullTileUb.assign(fUb.begin(), fUb.end());
2697 
    // Full tile lower bound should be >= any other lower bound.
2699     for (auto lbIndex : lbIndices)
2700       for (unsigned i = 0, e = cst.getNumCols(); i < e; ++i)
2701         cst.atIneq(lbIndex, i) = fullTileLb[i] - cst.atIneq(lbIndex, i);
2702 
2703     // Full tile upper bound should be <= any other upper bound.
2704     for (auto ubIndex : ubIndices)
2705       for (unsigned i = 0, e = cst.getNumCols(); i < e; ++i)
2706         cst.atIneq(ubIndex, i) -= fullTileUb[i];
2707 
2708     cst.removeId(0);
2709   }
2710 
  // The previous step leads to all zeros at the full tile lb and ub positions
  // themselves; remove those and any other duplicates / trivial redundancies.
2713   cst.removeTrivialRedundancy();
2714 
  // Turn everything into dims conservatively since we earlier turned all
  // trailing ids past the point loop IV into symbols. Some of these could be
  // outer loop IVs; we'll canonicalize anyway.
2718   cst.setDimSymbolSeparation(0);
2719 
2720   IntegerSet ifCondSet = cst.getAsIntegerSet(context);
2721   // ifCondSet can be null if cst was empty -- this can happen if all loops
2722   // in the nest have constant trip counts.
2723   if (!ifCondSet)
2724     return nullptr;
2725 
2726   SmallVector<Value, 4> setOperands;
2727   cst.getValues(0, cst.getNumDimAndSymbolIds(), &setOperands);
2728   canonicalizeSetAndOperands(&ifCondSet, &setOperands);
2729   return b.create<AffineIfOp>(loops[0].getLoc(), ifCondSet, setOperands,
2730                               /*withElseRegion=*/true);
2731 }
2732 
2733 /// Create the full tile loop nest (along with its body).
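/// For instance, for an input loop of the form
///   affine.for %i = %it to min(%it + 32, %N)
/// the corresponding full tile loop is built from a lower/upper bound pair
/// whose difference is constant, roughly
///   affine.for %i = %it to %it + 32
/// (an illustrative sketch of the intent; the actual bounds are derived from
/// the constraint system below).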
2734 static LogicalResult
2735 createFullTiles(MutableArrayRef<AffineForOp> inputNest,
2736                 SmallVectorImpl<AffineForOp> &fullTileLoops, OpBuilder b) {
2737   fullTileLoops.reserve(inputNest.size());
2738 
  // For each loop in the original nest, identify a lower/upper bound pair
  // such that their difference is a constant.
2741   FlatAffineValueConstraints cst;
2742   for (auto loop : inputNest) {
2743     // TODO: straightforward to generalize to a non-unit stride.
2744     if (loop.getStep() != 1) {
2745       LLVM_DEBUG(llvm::dbgs()
2746                  << "[tile separation] non-unit stride not implemented\n");
2747       return failure();
2748     }
2749     SmallVector<Operation *, 1> loopOp{loop.getOperation()};
2750     (void)getIndexSet(loopOp, &cst);
    // We will mark everything other than this loop IV as a symbol to get a
    // pair of <lb, ub> with a constant difference.
2753     cst.setDimSymbolSeparation(cst.getNumDimAndSymbolIds() - 1);
2754     unsigned lbPos, ubPos;
2755     if (!cst.getConstantBoundOnDimSize(/*pos=*/0, /*lb=*/nullptr,
2756                                        /*lbDivisor=*/nullptr, /*ub=*/nullptr,
2757                                        &lbPos, &ubPos) ||
2758         lbPos == ubPos) {
2759       LLVM_DEBUG(llvm::dbgs() << "[tile separation] Can't get constant diff / "
2760                                  "equalities not yet handled\n");
2761       return failure();
2762     }
2763 
    // Set all identifiers as dimensions uniformly since some of those marked
    // as symbols above could be outer loop IVs (i.e., the corresponding
    // tile-space IVs).
2766     cst.setDimSymbolSeparation(/*newSymbolCount=*/0);
2767 
2768     AffineValueMap lbVmap, ubVmap;
2769     cst.getIneqAsAffineValueMap(/*pos=*/0, lbPos, lbVmap, b.getContext());
2770     cst.getIneqAsAffineValueMap(/*pos=*/0, ubPos, ubVmap, b.getContext());
2771     AffineForOp fullTileLoop = createCanonicalizedAffineForOp(
2772         b, loop.getLoc(), lbVmap.getOperands(), lbVmap.getAffineMap(),
2773         ubVmap.getOperands(), ubVmap.getAffineMap());
2774     b = OpBuilder::atBlockTerminator(fullTileLoop.getBody());
2775     fullTileLoops.push_back(fullTileLoop);
2776   }
2777 
2778   // Add the body for the full tile loop nest.
2779   BlockAndValueMapping operandMap;
2780   for (const auto &loopEn : llvm::enumerate(inputNest))
2781     operandMap.map(loopEn.value().getInductionVar(),
2782                    fullTileLoops[loopEn.index()].getInductionVar());
2783   b = OpBuilder::atBlockTerminator(fullTileLoops.back().getBody());
2784   for (auto &op : inputNest.back().getBody()->without_terminator())
2785     b.clone(op, operandMap);
2786   return success();
2787 }
2788 
2789 LogicalResult
2790 mlir::separateFullTiles(MutableArrayRef<AffineForOp> inputNest,
2791                         SmallVectorImpl<AffineForOp> *fullTileNest) {
2792   if (inputNest.empty())
2793     return success();
2794 
2795   auto firstLoop = inputNest[0];
2796 
  // Each successive for op has to be nested inside the previous one.
2798   auto prevLoop = firstLoop;
2799   for (auto loop : inputNest.drop_front(1)) {
2800     assert(loop->getParentOp() == prevLoop && "input not contiguously nested");
2801     prevLoop = loop;
2802   }
2803 
2804   // Create the full tile loop nest.
2805   SmallVector<AffineForOp, 4> fullTileLoops;
2806   OpBuilder b(firstLoop);
2807   if (failed(createFullTiles(inputNest, fullTileLoops, b))) {
2808     if (!fullTileLoops.empty())
2809       fullTileLoops.front().erase();
2810     return failure();
2811   }
2812 
2813   // Create and insert the version select right before the root of the nest.
2814   b = OpBuilder(firstLoop);
2815   AffineIfOp ifOp = createSeparationCondition(inputNest, b);
2816   if (!ifOp) {
2817     fullTileLoops.front().erase();
2818     LLVM_DEBUG(llvm::dbgs() << "All tiles are full tiles, or failure creating "
2819                                "separation condition\n");
2820     return failure();
2821   }
2822 
2823   // Move the full tile into the then block.
2824   Block *thenBlock = ifOp.getThenBlock();
2825   AffineForOp outermostFullTileLoop = fullTileLoops[0];
2826   thenBlock->getOperations().splice(
2827       std::prev(thenBlock->end()),
2828       outermostFullTileLoop->getBlock()->getOperations(),
2829       Block::iterator(outermostFullTileLoop));
2830 
2831   // Move the partial tile into the else block. The partial tile is the same as
2832   // the original loop nest.
2833   Block *elseBlock = ifOp.getElseBlock();
2834   elseBlock->getOperations().splice(std::prev(elseBlock->end()),
2835                                     firstLoop->getBlock()->getOperations(),
2836                                     Block::iterator(firstLoop));
2837 
2838   if (fullTileNest)
2839     *fullTileNest = std::move(fullTileLoops);
2840 
2841   return success();
2842 }
2843