//===- LoopSpecialization.cpp - scf.parallel/scf.for specialization -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Specializes parallel loops and for loops for easier unrolling and
// vectorization.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/DenseMap.h"

using namespace mlir;
using scf::ForOp;
using scf::ParallelOp;

/// Rewrite a parallel loop whose upper bounds are defined by affine.min ops
/// with a constant result into two versions of the loop, guarded by an scf.if
/// that checks whether the bounds are equal to those constants. This is
/// beneficial if the loop will almost always have the constant bounds and that
/// version can be fully unrolled and vectorized.
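///
/// Illustrative, schematic example (operand names are hypothetical):
/// ```
/// %ub = affine.min affine_map<()[s0] -> (1024, s0)>()[%n]
/// scf.parallel (%i) = (%c0) to (%ub) step (%c1) { ... }
/// ```
/// is rewritten into
/// ```
/// %cond = cmpi eq, %ub, %c1024 : index
/// scf.if %cond {
///   scf.parallel (%i) = (%c0) to (%c1024) step (%c1) { ... } // constant bound
/// } else {
///   scf.parallel (%i) = (%c0) to (%ub) step (%c1) { ... }    // original bound
/// }
/// ```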
static void specializeParallelLoopForUnrolling(ParallelOp op) {
  SmallVector<int64_t, 2> constantIndices;
  constantIndices.reserve(op.upperBound().size());
  for (auto bound : op.upperBound()) {
    auto minOp = bound.getDefiningOp<AffineMinOp>();
    if (!minOp)
      return;
    int64_t minConstant = std::numeric_limits<int64_t>::max();
    for (AffineExpr expr : minOp.map().getResults()) {
      if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
        minConstant = std::min(minConstant, constantIndex.getValue());
    }
    if (minConstant == std::numeric_limits<int64_t>::max())
      return;
    constantIndices.push_back(minConstant);
  }

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value cond;
  for (auto bound : llvm::zip(op.upperBound(), constantIndices)) {
    Value constant = b.create<ConstantIndexOp>(op.getLoc(), std::get<1>(bound));
    Value cmp = b.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq,
                                 std::get<0>(bound), constant);
    cond = cond ? b.create<AndOp>(op.getLoc(), cond, cmp) : cmp;
    map.map(std::get<0>(bound), constant);
  }
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop whose upper bound is defined by an affine.min op with a
/// constant result into two versions of the loop, guarded by an scf.if that
/// checks whether the bound is equal to that constant. This is beneficial if
/// the loop will almost always have the constant bound and that version can be
/// fully unrolled and vectorized.
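///
/// The rewrite is analogous to the parallel loop case above, e.g. (schematic,
/// hypothetical IR):
/// ```
/// %ub = affine.min affine_map<()[s0] -> (64, s0)>()[%n]
/// scf.for %i = %c0 to %ub step %c1 { ... }
/// ```
/// becomes an scf.if whose "then" branch holds a clone of the loop with %c64
/// as the upper bound and whose "else" branch holds the original loop.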
static void specializeForLoopForUnrolling(ForOp op) {
  auto bound = op.upperBound();
  auto minOp = bound.getDefiningOp<AffineMinOp>();
  if (!minOp)
    return;
  int64_t minConstant = std::numeric_limits<int64_t>::max();
  for (AffineExpr expr : minOp.map().getResults()) {
    if (auto constantIndex = expr.dyn_cast<AffineConstantExpr>())
      minConstant = std::min(minConstant, constantIndex.getValue());
  }
  if (minConstant == std::numeric_limits<int64_t>::max())
    return;

  OpBuilder b(op);
  BlockAndValueMapping map;
  Value constant = b.create<ConstantIndexOp>(op.getLoc(), minConstant);
  Value cond =
      b.create<CmpIOp>(op.getLoc(), CmpIPredicate::eq, bound, constant);
  map.map(bound, constant);
  auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
  ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
  ifOp.getElseBodyBuilder().clone(*op.getOperation());
  op.erase();
}

/// Rewrite a for loop with bounds/step that potentially do not divide evenly
/// into a for loop where the step divides the iteration space evenly, followed
/// by an scf.if for the last (partial) iteration (if any).
///
/// This function rewrites the given scf.for loop in-place and creates a new
/// scf.if operation for the last iteration. It replaces all uses of the
/// unpeeled loop with the results of the newly generated scf.if.
///
/// The newly generated scf.if operation is returned via `ifOp`. The boundary
/// at which the loop is split (new upper bound) is returned via `splitBound`.
/// The return value indicates whether the loop was rewritten or not.
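///
/// Illustrative, schematic example (names are hypothetical):
/// ```
/// scf.for %iv = %lb to %ub step %c4 { ... }
/// ```
/// is rewritten into
/// ```
/// %split = affine.apply affine_map<(d0, d1, d2) -> (d1 - (d1 - d0) mod d2)>
///              (%lb, %ub, %c4)
/// scf.for %iv = %lb to %split step %c4 { ... } // only "full" iterations
/// %cond = cmpi slt, %split, %ub : index
/// scf.if %cond { ... }                         // last (partial) iteration
/// ```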
static LogicalResult peelForLoop(RewriterBase &b, ForOp forOp, scf::IfOp &ifOp,
                                 Value &splitBound) {
  RewriterBase::InsertionGuard guard(b);
  auto lbInt = getConstantIntValue(forOp.lowerBound());
  auto ubInt = getConstantIntValue(forOp.upperBound());
  auto stepInt = getConstantIntValue(forOp.step());

  // No specialization necessary if step already divides upper bound evenly.
  if (lbInt && ubInt && stepInt && (*ubInt - *lbInt) % *stepInt == 0)
    return failure();
  // No specialization necessary if step size is 1.
  if (stepInt == static_cast<int64_t>(1))
    return failure();

  auto loc = forOp.getLoc();
  AffineExpr dim0, dim1, dim2;
  bindDims(b.getContext(), dim0, dim1, dim2);
  // New upper bound: %ub - (%ub - %lb) mod %step
  auto modMap = AffineMap::get(3, 0, {dim1 - ((dim1 - dim0) % dim2)});
  b.setInsertionPoint(forOp);
  splitBound = b.createOrFold<AffineApplyOp>(
      loc, modMap,
      ValueRange{forOp.lowerBound(), forOp.upperBound(), forOp.step()});

  // Set new upper loop bound.
  Value previousUb = forOp.upperBound();
  b.updateRootInPlace(forOp,
                      [&]() { forOp.upperBoundMutable().assign(splitBound); });
  b.setInsertionPointAfter(forOp);

  // Do we need one more iteration?
  Value hasMoreIter =
      b.create<CmpIOp>(loc, CmpIPredicate::slt, splitBound, previousUb);

  // Create IfOp for last iteration.
  auto resultTypes = forOp.getResultTypes();
  ifOp = b.create<scf::IfOp>(loc, resultTypes, hasMoreIter,
                             /*withElseRegion=*/!resultTypes.empty());
  forOp.replaceAllUsesWith(ifOp->getResults());

  // Build then case.
  BlockAndValueMapping bvm;
  bvm.map(forOp.region().getArgument(0), splitBound);
  for (auto it : llvm::zip(forOp.getRegionIterArgs(), forOp->getResults())) {
    bvm.map(std::get<0>(it), std::get<1>(it));
  }
  b.cloneRegionBefore(forOp.region(), ifOp.thenRegion(),
                      ifOp.thenRegion().begin(), bvm);
  // Build else case.
  if (!resultTypes.empty())
    ifOp.getElseBodyBuilder(b.getListener())
        .create<scf::YieldOp>(loc, forOp->getResults());

  return success();
}

static void unpackOptionalValues(ArrayRef<Optional<Value>> source,
                                 SmallVector<Value> &target) {
  target = llvm::to_vector<4>(llvm::map_range(source, [](Optional<Value> val) {
    return val.hasValue() ? *val : Value();
  }));
}

/// Bound an identifier `pos` in a given FlatAffineValueConstraints with
/// constraints drawn from an affine map. Before adding the constraint, the
/// dimensions/symbols of the affine map are aligned with `constraints`.
/// `operands` are the SSA Value operands used with the affine map.
/// Note: This function adds a new symbol column to the `constraints` for each
/// dimension/symbol that exists in the affine map but not in `constraints`.
static LogicalResult alignAndAddBound(FlatAffineValueConstraints &constraints,
                                      FlatAffineConstraints::BoundType type,
                                      unsigned pos, AffineMap map,
                                      ValueRange operands) {
  SmallVector<Value> dims, syms, newSyms;
  unpackOptionalValues(constraints.getMaybeDimValues(), dims);
  unpackOptionalValues(constraints.getMaybeSymbolValues(), syms);

  AffineMap alignedMap =
      alignAffineMapWithValues(map, operands, dims, syms, &newSyms);
  for (unsigned i = syms.size(); i < newSyms.size(); ++i)
    constraints.addSymbolId(constraints.getNumSymbolIds(), newSyms[i]);
  return constraints.addBound(type, pos, alignedMap);
}

/// This function tries to canonicalize an affine.min operation by proving that
/// its result is bounded by the same lower and upper bound. In that case, the
/// operation can be folded away.
///
/// Bounds are computed by FlatAffineValueConstraints. Invariants required for
/// finding/proving bounds should be supplied via `constraints`.
///
/// 1. Add dimensions for `minOp` and `minOpUb` (upper bound of `minOp`).
/// 2. Compute an upper bound of `minOp` and bind it to `minOpUb`. SSA values
///    that are used in `minOp` but are not part of `dims`, are added as extra
///    symbols to the constraint set.
/// 3. For each result of `minOp`: Add result as a dimension `r_i`. Prove that
///    r_i >= minOpUb. If this is the case, ub(minOp) == lb(minOp) and `minOp`
///    can be replaced with that bound.
///
/// In summary, the following constraints are added throughout this function.
/// Note: `invar` are dimensions added by the caller to express the invariants.
///
///  invar | minOp | minOpUb | r_i | extra syms... | const |           eq/ineq
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (various eq./ineq. constraining `invar`, added by the caller)
///    ... |     0 |       0 |   0 |             0 |   ... |               ...
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (various ineq. constraining `minOp` in terms of `minOp` operands (`invar`
///    and extra `minOp` operands "extra syms" that are not in `invar`)).
///    ... |    -1 |       0 |   0 |           ... |   ... |              >= 0
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (set `minOpUb` to `minOp` upper bound in terms of `invar` and extra syms)
///    ... |     0 |      -1 |   0 |           ... |   ... |               = 0
///  ------+-------+---------+-----+---------------+-------+-------------------
///   (for each `minOp` map result r_i: copy previous constraints, set r_i to
///    corresponding map result, prove r_i >= minOpUb via contradiction)
///    ... |     0 |       0 |  -1 |           ... |   ... |               = 0
///      0 |     0 |       1 |  -1 |             0 |    -1 |              >= 0
///
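/// As a hypothetical worked example: for `minOp = affine.min(step, ub - iv)`
/// under the caller-provided invariant `ub - iv >= step`, step 2 derives
/// `minOpUb = step`, and step 3 proves both `step >= minOpUb` and
/// `ub - iv >= minOpUb`, so `minOp` is replaced with `step`.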
static LogicalResult
canonicalizeAffineMinOp(RewriterBase &rewriter, AffineMinOp minOp,
                        FlatAffineValueConstraints constraints) {
  RewriterBase::InsertionGuard guard(rewriter);
  AffineMap minOpMap = minOp.getAffineMap();
  unsigned numResults = minOpMap.getNumResults();

  // Add a few extra dimensions.
  unsigned dimMinOp = constraints.addDimId();   // `minOp`
  unsigned dimMinOpUb = constraints.addDimId(); // `minOp` upper bound
  unsigned resultDimStart = constraints.getNumDimIds();
  for (unsigned i = 0; i < numResults; ++i)
    constraints.addDimId();

  // Add an inequality for each result expr_i of minOpMap: minOp <= expr_i
  if (failed(alignAndAddBound(constraints, FlatAffineConstraints::UB, dimMinOp,
                              minOpMap, minOp.operands())))
    return failure();

  // Try to compute an upper bound for minOp, expressed in terms of the other
  // `dims` and extra symbols.
  SmallVector<AffineMap> minOpValLb(1), minOpValUb(1);
  constraints.getSliceBounds(dimMinOp, 1, minOp.getContext(), &minOpValLb,
                             &minOpValUb);
  // TODO: `getSliceBounds` may return multiple bounds at the moment. This is
  // a TODO of `getSliceBounds` and not handled here.
  if (!minOpValUb[0] || minOpValUb[0].getNumResults() != 1)
    return failure(); // No or multiple upper bounds found.

  // Add an equality: dimMinOpUb = minOpValUb[0]
  // Add back dimension for minOp. (Was removed by `getSliceBounds`.)
  AffineMap alignedUbMap = minOpValUb[0].shiftDims(/*shift=*/1,
                                                   /*offset=*/dimMinOp);
  if (failed(constraints.addBound(FlatAffineConstraints::EQ, dimMinOpUb,
                                  alignedUbMap)))
    return failure();

  // If the constraint system is empty, there is an inconsistency. (E.g., this
  // can happen if loop lb > ub.)
  if (constraints.isEmpty())
    return failure();

  // Prove that each result of minOpMap has a lower bound that is equal to (or
  // greater than) the upper bound of minOp (`dimMinOpUb`). In that case,
  // minOp can be replaced with the bound. I.e., prove that for each result
  // expr_i (represented by dimension r_i):
  //
  // r_i >= minOpUb
  //
  // To prove this inequality, add its negation to the constraint set and prove
  // that the constraint set is empty.
  for (unsigned i = resultDimStart; i < resultDimStart + numResults; ++i) {
    FlatAffineValueConstraints newConstr(constraints);

    // Add an equality: r_i = expr_i
    // Note: These equalities could have been added earlier and used to express
    // minOp <= expr_i. However, then we run the risk that `getSliceBounds`
    // computes minOpUb in terms of r_i dims, which is not desired.
    if (failed(alignAndAddBound(newConstr, FlatAffineConstraints::EQ, i,
                                minOpMap.getSubMap({i - resultDimStart}),
                                minOp.operands())))
      return failure();

    // Add inequality: r_i < minOpUb (equiv.: minOpUb - r_i - 1 >= 0)
    SmallVector<int64_t> ineq(newConstr.getNumCols(), 0);
    ineq[dimMinOpUb] = 1;
    ineq[i] = -1;
    ineq[newConstr.getNumCols() - 1] = -1;
    newConstr.addInequality(ineq);
    if (!newConstr.isEmpty())
      return failure();
  }

  // Lower and upper bound of `minOp` are equal. Replace `minOp` with its bound.
  AffineMap newMap = alignedUbMap;
  SmallVector<Value> newOperands;
  unpackOptionalValues(constraints.getMaybeDimAndSymbolValues(), newOperands);
  mlir::canonicalizeMapAndOperands(&newMap, &newOperands);
  rewriter.setInsertionPoint(minOp);
  rewriter.replaceOpWithNewOp<AffineApplyOp>(minOp, newMap, newOperands);
  return success();
}

/// Try to simplify an affine.min operation `minOp` after loop peeling. This
/// function detects affine.min operations such as (ub is the previous upper
/// bound of the unpeeled loop):
/// ```
/// #map = affine_map<(d0)[s0, s1] -> (s0, -d0 + s1)>
/// %r = affine.min #map(%iv)[%step, %ub]
/// ```
/// and rewrites them into (in the case of the peeled loop):
/// ```
/// %r = %step
/// ```
/// affine.min operations inside the generated scf.if operation are rewritten in
/// a similar way.
///
/// This function builds up a set of constraints, capable of proving that:
/// * Inside the peeled loop: min(step, ub - iv) == step
/// * Inside the scf.if operation: min(step, ub - iv) == ub - iv
///
/// Note: `ub` is the previous upper bound of the loop (before peeling).
/// `insideLoop` must be true for affine.min ops inside the loop and false for
/// affine.min ops inside the scf.if op (the partial iteration).
static LogicalResult rewritePeeledAffineOp(RewriterBase &rewriter,
                                           AffineMinOp minOp, Value iv,
                                           Value ub, Value step,
                                           bool insideLoop) {
  FlatAffineValueConstraints constraints;
  constraints.addDimId(0, iv);
  constraints.addDimId(1, ub);
  constraints.addDimId(2, step);
  if (auto constUb = getConstantIntValue(ub))
    constraints.addBound(FlatAffineConstraints::EQ, 1, *constUb);
  if (auto constStep = getConstantIntValue(step))
    constraints.addBound(FlatAffineConstraints::EQ, 2, *constStep);

  // Add loop peeling invariant. This is the main piece of knowledge that
  // enables AffineMinOp simplification.
  if (insideLoop) {
    // ub - iv >= step (equiv.: -iv + ub - step + 0 >= 0)
    // Intuitively: Inside the peeled loop, every iteration is a "full"
    // iteration, i.e., step divides the iteration space `ub - lb` evenly.
    constraints.addInequality({-1, 1, -1, 0});
  } else {
    // ub - iv < step (equiv.: iv + -ub + step - 1 >= 0)
    // Intuitively: `iv` is the split bound here, i.e., the iteration variable
    // value of the very last iteration (in the unpeeled loop). At that point,
    // there are less than `step` elements remaining. (Otherwise, the peeled
    // loop would run for at least one more iteration.)
    constraints.addInequality({1, -1, 1, -1});
  }

  return canonicalizeAffineMinOp(rewriter, minOp, constraints);
}

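// Illustrative, schematic example of the combined effect (hypothetical IR):
//
//   scf.for %iv = %c0 to %ub step %c4 {
//     %m = affine.min affine_map<(d0)[s0, s1] -> (s0, -d0 + s1)>(%iv)[%c4, %ub]
//     ...
//   }
//
// is rewritten into a main loop from %c0 to %split (the largest multiple of
// the step that is <= %ub), in which %m simplifies to %c4, followed by an
// scf.if for the last (partial) iteration, in which %m simplifies to
// %ub - %split.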
LogicalResult mlir::scf::peelAndCanonicalizeForLoop(RewriterBase &rewriter,
                                                    ForOp forOp,
                                                    scf::IfOp &ifOp) {
  Value ub = forOp.upperBound();
  Value splitBound;
  if (failed(peelForLoop(rewriter, forOp, ifOp, splitBound)))
    return failure();

  // Rewrite affine.min ops.
  forOp.walk([&](AffineMinOp minOp) {
    (void)rewritePeeledAffineOp(rewriter, minOp, forOp.getInductionVar(), ub,
                                forOp.step(), /*insideLoop=*/true);
  });
  ifOp.walk([&](AffineMinOp minOp) {
    (void)rewritePeeledAffineOp(rewriter, minOp, splitBound, ub, forOp.step(),
                                /*insideLoop=*/false);
  });

  return success();
}

static constexpr char kPeeledLoopLabel[] = "__peeled_loop__";
static constexpr char kPartialIterationLabel[] = "__partial_iteration__";

namespace {
struct ForLoopPeelingPattern : public OpRewritePattern<ForOp> {
  ForLoopPeelingPattern(MLIRContext *ctx, bool skipPartial)
      : OpRewritePattern<ForOp>(ctx), skipPartial(skipPartial) {}

  LogicalResult matchAndRewrite(ForOp forOp,
                                PatternRewriter &rewriter) const override {
    // Do not peel already peeled loops.
    if (forOp->hasAttr(kPeeledLoopLabel))
      return failure();
    if (skipPartial) {
      // No peeling of loops inside the partial iteration (scf.if) of another
      // peeled loop.
      Operation *op = forOp.getOperation();
      while ((op = op->getParentOfType<scf::IfOp>())) {
        if (op->hasAttr(kPartialIterationLabel))
          return failure();
      }
    }
    // Apply loop peeling.
    scf::IfOp ifOp;
    if (failed(peelAndCanonicalizeForLoop(rewriter, forOp, ifOp)))
      return failure();
    // Apply label, so that the same loop is not rewritten a second time.
    rewriter.updateRootInPlace(forOp, [&]() {
      forOp->setAttr(kPeeledLoopLabel, rewriter.getUnitAttr());
    });
    ifOp->setAttr(kPartialIterationLabel, rewriter.getUnitAttr());
    return success();
  }

  /// If set to true, loops inside partial iterations of another peeled loop
  /// are not peeled. This reduces the size of the generated code. Partial
  /// iterations are not usually performance critical.
  /// Note: Takes into account the entire chain of parent operations, not just
  /// the direct parent.
  bool skipPartial;
};
} // namespace

namespace {
struct ParallelLoopSpecialization
    : public SCFParallelLoopSpecializationBase<ParallelLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk(
        [](ParallelOp op) { specializeParallelLoopForUnrolling(op); });
  }
};

struct ForLoopSpecialization
    : public SCFForLoopSpecializationBase<ForLoopSpecialization> {
  void runOnFunction() override {
    getFunction().walk([](ForOp op) { specializeForLoopForUnrolling(op); });
  }
};

struct ForLoopPeeling : public SCFForLoopPeelingBase<ForLoopPeeling> {
  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    MLIRContext *ctx = funcOp.getContext();
    RewritePatternSet patterns(ctx);
    patterns.add<ForLoopPeelingPattern>(ctx, skipPartial);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));

    // Drop the markers.
    funcOp.walk([](Operation *op) {
      op->removeAttr(kPeeledLoopLabel);
      op->removeAttr(kPartialIterationLabel);
    });
  }
};
} // namespace

std::unique_ptr<Pass> mlir::createParallelLoopSpecializationPass() {
  return std::make_unique<ParallelLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopSpecializationPass() {
  return std::make_unique<ForLoopSpecialization>();
}

std::unique_ptr<Pass> mlir::createForLoopPeelingPass() {
  return std::make_unique<ForLoopPeeling>();
}