//===- LinalgTransforms.cpp - Linalg transformations as patterns ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements logic and helpers to expose Linalg transforms as rewrite
// patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>

#define DEBUG_TYPE "linalg-transforms"

using namespace mlir;
using namespace mlir::linalg;

#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE << "]: ")

//===----------------------------------------------------------------------===//
// Transformations exposed as rewrite patterns.
//===----------------------------------------------------------------------===//
// Marker used as attribute name in generated Linalg rewriting transformations.
const StringLiteral mlir::linalg::LinalgTransforms::kLinalgTransformMarker =
    "__internal_linalg_transform__";
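
// For illustration, a filter-driven pattern looks for this marker on ops and
// rewrites it on success. The marker value below is a hypothetical example;
// any string agreed upon between the pattern and the pass works:
//
//   linalg.matmul {__internal_linalg_transform__ = "tile"}
//       ins(%a, %b : memref<?x?xf32>, memref<?x?xf32>)
//       outs(%c : memref<?x?xf32>)
//
// A LinalgTransformationFilter matching "tile" fires on this op and, if
// constructed with a replacement identifier, relabels the rewritten op so
// that later stages can be chained.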

mlir::linalg::LinalgTransformationFilter::LinalgTransformationFilter(
    ArrayRef<Identifier> matchDisjunction, Optional<Identifier> replacement)
    : matchDisjunction(matchDisjunction.begin(), matchDisjunction.end()),
      replacement(replacement) {}

mlir::linalg::LinalgTransformationFilter::LinalgTransformationFilter(
    FilterFunction f, ArrayRef<Identifier> matchDisjunction,
    Optional<Identifier> replacement)
    : filters(),
      matchDisjunction(matchDisjunction.begin(), matchDisjunction.end()),
      replacement(replacement) {
  if (f)
    filters.push_back(f);
}

LogicalResult mlir::linalg::LinalgTransformationFilter::checkAndNotify(
    PatternRewriter &rewriter, Operation *op) const {
  if (llvm::any_of(filters,
                   [&](const FilterFunction &f) { return failed(f(op)); }))
    return failure();

  auto attr = op->template getAttrOfType<StringAttr>(
      LinalgTransforms::kLinalgTransformMarker);

  if (!attr) {
    // 1. Has no filter case and matchDisjunction is empty.
    if (matchDisjunction.empty())
      return success();

    // 2. Has no filter but was expecting a filter.
    return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
      diag << " does not have any filter from list: ";
      interleaveComma(matchDisjunction, diag);
    });
  }

  // 3. Match explicit filter.
  for (auto filter : matchDisjunction)
    if (attr.getValue() == filter)
      return success();

  // 4. Fail to match.
  return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
    diag << " does not have any filter from list: ";
    interleaveComma(matchDisjunction, diag);
  });
}

void mlir::linalg::LinalgTransformationFilter::
    replaceLinalgTransformationFilter(PatternRewriter &rewriter,
                                      Operation *op) const {
  if (replacement.hasValue())
    op->setAttr(LinalgTransforms::kLinalgTransformMarker,
                rewriter.getStringAttr(replacement.getValue().strref()));
  else
    op->removeAttr(Identifier::get(LinalgTransforms::kLinalgTransformMarker,
                                   rewriter.getContext()));
}

LinalgTilingOptions &
mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
  SmallVector<int64_t, 4> tileSizes(ts.begin(), ts.end());
  tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
    OpBuilder::InsertionGuard guard(b);
    b.setInsertionPointToStart(
        &op->getParentOfType<FuncOp>().getBody().front());
    return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
      Value v = b.create<ConstantIndexOp>(op->getLoc(), s);
      return v;
    }));
  };
  return *this;
}
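
// A minimal usage sketch (the tile sizes and the "tile" marker below are
// hypothetical; real passes typically take them from pass options):
//
//   LinalgTilingOptions options;
//   options.setTileSizes({8, 16, 4})
//       .setLoopType(LinalgTilingLoopType::Loops);
//   RewritePatternSet patterns(context);
//   patterns.add<LinalgTilingPattern<MatmulOp>>(
//       context, options,
//       LinalgTransformationFilter(Identifier::get("tile", context)));
//
// The tile size constants are materialized at the start of the enclosing
// function so that they dominate all loop nests created by tiling.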

/// Try to compute a static bounding box for `opOperand`.
/// Return success if either:
///   1. The operand is already statically shaped, `result` is left unchanged.
///   2. The operand is (partially) dynamic, `result` is the result of a freshly
///      created PadTensorOp.
/// Return failure if the operand cannot be padded to a static shape.
static LogicalResult padOperandToSmallestStaticBoundingBox(
    PatternRewriter &rewriter, linalg::LinalgOp opToPad, OpOperand *opOperand,
    const LinalgTilingOptions &options, Value &result) {
  // Already static shape, no need to pad.
  if (llvm::none_of(opToPad.getShape(opOperand), ShapedType::isDynamic))
    return success();
  auto sliceOp = opOperand->get().getDefiningOp<tensor::ExtractSliceOp>();
  // Not a slice op, cannot construct a static bounding box.
  if (!sliceOp)
    return failure();
  SmallVector<int64_t> staticSizes;
  staticSizes.reserve(opToPad.getRank(opOperand));
  auto shapedOp = cast<OffsetSizeAndStrideOpInterface>(sliceOp.getOperation());
  for (auto size : shapedOp.getMixedSizes()) {
    auto indexAttr = size.is<Attribute>()
                         ? size.get<Attribute>().dyn_cast<IntegerAttr>()
                         : linalg::getSmallestBoundingIndex(size.get<Value>());
    // SmallestBoundingIndex must exist for all sizes.
    // For now return an error if we can't find it.
    if (!indexAttr)
      return rewriter.notifyMatchFailure(
          opToPad, "No constant bounding box can be found for padding");
    staticSizes.push_back(indexAttr.getInt());
  }
  Value pad = options.paddingValueComputationFunction(rewriter, *opOperand);
  auto staticTensorType = RankedTensorType::get(
      staticSizes, getElementTypeOrSelf(opOperand->get()));
  result = linalg::PadTensorOp::createPadHighOp(
      staticTensorType, opOperand->get(), pad, opToPad->getLoc(), rewriter);
  return success();
}
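
// For intuition, a sketch of the rewrite above (shapes and bounds are made
// up for the example): given a dynamic operand produced by
//
//   %s = tensor.extract_slice %t[%i, %j] [%sz0, %sz1] [1, 1]
//       : tensor<?x?xf32> to tensor<?x?xf32>
//
// where %sz0 and %sz1 have known constant upper bounds 4 and 8, the operand
// is padded with a linalg.pad_tensor to the static bounding box
// tensor<4x8xf32>, so the consuming op can be cloned on fully static shapes.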

// Try to create a static bounding box around each operand of `res.op`.
// If successful, `res.op` is rewritten in static form with padded operands.
// `res.op` is updated to the cloned static form of the op on success.
static LogicalResult rewriteAsPaddedOp(PatternRewriter &rewriter,
                                       TiledLinalgOp &res,
                                       const LinalgTilingOptions &options) {
  LinalgOp opToPad = res.op;
  Location loc = opToPad->getLoc();

  // If the op is fully static, it does not need padding.
  // TODO: there are cases where we may still want to pad to larger sizes.
  assert(opToPad.hasTensorSemantics() &&
         "expected operation to have tensor semantics");
  if (!opToPad.hasDynamicShape())
    return success();

  OpBuilder::InsertionGuard g(rewriter);
  // Set IP after op because we also take the dims of the original output.
  rewriter.setInsertionPointAfter(opToPad);
  // Make a copy of the shaped operands and update it.
  SmallVector<Value> newOperands;
  newOperands.reserve(opToPad.getNumInputsAndOutputs());
  for (OpOperand *opOperand : opToPad.getInputAndOutputOperands()) {
    Value paddedOperand;
    // If padding was requested but the shape cannot be bounded statically then
    // the pattern fails to apply.
    if (failed(padOperandToSmallestStaticBoundingBox(
            rewriter, opToPad, opOperand, options, paddedOperand)))
      return failure();
    newOperands.push_back(paddedOperand ? paddedOperand : opOperand->get());
  }

  // Clone `opToPad` to operate on the statically padded shapes.
  auto resultTensorTypes =
      ValueRange(newOperands).take_back(opToPad.getNumOutputs()).getTypes();
  linalg::LinalgOp paddedOp =
      opToPad.clone(rewriter, loc, resultTensorTypes, newOperands);

  // Recover the slice out of the new static results. This keeps the original
  // linalg op around because it uses the dims of the original results.
  // This later folds away.
  SmallVector<Value> paddedSubviewResults;
  paddedSubviewResults.reserve(opToPad->getNumResults());
  SetVector<Operation *> newUsersOfOpToPad;
  for (auto it : llvm::zip(opToPad->getResults(), paddedOp->getResults())) {
    auto rank = std::get<0>(it).getType().cast<RankedTensorType>().getRank();
    SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
    auto sizes = llvm::to_vector<4>(llvm::map_range(
        llvm::seq<unsigned>(0, rank), [&](unsigned d) -> OpFoldResult {
          auto dimOp = rewriter.create<memref::DimOp>(loc, std::get<0>(it), d);
          newUsersOfOpToPad.insert(dimOp);
          return dimOp.getResult();
        }));
    SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
    paddedSubviewResults.push_back(rewriter.create<tensor::ExtractSliceOp>(
        loc, std::get<1>(it), offsets, sizes, strides));
  }
  // Replace the transient `opToPad` locally, except for uses that we just
  // created for the purpose of extracting the dims.
  rewriter.replaceOpWithIf(opToPad, paddedSubviewResults, [&](OpOperand &opOp) {
    return !newUsersOfOpToPad.contains(opOp.getOwner());
  });

  res = TiledLinalgOp{paddedOp, res.loops, res.tensorResults};
  return success();
}
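
// For intuition, the recovery of the original sizes above looks roughly like
// (hypothetical shapes):
//
//   %d0 = memref.dim %originalResult, %c0 : tensor<?x?xf32>
//   %d1 = memref.dim %originalResult, %c1 : tensor<?x?xf32>
//   %r = tensor.extract_slice %paddedResult[0, 0] [%d0, %d1] [1, 1]
//       : tensor<4x8xf32> to tensor<?x?xf32>
//
// The dim ops still reference the original op, which is why the replacement
// above explicitly excludes these freshly created users.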

/// Linalg base tiling pattern.
mlir::linalg::LinalgBaseTilingPattern::LinalgBaseTilingPattern(
    StringRef opName, MLIRContext *context, LinalgTilingOptions options,
    LinalgTransformationFilter filter, PatternBenefit benefit)
    : RewritePattern(opName, benefit, context), filter(filter),
      options(options) {}

mlir::linalg::LinalgBaseTilingPattern::LinalgBaseTilingPattern(
    MLIRContext *context, LinalgTilingOptions options,
    LinalgTransformationFilter filter, PatternBenefit benefit)
    : RewritePattern(MatchAnyOpTypeTag(), benefit, context), filter(filter),
      options(options) {}

LogicalResult mlir::linalg::LinalgBaseTilingPattern::matchAndRewriteBase(
    Operation *op, PatternRewriter &rewriter, TiledLinalgOp &result) const {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  if (!linalgOp)
    return failure();
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();

  Optional<TiledLinalgOp> res = tileLinalgOp(rewriter, linalgOp, options);

  if (!res)
    return failure();

  // Setup RAII guard to return properly.
  bool succeeded = true;
  LinalgOp tiledOp = res->op;
  auto guard = llvm::make_scope_exit([&]() {
    if (!succeeded)
      return;
    // Return relevant information to derived pattern.
    result = *res;
    // Replace filter on both tiledOp and tiledAndPaddedOp, if necessary.
    filter.replaceLinalgTransformationFilter(rewriter, tiledOp);
    if (tiledOp != res->op)
      filter.replaceLinalgTransformationFilter(rewriter, res->op);
  });

  // Consider padding on the fly only if the op has tensor semantics.
  if (!options.paddingValueComputationFunction ||
      !linalgOp.hasTensorSemantics())
    return success();

  // Try to pad on the fly by rewriting res->op as a padded op.
  if (failed(rewriteAsPaddedOp(rewriter, *res, options))) {
    // Set `succeeded` so the RAII guard does not propagate the TiledLinalgOp
    // to `result`.
    succeeded = false;
    return failure();
  }

  // Do not perform replacement of `linalgOp`, let the derived patterns
  // do this as they see fit, from the resulting TiledLinalgOp.
  return success();
}

static ValueRange getTiledOpResult(TiledLinalgOp tiledOp) {
  if (tiledOp.loops.empty())
    return tiledOp.op.getOperation()->getResults();
  return tiledOp.loops.front()->getResults();
}

static ValueRange
getTiledAndFusedOpResult(TiledAndFusedLinalgOps tiledAndFusedOp) {
  if (tiledAndFusedOp.fusedLoops.empty())
    return tiledAndFusedOp.op.getOperation()->getResults();
  return tiledAndFusedOp.fusedLoops.front()->getResults();
}

mlir::linalg::LinalgBaseTileAndFusePattern::LinalgBaseTileAndFusePattern(
    StringRef opName, MLIRContext *context,
    const LinalgDependenceGraph &dependenceGraph,
    LinalgTilingOptions tilingOptions, LinalgFusionOptions fusionOptions,
    LinalgTransformationFilter filter, LinalgTransformationFilter fusedOpMarker,
    LinalgTransformationFilter originalOpMarker, PatternBenefit benefit)
    : RewritePattern(opName, benefit, context, {}),
      dependenceGraph(dependenceGraph), tilingOptions(tilingOptions),
      fusionOptions(fusionOptions), filter(filter),
      fusedOpMarker(fusedOpMarker), originalOpMarker(originalOpMarker) {}

LogicalResult mlir::linalg::LinalgBaseTileAndFusePattern::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // TODO: remove hasIndexSemantics check once index ops are supported.
  if (!linalgOp || linalgOp.hasIndexSemantics())
    return failure();
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();

  DenseSet<Operation *> producers;
  producers.insert(linalgOp);
  for (auto dependence : dependenceGraph.getDependentOperationsInto(linalgOp)) {
    Optional<unsigned> operandNumber = dependence.getIndexingOpViewOperandNum();
    // When looking at dependences into an op, the indexing op view is always
    // an OpOperand. We could assert, but simply continue if this is not the
    // case.
    if (!operandNumber)
      continue;
    if (!fusionOptions.indicesToFuse.count(operandNumber.getValue()))
      continue;
    if (isa<LinalgOp>(dependence.getDependentOp()))
      producers.insert(dependence.getDependentOp());
  }

  SmallVector<LinalgOp, 1> fusionOps;
  for (auto it = op->getBlock()->begin(), ie = Block::iterator(op); it != ie;
       ++it) {
    auto producerLinalgOp = dyn_cast<LinalgOp>(&(*it));
    if (producerLinalgOp && producers.count(producerLinalgOp))
      fusionOps.push_back(producerLinalgOp);
  }
  fusionOps.push_back(linalgOp);

  SmallVector<Value, 4> tileSizes =
      tilingOptions.tileSizeComputationFunction(rewriter, op);
  LinalgTilingOptions instanceTilingOptions = tilingOptions;
  instanceTilingOptions.setTileSizes(tileSizes);
  Optional<TiledAndFusedLinalgOps> tiledAndFusedOps = tileAndFuseLinalgOps(
      rewriter, fusionOps, dependenceGraph, instanceTilingOptions);
  if (!tiledAndFusedOps)
    return failure();

  // Tile the unfused loops.
  SmallVector<Value, 4> unfusedLoopTileSizes;
  Value zero = rewriter.create<ConstantIndexOp>(op->getLoc(), 0);
  for (auto tileSize : enumerate(tileSizes)) {
    if (tiledAndFusedOps->fusedLoopDims.count(tileSize.index()))
      unfusedLoopTileSizes.push_back(zero);
    else
      unfusedLoopTileSizes.push_back(tileSize.value());
  }
  // Tile the loop only if there is a non-zero tile size.
  if (unfusedLoopTileSizes.size() > linalgOp.getNumLoops())
    unfusedLoopTileSizes.resize(linalgOp.getNumLoops());
  if (llvm::any_of(unfusedLoopTileSizes, [](Value val) {
        if (auto cst = val.getDefiningOp<ConstantIndexOp>())
          return cst.getValue() != 0;
        return true;
      })) {
    LinalgTilingOptions unfusedTilingOptions = tilingOptions;
    unfusedTilingOptions.setTileSizes(unfusedLoopTileSizes);
    Optional<TiledLinalgOp> unfusedTiledOp =
        tileLinalgOp(rewriter, tiledAndFusedOps->op, unfusedTilingOptions);
    if (!unfusedTiledOp)
      return failure();
    rewriter.replaceOp(tiledAndFusedOps->op,
                       getTiledOpResult(unfusedTiledOp.getValue()));
    tiledAndFusedOps->op = unfusedTiledOp->op;
  }
  op->replaceAllUsesWith(getTiledAndFusedOpResult(tiledAndFusedOps.getValue()));

  filter.replaceLinalgTransformationFilter(rewriter,
                                           tiledAndFusedOps->op.getOperation());
  for (auto fusedOp : tiledAndFusedOps->fusedProducers) {
    fusedOpMarker.replaceLinalgTransformationFilter(rewriter,
                                                    fusedOp.getOperation());
  }
  for (auto origProducerOp : ArrayRef<LinalgOp>(fusionOps).drop_back()) {
    originalOpMarker.replaceLinalgTransformationFilter(
        rewriter, origProducerOp.getOperation());
  }
  rewriter.updateRootInPlace(op, [&]() {
    originalOpMarker.replaceLinalgTransformationFilter(rewriter, op);
  });
  return success();
}

/// Linalg generic interchange pattern.
mlir::linalg::GenericOpInterchangePattern::GenericOpInterchangePattern(
    MLIRContext *context, ArrayRef<unsigned> interchangeVector,
    LinalgTransformationFilter filter, PatternBenefit benefit)
    : OpRewritePattern(context, benefit), filter(filter),
      interchangeVector(interchangeVector.begin(), interchangeVector.end()) {}

LogicalResult mlir::linalg::GenericOpInterchangePattern::matchAndRewrite(
    GenericOp genericOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, genericOp)))
    return failure();
  if (failed(interchangeGenericOpPrecondition(genericOp, interchangeVector)))
    return failure();

  // TODO: figure out how this interplays with named ops. In particular this
  // should break the named op property.
  rewriter.updateRootInPlace(genericOp, [&]() {
    interchangeGenericOp(rewriter, genericOp, interchangeVector);
    // New filter if specified.
    filter.replaceLinalgTransformationFilter(rewriter, genericOp);
  });
  return success();
}
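
// For illustration (a rough sketch, not the exact rewrite algebra): with
// interchangeVector = {1, 0}, the two loops of a generic op swap roles. An
// identity indexing map (d0, d1) -> (d0, d1) becomes (d0, d1) -> (d1, d0) and
// the iterator_types list is permuted to match, so any loop nest generated
// from the op later iterates in the interchanged order.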

mlir::linalg::LinalgBasePromotionPattern::LinalgBasePromotionPattern(
    StringRef opName, MLIRContext *context, LinalgPromotionOptions options,
    LinalgTransformationFilter filter, PatternBenefit benefit)
    : RewritePattern(opName, benefit, context, {}), filter(filter),
      options(options) {}

LogicalResult mlir::linalg::LinalgBasePromotionPattern::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, op)))
    return failure();
  if (failed(promoteSubviewsPrecondition(op, options)))
    return failure();

  // TODO: We cannot use root update here. This pattern is creating other ops,
  // so if the promotion fails, those need to be cleaned up, which doesn't seem
  // to be happening here. So to fail properly, we should be cloning the op and
  // deleting the previous op. This needs more investigation.
  rewriter.startRootUpdate(op);
  Optional<LinalgOp> promotedOp = promoteSubViews(rewriter, op, options);
  if (!promotedOp) {
    rewriter.cancelRootUpdate(op);
    return op->emitError("subview promotion failed");
  }
  rewriter.finalizeRootUpdate(op);
  filter.replaceLinalgTransformationFilter(rewriter, op);
  return success();
}

mlir::linalg::LinalgBaseVectorizationPattern::LinalgBaseVectorizationPattern(
    MLIRContext *context, LinalgTransformationFilter filter,
    PatternBenefit benefit)
    : RewritePattern(MatchAnyOpTypeTag(), benefit, context), filter(filter) {}

mlir::linalg::LinalgBaseVectorizationPattern::LinalgBaseVectorizationPattern(
    StringRef opName, MLIRContext *context, LinalgTransformationFilter filter,
    PatternBenefit benefit)
    : RewritePattern(opName, benefit, context, {}), filter(filter) {}

LogicalResult mlir::linalg::LinalgBaseVectorizationPattern::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  if (!linalgOp)
    return failure();
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();
  SmallVector<Value> newResults;
  if (failed(vectorizeLinalgOp(rewriter, op, newResults)))
    return failure();
  if (!newResults.empty())
    rewriter.replaceOp(op, newResults);
  else
    rewriter.eraseOp(op);
  return success();
}

LogicalResult mlir::linalg::applyStagedPatterns(
    Operation *op, ArrayRef<FrozenRewritePatternSet> stage1Patterns,
    const FrozenRewritePatternSet &stage2Patterns,
    function_ref<LogicalResult(Operation *)> stage3Lambda) {
  unsigned iteration = 0;
  (void)iteration;
  for (const auto &patterns : stage1Patterns) {
    LLVM_DEBUG(DBGS() << "Before 1st stage, iter: " << ++iteration << "\n"
                      << *op);
    if (failed(applyPatternsAndFoldGreedily(op, patterns))) {
      LLVM_DEBUG(DBGS() << "Underlying first stage rewrite did not converge");
      return failure();
    }
    LLVM_DEBUG(DBGS() << "After 1st stage, iter: " << ++iteration << "\n"
                      << *op);
    if (failed(applyPatternsAndFoldGreedily(op, stage2Patterns))) {
      LLVM_DEBUG(DBGS() << "Underlying 2nd stage rewrite did not converge");
      return failure();
    }
    LLVM_DEBUG(DBGS() << "After 2nd stage, iter: " << iteration << "\n"
                      << *op);
    if (stage3Lambda) {
      if (failed(stage3Lambda(op)))
        return failure();
      LLVM_DEBUG(DBGS() << "After 3rd stage, iter: " << iteration << "\n"
                        << *op);
    }
  }
  return success();
}

/// Traverse the `dims` and substitute known min or max expressions returned by
/// the lambda `getMinMaxExpr`.
static AffineMap substitute(AffineMap map, SmallVectorImpl<Value> &dims,
                            SmallVectorImpl<Value> &symbols,
                            GetMinMaxExprFn getMinMaxExpr) {
  auto exprs = llvm::to_vector<4>(map.getResults());
  for (AffineExpr &expr : exprs) {
    bool substituted = true;
    while (substituted) {
      substituted = false;
      for (unsigned dimIdx = 0; dimIdx < dims.size(); ++dimIdx) {
        Value dim = dims[dimIdx];
        auto minMax = getMinMaxExpr(dim, dims, symbols);
        if (!minMax)
          continue;
        AffineExpr dimExpr = getAffineDimExpr(dimIdx, expr.getContext());
        LLVM_DEBUG(DBGS() << "Subst: " << dim << " @ " << dimExpr << "\n");
        LLVM_DEBUG(DBGS() << "Before: " << expr << "\n");
        // Substitute occurrences of `dimExpr` by either the min expression or
        // the max expression depending on whether the value is used with a
        // positive or negative coefficient.
        AffineExpr substitutedExpr =
            substWithMin(expr, dimExpr, minMax->first, minMax->second);
        LLVM_DEBUG(DBGS() << "After: " << substitutedExpr << "\n");
        substituted = (substitutedExpr != expr);
        expr = substitutedExpr;
      }
    }

    // Cleanup and simplify the results.
    // This needs to happen outside of the loop iterating on dims.size() since
    // it modifies dims.
    SmallVector<Value, 4> operands(dims.begin(), dims.end());
    operands.append(symbols.begin(), symbols.end());
    auto map = AffineMap::get(dims.size(), symbols.size(), exprs,
                              exprs.front().getContext());

    LLVM_DEBUG({
      DBGS() << "Map to simplify: " << map << "\n";
      DBGS() << "Operands:\n";
      for (Value v : operands)
        DBGS() << v << "\n";
    });

    // Pull in affine.apply operations and compose them fully into the
    // result.
    fullyComposeAffineMapAndOperands(&map, &operands);
    canonicalizeMapAndOperands(&map, &operands);
    map = simplifyAffineMap(map);
    // Assign the results.
    exprs.assign(map.getResults().begin(), map.getResults().end());
    dims.assign(operands.begin(), operands.begin() + map.getNumDims());
    symbols.assign(operands.begin() + map.getNumDims(), operands.end());

    LLVM_DEBUG(DBGS() << "Map simplified: " << map << "\n");
  }

  assert(!exprs.empty() && "Unexpected empty exprs");
  return AffineMap::get(dims.size(), symbols.size(), exprs, map.getContext());
}

/// Traverse the dims of the AffineMap of `affineMinOp` and substitute
/// dimensions with known range by new expressions involving the min or max
/// expression:
///   - If the AffineDimExpr mapped to a known value has a positive sign, it
///     is replaced by the min expression.
///   - If the AffineDimExpr mapped to a known value has a negative sign, it is
///     replaced by the max expression.
/// All known values are iteratively replaced.
/// This is used as an intermediate step in computing bounding boxes and in
/// canonicalizing AffineMinOps. All dim and symbol operands are assumed to
/// have positive values (positive orthant assumption).
/// Return a new AffineMap, dims and symbols that have been canonicalized and
/// simplified.
AffineMapAndOperands
mlir::linalg::substituteMin(AffineMinOp affineMinOp,
                            GetMinMaxExprFn getMinMaxExpr) {
  AffineMapAndOperands res{affineMinOp.getAffineMap(),
                           SmallVector<Value>(affineMinOp.getDimOperands()),
                           SmallVector<Value>(affineMinOp.getSymbolOperands())};
  res.map = substitute(affineMinOp.getAffineMap(), res.dims, res.symbols,
                       getMinMaxExpr);
  return res;
}
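
// A small worked example of the substitution (the range is hypothetical):
// suppose getMinMaxExpr reports 0 <= %iv <= s0 - 4 for the value bound to d0.
// In `256 - d0` the dimension appears with a negative coefficient, so it is
// replaced by the max expression, yielding `256 - (s0 - 4)`; in `d0 + 3` it
// appears with a positive coefficient and is replaced by the min expression,
// yielding `3`. Iterating to a fixed point and simplifying produces a map
// free of the bounded dimension.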

LogicalResult AffineMinRangeCanonicalizationPattern::matchAndRewrite(
    AffineMinOp minOp, PatternRewriter &rewriter) const {
  LLVM_DEBUG(DBGS() << "Canonicalize AffineMinSCF: " << *minOp.getOperation()
                    << "\n");

  auto affineMapAndOperands = substituteMin(minOp, getMinMaxFn);
  AffineMap map = affineMapAndOperands.map;

  LLVM_DEBUG(DBGS() << "Resulting map: " << map << "\n");

  // Check whether any of the expressions, when subtracted from all other
  // expressions, produces only >= 0 constants. If so, it is the min.
  for (auto e : minOp.getAffineMap().getResults()) {
    LLVM_DEBUG(DBGS() << "Candidate min: " << e << "\n");
    if (!e.isSymbolicOrConstant())
      continue;

    // Return true unless `e` is a constant known to be non-negative.
    auto mayBeNegative = [](AffineExpr e) {
      if (auto cst = e.dyn_cast<AffineConstantExpr>())
        return cst.getValue() < 0;
      return true;
    };

    // Build the subMap and check everything is statically known to be
    // non-negative.
    SmallVector<AffineExpr, 4> subExprs;
    subExprs.reserve(map.getNumResults());
    for (auto ee : map.getResults())
      subExprs.push_back(ee - e);
    MLIRContext *ctx = minOp.getContext();
    AffineMap subMap = simplifyAffineMap(
        AffineMap::get(map.getNumDims(), map.getNumSymbols(), subExprs, ctx));
    LLVM_DEBUG(DBGS() << "simplified subMap: " << subMap << "\n");
    if (llvm::any_of(subMap.getResults(), mayBeNegative))
      continue;

    // Static min found.
    if (auto cst = e.dyn_cast<AffineConstantExpr>()) {
      rewriter.replaceOpWithNewOp<ConstantIndexOp>(minOp, cst.getValue());
    } else {
      auto resultMap = AffineMap::get(0, map.getNumSymbols(), {e}, ctx);
      SmallVector<Value> resultOperands = affineMapAndOperands.dims;
      llvm::append_range(resultOperands, affineMapAndOperands.symbols);
      canonicalizeMapAndOperands(&resultMap, &resultOperands);
      resultMap = simplifyAffineMap(resultMap);
      rewriter.replaceOpWithNewOp<AffineApplyOp>(minOp, resultMap,
                                                 resultOperands);
    }
    return success();
  }

  return failure();
}
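
// For illustration, a sketch with a hypothetical range callback: assume %iv
// is an induction variable stepping by 4 for which the callback reports
// 0 <= %iv <= %ub - 4. Then in
//
//   %0 = affine.min affine_map<(d0)[s0] -> (4, s0 - d0)>(%iv)[%ub]
//
// substituting the max expression `s0 - 4` for d0 (negative coefficient)
// turns `s0 - d0` into `4`. Every result minus the candidate constant 4 then
// simplifies to a constant >= 0, so the affine.min is replaced by
// `constant 4 : index`.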

static SmallVector<StringRef> getNParallelLoopsAttrs(unsigned nParallelLoops) {
  return SmallVector<StringRef>(nParallelLoops, getParallelIteratorTypeName());
}

/// Rewrite a PadTensorOp into a sequence of InitTensorOp, FillOp (to initialize
/// with pad_val) and GenericOp (to copy contents).
LogicalResult PadTensorOpTransformationPattern::matchAndRewrite(
    linalg::PadTensorOp padOp, PatternRewriter &rewriter) const {
  auto inputShapedType = padOp.source().getType().cast<ShapedType>();
  auto resultShapedType = padOp.result().getType().cast<ShapedType>();

  // Bail on non-static shapes.
  if (!inputShapedType.hasStaticShape())
    return failure();
  if (!resultShapedType.hasStaticShape())
    return failure();

  // Only support padding with a constant for now, i.e. either:
  //   1. A BBarg from a different block.
  //   2. A value defined outside of the current block.
  Block &block = padOp.region().front();
  auto yieldOp = cast<YieldOp>(block.getTerminator());
  assert(yieldOp.getNumOperands() == 1 && "expected single operand yield");
  Value padValue = yieldOp.values().front();
  Operation *definingOp = padValue.getDefiningOp();
  if (definingOp && definingOp->getBlock() == &block)
    return failure();
  if (!definingOp && padValue.cast<BlockArgument>().getOwner() == &block)
    return failure();

  // Create tensor with the padded shape.
  Location loc = padOp.getLoc();
  Value initTensor = rewriter.create<InitTensorOp>(
      loc, resultShapedType.getShape(), resultShapedType.getElementType());

  // Initialize tensor with the pad value.
  Value tmpTensor =
      rewriter.create<linalg::FillOp>(loc, padValue, initTensor).result();

  // Copy original contents into new tensor.
  // Uses linalg.generic, but could be done with tensor.insert_slice.
  SmallVector<AffineExpr, 4> outputExprs;
  for (unsigned i = 0; i < resultShapedType.getRank(); ++i) {
    outputExprs.push_back(getAffineDimExpr(i, rewriter.getContext()) +
                          padOp.static_low()[i].cast<IntegerAttr>().getInt());
  }

  SmallVector<AffineMap, 2> transferMaps = {
      rewriter.getMultiDimIdentityMap(inputShapedType.getRank()),
      AffineMap::get(resultShapedType.getRank(),
                     /*symbolCount=*/0, outputExprs, rewriter.getContext())};

  rewriter.replaceOpWithNewOp<linalg::GenericOp>(
      padOp, resultShapedType, padOp.source(), tmpTensor, transferMaps,
      getNParallelLoopsAttrs(resultShapedType.getRank()),
      [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
        nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
      });

  return success();
}
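
// For illustration, a sketch of this lowering on hypothetical static shapes:
//
//   %0 = linalg.pad_tensor %src low[1, 2] high[2, 1] {
//     ^bb0(%i: index, %j: index):
//       linalg.yield %cst : f32
//   } : tensor<2x3xf32> to tensor<5x6xf32>
//
// becomes roughly:
//
//   %init = linalg.init_tensor [5, 6] : tensor<5x6xf32>
//   %fill = linalg.fill(%cst, %init) : f32, tensor<5x6xf32> -> tensor<5x6xf32>
//   %0 = linalg.generic {
//            indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
//                             affine_map<(d0, d1) -> (d0 + 1, d1 + 2)>],
//            iterator_types = ["parallel", "parallel"]}
//        ins(%src : tensor<2x3xf32>) outs(%fill : tensor<5x6xf32>) { ... }
//
// i.e. the source is copied into the filled tensor at the offset given by the
// low padding.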

/// Given an OpFoldResult, return a Value. If the OpFoldResult is an Attribute,
/// it must be of type Integer.
static Value asValue(OpBuilder &builder, Location loc, OpFoldResult ofr) {
  if (auto val = ofr.dyn_cast<Value>())
    return val;
  auto intVal = getConstantIntValue(ofr);
  assert(intVal && "expected Value or IntegerAttr");
  return builder.create<ConstantIndexOp>(loc, *intVal);
}

/// Given a value, try to extract a constant index-type integer as an Attribute.
/// If this fails, return the original value.
static OpFoldResult asOpFoldResult(OpBuilder &builder, Value val) {
  if (auto constInt = getConstantIntValue(val))
    return builder.getIndexAttr(*constInt);
  return val;
}

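/// Rewrite extract_slice(pad_tensor(x)) into pad_tensor(extract_slice(x)).
/// A rough sketch with hypothetical offsets/sizes (the exact arithmetic is
/// documented step by step in the function body below):
///
///   %0 = linalg.pad_tensor %src low[%lo, 0] high[%hi, 0] {...}
///   %1 = tensor.extract_slice %0[%off, 0] [%len, 64] [1, 1]
///
/// becomes a tensor.extract_slice of %src with clamped offsets and sizes,
/// wrapped in a linalg.pad_tensor with recomputed low/high padding. When the
/// source is provably not read at all, a tensor.generate that produces only
/// the padding value is emitted instead.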
LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
    tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
  auto padOp = sliceOp.source().getDefiningOp<PadTensorOp>();
  if (!padOp)
    return failure();
  // Only unit stride supported.
  if (!sliceOp.hasUnitStride())
    return failure();
  // Only constant padding value supported.
  Value padValue = padOp.getConstantPaddingValue();
  if (!padValue)
    return failure();

  // Helper variables and functions for various arithmetic operations. These
  // are used extensively for computing new offset/length and padding values.
  Location loc = sliceOp.getLoc();
  AffineExpr dim0, dim1;
  bindDims(rewriter.getContext(), dim0, dim1);
  // Add two integers.
  auto addMap = AffineMap::get(2, 0, {dim0 + dim1});
  auto add = [&](Value v1, Value v2) {
    return rewriter.createOrFold<AffineApplyOp>(loc, addMap,
                                                ValueRange{v1, v2});
  };
  // Subtract two integers.
  auto subMap = AffineMap::get(2, 0, {dim0 - dim1});
  auto sub = [&](Value v1, Value v2) {
    return rewriter.createOrFold<AffineApplyOp>(loc, subMap,
                                                ValueRange{v1, v2});
  };
  // Take the minimum of two integers.
  auto idMap = AffineMap::getMultiDimIdentityMap(2, rewriter.getContext());
  auto min = [&](Value v1, Value v2) {
    return rewriter.createOrFold<AffineMinOp>(loc, idMap, ValueRange{v1, v2});
  };
  // Take the maximum of two integers.
  auto max = [&](Value v1, Value v2) {
    return rewriter.createOrFold<AffineMaxOp>(loc, idMap, ValueRange{v1, v2});
  };
  // Zero index-typed integer.
  auto zero = rewriter.create<ConstantIndexOp>(loc, 0);

  // Helper function for filling static/dynamic low/high padding indices
  // vectors of PadTensorOp.
  auto appendIndex = [&](Value val, SmallVector<Value> &dynIndices,
                         SmallVector<int64_t> &staticIndices) {
    if (auto constInt = getConstantIntValue(val)) {
      staticIndices.push_back(*constInt);
    } else {
      staticIndices.push_back(ShapedType::kDynamicSize);
      dynIndices.push_back(val);
    }
  };

  // Compute new offsets, lengths, low padding, high padding.
  SmallVector<OpFoldResult> newOffsets, newLengths, newStrides;
  SmallVector<Value> newLows, newHighs;
  SmallVector<int64_t> staticNewLows, staticNewHighs;
  // Set to true if the original data source is not read at all.
  bool hasZeroLen = false;
  // Same as hasZeroLen, but for dynamic dimension sizes. This condition
  // is true if the original data source turns out to be unused at runtime.
  Value dynHasZeroLenCond;

  int64_t rank = padOp.getSourceType().getRank();
  for (unsigned dim = 0; dim < rank; ++dim) {
    auto low = asValue(rewriter, loc, padOp.getMixedLowPad()[dim]);
    auto offset = asValue(rewriter, loc, sliceOp.getMixedOffsets()[dim]);
    auto length = asValue(rewriter, loc, sliceOp.getMixedSizes()[dim]);
    auto srcSize = rewriter.createOrFold<memref::DimOp>(
        loc, padOp.source(), dim);

    // The new amount of low padding is `low - offset`. Except for the case
    // where none of the low padding is read. In that case, the new amount of
    // low padding is zero.
    Value newLow = max(zero, sub(low, offset));
    appendIndex(newLow, newLows, staticNewLows);

    // Start reading the data from position `offset - low`. Since the original
    // read may have started in the low padding zone, this value could be
    // negative. Therefore, start reading from:
    //
    // max(offset - low, 0)
    //
    // The original read could also have started in the high padding zone.
    // In that case, set the offset to the end of source tensor. The new
    // ExtractSliceOp length will be zero in that case. (Effectively reading no
    // data from the source.)
    Value newOffset = min(max(sub(offset, low), zero), srcSize);
    newOffsets.push_back(asOpFoldResult(rewriter, newOffset));

    // The original ExtractSliceOp was reading until position `offset + length`.
    // Therefore, the corresponding position within the source tensor is:
    //
    // offset + length - low
    //
    // In case the original ExtractSliceOp stopped reading within the low
    // padding zone, this value can be negative. In that case, the end position
    // of the read should be zero. (Similar to newOffset.)
    //
    // The original read could also have stopped in the high padding zone.
    // In that case, the end position of the read should be the end of the
    // source tensor. (Similar to newOffset.)
    //
    // endLoc = min(max(offset - low + length, 0), srcSize)
    //
    // The new ExtractSliceOp length is `endLoc - newOffset`.
    Value endLoc = min(max(add(sub(offset, low), length), zero), srcSize);
    Value newLength = sub(endLoc, newOffset);
    newLengths.push_back(asOpFoldResult(rewriter, newLength));

    // Check if newLength is zero. In that case, no ExtractSliceOp should be
    // created.
    if (auto newLengthInt = getConstantIntValue(newLength)) {
      hasZeroLen |= *newLengthInt == 0;
    } else {
      Value check = rewriter.create<CmpIOp>(
          loc, CmpIPredicate::eq, newLength, zero);
      dynHasZeroLenCond =
          dynHasZeroLenCond
              ? rewriter.create<OrOp>(loc, check, dynHasZeroLenCond)
              : check;
    }

    // The amount of high padding is simply the number of elements remaining,
    // so that the result has the same length as the original ExtractSliceOp.
    Value newHigh = sub(sub(length, newLength), newLow);
    appendIndex(newHigh, newHighs, staticNewHighs);

    // Only unit stride supported.
    newStrides.push_back(rewriter.getIndexAttr(1));
  }
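
  // A worked example for one dimension with made-up numbers: low = 2,
  // offset = 1, length = 4, srcSize = 10. Then:
  //   newLow    = max(0, 2 - 1)              = 1
  //   newOffset = min(max(1 - 2, 0), 10)     = 0
  //   endLoc    = min(max(1 - 2 + 4, 0), 10) = 3
  //   newLength = 3 - 0                      = 3
  //   newHigh   = (4 - 3) - 1                = 0
  // and, as expected, newLow + newLength + newHigh == length.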

  // Insert cast to ensure that types match. (May be folded away.)
  auto castResult = [&](Value val) -> Value {
    auto castOp = rewriter.create<tensor::CastOp>(loc, sliceOp.getType(), val);
    return castOp;
  };

  // In cases where the original data source is unused: Emit a GenerateOp and
  // do not generate an ExtractSliceOp. (The result shape of the ExtractSliceOp
  // would have a dimension of size 0, the semantics of which is unclear.)
  auto createGenerateOp = [&]() {
    // The shape of the GenerateOp is the same as the existing ExtractSliceOp.
    RankedTensorType type = sliceOp.getType();
    SmallVector<Value> dynDims;
    for (unsigned i = 0; i < type.getRank(); ++i) {
      if (type.isDynamicDim(i))
        dynDims.push_back(asValue(rewriter, loc, sliceOp.getMixedSizes()[i]));
    }

    // Create GenerateOp.
    auto generateOp = rewriter.create<tensor::GenerateOp>(loc, type, dynDims);

    // Copy region to new op.
    BlockAndValueMapping bvm;
    padOp.region().cloneInto(&generateOp.getRegion(), bvm);
    // Rewrite linalg::YieldOp to tensor::YieldOp.
    {
      OpBuilder::InsertionGuard guard(rewriter);
      auto yieldOp = dyn_cast<linalg::YieldOp>(
          generateOp.getRegion().front().getTerminator());
      assert(yieldOp && "malformed PadTensorOp: expected YieldOp terminator");
      assert(yieldOp.values().size() == 1);
      rewriter.setInsertionPoint(yieldOp);
      rewriter.replaceOpWithNewOp<tensor::YieldOp>(
          yieldOp, yieldOp.values()[0]);
    }

    return castResult(generateOp);
  };

  // Emit an ExtractSliceOp and a PadTensorOp. Should not be used in cases
  // where the result shape of the new ExtractSliceOp has a zero dimension.
  auto createPadTensorOfExtractSlice = [&]() {
    // Create pad_tensor(extract_slice(x)).
    auto newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
        loc, padOp.source(), newOffsets, newLengths, newStrides);
    auto newPadTensorOp = rewriter.create<PadTensorOp>(
        loc, newSliceOp, staticNewLows, staticNewHighs, newLows, newHighs);

    // Copy region to new PadTensorOp.
    BlockAndValueMapping bvm;
    padOp.region().cloneInto(&newPadTensorOp.getRegion(), bvm);

    // Cast result and return.
    return castResult(newPadTensorOp);
  };

  // Rewrite extract_slice(pad_tensor(x)) into a GenerateOp if it is statically
  // known that the original data source x is not used.
  if (hasZeroLen) {
    rewriter.replaceOp(sliceOp, createGenerateOp());
    return success();
  }

  // If there are dynamic dimensions: Generate an scf.if check to avoid
  // creating ExtractSliceOps with result dimensions of size 0 at runtime.
  if (dynHasZeroLenCond) {
    auto result = rewriter.create<scf::IfOp>(
        loc, sliceOp.getType(), dynHasZeroLenCond,
        /*thenBuilder=*/
        [&](OpBuilder &b, Location loc) {
          b.create<scf::YieldOp>(loc, createGenerateOp());
        },
        /*elseBuilder=*/
        [&](OpBuilder &b, Location loc) {
          b.create<scf::YieldOp>(loc, createPadTensorOfExtractSlice());
        });
    rewriter.replaceOp(sliceOp, result.getResult(0));
    return success();
  }

  // All shapes are static and the data source is actually used. Rewrite into
  // pad_tensor(extract_slice(x)).
  rewriter.replaceOp(sliceOp, createPadTensorOfExtractSlice());
  return success();
}