//===- Transforms.cpp - Linalg transformations as patterns ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements logic and helpers to expose Linalg transforms as rewrite
// patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/HoistPadding.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/Transforms/Transforms.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/IR/TensorTilingInterfaceImpl.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>
#include <utility>

#define DEBUG_TYPE "linalg-transforms"

using namespace mlir;
using namespace mlir::linalg;

#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE << "]: ")

//===----------------------------------------------------------------------===//
// Transformations exposed as rewrite patterns.
//===----------------------------------------------------------------------===//
// Marker used as attribute name in generated Linalg rewriting transformations.
const StringLiteral mlir::linalg::LinalgTransforms::kLinalgTransformMarker =
    "__internal_linalg_transform__";
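
// For intuition, the marker appears as a discardable string attribute on ops,
// e.g. (IR sketch; names and types illustrative):
//
//   %0 = linalg.matmul {__internal_linalg_transform__ = "tiled"}
//          ins(%a, %b : tensor<?x?xf32>, tensor<?x?xf32>)
//          outs(%c : tensor<?x?xf32>) -> tensor<?x?xf32>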

mlir::linalg::LinalgTransformationFilter::LinalgTransformationFilter(
    ArrayRef<StringAttr> matchDisjunction, Optional<StringAttr> replacement)
    : matchDisjunction(matchDisjunction.begin(), matchDisjunction.end()),
      replacement(replacement), matchByDefault(false) {}

mlir::linalg::LinalgTransformationFilter::LinalgTransformationFilter(
    const FilterFunction &f, ArrayRef<StringAttr> matchDisjunction,
    Optional<StringAttr> replacement)
    : filters(),
      matchDisjunction(matchDisjunction.begin(), matchDisjunction.end()),
      replacement(replacement), matchByDefault(false) {
  if (f)
    filters.push_back(f);
}

LogicalResult mlir::linalg::LinalgTransformationFilter::checkAndNotify(
    PatternRewriter &rewriter, Operation *op) const {
  if (llvm::any_of(filters,
                   [&](const FilterFunction &f) { return failed(f(op)); }))
    return failure();

  auto attr = op->template getAttrOfType<StringAttr>(
      LinalgTransforms::kLinalgTransformMarker);

  if (!attr) {
    // 1. Has no filter attribute and matchDisjunction is empty or matching by
    // default is enabled.
    if (matchDisjunction.empty() || matchByDefault)
      return success();

    // 2. Has no filter attribute but one was expected.
    return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
      diag << " does not have any filter from list: ";
      interleaveComma(matchDisjunction, diag);
    });
  }

  // 3. Match an explicit filter.
  for (auto filter : matchDisjunction)
    if (attr.getValue() == filter)
      return success();

  // 4. Fail to match.
  return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
    diag << " does not have any filter from list: ";
    interleaveComma(matchDisjunction, diag);
  });
}

void mlir::linalg::LinalgTransformationFilter::
    replaceLinalgTransformationFilter(PatternRewriter &rewriter,
                                      Operation *op) const {
  if (replacement.has_value())
    op->setAttr(LinalgTransforms::kLinalgTransformMarker, replacement.value());
  else
    op->removeAttr(
        rewriter.getStringAttr(LinalgTransforms::kLinalgTransformMarker));
}
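
// A typical pattern drives the filter in three steps (usage sketch; the
// variable names are illustrative):
//
//   LinalgTransformationFilter filter(
//       StringAttr::get(ctx, "step1"),   // Only match ops tagged "step1"...
//       StringAttr::get(ctx, "step2"));  // ...and re-tag results as "step2".
//   if (failed(filter.checkAndNotify(rewriter, op)))
//     return failure();
//   // ... apply the transformation, producing `newOp` ...
//   filter.replaceLinalgTransformationFilter(rewriter, newOp);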

bool mlir::linalg::LinalgTransformationFilter::hasReplacementFilter(
    Operation *op) const {
  if (!replacement)
    return false;
  auto attr = op->getAttr(LinalgTransforms::kLinalgTransformMarker)
                  .dyn_cast<StringAttr>();
  return attr && attr == *replacement;
}

LinalgTilingOptions &
mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
  assert(!tileSizeComputationFunction && "tile sizes already set");
  SmallVector<int64_t, 4> tileSizes(ts.begin(), ts.end());
  tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
    OpBuilder::InsertionGuard guard(b);
    b.setInsertionPointToStart(
        &op->getParentOfType<func::FuncOp>().getBody().front());
    return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
      Value v = b.create<arith::ConstantIndexOp>(op->getLoc(), s);
      return v;
    }));
  };
  return *this;
}
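
// E.g., a client can request fixed 2-D tiling (usage sketch):
//
//   LinalgTilingOptions options;
//   options.setTileSizes({8, 16});
//
// The sizes are materialized as index-typed arith.constant ops at the start
// of the enclosing function when tiling runs.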

LinalgTilingOptions &mlir::linalg::LinalgTilingOptions::scalarizeDynamicDims() {
  assert(!tileSizeComputationFunction && "tile sizes already set");
  tileSizeComputationFunction = [](OpBuilder &b, Operation *op) {
    SmallVector<Value, 4> tileSizes;
    auto linalgOp = dyn_cast<LinalgOp>(op);
    if (!linalgOp)
      return tileSizes;
    Location loc = linalgOp.getLoc();
    auto allShapeSizes = linalgOp.createFlatListOfOperandDims(b, loc);
    AffineMap map = linalgOp.getShapesToLoopsMap();
    if (!map)
      return tileSizes;
    auto shapeSizes = applyMapToValues(b, loc, map, allShapeSizes);
    // If the shape size is dynamic, tile by 1. Otherwise, do not tile (tile
    // size 0).
    for (Value shapeSize : shapeSizes)
      tileSizes.push_back(getConstantIntValue(shapeSize)
                              ? b.create<arith::ConstantIndexOp>(loc, 0)
                              : b.create<arith::ConstantIndexOp>(loc, 1));
    return tileSizes;
  };
  return *this;
}
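
// Worked example: for an op whose loop ranges are (4, %d, 8), the computed
// tile sizes are (0, 1, 0); the static dimensions are left untiled and the
// dynamic dimension %d is scalarized to extent 1.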

/// Pad the `opOperand` in the `paddingDimensions` using the padding value and
/// the nofold flag found in `paddingValues` and `packPaddings`, respectively.
/// Exit early and return the `opOperand` value if the shape dimensions that
/// match `paddingDimensions` have a static size and the nofold flag is not
/// set. Otherwise, try to pad the shape dimensions that match the iterator
/// dimensions `paddingDimensions` and return the tensor::PadOp result if
/// padding succeeds, or failure otherwise.
static FailureOr<Value> padOperandToSmallestStaticBoundingBox(
    OpBuilder &b, linalg::LinalgOp opToPad, OpOperand *opOperand,
    ArrayRef<int64_t> paddingDimensions, ArrayRef<Attribute> paddingValues,
    ArrayRef<bool> packPaddings) {
  AffineMap indexingMap = opToPad.getTiedIndexingMap(opOperand);
  ArrayRef<int64_t> shape = opToPad.getShape(opOperand);

  // Collect the shape dimensions that are a function of `paddingDimensions`.
  llvm::SmallDenseSet<int64_t> shapeDimsToPad;
  for (int64_t dim : paddingDimensions)
    for (const auto &en : enumerate(indexingMap.getResults()))
      if (en.value().isFunctionOfDim(dim))
        shapeDimsToPad.insert(en.index());

  // Return the unpadded operand if padding to a static shape is not needed and
  // the nofold flag is not set.
  bool nofold = opOperand->getOperandNumber() < packPaddings.size()
                    ? packPaddings[opOperand->getOperandNumber()]
                    : false;
  bool hasStaticShape = llvm::none_of(shapeDimsToPad, [&](int64_t dim) {
    return ShapedType::isDynamic(shape[dim]);
  });
  if (!nofold && hasStaticShape)
    return opOperand->get();

  // Fail if `paddingValues` specifies no padding value.
  if (opOperand->getOperandNumber() >= paddingValues.size())
    return failure();
  Attribute paddingAttr = paddingValues[opOperand->getOperandNumber()];
  Value paddingValue = b.create<arith::ConstantOp>(
      opToPad.getLoc(), paddingAttr.getType(), paddingAttr);

  // Follow the use-def chain if `currOpOperand` is defined by a LinalgOp.
  OpOperand *currOpOperand = opOperand;
  while (auto linalgOp = currOpOperand->get().getDefiningOp<LinalgOp>()) {
    OpResult result = currOpOperand->get().cast<OpResult>();
    currOpOperand = linalgOp.getOutputOperand(result.getResultNumber());
  }

  // Fail if `currOpOperand` is not defined by an ExtractSliceOp.
  auto sliceOp = currOpOperand->get().getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp)
    return failure();

  // Compute the dropped dimensions if `sliceOp` is rank-reducing.
  llvm::SmallBitVector droppedDims = sliceOp.getDroppedDims();
  OffsetSizeAndStrideOpInterface shapedOp = sliceOp;

  // Upper bound the `sliceOp` sizes to obtain a static bounding box.
  SmallVector<int64_t> paddedShape(shape.begin(), shape.end());
  int64_t shapeIdx = 0;
  for (const auto &en : enumerate(shapedOp.getMixedSizes())) {
    // Skip dropped dimensions.
    if (droppedDims.test(en.index()))
      continue;
    // Skip dimensions that do not require padding.
    if (!shapeDimsToPad.contains(shapeIdx)) {
      shapeIdx++;
      continue;
    }
    // If the size is an attribute, add it directly to `paddedShape`.
    if (en.value().is<Attribute>()) {
      paddedShape[shapeIdx++] =
          en.value().get<Attribute>().dyn_cast<IntegerAttr>().getInt();
      continue;
    }
    // Otherwise, try to compute a constant upper bound for the size value.
    FailureOr<int64_t> upperBound =
        getConstantUpperBoundForIndex(en.value().get<Value>());
    if (failed(upperBound)) {
      LLVM_DEBUG(DBGS() << "No constant bounding box can be found for padding");
      return failure();
    }
    paddedShape[shapeIdx++] = *upperBound;
  }
  assert(shapeIdx == static_cast<int64_t>(shape.size()) &&
         "expect the dynamic and static ranks to match");

  // Pad the operand to the bounding box defined by `paddedShape`.
  auto paddedTensorType = RankedTensorType::get(
      paddedShape, getElementTypeOrSelf(opOperand->get()));
  return makeComposedPadHighOp(b, opToPad->getLoc(), paddedTensorType,
                               opOperand->get(), paddingValue, nofold);
}
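
// For intuition, padding a dynamically sized slice to its static bounding box
// produces IR of roughly this shape (sketch; types abbreviated, the bound 8
// assumed to come from getConstantUpperBoundForIndex):
//
//   %s = tensor.extract_slice %t[0, 0] [%sz, 16] [1, 1]
//       : tensor<?x16xf32> to tensor<?x16xf32>
//   %p = tensor.pad %s low[0, 0] high[%h, 0] {
//   ^bb0(%i: index, %j: index):
//     tensor.yield %cst : f32
//   } : tensor<?x16xf32> to tensor<8x16xf32>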

FailureOr<SmallVector<Value>>
linalg::rewriteAsPaddedOp(OpBuilder &b, LinalgOp opToPad,
                          ArrayRef<int64_t> paddingDimensions,
                          ArrayRef<Attribute> paddingValues,
                          ArrayRef<bool> packPaddings, LinalgOp &paddedOp) {
  Location loc = opToPad->getLoc();

  // TODO: there are cases where we may still want to pad to larger sizes.
  assert(opToPad.hasTensorSemantics() &&
         "expected operation to have tensor semantics");

  OpBuilder::InsertionGuard g(b);
  // Set IP after op because we also take the dims of the original output.
  b.setInsertionPointAfter(opToPad);
  // Make a copy of the shaped operands and update them.
  SmallVector<Value> newOperands;
  newOperands.reserve(opToPad.getNumInputsAndOutputs());
  for (OpOperand *opOperand : opToPad.getInputAndOutputOperands()) {
    FailureOr<Value> paddedOperand = padOperandToSmallestStaticBoundingBox(
        b, opToPad, opOperand, paddingDimensions, paddingValues, packPaddings);
    // Exit if `paddingDimensions` cannot be bounded statically.
    if (failed(paddedOperand))
      return failure();
    newOperands.push_back(*paddedOperand);
  }

  SmallVector<SmallVector<Value>> reifiedResultShapes;
  if (failed(cast<ReifyRankedShapedTypeOpInterface>(opToPad.getOperation())
                 .reifyResultShapes(b, reifiedResultShapes)))
    return failure();
  assert(reifiedResultShapes.size() == opToPad->getNumResults() &&
         "expected same number of results");

  // Clone `opToPad` to operate on the statically padded shapes.
  auto resultTensorTypes =
      ValueRange(newOperands).take_back(opToPad.getNumOutputs()).getTypes();
  paddedOp = opToPad.clone(b, loc, resultTensorTypes, newOperands);

  // Recover the slice out of the new static results. This keeps the original
  // linalg op around because it uses the dims of the original results.
  SmallVector<Value> paddedSubviewResults;
  paddedSubviewResults.reserve(opToPad->getNumResults());
  for (const auto &en : llvm::enumerate(paddedOp->getResults())) {
    Value paddedResult = en.value();
    int64_t resultNumber = en.index();
    int64_t rank = paddedResult.getType().cast<RankedTensorType>().getRank();
    SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
    SmallVector<OpFoldResult> sizes;
    for (Value v : reifiedResultShapes[resultNumber])
      sizes.push_back(getAsOpFoldResult(v));
    SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
    paddedSubviewResults.push_back(b.create<tensor::ExtractSliceOp>(
        loc, paddedResult, offsets, sizes, strides));
  }
  return paddedSubviewResults;
}
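
// Net effect (IR sketch; shapes illustrative): the cloned op computes on
// static shapes, and the original dynamic result shape is recovered with a
// tensor.extract_slice:
//
//   %padded = linalg.matmul
//       ins(%pa, %pb : tensor<8x12xf32>, tensor<12x16xf32>)
//       outs(%pc : tensor<8x16xf32>) -> tensor<8x16xf32>
//   %res = tensor.extract_slice %padded[0, 0] [%d0, %d1] [1, 1]
//       : tensor<8x16xf32> to tensor<?x?xf32>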

/// Try to peel a loop `op` and return the new results.
// TODO: Add support for scf.parallel and affine.for loops.
static SmallVector<Value, 4> peelLoop(RewriterBase &rewriter, Operation *op) {
  return llvm::TypeSwitch<Operation *, SmallVector<Value, 4>>(op)
      .Case<scf::ForOp>([&](scf::ForOp forOp) {
        scf::ForOp partialIteration;
        if (succeeded(scf::peelAndCanonicalizeForLoop(rewriter, forOp,
                                                      partialIteration)))
          return partialIteration->getResults();
        assert(!partialIteration && "expected that loop was not peeled");
        return forOp->getResults();
      })
      .Default([&](Operation *op) { return op->getResults(); });
}
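
// Peeling splits, e.g., an scf.for into a main loop that executes only full
// steps and a single partial iteration for the remainder (IR sketch):
//
//   scf.for %iv = %lb to %split step %c4 { ... }  // Full steps only.
//   scf.for %iv = %split to %ub step %c4 { ... }  // At most one partial step.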

/// Peel and canonicalize `loops`.
void mlir::linalg::peelLoops(RewriterBase &rewriter,
                             ArrayRef<scf::ForOp> loops) {
  for (scf::ForOp loopOp : loops)
    (void)peelLoop(rewriter, loopOp);
}

/// Peel loops after tiling.
void mlir::linalg::peelTiledLinalgOp(RewriterBase &rewriter, TiledLinalgOp &res,
                                     ArrayRef<int64_t> peeledLoops,
                                     LinalgTilingLoopType loopType) {
  for (int64_t loop : peeledLoops) {
    assert(loop < static_cast<int64_t>(res.loops.size()) &&
           "requested peeling of non-existing loop");
    Operation *loopOp = res.loops[loop];
    SmallVector<Value, 4> loopResults = peelLoop(rewriter, loopOp);

    // The result of the loop nest may change with peeling.
    if (res.tensorResults.size() == loopOp->getNumResults() &&
        std::equal(res.tensorResults.begin(), res.tensorResults.end(),
                   loopOp->getResults().begin()))
      res.tensorResults = loopResults;
  }
}

/// Linalg tiling pattern.
mlir::linalg::LinalgTilingPattern::LinalgTilingPattern(
    MLIRContext *context, LinalgTilingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgTilingPattern::LinalgTilingPattern(
    StringRef opName, MLIRContext *context, LinalgTilingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)), options(std::move(options)) {}

FailureOr<TiledLinalgOp>
mlir::linalg::LinalgTilingPattern::returningMatchAndRewrite(
    LinalgOp op, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, op)))
    return failure();

  FailureOr<TiledLinalgOp> res = tileLinalgOp(rewriter, op, options);
  if (failed(res))
    return failure();

  // Clear filter to stop recursive pattern application.
  // This must be done here to properly propagate to peeling branches.
  filter.replaceLinalgTransformationFilter(rewriter, res->op);

  // Peel the loops of the TiledLinalgOp.
  peelTiledLinalgOp(rewriter, *res, options.peeledLoops, options.loopType);

  if (res->tensorResults.empty())
    rewriter.eraseOp(op);
  else
    rewriter.replaceOp(op, res->tensorResults);

  return res;
}

/// Linalg padding pattern.
mlir::linalg::LinalgPaddingPattern::LinalgPaddingPattern(
    MLIRContext *context, LinalgPaddingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgPaddingPattern::LinalgPaddingPattern(
    StringRef opName, MLIRContext *context, LinalgPaddingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)), options(std::move(options)) {}

FailureOr<LinalgOp>
mlir::linalg::LinalgPaddingPattern::returningMatchAndRewrite(
    LinalgOp linalgOp, PatternRewriter &rewriter) const {
  if (!linalgOp.hasTensorSemantics())
    return failure();
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();

  // Pad the operation.
  LinalgOp paddedOp;
  FailureOr<SmallVector<Value>> newResults =
      rewriteAsPaddedOp(rewriter, linalgOp, options.paddingDimensions,
                        options.paddingValues, options.packPaddings, paddedOp);
  if (failed(newResults))
    return failure();

  // Hoist the padding.
  for (const auto &en : enumerate(options.hoistPaddings)) {
    if (static_cast<int64_t>(en.index()) >= paddedOp.getNumInputsAndOutputs())
      break;
    OpOperand *opOperand = &paddedOp->getOpOperand(en.index());
    auto padOp = opOperand->get().getDefiningOp<tensor::PadOp>();
    if (!padOp || en.value() == 0)
      continue;

    // Fail hoisting if the operand shape is not fully static.
    if (llvm::any_of(paddedOp.getShape(opOperand), ShapedType::isDynamic))
      return failure();

    tensor::PadOp hoistedOp;
    SmallVector<GenericOp> transposeOps;
    SmallVector<int64_t> transposeVector =
        en.index() < options.transposePaddings.size()
            ? options.transposePaddings[en.index()]
            : SmallVector<int64_t>{};

    FailureOr<Value> newResult = hoistPaddingOnTensors(
        padOp, en.value(), transposeVector, hoistedOp, transposeOps);
    if (failed(newResult))
      continue;
    rewriter.replaceOp(padOp, *newResult);

    // Do not apply hoist padding to the newly introduced transpose operations.
    for (GenericOp transposeOp : transposeOps)
      filter.replaceLinalgTransformationFilter(rewriter, transposeOp);
  }

  // Replace the original operation that was padded.
  rewriter.replaceOp(linalgOp, *newResults);
  filter.replaceLinalgTransformationFilter(rewriter, paddedOp);

  return paddedOp;
}
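
// A typical configuration pads all operands with zeros and hoists the pads
// out of two loops (usage sketch; one entry per operand, values illustrative):
//
//   LinalgPaddingOptions options;
//   options.paddingValues = {zeroF32, zeroF32, zeroF32};
//   options.packPaddings = {true, true, false};  // Per-operand nofold flags.
//   options.hoistPaddings = {2, 2, 0};           // Per-operand hoist depths.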

/// Linalg tile and fuse tensor ops pattern.
mlir::linalg::LinalgTileAndFuseTensorOpsPattern::
    LinalgTileAndFuseTensorOpsPattern(MLIRContext *context,
                                      LinalgTilingAndFusionOptions options,
                                      LinalgTransformationFilter f,
                                      PatternBenefit benefit)
    : RewritePattern(MatchAnyOpTypeTag(), benefit, context),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgTileAndFuseTensorOpsPattern::
    LinalgTileAndFuseTensorOpsPattern(StringRef opName, MLIRContext *context,
                                      LinalgTilingAndFusionOptions options,
                                      LinalgTransformationFilter f,
                                      PatternBenefit benefit)
    : RewritePattern(opName, benefit, context), filter(std::move(f)),
      options(std::move(options)) {}

FailureOr<mlir::linalg::TileLoopNest>
mlir::linalg::LinalgTileAndFuseTensorOpsPattern::returningMatchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  LinalgOp rootOp = dyn_cast<LinalgOp>(op);
  if (!rootOp)
    return failure();
  if (failed(filter.checkAndNotify(rewriter, op)))
    return failure();

  // Check `tileSizes` contains a tile size for every `rootOp` loop dimension.
  if (options.tileSizes.size() < rootOp.getNumLoops())
    return rewriter.notifyMatchFailure(op, "expect #tile sizes >= #loops");

  // Check `tileInterchange` contains no entries or as many as `tileSizes`.
  if (!options.tileInterchange.empty() &&
      options.tileInterchange.size() != options.tileSizes.size())
    return rewriter.notifyMatchFailure(
        op, "expect the number of tile sizes and interchange dims to match");

  // Copy the `tileSizes` and `tileInterchange` prefixes needed for `rootOp`.
  SmallVector<int64_t> rootTileSizes(options.tileSizes.begin(),
                                     options.tileSizes.begin() +
                                         rootOp.getNumLoops());
  SmallVector<int64_t> rootInterchange =
      options.tileInterchange.empty()
          ? llvm::to_vector<6>(llvm::seq<int64_t>(0, rootOp.getNumLoops()))
          : SmallVector<int64_t>(options.tileInterchange.begin(),
                                 options.tileInterchange.begin() +
                                     rootOp.getNumLoops());

  // Check `rootTileSizes` contains non-zero tile sizes.
  if (llvm::count(rootTileSizes, 0) == static_cast<long>(rootTileSizes.size()))
    return rewriter.notifyMatchFailure(
        op, "expect at least one non-zero tile size");

  // Check `rootInterchange` is a permutation of the `rootOp` loop dimensions.
  // It has to be a permutation since the tiling cannot tile the same loop
  // dimension multiple times.
  if (!isPermutation(rootInterchange))
    return rewriter.notifyMatchFailure(
        op, "expect the tile interchange permutes the root loops");

  // Tile `rootOp` and fuse its producers.
  FailureOr<TileLoopNest> tileLoopNest =
      tileConsumerAndFuseProducers(rewriter, rootOp, rootTileSizes,
                                   rootInterchange, options.tileDistribution);
  if (failed(tileLoopNest))
    return rewriter.notifyMatchFailure(
        op, "tileConsumerAndFuseProducers failed unexpectedly");

  // Replace all uses of the tiled loop operation.
  rootOp->replaceAllUsesWith(tileLoopNest->getRootOpReplacementResults());

  // Apply the filter if specified.
  for (LinalgOp linalgOp : tileLoopNest->getAllTiledAndFusedOps())
    filter.replaceLinalgTransformationFilter(rewriter, linalgOp);
  return tileLoopNest;
}
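
// E.g., tiling a 3-loop root op with sizes (8, 16, 0) while swapping the two
// outer loops (usage sketch; values illustrative):
//
//   LinalgTilingAndFusionOptions options;
//   options.tileSizes = {8, 16, 0};       // Loop 2 is left untiled.
//   options.tileInterchange = {1, 0, 2};  // Must be a permutation.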

/// Linalg generic interchange pattern.
mlir::linalg::GenericOpInterchangePattern::GenericOpInterchangePattern(
    MLIRContext *context, ArrayRef<unsigned> interchangeVector,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpRewritePattern(context, benefit), filter(std::move(f)),
      interchangeVector(interchangeVector.begin(), interchangeVector.end()) {}

FailureOr<GenericOp>
mlir::linalg::GenericOpInterchangePattern::returningMatchAndRewrite(
    GenericOp genericOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, genericOp)))
    return failure();

  FailureOr<GenericOp> transformedOp =
      interchangeGenericOp(rewriter, genericOp, interchangeVector);
  if (failed(transformedOp))
    return failure();

  // Apply the replacement filter if specified.
  filter.replaceLinalgTransformationFilter(rewriter, genericOp);
  return transformedOp;
}
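
// E.g., interchangeVector = {1, 0} permutes the two loops of a generic op;
// the indexing maps and iterator types are updated consistently, so the op's
// semantics are unchanged while its loop order is transposed.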

/// Linalg generalization pattern.
mlir::linalg::LinalgGeneralizationPattern::LinalgGeneralizationPattern(
    MLIRContext *context, LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)) {}

mlir::linalg::LinalgGeneralizationPattern::LinalgGeneralizationPattern(
    StringRef opName, MLIRContext *context, LinalgTransformationFilter f,
    PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)) {}

FailureOr<GenericOp>
mlir::linalg::LinalgGeneralizationPattern::returningMatchAndRewrite(
    LinalgOp linalgOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();
  FailureOr<GenericOp> genericOp = generalizeNamedOp(rewriter, linalgOp);
  if (failed(genericOp))
    return failure();
  filter.replaceLinalgTransformationFilter(rewriter, *genericOp);
  return genericOp;
}
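
// E.g., generalization rewrites a named op into its linalg.generic form
// (IR sketch; maps and body elided):
//
//   %0 = linalg.matmul ins(%a, %b : ...) outs(%c : ...) -> tensor<?x?xf32>
//
// becomes
//
//   %0 = linalg.generic {indexing_maps = [...], iterator_types =
//       ["parallel", "parallel", "reduction"]}
//       ins(%a, %b : ...) outs(%c : ...) { ... } -> tensor<?x?xf32>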

mlir::linalg::LinalgPeelingPattern::LinalgPeelingPattern(
    MLIRContext *context, LinalgTransformationFilter f,
    LinalgPeelOptions options, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgPeelingPattern::LinalgPeelingPattern(
    StringRef opName, MLIRContext *context, LinalgPeelOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)), options(std::move(options)) {}

LogicalResult mlir::linalg::LinalgPeelingPattern::matchAndRewrite(
    LinalgOp linalgOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();

  // Increase marker counter even if peeling doesn't happen for this op.
  filter.replaceLinalgTransformationFilter(rewriter, linalgOp);

  if (!options.loopsToPeelComputationFunction)
    return failure();

  SmallVector<scf::ForOp, 4> loopsToPeel;
  options.loopsToPeelComputationFunction(rewriter, linalgOp, loopsToPeel);
  peelLoops(rewriter, loopsToPeel);
  return success();
}

mlir::linalg::LinalgVectorizationPattern::LinalgVectorizationPattern(
    MLIRContext *context, LinalgTransformationFilter f,
    LinalgVectorizationOptions options, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)) {}

mlir::linalg::LinalgVectorizationPattern::LinalgVectorizationPattern(
    StringRef opName, MLIRContext *context, LinalgVectorizationOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)) {}

LogicalResult mlir::linalg::LinalgVectorizationPattern::matchAndRewrite(
    LinalgOp linalgOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();
  return vectorize(rewriter, linalgOp);
}

LogicalResult mlir::linalg::CopyVectorizationPattern::matchAndRewrite(
    memref::CopyOp copyOp, PatternRewriter &rewriter) const {
  return vectorizeCopy(rewriter, copyOp);
}
LogicalResult mlir::linalg::applyStagedPatterns(
    Operation *op, ArrayRef<FrozenRewritePatternSet> stage1Patterns,
    const FrozenRewritePatternSet &stage2Patterns,
    function_ref<LogicalResult(Operation *)> stage3Lambda) {
  unsigned iteration = 0;
  (void)iteration;
  for (const auto &patterns : stage1Patterns) {
    LLVM_DEBUG(DBGS() << "Before 1st stage, iter: " << ++iteration << "\n"
                      << *op);
    if (failed(applyPatternsAndFoldGreedily(op, patterns))) {
      LLVM_DEBUG(DBGS() << "Underlying 1st stage rewrite did not converge");
      return failure();
    }
    LLVM_DEBUG(DBGS() << "After 1st stage, iter: " << iteration << "\n"
                      << *op);
    if (failed(applyPatternsAndFoldGreedily(op, stage2Patterns))) {
      LLVM_DEBUG(DBGS() << "Underlying 2nd stage rewrite did not converge");
      return failure();
    }
    LLVM_DEBUG(DBGS() << "After 2nd stage, iter: " << iteration << "\n"
                      << *op);
    if (stage3Lambda) {
      if (failed(stage3Lambda(op)))
        return failure();
      LLVM_DEBUG(DBGS() << "After 3rd stage, iter: " << iteration << "\n"
                        << *op);
    }
  }
  return success();
}

static SmallVector<StringRef> getNParallelLoopsAttrs(unsigned nParallelLoops) {
  return SmallVector<StringRef>(nParallelLoops, getParallelIteratorTypeName());
}

/// Rewrite a tensor::PadOp into a sequence of InitTensorOp, FillOp (to
/// initialize with pad_val) and GenericOp (to copy contents).
LogicalResult
PadOpTransformationPattern::matchAndRewrite(tensor::PadOp padOp,
                                            PatternRewriter &rewriter) const {
  auto inputShapedType = padOp.getSource().getType().cast<ShapedType>();
  auto resultShapedType = padOp.getResult().getType().cast<ShapedType>();

  // Bail on non-static shapes.
  if (!inputShapedType.hasStaticShape())
    return failure();
  if (!resultShapedType.hasStaticShape())
    return failure();

  // Only support padding with a constant for now, i.e. either:
  //   1. A BBarg from a different block.
  //   2. A value defined outside of the current block.
  Block &block = padOp.getRegion().front();
  auto yieldOp = cast<tensor::YieldOp>(block.getTerminator());
  Value padValue = yieldOp.getValue();
  Operation *definingOp = padValue.getDefiningOp();
  if (definingOp && definingOp->getBlock() == &block)
    return failure();
  if (!definingOp && padValue.cast<BlockArgument>().getOwner() == &block)
    return failure();

  // Create a tensor with the padded shape.
  Location loc = padOp.getLoc();
  Value initTensor = rewriter.create<InitTensorOp>(
      loc, resultShapedType.getShape(), resultShapedType.getElementType());

  // Initialize the tensor with the pad value.
  Value tmpTensor = rewriter
                        .create<linalg::FillOp>(loc, ValueRange{padValue},
                                                ValueRange{initTensor})
                        .result();

  // Copy the original contents into the new tensor.
  // Uses linalg.generic, but could be done with tensor.insert_slice.
  SmallVector<AffineExpr, 4> outputExprs;
  for (unsigned i = 0; i < resultShapedType.getRank(); ++i) {
    outputExprs.push_back(getAffineDimExpr(i, rewriter.getContext()) +
                          padOp.getStaticLow()[i].cast<IntegerAttr>().getInt());
  }

  SmallVector<AffineMap, 2> transferMaps = {
      rewriter.getMultiDimIdentityMap(inputShapedType.getRank()),
      AffineMap::get(resultShapedType.getRank(),
                     /*symbolCount=*/0, outputExprs, rewriter.getContext())};

  rewriter.replaceOpWithNewOp<linalg::GenericOp>(
      padOp, resultShapedType, padOp.getSource(), tmpTensor, transferMaps,
      getNParallelLoopsAttrs(resultShapedType.getRank()),
      [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
        nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
      });

  return success();
}
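
// E.g., a static tensor.pad is decomposed as follows (IR sketch; shapes
// illustrative):
//
//   %init = linalg.init_tensor [8, 16] : tensor<8x16xf32>
//   %fill = linalg.fill ins(%pad_val : f32) outs(%init : tensor<8x16xf32>)
//             -> tensor<8x16xf32>
//   %0 = linalg.generic ...  // Copies %src into %fill at the low-pad offset.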
73124199f53SMatthias Springer 
73235df2f6fSYi Zhang /// Filling `dest` using FillOp constant padding value if possible.
73335df2f6fSYi Zhang /// Otherwise, generate a tensor::GenerateOp.
createFillOrGenerateOp(PatternRewriter & rewriter,tensor::PadOp padOp,Value dest,const SmallVector<Value> & dynSizes) const734fd0c6f53SAlexander Belyaev Value GeneralizePadOpPattern::createFillOrGenerateOp(
735fd0c6f53SAlexander Belyaev     PatternRewriter &rewriter, tensor::PadOp padOp, Value dest,
73635df2f6fSYi Zhang     const SmallVector<Value> &dynSizes) const {
73735df2f6fSYi Zhang   auto padValue = padOp.getConstantPaddingValue();
73835df2f6fSYi Zhang   if (padValue)
73935df2f6fSYi Zhang     return rewriter.create<FillOp>(padOp.getLoc(), padValue, dest).result();
74035df2f6fSYi Zhang 
74135df2f6fSYi Zhang   // Fill could not be optimized: Lower to tensor::GenerateOp with region.
74235df2f6fSYi Zhang   auto generateOp = rewriter.create<tensor::GenerateOp>(
74335df2f6fSYi Zhang       padOp.getLoc(), padOp.getResultType(), dynSizes);
74435df2f6fSYi Zhang   // Copy region to new op.
74535df2f6fSYi Zhang   BlockAndValueMapping bvm;
74604235d07SJacques Pienaar   padOp.getRegion().cloneInto(&generateOp.getRegion(), bvm);
74735df2f6fSYi Zhang   return generateOp;
74835df2f6fSYi Zhang }
74935df2f6fSYi Zhang 
75035df2f6fSYi Zhang LogicalResult
matchAndRewrite(tensor::PadOp padOp,PatternRewriter & rewriter) const751fd0c6f53SAlexander Belyaev GeneralizePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
75235df2f6fSYi Zhang                                         PatternRewriter &rewriter) const {
75335df2f6fSYi Zhang   // Given an OpFoldResult, return an index-typed value.
75435df2f6fSYi Zhang   auto getIdxValue = [&](OpFoldResult ofr) {
75535df2f6fSYi Zhang     if (auto val = ofr.dyn_cast<Value>())
75635df2f6fSYi Zhang       return val;
75735df2f6fSYi Zhang     return rewriter
758a54f4eaeSMogball         .create<arith::ConstantIndexOp>(
75935df2f6fSYi Zhang             padOp.getLoc(), ofr.get<Attribute>().cast<IntegerAttr>().getInt())
76035df2f6fSYi Zhang         .getResult();
76135df2f6fSYi Zhang   };
76235df2f6fSYi Zhang 
76335df2f6fSYi Zhang   auto resultType = padOp.getResultType();
76435df2f6fSYi Zhang   // Compute size of InitTensorOp. Any combination of static/dynamic is
76535df2f6fSYi Zhang   // supported.
76635df2f6fSYi Zhang   SmallVector<Value> dynSizes;
76735df2f6fSYi Zhang   SmallVector<int64_t> staticSizes;
76835df2f6fSYi Zhang   for (unsigned dim = 0; dim < resultType.getRank(); ++dim) {
76935df2f6fSYi Zhang     if (resultType.isDynamicDim(dim)) {
77004235d07SJacques Pienaar       auto srcSize = rewriter.createOrFold<tensor::DimOp>(
77104235d07SJacques Pienaar           padOp.getLoc(), padOp.getSource(), dim);
77235df2f6fSYi Zhang       // Add low and high padding value.
773a54f4eaeSMogball       auto plusLow = rewriter.createOrFold<arith::AddIOp>(
77435df2f6fSYi Zhang           padOp.getLoc(), srcSize, getIdxValue(padOp.getMixedLowPad()[dim]));
775a54f4eaeSMogball       auto plusHigh = rewriter.createOrFold<arith::AddIOp>(
77635df2f6fSYi Zhang           padOp.getLoc(), plusLow, getIdxValue(padOp.getMixedHighPad()[dim]));
77735df2f6fSYi Zhang       dynSizes.push_back(plusHigh);
77835df2f6fSYi Zhang     }
77935df2f6fSYi Zhang     staticSizes.push_back(resultType.getDimSize(dim));
78035df2f6fSYi Zhang   }
78135df2f6fSYi Zhang 
78235df2f6fSYi Zhang   // Init tensor and fill it with padding.
78335df2f6fSYi Zhang   Value init = rewriter.create<InitTensorOp>(
78435df2f6fSYi Zhang       padOp.getLoc(), dynSizes, staticSizes, resultType.getElementType());
78535df2f6fSYi Zhang   Value fill = createFillOrGenerateOp(rewriter, padOp, init, dynSizes);
78635df2f6fSYi Zhang 
78735df2f6fSYi Zhang   // Try to optimize the copy of the source.
78835df2f6fSYi Zhang   if (optimizeCopyFn && optimizeCopyFn(rewriter, padOp, fill).succeeded())
78935df2f6fSYi Zhang     return success();
79035df2f6fSYi Zhang 
791fd0c6f53SAlexander Belyaev   // The copy could not be optimized. Generate an InsertSliceOp instead
79235df2f6fSYi Zhang   // to copy the PadOp source.
79335df2f6fSYi Zhang   auto sourceType = padOp.getSourceType();
794fd0c6f53SAlexander Belyaev   // Compute the size of the tensor::PadOp source.
79535df2f6fSYi Zhang   SmallVector<OpFoldResult> srcSizes;
79635df2f6fSYi Zhang   for (unsigned dim = 0; dim < sourceType.getRank(); ++dim) {
79735df2f6fSYi Zhang     if (sourceType.isDynamicDim(dim)) {
79835df2f6fSYi Zhang       srcSizes.push_back(rewriter.createOrFold<tensor::DimOp>(
79904235d07SJacques Pienaar           padOp.getLoc(), padOp.getSource(), dim));
80035df2f6fSYi Zhang     } else {
80135df2f6fSYi Zhang       srcSizes.push_back(rewriter.getIndexAttr(sourceType.getDimSize(dim)));
80235df2f6fSYi Zhang     }
80335df2f6fSYi Zhang   }
80435df2f6fSYi Zhang   // Strides of InsertSliceOp are all 1.
80535df2f6fSYi Zhang   SmallVector<OpFoldResult> strides(sourceType.getRank(),
80635df2f6fSYi Zhang                                     rewriter.getIndexAttr(1));
80735df2f6fSYi Zhang   rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
80804235d07SJacques Pienaar       padOp, padOp.getSource(), fill, padOp.getMixedLowPad(), srcSizes,
80904235d07SJacques Pienaar       strides);
81035df2f6fSYi Zhang 
81135df2f6fSYi Zhang   return success();
81235df2f6fSYi Zhang }
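// Net effect of the pattern above, as an illustrative IR sketch (symbolic
// shapes and names; assumes a constant padding value %cst):
//   %0 = tensor.pad %src low[%l0, %l1] high[%h0, %h1] { tensor.yield %cst }
// becomes:
//   %init = linalg.init_tensor [...] : tensor<?x?xf32>
//   %fill = linalg.fill ins(%cst : f32) outs(%init : tensor<?x?xf32>)
//   %res  = tensor.insert_slice %src into %fill[%l0, %l1] [...] [1, 1]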
81335df2f6fSYi Zhang 
814060208b4SMatthias Springer LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
815060208b4SMatthias Springer     tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
816060208b4SMatthias Springer   if (!sliceOp.hasUnitStride())
81724199f53SMatthias Springer     return failure();
81824199f53SMatthias Springer 
81904235d07SJacques Pienaar   auto padOp = sliceOp.getSource().getDefiningOp<tensor::PadOp>();
8200edb4127SLei Zhang   if (!padOp)
8210edb4127SLei Zhang     return failure();
8220edb4127SLei Zhang 
8230edb4127SLei Zhang   bool zeroSliceGuard = true;
8240edb4127SLei Zhang   if (controlFn) {
8250edb4127SLei Zhang     if (Optional<bool> control = controlFn(sliceOp))
8266d5fc1e3SKazu Hirata       zeroSliceGuard = *control;
8270edb4127SLei Zhang     else
8280edb4127SLei Zhang       return failure();
8290edb4127SLei Zhang   }
8300edb4127SLei Zhang 
83161ba9f91SNicolas Vasilache   Operation *tiledPadOp =
8320edb4127SLei Zhang       tensor::bubbleUpPadSlice(rewriter, padOp, sliceOp.getMixedOffsets(),
8330edb4127SLei Zhang                                sliceOp.getMixedSizes(), zeroSliceGuard);
83424199f53SMatthias Springer   // Replace the original extract_slice(pad(x)) with the bubbled-up
8350edb4127SLei Zhang   // pad(extract_slice(x)) (or a degenerate form thereof).
836ba72cfe7SMaheshRavishankar   rewriter.replaceOp(sliceOp, tiledPadOp->getResults());
83724199f53SMatthias Springer   return success();
83824199f53SMatthias Springer }
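// Illustrative sketch of the swap performed above (symbolic offsets/sizes):
//   %0 = tensor.pad %src ...
//   %1 = tensor.extract_slice %0[%off][%sz][1, 1]
// is rewritten so the slice applies to %src first and only the result is
// padded; when the requested slice may read no source data at all,
// `zeroSliceGuard` determines whether the generated code guards that case.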
8397b615a87SLei Zhang 
8407b615a87SLei Zhang // The following are patterns for downscaling convolution ops with size-1
8417b615a87SLei Zhang // window dimensions.
8427b615a87SLei Zhang //
8437b615a87SLei Zhang // Note that we'd eventually want to write such transformations in a generic
8447b615a87SLei Zhang // way, e.g., converting to linalg.generic, removing the size-1 dimensions,
8457b615a87SLei Zhang // and then turning back to named ops. But for now it's fine to have a few
8467b615a87SLei Zhang // patterns matching special ops to get started.
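// E.g. (illustrative, symbolic shapes): a linalg.conv_2d_nhwc_hwcf with a
// 1 x KW kernel and OH == 1,
//   ins(%input : tensor<Nx1xWxCxf32>, %kernel : tensor<1xKWxCxFxf32>)
// reads no data along H and can be rewritten as a linalg.conv_1d_nwc_wcf,
//   ins(%input : tensor<NxWxCxf32>, %kernel : tensor<KWxCxFxf32>)
// on operands with the size-1 dimension sliced away.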
8477b615a87SLei Zhang 
848ce2e198bSAlex Zinenko FailureOr<Conv1DNwcWcfOp>
849ce2e198bSAlex Zinenko DownscaleSizeOneWindowed2DConvolution::returningMatchAndRewrite(
850ce2e198bSAlex Zinenko     linalg::Conv2DNhwcHwcfOp convOp, PatternRewriter &rewriter) const {
85198dbcff1Sgysit   if (failed(filter.checkAndNotify(rewriter, convOp)))
85298dbcff1Sgysit     return failure();
85398dbcff1Sgysit   if (convOp.hasBufferSemantics())
854ce2e198bSAlex Zinenko     return failure(); // To be implemented.
8557b615a87SLei Zhang 
8567b615a87SLei Zhang   Value input = convOp.inputs().front();
85798dbcff1Sgysit   Value kernel = convOp.inputs().back();
8587b615a87SLei Zhang   Value output = convOp.outputs().front();
8597b615a87SLei Zhang 
8607b615a87SLei Zhang   auto inputType = input.getType().dyn_cast<RankedTensorType>();
86198dbcff1Sgysit   auto kernelType = kernel.getType().dyn_cast<RankedTensorType>();
8627b615a87SLei Zhang   auto outputType = output.getType().dyn_cast<RankedTensorType>();
8637b615a87SLei Zhang 
86498dbcff1Sgysit   auto kernelShape = kernelType.getShape();
8657b615a87SLei Zhang   auto outputShape = outputType.getShape();
8667b615a87SLei Zhang 
8677b615a87SLei Zhang   // Only handle the case where at least one of the window dimensions is
8687b615a87SLei Zhang   // of size 1. Other cases can rely on tiling to reduce to such cases.
86998dbcff1Sgysit   int64_t khSize = kernelShape[0], kwSize = kernelShape[1];
8707b615a87SLei Zhang   int64_t ohSize = outputShape[1], owSize = outputShape[2];
87198dbcff1Sgysit   bool removeH = (khSize == 1 && ohSize == 1);
87298dbcff1Sgysit   bool removeW = (kwSize == 1 && owSize == 1);
873aa373180SNicolas Vasilache   if (!removeH && !removeW)
8747b615a87SLei Zhang     return failure();
8757b615a87SLei Zhang 
8767b615a87SLei Zhang   // Get new shapes and types for all operands by removing the size-1
8777b615a87SLei Zhang   // dimension.
878aa373180SNicolas Vasilache   using RTTBuilder = RankedTensorType::Builder;
879789c88e8SNicolas Vasilache   RankedTensorType newInputType =
880789c88e8SNicolas Vasilache       RTTBuilder(inputType).dropDim((removeH ? 1 : 2));
88198dbcff1Sgysit   RankedTensorType newKernelType =
88298dbcff1Sgysit       RTTBuilder(kernelType).dropDim((removeH ? 0 : 1));
883789c88e8SNicolas Vasilache   RankedTensorType newOutputType =
884789c88e8SNicolas Vasilache       RTTBuilder(outputType).dropDim(removeH ? 1 : 2);
8857b615a87SLei Zhang 
886aa373180SNicolas Vasilache   // Rank-reduce operands.
8877b615a87SLei Zhang   Location loc = convOp.getLoc();
888aa373180SNicolas Vasilache   Value newInput = tensor::createCanonicalRankReducingExtractSliceOp(
889aa373180SNicolas Vasilache       rewriter, loc, input, newInputType);
89098dbcff1Sgysit   Value newKernel = tensor::createCanonicalRankReducingExtractSliceOp(
89198dbcff1Sgysit       rewriter, loc, kernel, newKernelType);
892aa373180SNicolas Vasilache   Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
893aa373180SNicolas Vasilache       rewriter, loc, output, newOutputType);
8947b615a87SLei Zhang 
895aa373180SNicolas Vasilache   // Rank-reduce strides and dilations too.
896aa373180SNicolas Vasilache   // TODO: dropDim 1-liner helper.
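  // E.g., with removeH the entry at index 0 (the H dimension) is erased:
  // strides [sh, sw] become [sw]; dilations are reduced the same way.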
897aa373180SNicolas Vasilache   auto strides = llvm::to_vector<4>(convOp.strides().getValues<int64_t>());
898aa373180SNicolas Vasilache   strides.erase(strides.begin() + (removeH ? 0 : 1));
899aa373180SNicolas Vasilache   auto stridesAttr = rewriter.getI64VectorAttr(strides);
900aa373180SNicolas Vasilache 
901ce2e198bSAlex Zinenko   auto dilations = llvm::to_vector<4>(convOp.dilations().getValues<int64_t>());
902aa373180SNicolas Vasilache   dilations.erase(dilations.begin() + (removeH ? 0 : 1));
903aa373180SNicolas Vasilache   auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
9047b615a87SLei Zhang 
9057b615a87SLei Zhang   auto conv1DOp = rewriter.create<linalg::Conv1DNwcWcfOp>(
90698dbcff1Sgysit       loc, newOutputType, ValueRange{newInput, newKernel},
9077b615a87SLei Zhang       ValueRange{newOutput}, stridesAttr, dilationsAttr);
9087b615a87SLei Zhang 
909aa373180SNicolas Vasilache   // Insert back.
910aa373180SNicolas Vasilache   Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
911aa373180SNicolas Vasilache       rewriter, loc, conv1DOp.getResult(0), output);
912aa373180SNicolas Vasilache   rewriter.replaceOp(convOp, inserted);
913aa373180SNicolas Vasilache 
91498dbcff1Sgysit   filter.replaceLinalgTransformationFilter(rewriter, conv1DOp);
915ce2e198bSAlex Zinenko   return conv1DOp;
916ce2e198bSAlex Zinenko }
91798dbcff1Sgysit 
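/// Variant of the pattern above for depthwise convolutions: a
/// depthwise_conv_2d_nhwc_hwc whose H (or W) window dimension is statically
/// of size 1 is rewritten as a depthwise_conv_1d_nwc_wc on rank-reduced
/// operands.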
918ce2e198bSAlex Zinenko FailureOr<DepthwiseConv1DNwcWcOp>
919ce2e198bSAlex Zinenko DownscaleDepthwiseConv2DNhwcHwcOp::returningMatchAndRewrite(
920ce2e198bSAlex Zinenko     DepthwiseConv2DNhwcHwcOp convOp, PatternRewriter &rewriter) const {
92198dbcff1Sgysit   if (failed(filter.checkAndNotify(rewriter, convOp)))
92298dbcff1Sgysit     return failure();
92398dbcff1Sgysit   if (convOp.hasBufferSemantics())
924ce2e198bSAlex Zinenko     return failure(); // To be implemented.
925b828506eSNicolas Vasilache 
926b828506eSNicolas Vasilache   Value input = convOp.inputs().front();
927b828506eSNicolas Vasilache   Value kernel = convOp.inputs().back();
928b828506eSNicolas Vasilache   Value output = convOp.outputs().front();
929b828506eSNicolas Vasilache 
930b828506eSNicolas Vasilache   auto inputType = input.getType().dyn_cast<RankedTensorType>();
931b828506eSNicolas Vasilache   auto kernelType = kernel.getType().dyn_cast<RankedTensorType>();
932b828506eSNicolas Vasilache   auto outputType = output.getType().dyn_cast<RankedTensorType>();
933b828506eSNicolas Vasilache 
934b828506eSNicolas Vasilache   auto kernelShape = kernelType.getShape();
935b828506eSNicolas Vasilache   auto outputShape = outputType.getShape();
936b828506eSNicolas Vasilache 
937b828506eSNicolas Vasilache   // Only handle the case where at least one of the window dimensions is
938b828506eSNicolas Vasilache   // of size 1. Other cases can rely on tiling to reduce to such cases.
939b828506eSNicolas Vasilache   int64_t khSize = kernelShape[0], kwSize = kernelShape[1];
940b828506eSNicolas Vasilache   int64_t ohSize = outputShape[1], owSize = outputShape[2];
941b828506eSNicolas Vasilache   bool removeH = (khSize == 1 && ohSize == 1);
942b828506eSNicolas Vasilache   bool removeW = (kwSize == 1 && owSize == 1);
943b828506eSNicolas Vasilache   if (!removeH && !removeW)
944b828506eSNicolas Vasilache     return failure();
945b828506eSNicolas Vasilache 
946b828506eSNicolas Vasilache   // Get new shapes and types for all operands by removing the size-1
947b828506eSNicolas Vasilache   // dimension.
948b828506eSNicolas Vasilache   using RTTBuilder = RankedTensorType::Builder;
949789c88e8SNicolas Vasilache   RankedTensorType newInputType =
950789c88e8SNicolas Vasilache       RTTBuilder(inputType).dropDim((removeH ? 1 : 2));
951789c88e8SNicolas Vasilache   RankedTensorType newKernelType =
952789c88e8SNicolas Vasilache       RTTBuilder(kernelType).dropDim((removeH ? 0 : 1));
953789c88e8SNicolas Vasilache   RankedTensorType newOutputType =
954789c88e8SNicolas Vasilache       RTTBuilder(outputType).dropDim(removeH ? 1 : 2);
955b828506eSNicolas Vasilache 
956b828506eSNicolas Vasilache   // Rank-reduce operands.
957b828506eSNicolas Vasilache   Location loc = convOp.getLoc();
958b828506eSNicolas Vasilache   Value newInput = tensor::createCanonicalRankReducingExtractSliceOp(
959b828506eSNicolas Vasilache       rewriter, loc, input, newInputType);
960b828506eSNicolas Vasilache   Value newKernel = tensor::createCanonicalRankReducingExtractSliceOp(
961b828506eSNicolas Vasilache       rewriter, loc, kernel, newKernelType);
962b828506eSNicolas Vasilache   Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
963b828506eSNicolas Vasilache       rewriter, loc, output, newOutputType);
964b828506eSNicolas Vasilache 
965b828506eSNicolas Vasilache   // Rank-reduce strides and dilations too.
966b828506eSNicolas Vasilache   // TODO: dropDim 1-liner helper.
967b828506eSNicolas Vasilache   auto strides = llvm::to_vector<4>(convOp.strides().getValues<int64_t>());
968b828506eSNicolas Vasilache   strides.erase(strides.begin() + (removeH ? 0 : 1));
969b828506eSNicolas Vasilache   auto stridesAttr = rewriter.getI64VectorAttr(strides);
970b828506eSNicolas Vasilache 
971ce2e198bSAlex Zinenko   auto dilations = llvm::to_vector<4>(convOp.dilations().getValues<int64_t>());
972b828506eSNicolas Vasilache   dilations.erase(dilations.begin() + (removeH ? 0 : 1));
973b828506eSNicolas Vasilache   auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
974b828506eSNicolas Vasilache 
975b828506eSNicolas Vasilache   auto conv1DOp = rewriter.create<DepthwiseConv1DNwcWcOp>(
976b828506eSNicolas Vasilache       loc, newOutputType, ValueRange{newInput, newKernel},
977b828506eSNicolas Vasilache       ValueRange{newOutput}, stridesAttr, dilationsAttr);
978b828506eSNicolas Vasilache 
979b828506eSNicolas Vasilache   // Insert back.
980b828506eSNicolas Vasilache   Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
981b828506eSNicolas Vasilache       rewriter, loc, conv1DOp.getResult(0), output);
982b828506eSNicolas Vasilache   rewriter.replaceOp(convOp, inserted);
983b828506eSNicolas Vasilache 
98498dbcff1Sgysit   filter.replaceLinalgTransformationFilter(rewriter, conv1DOp);
985ce2e198bSAlex Zinenko   return conv1DOp;
986ce2e198bSAlex Zinenko }
9877b615a87SLei Zhang 
98898dbcff1Sgysit void linalg::populateDecomposeConvolutionPatterns(
9891fc096afSMehdi Amini     RewritePatternSet &patterns, const LinalgTransformationFilter &filter,
9907b615a87SLei Zhang     PatternBenefit benefit) {
991b828506eSNicolas Vasilache   patterns.add<DownscaleSizeOneWindowed2DConvolution,
99298dbcff1Sgysit                DownscaleDepthwiseConv2DNhwcHwcOp>(patterns.getContext(), filter,
9937b615a87SLei Zhang                                                   benefit);
9947b615a87SLei Zhang }
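// Minimal usage sketch (illustrative; `funcOp` is a hypothetical enclosing
// op, and the default-constructed filter is an assumption, not prescribed
// usage):
//   RewritePatternSet patterns(funcOp.getContext());
//   linalg::populateDecomposeConvolutionPatterns(
//       patterns, LinalgTransformationFilter(), /*benefit=*/1);
//   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));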
995