//===- Transforms.cpp - Linalg transformations as patterns ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements logic and helpers to expose Linalg transforms as rewrite
// patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/HoistPadding.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/IR/TensorTilingInterfaceImpl.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>
#include <utility>

#define DEBUG_TYPE "linalg-transforms"

using namespace mlir;
using namespace mlir::linalg;

#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE << "]: ")

//===----------------------------------------------------------------------===//
// Transformations exposed as rewrite patterns.
//===----------------------------------------------------------------------===//
// Marker used as attribute name in generated Linalg rewriting transformations.
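//
// For illustration only (hypothetical IR): a transformation filter matches
// ops carrying this attribute with an expected value, e.g.
//
//   %0 = linalg.matmul {__internal_linalg_transform__ = "tile"}
//          ins(%a, %b : tensor<?x?xf32>, tensor<?x?xf32>)
//          outs(%c : tensor<?x?xf32>) -> tensor<?x?xf32>
//
// and, after a successful rewrite, replaces the marker (e.g. with "tiled") or
// removes it so the pattern does not fire again on the result.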
const StringLiteral mlir::linalg::LinalgTransforms::kLinalgTransformMarker =
    "__internal_linalg_transform__";

mlir::linalg::LinalgTransformationFilter::LinalgTransformationFilter(
    ArrayRef<StringAttr> matchDisjunction, Optional<StringAttr> replacement)
    : matchDisjunction(matchDisjunction.begin(), matchDisjunction.end()),
      replacement(replacement), matchByDefault(false) {}

mlir::linalg::LinalgTransformationFilter::LinalgTransformationFilter(
    const FilterFunction &f, ArrayRef<StringAttr> matchDisjunction,
    Optional<StringAttr> replacement)
    : filters(),
      matchDisjunction(matchDisjunction.begin(), matchDisjunction.end()),
      replacement(replacement), matchByDefault(false) {
  if (f)
    filters.push_back(f);
}

LogicalResult mlir::linalg::LinalgTransformationFilter::checkAndNotify(
    PatternRewriter &rewriter, Operation *op) const {
  if (llvm::any_of(filters,
                   [&](const FilterFunction &f) { return failed(f(op)); }))
    return failure();

  auto attr = op->template getAttrOfType<StringAttr>(
      LinalgTransforms::kLinalgTransformMarker);

  if (!attr) {
    // 1. Has no filter and `matchDisjunction` is empty (or matching by
    // default was requested).
    if (matchDisjunction.empty() || matchByDefault)
      return success();

    // 2. Has no filter but was expecting a filter.
    return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
      diag << " does not have any filter from list: ";
      interleaveComma(matchDisjunction, diag);
    });
  }

  // 3. Match explicit filter.
  for (auto filter : matchDisjunction)
    if (attr.getValue() == filter)
      return success();

  // 4. Fail to match.
  return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
    diag << " does not have any filter from list: ";
    interleaveComma(matchDisjunction, diag);
  });
}

void mlir::linalg::LinalgTransformationFilter::
    replaceLinalgTransformationFilter(PatternRewriter &rewriter,
                                      Operation *op) const {
  if (replacement.hasValue())
    op->setAttr(LinalgTransforms::kLinalgTransformMarker,
                replacement.getValue());
  else
    op->removeAttr(
        rewriter.getStringAttr(LinalgTransforms::kLinalgTransformMarker));
}

bool mlir::linalg::LinalgTransformationFilter::hasReplacementFilter(
    Operation *op) const {
  if (!replacement)
    return false;
  auto attr = op->getAttr(LinalgTransforms::kLinalgTransformMarker)
                  .dyn_cast<StringAttr>();
  return attr && attr == replacement.getValue();
}

LinalgTilingOptions &
mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
  assert(!tileSizeComputationFunction && "tile sizes already set");
  SmallVector<int64_t, 4> tileSizes(ts.begin(), ts.end());
  tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
    OpBuilder::InsertionGuard guard(b);
    b.setInsertionPointToStart(
        &op->getParentOfType<FuncOp>().getBody().front());
    return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
      Value v = b.create<arith::ConstantIndexOp>(op->getLoc(), s);
      return v;
    }));
  };
  return *this;
}
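
// Minimal usage sketch for the tiling options above, assuming an existing
// `MLIRContext *ctx` and `FuncOp funcOp` (both hypothetical names, not part
// of this file):
//
//   LinalgTilingOptions options;
//   options.setTileSizes({8, 16, 32});
//   RewritePatternSet tilingPatterns(ctx);
//   tilingPatterns.add<LinalgTilingPattern>(
//       ctx, options,
//       LinalgTransformationFilter(StringAttr::get(ctx, "tile"),
//                                  StringAttr::get(ctx, "tiled")));
//   (void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPatterns));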

LinalgTilingOptions &mlir::linalg::LinalgTilingOptions::scalarizeDynamicDims() {
  assert(!tileSizeComputationFunction && "tile sizes already set");
  tileSizeComputationFunction = [](OpBuilder &b, Operation *op) {
    SmallVector<Value, 4> tileSizes;
    auto linalgOp = dyn_cast<LinalgOp>(op);
    if (!linalgOp)
      return tileSizes;
    Location loc = linalgOp.getLoc();
    auto allShapeSizes = linalgOp.createFlatListOfOperandDims(b, loc);
    AffineMap map = linalgOp.getShapesToLoopsMap();
    if (!map)
      return tileSizes;
    auto shapeSizes = applyMapToValues(b, loc, map, allShapeSizes);
    // If the shape size is dynamic, tile by 1. Otherwise, do not tile (tile
    // size 0).
    for (Value shapeSize : shapeSizes)
      tileSizes.push_back(getConstantIntValue(shapeSize).hasValue()
                              ? b.create<arith::ConstantIndexOp>(loc, 0)
                              : b.create<arith::ConstantIndexOp>(loc, 1));
    return tileSizes;
  };
  return *this;
}
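
// For example, given a linalg op whose loop ranges are (%d, 16) with %d
// dynamic, the function above computes the tile sizes (1, 0): the dynamic
// dimension is scalarized to size 1 while the static dimension is left
// untiled.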

/// Helper function that tries to pad `opOperand`. Exits early for scalar
/// operands, if `paddingFunc` returns failure, or if `opOperand` is not
/// defined by an ExtractSliceOp. Otherwise, tries to pad the operand even if
/// it already has a static shape. Sets `result` to the result of the created
/// tensor::PadOp and returns success if the operand either has been padded to
/// a static shape or already had a static shape; returns failure otherwise.
static LogicalResult padOperandToSmallestStaticBoundingBox(
    OpBuilder &b, linalg::LinalgOp opToPad, OpOperand *opOperand,
    const PaddingValueComputationFunction &paddingFunc,
    const PaddingNoFoldComputationFunction &nofoldFunc, Value &result) {
  // Get the shape of the operand and check if it has a dynamic shape. Only
  // return failure if the operand is not a scalar and has a dynamic shape.
  ArrayRef<int64_t> shape = opToPad.getShape(opOperand);
  bool hasDynamicShape = llvm::is_contained(shape, ShapedType::kDynamicSize);

  // Cannot pad scalar operands.
  if (shape.empty())
    return success();

  // Cannot pad if the padding value is unknown.
  FailureOr<Value> paddingValue = paddingFunc(b, *opOperand);
  if (failed(paddingValue))
    return failure(hasDynamicShape);

  // Cannot construct a static bounding box if the operand is not defined by an
  // ExtractSliceOp.
  auto sliceOp = opOperand->get().getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp)
    return failure(hasDynamicShape);

  // Compute the dropped dimensions if `sliceOp` is rank-reducing.
  llvm::SmallBitVector droppedDims = sliceOp.getDroppedDims();

  // Upper bound the `sliceOp` sizes to obtain a static bounding box.
  SmallVector<int64_t> staticSizes;
  staticSizes.reserve(shape.size());
  auto shapedOp = cast<OffsetSizeAndStrideOpInterface>(sliceOp.getOperation());
  for (const auto &en : enumerate(shapedOp.getMixedSizes())) {
    // Skip dropped dimensions.
    if (droppedDims.test(en.index()))
      continue;
    // If the size is an attribute, add it directly to `staticSizes`.
    if (en.value().is<Attribute>()) {
      staticSizes.push_back(
          en.value().get<Attribute>().dyn_cast<IntegerAttr>().getInt());
      continue;
    }
    // Otherwise, try to compute a constant upper bound for the size value.
    FailureOr<int64_t> upperBound =
        getConstantUpperBoundForIndex(en.value().get<Value>());
    if (failed(upperBound)) {
      LLVM_DEBUG(DBGS()
                 << "No constant bounding box can be found for padding\n");
      return failure();
    }
    staticSizes.push_back(upperBound.getValue());
  }
  assert(staticSizes.size() == shape.size() &&
         "expect the dynamic and static ranks to match");

  // Pad the operand to the bounding box defined by `staticSizes`.
  auto staticTensorType = RankedTensorType::get(
      staticSizes, getElementTypeOrSelf(opOperand->get()));
  bool nofold = nofoldFunc ? nofoldFunc(*opOperand) : false;
  result =
      makeComposedPadHighOp(b, opToPad->getLoc(), staticTensorType,
                            opOperand->get(), paddingValue.getValue(), nofold);
  return success();
}
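
// As an illustration of the bounding box computation above, consider a slice
// with one dynamic size (hypothetical IR):
//
//   %s = tensor.extract_slice %t[0, 0] [%min, 8] [1, 1]
//       : tensor<16x8xf32> to tensor<?x8xf32>
//
// If %min has a provable constant upper bound of 16 (e.g. it results from an
// affine.min with a constant operand), the operand is padded with tensor.pad
// to the static bounding box tensor<16x8xf32>, so the consuming op can be
// cloned with statically shaped operands.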

FailureOr<SmallVector<Value>>
linalg::rewriteAsPaddedOp(OpBuilder &b, LinalgOp opToPad,
                          const PaddingValueComputationFunction &paddingFunc,
                          const PaddingNoFoldComputationFunction &nofoldFunc,
                          LinalgOp &paddedOp) {
  Location loc = opToPad->getLoc();

  // TODO: there are cases where we may still want to pad to larger sizes.
  assert(opToPad.hasTensorSemantics() &&
         "expected operation to have tensor semantics");

  OpBuilder::InsertionGuard g(b);
  // Set IP after op because we also take the dims of the original output.
  b.setInsertionPointAfter(opToPad);
  // Make a copy of the shaped operands and update it.
  SmallVector<Value> newOperands;
  newOperands.reserve(opToPad.getNumInputsAndOutputs());
  for (OpOperand *opOperand : opToPad.getInputAndOutputOperands()) {
    Value paddedOperand;
    // If padding was requested but the shape cannot be bounded statically,
    // then the pattern fails to apply.
    if (failed(padOperandToSmallestStaticBoundingBox(
            b, opToPad, opOperand, paddingFunc, nofoldFunc, paddedOperand)))
      return failure();
    newOperands.push_back(paddedOperand ? paddedOperand : opOperand->get());
  }

  SmallVector<SmallVector<Value>> reifiedResultShapes;
  if (failed(cast<ReifyRankedShapedTypeOpInterface>(opToPad.getOperation())
                 .reifyResultShapes(b, reifiedResultShapes)))
    return failure();
  assert(reifiedResultShapes.size() == opToPad->getNumResults() &&
         "expected same number of results");

  // Clone `opToPad` to operate on the statically padded shapes.
  auto resultTensorTypes =
      ValueRange(newOperands).take_back(opToPad.getNumOutputs()).getTypes();
  paddedOp = opToPad.clone(b, loc, resultTensorTypes, newOperands);

  // Recover the slice out of the new static results. This keeps the original
  // linalg op around because it uses the dims of the original results.
  SmallVector<Value> paddedSubviewResults;
  paddedSubviewResults.reserve(opToPad->getNumResults());
  for (const auto &en : llvm::enumerate(paddedOp->getResults())) {
    Value paddedResult = en.value();
    int64_t resultNumber = en.index();
    int64_t rank = paddedResult.getType().cast<RankedTensorType>().getRank();
    SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
    SmallVector<OpFoldResult> sizes;
    for (Value v : reifiedResultShapes[resultNumber])
      sizes.push_back(getAsOpFoldResult(v));
    SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
    paddedSubviewResults.push_back(b.create<tensor::ExtractSliceOp>(
        loc, paddedResult, offsets, sizes, strides));
  }
  return paddedSubviewResults;
}

/// Try to peel a loop `op` and return the new result.
// TODO: Add support for scf.parallel and affine.for loops.
static SmallVector<Value, 4> peelLoop(RewriterBase &rewriter, Operation *op) {
  return llvm::TypeSwitch<Operation *, SmallVector<Value, 4>>(op)
      .Case<scf::ForOp>([&](scf::ForOp forOp) {
        scf::ForOp partialIteration;
        if (succeeded(scf::peelAndCanonicalizeForLoop(rewriter, forOp,
                                                      partialIteration)))
          return partialIteration->getResults();
        assert(!partialIteration && "expected that loop was not peeled");
        return forOp->getResults();
      })
      .Default([&](Operation *op) { return op->getResults(); });
}

/// Peel loops after tiling.
void mlir::linalg::peelTiledLinalgOp(RewriterBase &rewriter, TiledLinalgOp &res,
                                     ArrayRef<int64_t> peeledLoops,
                                     LinalgTilingLoopType loopType) {
  for (int64_t loop : peeledLoops) {
    assert(loop < static_cast<int64_t>(res.loops.size()) &&
           "requested peeling of non-existing loop");
    SmallVector<Value, 4> loopResults;
    Operation *loopOp = res.loops[loop];
    loopResults = peelLoop(rewriter, loopOp);

    // The result of the loop nest may change with peeling.
    if (res.tensorResults.size() == loopOp->getNumResults() &&
        std::equal(res.tensorResults.begin(), res.tensorResults.end(),
                   loopOp->getResults().begin()))
      res.tensorResults = loopResults;
  }
}
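
// Peeling an scf.for loop splits off the last, possibly partial, iteration so
// the main loop only executes full tiles. Schematically (hypothetical IR,
// where %split is the largest lb + k * step that is <= ub):
//
//   scf.for %i = %lb to %split step %step { ... full tiles only ... }
//   scf.for %i = %split to %ub step %step { ... partial tile ... }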

static ValueRange getTiledOpResult(TiledLinalgOp tiledOp) {
  if (tiledOp.loops.empty())
    return tiledOp.op.getOperation()->getResults();
  return tiledOp.loops.front()->getResults();
}

static ValueRange
getTiledAndFusedOpResult(TiledAndFusedLinalgOps tiledAndFusedOp) {
  if (tiledAndFusedOp.fusedLoops.empty())
    return tiledAndFusedOp.op.getOperation()->getResults();
  return tiledAndFusedOp.fusedLoops.front()->getResults();
}

mlir::linalg::LinalgBaseTileAndFusePattern::LinalgBaseTileAndFusePattern(
    StringRef opName, MLIRContext *context,
    const LinalgDependenceGraph &dependenceGraph,
    LinalgTilingOptions tilingOptions, LinalgFusionOptions fusionOptions,
    LinalgTransformationFilter f, LinalgTransformationFilter fusedOpMarker,
    LinalgTransformationFilter originalOpMarker, PatternBenefit benefit)
    : RewritePattern(opName, benefit, context, {}),
      dependenceGraph(dependenceGraph), tilingOptions(std::move(tilingOptions)),
      fusionOptions(std::move(fusionOptions)), filter(std::move(f)),
      fusedOpMarker(std::move(fusedOpMarker)),
      originalOpMarker(std::move(originalOpMarker)) {}

LogicalResult mlir::linalg::LinalgBaseTileAndFusePattern::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // TODO: remove hasIndexSemantics check once index ops are supported.
  if (!linalgOp || linalgOp.hasIndexSemantics())
    return failure();
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();

  DenseSet<Operation *> producers;
  producers.insert(linalgOp);
  for (auto dependence : dependenceGraph.getDependentOperationsInto(linalgOp)) {
    Optional<unsigned> operandNumber = dependence.getIndexingOpViewOperandNum();
    // For dependences into `linalgOp`, the indexing op view is always an
    // OpOperand. We could assert, but continue if this is not the case.
    if (!operandNumber)
      continue;
    if (!fusionOptions.indicesToFuse.count(operandNumber.getValue()))
      continue;
    if (isa<LinalgOp>(dependence.getDependentOp()))
      producers.insert(dependence.getDependentOp());
  }

  SmallVector<LinalgOp, 1> fusionOps;
  for (auto it = op->getBlock()->begin(), ie = Block::iterator(op); it != ie;
       ++it) {
    auto producerLinalgOp = dyn_cast<LinalgOp>(&(*it));
    if (producerLinalgOp && producers.count(producerLinalgOp))
      fusionOps.push_back(producerLinalgOp);
  }
  fusionOps.push_back(linalgOp);

  SmallVector<Value, 4> tileSizes =
      tilingOptions.tileSizeComputationFunction(rewriter, op);
  LinalgTilingOptions instanceTilingOptions = tilingOptions;
  instanceTilingOptions.setTileSizes(tileSizes);
  Optional<TiledAndFusedLinalgOps> tiledAndFusedOps = tileAndFuseLinalgOps(
      rewriter, fusionOps, dependenceGraph, instanceTilingOptions);
  if (!tiledAndFusedOps)
    return failure();

  // Tile the unfused loops.
  SmallVector<Value, 4> unfusedLoopTileSizes;
  Value zero = rewriter.create<arith::ConstantIndexOp>(op->getLoc(), 0);
  for (const auto &tileSize : enumerate(tileSizes)) {
    if (tiledAndFusedOps->fusedLoopDims.count(tileSize.index()))
      unfusedLoopTileSizes.push_back(zero);
    else
      unfusedLoopTileSizes.push_back(tileSize.value());
  }
  // Tile the loop only if there is a non-zero tile size.
  if (unfusedLoopTileSizes.size() > linalgOp.getNumLoops())
    unfusedLoopTileSizes.resize(linalgOp.getNumLoops());
  if (llvm::any_of(unfusedLoopTileSizes, [](Value val) {
        if (auto cst = val.getDefiningOp<arith::ConstantIndexOp>())
          return cst.value() != 0;
        return true;
      })) {
    LinalgTilingOptions unfusedTilingOptions = tilingOptions;
    unfusedTilingOptions.setTileSizes(unfusedLoopTileSizes);
    FailureOr<TiledLinalgOp> unfusedTiledOp =
        tileLinalgOp(rewriter, tiledAndFusedOps->op, unfusedTilingOptions);
    if (failed(unfusedTiledOp))
      return failure();
    rewriter.replaceOp(tiledAndFusedOps->op,
                       getTiledOpResult(unfusedTiledOp.getValue()));
    tiledAndFusedOps->op = unfusedTiledOp->op;
  }
  op->replaceAllUsesWith(getTiledAndFusedOpResult(tiledAndFusedOps.getValue()));

  filter.replaceLinalgTransformationFilter(rewriter,
                                           tiledAndFusedOps->op.getOperation());
  for (auto fusedOp : tiledAndFusedOps->fusedProducers) {
    fusedOpMarker.replaceLinalgTransformationFilter(rewriter,
                                                    fusedOp.getOperation());
  }
  for (auto origProducerOp : ArrayRef<LinalgOp>(fusionOps).drop_back()) {
    originalOpMarker.replaceLinalgTransformationFilter(
        rewriter, origProducerOp.getOperation());
  }
  rewriter.updateRootInPlace(op, [&]() {
    originalOpMarker.replaceLinalgTransformationFilter(rewriter, op);
  });
  return success();
}

/// Linalg tiling pattern.
mlir::linalg::LinalgTilingPattern::LinalgTilingPattern(
    MLIRContext *context, LinalgTilingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgTilingPattern::LinalgTilingPattern(
    StringRef opName, MLIRContext *context, LinalgTilingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)), options(std::move(options)) {}

FailureOr<TiledLinalgOp>
mlir::linalg::LinalgTilingPattern::returningMatchAndRewrite(
    LinalgOp op, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, op)))
    return failure();

  FailureOr<TiledLinalgOp> res = tileLinalgOp(rewriter, op, options);
  if (failed(res))
    return failure();

  // Clear filter to stop recursive pattern application.
  // This must be done here to properly propagate to peeling branches.
  filter.replaceLinalgTransformationFilter(rewriter, res->op);

  // Peel the loops of the TiledLinalgOp.
  peelTiledLinalgOp(rewriter, *res, options.peeledLoops, options.loopType);

  if (res->tensorResults.empty())
    rewriter.eraseOp(op);
  else
    rewriter.replaceOp(op, res->tensorResults);

  return res;
}

/// Linalg padding pattern.
mlir::linalg::LinalgPaddingPattern::LinalgPaddingPattern(
    MLIRContext *context, LinalgPaddingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgPaddingPattern::LinalgPaddingPattern(
    StringRef opName, MLIRContext *context, LinalgPaddingOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)), options(std::move(options)) {}

FailureOr<LinalgOp>
mlir::linalg::LinalgPaddingPattern::returningMatchAndRewrite(
    LinalgOp linalgOp, PatternRewriter &rewriter) const {
  if (!linalgOp.hasTensorSemantics())
    return failure();
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();

  // Pad the operation.
  LinalgOp paddedOp;
  FailureOr<SmallVector<Value>> newResults = rewriteAsPaddedOp(
      rewriter, linalgOp, options.paddingValueComputationFunction,
      options.paddingNoFoldComputationFunction, paddedOp);
  if (failed(newResults))
    return failure();

  // Compute the desired hoisting depths.
  SmallVector<int64_t> depths;
  if (options.paddingHoistComputationFunction) {
    for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands())
      depths.push_back(options.paddingHoistComputationFunction(*opOperand));
  }

  // Hoist the padding.
  for (const auto &en : enumerate(depths)) {
    OpOperand &opOperand = paddedOp->getOpOperand(en.index());
    auto padOp = opOperand.get().getDefiningOp<tensor::PadOp>();
    if (!padOp || en.value() == 0)
      continue;
    tensor::PadOp hoistedOp;
    SmallVector<GenericOp> transposeOps;
    SmallVector<int64_t> transposeVector =
        options.paddingTransposeComputationFunction(opOperand);

    FailureOr<Value> newResult = hoistPaddingOnTensors(
        padOp, en.value(), transposeVector, hoistedOp, transposeOps);
    if (failed(newResult))
      continue;
    rewriter.replaceOp(padOp, newResult.getValue());

    // Do not apply hoist padding to the newly introduced transpose operations.
    for (GenericOp transposeOp : transposeOps)
      filter.replaceLinalgTransformationFilter(rewriter, transposeOp);
  }

  // Replace the original, to-be-padded operation with the new results.
  rewriter.replaceOp(linalgOp, newResults.getValue());
  filter.replaceLinalgTransformationFilter(rewriter, paddedOp);

  return paddedOp;
}
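
// Minimal configuration sketch for the padding pattern above. The callbacks
// shown are hypothetical examples, not defaults: pad every operand with the
// zero of its element type and never mark the pad as nofold.
//
//   LinalgPaddingOptions options;
//   options.paddingValueComputationFunction =
//       [](OpBuilder &b, OpOperand &opOperand) -> FailureOr<Value> {
//     Type t = getElementTypeOrSelf(opOperand.get());
//     return b.create<arith::ConstantOp>(opOperand.getOwner()->getLoc(),
//                                        b.getZeroAttr(t))
//         .getResult();
//   };
//   options.paddingNoFoldComputationFunction = [](OpOperand &) {
//     return false;
//   };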

/// Linalg tile and fuse tensor ops pattern.
mlir::linalg::LinalgTileAndFuseTensorOpsPattern::
    LinalgTileAndFuseTensorOpsPattern(MLIRContext *context,
                                      LinalgTilingAndFusionOptions options,
                                      LinalgTransformationFilter f,
                                      PatternBenefit benefit)
    : RewritePattern(MatchAnyOpTypeTag(), benefit, context),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgTileAndFuseTensorOpsPattern::
    LinalgTileAndFuseTensorOpsPattern(StringRef opName, MLIRContext *context,
                                      LinalgTilingAndFusionOptions options,
                                      LinalgTransformationFilter f,
                                      PatternBenefit benefit)
    : RewritePattern(opName, benefit, context), filter(std::move(f)),
      options(std::move(options)) {}

FailureOr<mlir::linalg::TileLoopNest>
mlir::linalg::LinalgTileAndFuseTensorOpsPattern::returningMatchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  LinalgOp rootOp = dyn_cast<LinalgOp>(op);
  if (!rootOp)
    return failure();
  if (failed(filter.checkAndNotify(rewriter, op)))
    return failure();

  // Check `tileSizes` contains a tile size for every `rootOp` loop dimension.
  if (options.tileSizes.size() < rootOp.getNumLoops())
    return rewriter.notifyMatchFailure(op, "expect #tile sizes >= #loops");

  // Check `tileInterchange` contains no entries or as many as `tileSizes`.
  if (!options.tileInterchange.empty() &&
      options.tileInterchange.size() != options.tileSizes.size())
    return rewriter.notifyMatchFailure(
        op, "expect the number of tile sizes and interchange dims to match");

  // Copy the `tileSizes` and `tileInterchange` prefixes needed for `rootOp`.
  SmallVector<int64_t> rootTileSizes(options.tileSizes.begin(),
                                     options.tileSizes.begin() +
                                         rootOp.getNumLoops());
  SmallVector<int64_t> rootInterchange =
      options.tileInterchange.empty()
          ? llvm::to_vector<6>(llvm::seq<int64_t>(0, rootOp.getNumLoops()))
          : SmallVector<int64_t>(options.tileInterchange.begin(),
                                 options.tileInterchange.begin() +
                                     rootOp.getNumLoops());

  // Check `rootTileSizes` contains non-zero tile sizes.
  if (llvm::count(rootTileSizes, 0) == static_cast<long>(rootTileSizes.size()))
    return rewriter.notifyMatchFailure(
        op, "expect at least one non-zero tile size");

  // Check `rootInterchange` is a permutation of the `rootOp` loop dimensions.
  // It has to be a permutation since the tiling cannot tile the same loop
  // dimension multiple times.
  if (!isPermutation(rootInterchange))
    return rewriter.notifyMatchFailure(
        op, "expect the tile interchange permutes the root loops");

  // Tile `rootOp` and fuse its producers.
  FailureOr<TileLoopNest> tileLoopNest =
      tileConsumerAndFuseProducers(rewriter, rootOp, rootTileSizes,
                                   rootInterchange, options.tileDistribution);
  if (failed(tileLoopNest))
    return rewriter.notifyMatchFailure(
        op, "tileConsumerAndFuseProducers failed unexpectedly");

  // Replace all uses of the tiled loop operation.
  rootOp->replaceAllUsesWith(tileLoopNest->getRootOpReplacementResults());

  // Apply the filter if specified.
  for (LinalgOp linalgOp : tileLoopNest->getAllTiledAndFusedOps())
    filter.replaceLinalgTransformationFilter(rewriter, linalgOp);
  return tileLoopNest;
}

/// Linalg generic interchange pattern.
mlir::linalg::GenericOpInterchangePattern::GenericOpInterchangePattern(
    MLIRContext *context, ArrayRef<unsigned> interchangeVector,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpRewritePattern(context, benefit), filter(std::move(f)),
      interchangeVector(interchangeVector.begin(), interchangeVector.end()) {}

FailureOr<GenericOp>
mlir::linalg::GenericOpInterchangePattern::returningMatchAndRewrite(
    GenericOp genericOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, genericOp)))
    return failure();

  FailureOr<GenericOp> transformedOp =
      interchangeGenericOp(rewriter, genericOp, interchangeVector);
  if (failed(transformedOp))
    return failure();

  // Set the new filter if specified.
  filter.replaceLinalgTransformationFilter(rewriter, genericOp);
  return transformedOp;
}

/// Linalg generalization pattern.
mlir::linalg::LinalgGeneralizationPattern::LinalgGeneralizationPattern(
    MLIRContext *context, LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)) {}

mlir::linalg::LinalgGeneralizationPattern::LinalgGeneralizationPattern(
    StringRef opName, MLIRContext *context, LinalgTransformationFilter f,
    PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)) {}

FailureOr<GenericOp>
mlir::linalg::LinalgGeneralizationPattern::returningMatchAndRewrite(
    LinalgOp linalgOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();
  FailureOr<GenericOp> genericOp = generalizeNamedOp(rewriter, linalgOp);
  if (failed(genericOp))
    return failure();
  filter.replaceLinalgTransformationFilter(rewriter, *genericOp);
  return genericOp;
}

mlir::linalg::LinalgBasePromotionPattern::LinalgBasePromotionPattern(
    MLIRContext *context, LinalgTransformationFilter f,
    LinalgPromotionOptions options, PatternBenefit benefit)
    : RewritePattern(MatchAnyOpTypeTag(), benefit, context),
      filter(std::move(f)), options(std::move(options)) {}

mlir::linalg::LinalgBasePromotionPattern::LinalgBasePromotionPattern(
    StringRef opName, MLIRContext *context, LinalgPromotionOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : RewritePattern(opName, benefit, context, {}), filter(std::move(f)),
      options(std::move(options)) {}

LogicalResult mlir::linalg::LinalgBasePromotionPattern::matchAndRewrite(
    Operation *op, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, op)))
    return failure();
  if (failed(promoteSubviewsPrecondition(op, options)))
    return failure();

  // TODO: We cannot use root update here. This pattern is creating other ops,
  // so if the promotion fails, those need to be cleaned up, which doesn't seem
  // to be happening here. So to fail properly, we should be cloning the op and
  // deleting the previous op. This needs more investigation.
  rewriter.startRootUpdate(op);
  Optional<LinalgOp> promotedOp = promoteSubViews(rewriter, op, options);
  if (!promotedOp) {
    rewriter.cancelRootUpdate(op);
    return op->emitError("subview promotion failed");
  }
  rewriter.finalizeRootUpdate(op);
  filter.replaceLinalgTransformationFilter(rewriter, op);
  return success();
}

mlir::linalg::LinalgVectorizationPattern::LinalgVectorizationPattern(
    MLIRContext *context, LinalgTransformationFilter f,
    LinalgVectorizationOptions options, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(std::move(f)) {}

mlir::linalg::LinalgVectorizationPattern::LinalgVectorizationPattern(
    StringRef opName, MLIRContext *context, LinalgVectorizationOptions options,
    LinalgTransformationFilter f, PatternBenefit benefit)
    : OpInterfaceRewritePattern<LinalgOp>(context, benefit),
      filter(f.addOpNameFilter(opName)) {}

LogicalResult mlir::linalg::LinalgVectorizationPattern::matchAndRewrite(
    LinalgOp linalgOp, PatternRewriter &rewriter) const {
  if (failed(filter.checkAndNotify(rewriter, linalgOp)))
    return failure();
  return vectorize(rewriter, linalgOp);
}

LogicalResult mlir::linalg::CopyVectorizationPattern::matchAndRewrite(
    memref::CopyOp copyOp, PatternRewriter &rewriter) const {
  return vectorizeCopy(rewriter, copyOp);
}

LogicalResult mlir::linalg::applyStagedPatterns(
    Operation *op, ArrayRef<FrozenRewritePatternSet> stage1Patterns,
    const FrozenRewritePatternSet &stage2Patterns,
    function_ref<LogicalResult(Operation *)> stage3Lambda) {
  unsigned iteration = 0;
  (void)iteration;
  for (const auto &patterns : stage1Patterns) {
    LLVM_DEBUG(DBGS() << "Before 1st stage, iter: " << ++iteration << "\n"
                      << *op);
    if (failed(applyPatternsAndFoldGreedily(op, patterns))) {
      LLVM_DEBUG(DBGS() << "Underlying first stage rewrite did not converge\n");
      return failure();
    }
    LLVM_DEBUG(DBGS() << "After 1st stage, iter: " << iteration << "\n"
                      << *op);
    if (failed(applyPatternsAndFoldGreedily(op, stage2Patterns))) {
      LLVM_DEBUG(DBGS() << "Underlying 2nd stage rewrite did not converge\n");
      return failure();
    }
    LLVM_DEBUG(DBGS() << "After 2nd stage, iter: " << iteration << "\n"
                      << *op);
    if (stage3Lambda) {
      if (failed(stage3Lambda(op)))
        return failure();
      LLVM_DEBUG(DBGS() << "After 3rd stage, iter: " << iteration << "\n"
                        << *op);
    }
  }
  return success();
}
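
// A typical staging, for illustration (the pattern sets are assumed to have
// been populated elsewhere): each stage-1 set is applied in turn, interleaved
// with the stage-2 canonicalizations.
//
//   SmallVector<FrozenRewritePatternSet> stage1;
//   stage1.emplace_back(std::move(tilingPatterns));
//   FrozenRewritePatternSet stage2(std::move(canonicalizationPatterns));
//   (void)applyStagedPatterns(funcOp, stage1, stage2);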

static SmallVector<StringRef> getNParallelLoopsAttrs(unsigned nParallelLoops) {
  return SmallVector<StringRef>(nParallelLoops, getParallelIteratorTypeName());
}

/// Rewrite a tensor::PadOp into a sequence of InitTensorOp, FillOp (to
/// initialize with pad_val) and GenericOp (to copy contents).
LogicalResult
PadOpTransformationPattern::matchAndRewrite(tensor::PadOp padOp,
                                            PatternRewriter &rewriter) const {
  auto inputShapedType = padOp.source().getType().cast<ShapedType>();
  auto resultShapedType = padOp.result().getType().cast<ShapedType>();

  // Bail on non-static shapes.
  if (!inputShapedType.hasStaticShape())
    return failure();
  if (!resultShapedType.hasStaticShape())
    return failure();

  // Only support padding with a constant for now, i.e. either:
  //   1. A BBarg from a different block.
  //   2. A value defined outside of the current block.
  Block &block = padOp.region().front();
  auto yieldOp = cast<tensor::YieldOp>(block.getTerminator());
  Value padValue = yieldOp.value();
  Operation *definingOp = padValue.getDefiningOp();
  if (definingOp && definingOp->getBlock() == &block)
    return failure();
  if (!definingOp && padValue.cast<BlockArgument>().getOwner() == &block)
    return failure();

  // Create a tensor with the padded shape.
  Location loc = padOp.getLoc();
  SmallVector<Value> indices(resultShapedType.getRank(),
                             rewriter.create<arith::ConstantIndexOp>(loc, 0));
  Value initTensor = rewriter.create<InitTensorOp>(
      loc, resultShapedType.getShape(), resultShapedType.getElementType());

  // Initialize the tensor with the pad value.
  Value tmpTensor =
      rewriter.create<linalg::FillOp>(loc, padValue, initTensor).result();

  // Copy the original contents into the new tensor.
  // Uses linalg.generic, but could be done with tensor.insert_slice.
  SmallVector<AffineExpr, 4> outputExprs;
  for (unsigned i = 0; i < resultShapedType.getRank(); ++i) {
    outputExprs.push_back(getAffineDimExpr(i, rewriter.getContext()) +
                          padOp.static_low()[i].cast<IntegerAttr>().getInt());
  }

  SmallVector<AffineMap, 2> transferMaps = {
      rewriter.getMultiDimIdentityMap(inputShapedType.getRank()),
      AffineMap::get(resultShapedType.getRank(),
                     /*symbolCount=*/0, outputExprs, rewriter.getContext())};

  rewriter.replaceOpWithNewOp<linalg::GenericOp>(
      padOp, resultShapedType, padOp.source(), tmpTensor, transferMaps,
      getNParallelLoopsAttrs(resultShapedType.getRank()),
      [&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
        nestedBuilder.create<linalg::YieldOp>(nestedLoc, args[0]);
      });

  return success();
}
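
// Schematically, the rewrite above turns (hypothetical IR):
//
//   %p = tensor.pad %src low[1, 1] high[1, 1] {
//   ^bb0(%i: index, %j: index):
//     tensor.yield %cst : f32
//   } : tensor<2x2xf32> to tensor<4x4xf32>
//
// into an init_tensor of the padded shape, a linalg.fill initializing it with
// %cst, and a linalg.generic copying %src into the interior at offset (1, 1).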

/// Fill `dest` using a FillOp if the padding value is a constant.
/// Otherwise, generate a tensor::GenerateOp.
Value GeneralizePadOpPattern::createFillOrGenerateOp(
    PatternRewriter &rewriter, tensor::PadOp padOp, Value dest,
    const SmallVector<Value> &dynSizes) const {
  auto padValue = padOp.getConstantPaddingValue();
  if (padValue)
    return rewriter.create<FillOp>(padOp.getLoc(), padValue, dest).result();

  // Fill could not be optimized: Lower to tensor::GenerateOp with region.
  auto generateOp = rewriter.create<tensor::GenerateOp>(
      padOp.getLoc(), padOp.getResultType(), dynSizes);
  // Copy region to new op.
  BlockAndValueMapping bvm;
  padOp.region().cloneInto(&generateOp.getRegion(), bvm);
  return generateOp;
}

LogicalResult
GeneralizePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
                                        PatternRewriter &rewriter) const {
  // Given an OpFoldResult, return an index-typed value.
  auto getIdxValue = [&](OpFoldResult ofr) {
    if (auto val = ofr.dyn_cast<Value>())
      return val;
    return rewriter
        .create<arith::ConstantIndexOp>(
            padOp.getLoc(), ofr.get<Attribute>().cast<IntegerAttr>().getInt())
        .getResult();
  };

  auto resultType = padOp.getResultType();
  // Compute size of InitTensorOp. Any combination of static/dynamic is
  // supported.
  SmallVector<Value> dynSizes;
  SmallVector<int64_t> staticSizes;
  for (unsigned dim = 0; dim < resultType.getRank(); ++dim) {
    if (resultType.isDynamicDim(dim)) {
      auto srcSize = rewriter.createOrFold<tensor::DimOp>(padOp.getLoc(),
                                                          padOp.source(), dim);
      // Add the low and high padding values.
      auto plusLow = rewriter.createOrFold<arith::AddIOp>(
          padOp.getLoc(), srcSize, getIdxValue(padOp.getMixedLowPad()[dim]));
      auto plusHigh = rewriter.createOrFold<arith::AddIOp>(
          padOp.getLoc(), plusLow, getIdxValue(padOp.getMixedHighPad()[dim]));
      dynSizes.push_back(plusHigh);
    }
    staticSizes.push_back(resultType.getDimSize(dim));
  }

  // Init tensor and fill it with padding.
  Value init = rewriter.create<InitTensorOp>(
      padOp.getLoc(), dynSizes, staticSizes, resultType.getElementType());
  Value fill = createFillOrGenerateOp(rewriter, padOp, init, dynSizes);

  // Try to optimize the copy of the source.
  if (optimizeCopyFn && optimizeCopyFn(rewriter, padOp, fill).succeeded())
    return success();

  // tensor::PadOps cannot be optimized. Generate an InsertSliceOp instead
  // for copying the PadOp source.
  auto sourceType = padOp.getSourceType();
  // Compute the size of the source of the tensor::PadOp.
  SmallVector<OpFoldResult> srcSizes;
  for (unsigned dim = 0; dim < sourceType.getRank(); ++dim) {
    if (sourceType.isDynamicDim(dim)) {
      srcSizes.push_back(rewriter.createOrFold<tensor::DimOp>(
          padOp.getLoc(), padOp.source(), dim));
    } else {
      srcSizes.push_back(rewriter.getIndexAttr(sourceType.getDimSize(dim)));
    }
  }
  // Strides of InsertSliceOp are all 1.
  SmallVector<OpFoldResult> strides(sourceType.getRank(),
                                    rewriter.getIndexAttr(1));
  rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
      padOp, padOp.source(), fill, padOp.getMixedLowPad(), srcSizes, strides);

  return success();
}
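
// When the copy cannot be optimized away, the net effect of the rewrite above
// is, schematically and for a constant padding value %cst:
//
//   %init = linalg.init_tensor [...] : tensor<...>
//   %fill = linalg.fill of %init with %cst
//   %res  = tensor.insert_slice %src into %fill[low pads][src sizes][1, ...]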

LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
    tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
  if (!sliceOp.hasUnitStride())
    return failure();

  auto padOp = sliceOp.source().getDefiningOp<tensor::PadOp>();
  if (!padOp)
    return failure();

  bool zeroSliceGuard = true;
  if (controlFn) {
    if (Optional<bool> control = controlFn(sliceOp))
      zeroSliceGuard = control.getValue();
    else
      return failure();
  }

  Operation *tiledPadOp =
      tensor::bubbleUpPadSlice(rewriter, padOp, sliceOp.getMixedOffsets(),
                               sliceOp.getMixedSizes(), zeroSliceGuard);
  // All shapes are static and the data source is actually used. Rewrite into
  // pad(extract_slice(x)).
  rewriter.replaceOp(sliceOp, tiledPadOp->getResults());
  return success();
}
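
// In other words, the pattern above rewrites (hypothetical IR):
//
//   %0 = tensor.pad %src ... : tensor<?x?xf32> to tensor<16x16xf32>
//   %1 = tensor.extract_slice %0[...] [...] [1, 1]
//
// into pad(extract_slice(%src)), so only the slice that is actually consumed
// gets padded; `zeroSliceGuard` covers the case where the slice would read
// nothing but padding.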

namespace {
// The following are patterns for downscaling convolution ops with size-1
// window dimensions.
//
// Note that we'd eventually want to write such transformations in a generic
// way, e.g., converting to linalg.generic, removing the size-1 dimensions,
// and then turning back to named ops. But for now it's fine to have a few
// patterns matching special ops to get started.
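//
// For example, a 2-D convolution whose kernel and output have size 1 along
// the height dimension (hypothetical IR):
//
//   linalg.conv_2d_nhwc_hwcf
//     ins(%in, %ker : tensor<1x1x10x3xf32>, tensor<1x2x3x4xf32>) ...
//
// is rewritten below into a linalg.conv_1d_nwc_wcf on rank-reduced operands,
// bracketed by rank-reducing extract_slice/insert_slice ops.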

/// Rewrites 2-D convolution ops with size-1 window dimensions into 1-D
/// convolution ops.
struct DownscaleSizeOneWindowed2DConvolution final
    : public OpRewritePattern<Conv2DNhwcHwcfOp> {
  DownscaleSizeOneWindowed2DConvolution(
      MLIRContext *context,
      LinalgTransformationFilter f = LinalgTransformationFilter(),
      PatternBenefit benefit = 1)
      : OpRewritePattern<Conv2DNhwcHwcfOp>(context, benefit),
        filter(std::move(f)) {}

  LogicalResult matchAndRewrite(linalg::Conv2DNhwcHwcfOp convOp,
                                PatternRewriter &rewriter) const override {
    if (failed(filter.checkAndNotify(rewriter, convOp)))
      return failure();
    if (convOp.hasBufferSemantics())
      return failure(); // To be implemented.

    Value input = convOp.inputs().front();
    Value kernel = convOp.inputs().back();
    Value output = convOp.outputs().front();

    auto inputType = input.getType().dyn_cast<RankedTensorType>();
    auto kernelType = kernel.getType().dyn_cast<RankedTensorType>();
    auto outputType = output.getType().dyn_cast<RankedTensorType>();

    auto kernelShape = kernelType.getShape();
    auto outputShape = outputType.getShape();

    // Only handle the case where at least one of the window dimensions is
    // of size 1. Other cases can rely on tiling to reduce to such cases.
    int64_t khSize = kernelShape[0], kwSize = kernelShape[1];
    int64_t ohSize = outputShape[1], owSize = outputShape[2];
    bool removeH = (khSize == 1 && ohSize == 1);
    bool removeW = (kwSize == 1 && owSize == 1);
    if (!removeH && !removeW)
      return failure();

    // Get new shapes and types for all operands by removing the size-1
    // dimension.
    using RTTBuilder = RankedTensorType::Builder;
    RankedTensorType newInputType =
        RTTBuilder(inputType).dropDim((removeH ? 1 : 2));
    RankedTensorType newKernelType =
        RTTBuilder(kernelType).dropDim((removeH ? 0 : 1));
    RankedTensorType newOutputType =
        RTTBuilder(outputType).dropDim(removeH ? 1 : 2);

    // Rank-reduce operands.
    Location loc = convOp.getLoc();
    Value newInput = tensor::createCanonicalRankReducingExtractSliceOp(
        rewriter, loc, input, newInputType);
    Value newKernel = tensor::createCanonicalRankReducingExtractSliceOp(
        rewriter, loc, kernel, newKernelType);
    Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
        rewriter, loc, output, newOutputType);

    // Rank-reduce strides and dilations too.
    // TODO: dropDim 1-liner helper.
    auto strides = llvm::to_vector<4>(convOp.strides().getValues<int64_t>());
    strides.erase(strides.begin() + (removeH ? 0 : 1));
    auto stridesAttr = rewriter.getI64VectorAttr(strides);

    auto dilations =
        llvm::to_vector<4>(convOp.dilations().getValues<int64_t>());
    dilations.erase(dilations.begin() + (removeH ? 0 : 1));
    auto dilationsAttr = rewriter.getI64VectorAttr(dilations);

    auto conv1DOp = rewriter.create<linalg::Conv1DNwcWcfOp>(
        loc, newOutputType, ValueRange{newInput, newKernel},
        ValueRange{newOutput}, stridesAttr, dilationsAttr);

    // Insert back.
    Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
        rewriter, loc, conv1DOp.getResult(0), output);
    rewriter.replaceOp(convOp, inserted);

    filter.replaceLinalgTransformationFilter(rewriter, conv1DOp);
    return success();
  }

private:
  /// LinalgTransformMarker handles special attribute manipulations.
  LinalgTransformationFilter filter;
};

/// Rewrites 2-D depthwise convolution ops with size-1 (w, kw) or (h, kh)
/// dimensions into 1-D depthwise convolution ops.
struct DownscaleDepthwiseConv2DNhwcHwcOp final
    : public OpRewritePattern<DepthwiseConv2DNhwcHwcOp> {
  DownscaleDepthwiseConv2DNhwcHwcOp(
      MLIRContext *context,
      LinalgTransformationFilter f = LinalgTransformationFilter(),
      PatternBenefit benefit = 1)
      : OpRewritePattern<DepthwiseConv2DNhwcHwcOp>(context, benefit),
        filter(std::move(f)) {}

  LogicalResult matchAndRewrite(DepthwiseConv2DNhwcHwcOp convOp,
                                PatternRewriter &rewriter) const override {
    if (failed(filter.checkAndNotify(rewriter, convOp)))
      return failure();
    if (convOp.hasBufferSemantics())
      return failure(); // To be implemented.

    Value input = convOp.inputs().front();
    Value kernel = convOp.inputs().back();
    Value output = convOp.outputs().front();

    auto inputType = input.getType().dyn_cast<RankedTensorType>();
    auto kernelType = kernel.getType().dyn_cast<RankedTensorType>();
    auto outputType = output.getType().dyn_cast<RankedTensorType>();

    auto kernelShape = kernelType.getShape();
    auto outputShape = outputType.getShape();

    // Only handle the case where at least one of the window dimensions is
    // of size 1. Other cases can rely on tiling to reduce to such cases.
    int64_t khSize = kernelShape[0], kwSize = kernelShape[1];
    int64_t ohSize = outputShape[1], owSize = outputShape[2];
    bool removeH = (khSize == 1 && ohSize == 1);
    bool removeW = (kwSize == 1 && owSize == 1);
    if (!removeH && !removeW)
      return failure();

    // Get new shapes and types for all operands by removing the size-1
    // dimension.
    using RTTBuilder = RankedTensorType::Builder;
    RankedTensorType newInputType =
        RTTBuilder(inputType).dropDim((removeH ? 1 : 2));
    RankedTensorType newKernelType =
        RTTBuilder(kernelType).dropDim((removeH ? 0 : 1));
    RankedTensorType newOutputType =
        RTTBuilder(outputType).dropDim(removeH ? 1 : 2);

    // Rank-reduce operands.
    Location loc = convOp.getLoc();
    Value newInput = tensor::createCanonicalRankReducingExtractSliceOp(
        rewriter, loc, input, newInputType);
    Value newKernel = tensor::createCanonicalRankReducingExtractSliceOp(
        rewriter, loc, kernel, newKernelType);
    Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
        rewriter, loc, output, newOutputType);

    // Rank-reduce strides and dilations too.
    // TODO: dropDim 1-liner helper.
    auto strides = llvm::to_vector<4>(convOp.strides().getValues<int64_t>());
    strides.erase(strides.begin() + (removeH ? 0 : 1));
    auto stridesAttr = rewriter.getI64VectorAttr(strides);

    auto dilations =
        llvm::to_vector<4>(convOp.dilations().getValues<int64_t>());
    dilations.erase(dilations.begin() + (removeH ? 0 : 1));
    auto dilationsAttr = rewriter.getI64VectorAttr(dilations);

    auto conv1DOp = rewriter.create<DepthwiseConv1DNwcWcOp>(
        loc, newOutputType, ValueRange{newInput, newKernel},
        ValueRange{newOutput}, stridesAttr, dilationsAttr);

    // Insert back.
    Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
        rewriter, loc, conv1DOp.getResult(0), output);
    rewriter.replaceOp(convOp, inserted);

    filter.replaceLinalgTransformationFilter(rewriter, conv1DOp);
    return success();
  }

private:
  /// LinalgTransformMarker handles special attribute manipulations.
  LinalgTransformationFilter filter;
};

} // namespace

void linalg::populateDecomposeConvolutionPatterns(
    RewritePatternSet &patterns, const LinalgTransformationFilter &filter,
    PatternBenefit benefit) {
  patterns.add<DownscaleSizeOneWindowed2DConvolution,
               DownscaleDepthwiseConv2DNhwcHwcOp>(patterns.getContext(), filter,
                                                  benefit);
}
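
// Typical usage sketch (illustrative; `ctx`, `funcOp`, and `filter` are
// assumed to exist in the caller):
//
//   RewritePatternSet patterns(ctx);
//   linalg::populateDecomposeConvolutionPatterns(patterns, filter);
//   (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));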