//===- DropUnitDims.cpp - Pass to drop use of unit-extent for broadcasting ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements patterns and a pass to remove the use of unit-extent
// dimensions for specifying broadcasting, in favor of a more canonical
// representation of the computation.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "linalg-drop-unit-dims"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;

/// Implements a pass that canonicalizes the uses of unit-extent dimensions for
/// broadcasting. For example,
///
/// ```mlir
/// #accesses = [
///   affine_map<(d0, d1) -> (0, d1)>,
///   affine_map<(d0, d1) -> (d0, 0)>,
///   affine_map<(d0, d1) -> (d0, d1)>
/// ]
///
/// #trait = {
///   args_in = 2,
///   args_out = 1,
///   indexing_maps = #accesses,
///   iterator_types = ["parallel", "parallel"],
///   library_call = "some_external_fn"
/// }
///
/// func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) ->
/// tensor<5x5xf32>
/// {
///   %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>] :
///        tensor<5xf32> into tensor<1x5xf32>
///   %1 = linalg.tensor_reshape %arg1 [affine_map<(d0, d1) -> (d0, d1)>] :
///        tensor<5xf32> into tensor<5x1xf32>
///   %2 = linalg.generic #trait %0, %1 {
///        ^bb0(%arg2: f32, %arg3: f32):
///          %3 = addf %arg2, %arg3 : f32
///          linalg.yield %3 : f32
///        } : tensor<1x5xf32>, tensor<5x1xf32> -> tensor<5x5xf32>
///   return %2 : tensor<5x5xf32>
/// }
/// ```
///
/// would canonicalize to
///
/// ```mlir
/// #accesses = [
///   affine_map<(d0, d1) -> (d1)>,
///   affine_map<(d0, d1) -> (d0)>,
///   affine_map<(d0, d1) -> (d0, d1)>
/// ]
///
/// #trait = {
///   args_in = 2,
///   args_out = 1,
///   indexing_maps = #accesses,
///   iterator_types = ["parallel", "parallel"],
///   library_call = "some_external_fn"
/// }
///
/// func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) ->
/// tensor<5x5xf32>
/// {
///   %0 = linalg.generic #trait %arg0, %arg1 {
///        ^bb0(%arg2: f32, %arg3: f32):
///          %3 = addf %arg2, %arg3 : f32
///          linalg.yield %3 : f32
///        } : tensor<5xf32>, tensor<5xf32> -> tensor<5x5xf32>
///   return %0 : tensor<5x5xf32>
/// }
/// ```

/// Given dims of the iteration space of a structured op that are known to
/// have a single trip count (`unitDims`), return the indexing maps to use in
/// the canonicalized op with these dims removed, given the original
/// `indexingMaps`.
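///
/// For example (an illustrative case): with `unitDims` = {1} and an original
/// indexing map affine_map<(d0, d1, d2) -> (d0, d1, d2)>, the returned map is
/// affine_map<(d0, d1) -> (d0, 0, d1)>; the unit dimension is replaced by the
/// constant 0 and the remaining dimensions are renumbered.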
static ArrayAttr replaceUnitDims(DenseSet<unsigned> &unitDims,
                                 ArrayRef<AffineMap> indexingMaps,
                                 MLIRContext *context) {
  if (indexingMaps.empty())
    return nullptr;
  unsigned numIterationDims = indexingMaps.front().getNumDims();
  unsigned numSymbols = indexingMaps.front().getNumSymbols();

  // Compute the replacement for each dim expr.
  SmallVector<AffineExpr, 4> dimReplacements;
  dimReplacements.reserve(numIterationDims);
  unsigned numKeptDims = 0;
  for (unsigned dim : llvm::seq<unsigned>(0, numIterationDims)) {
    if (unitDims.count(dim))
      dimReplacements.push_back(getAffineConstantExpr(0, context));
    else
      dimReplacements.push_back(getAffineDimExpr(numKeptDims++, context));
  }

  // Symbols remain the same.
  SmallVector<AffineExpr, 4> symReplacements;
  symReplacements.reserve(numSymbols);
  for (unsigned symbol : llvm::seq<unsigned>(0, numSymbols))
    symReplacements.push_back(getAffineSymbolExpr(symbol, context));

  SmallVector<AffineMap, 4> newIndexingMaps;
  newIndexingMaps.reserve(indexingMaps.size());
  for (AffineMap operandMap : indexingMaps) {
    // The indexing maps are expected to have no symbols.
    if (operandMap.getNumSymbols())
      return nullptr;
    newIndexingMaps.push_back(simplifyAffineMap(
        operandMap.replaceDimsAndSymbols(dimReplacements, symReplacements,
                                         numIterationDims - unitDims.size(),
                                         numSymbols)));
  }

  // Check that the new index maps are invertible. If not, something went
  // wrong, so abort.
  if (!inversePermutation(concatAffineMaps(newIndexingMaps)))
    return nullptr;
  return ArrayAttr::get(context,
                        llvm::to_vector<4>(llvm::map_range(
                            newIndexingMaps, [](AffineMap map) -> Attribute {
                              return AffineMapAttr::get(map);
                            })));
}

/// Update the index accesses of linalg operations having index semantics.
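/// For instance, with `unitDims` = {1}, a `linalg.index 1` in the body is
/// replaced by a constant 0 index, and a `linalg.index 2` is renumbered to
/// `linalg.index 1` since one lower dimension was dropped.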
template <typename GenericOpTy>
static void replaceUnitDimIndexOps(GenericOpTy op,
                                   const DenseSet<unsigned> &unitDims,
                                   PatternRewriter &rewriter) {
  assert(op->getNumRegions() == 1 && op->getRegion(0).getBlocks().size() == 1 &&
         "expected generic operation to have one block.");
  Block &block = op->getRegion(0).front();

  for (IndexOp indexOp : llvm::make_early_inc_range(block.getOps<IndexOp>())) {
    OpBuilder::InsertionGuard guard(rewriter);
    rewriter.setInsertionPoint(indexOp);
    if (unitDims.count(indexOp.dim()) != 0) {
      rewriter.replaceOpWithNewOp<ConstantIndexOp>(indexOp, 0);
    } else {
      // Update the dimension of the index operation if needed.
      unsigned droppedDims = llvm::count_if(
          unitDims, [&](unsigned dim) { return dim < indexOp.dim(); });
      if (droppedDims != 0)
        rewriter.replaceOpWithNewOp<IndexOp>(indexOp,
                                             indexOp.dim() - droppedDims);
    }
  }
}

/// Modify the region of an indexed generic op to drop arguments corresponding
/// to loops that have unit trip count.
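/// For indexed_generic ops the leading block arguments carry the loop indices;
/// e.g. with `unitDims` = {0}, the first index argument is replaced by a
/// constant 0 and then erased from the entry block.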
template <typename OpTy>
static LogicalResult
replaceBlockArgForUnitDimLoops(OpTy op, const DenseSet<unsigned> &unitDims,
                               PatternRewriter &rewriter) {
  return success();
}

template <>
LogicalResult replaceBlockArgForUnitDimLoops<IndexedGenericOp>(
    IndexedGenericOp op, const DenseSet<unsigned> &unitDims,
    PatternRewriter &rewriter) {
  OpBuilder::InsertionGuard guard(rewriter);
  Block *entryBlock = &op->getRegion(0).front();
  rewriter.setInsertionPointToStart(entryBlock);
  Value zero = rewriter.create<ConstantIndexOp>(op.getLoc(), 0);
  for (unsigned unitDimLoop : unitDims) {
    entryBlock->getArgument(unitDimLoop).replaceAllUsesWith(zero);
  }
  SmallVector<unsigned, 8> unitDimsToErase(unitDims.begin(), unitDims.end());
  entryBlock->eraseArguments(unitDimsToErase);
  return success();
}

namespace {
/// Pattern to fold unit-trip count loops in GenericOps.
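/// As a sketch: a generic op with iterator_types = ["parallel", "parallel"]
/// and an operand of type tensor<1x5xf32> accessed through an identity map is
/// rewritten to use iterator_types = ["parallel"] and the indexing map
/// affine_map<(d0) -> (0, d0)>, since the outer loop has a trip count of one.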
template <typename GenericOpTy>
struct FoldUnitDimLoops : public OpRewritePattern<GenericOpTy> {
  using OpRewritePattern<GenericOpTy>::OpRewritePattern;
  LogicalResult matchAndRewrite(GenericOpTy op,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap, 4> indexingMaps = op.getIndexingMaps();
    if (indexingMaps.empty())
      return failure();

    // Check if any of the iteration dimensions have a unit trip count. A
    // dimension ends up with a unit trip count if it is used to index into a
    // unit-extent dimension of a tensor/memref operand.
    AffineMap invertedMap = inversePermutation(concatAffineMaps(indexingMaps));
    if (!invertedMap)
      return failure();
    SmallVector<int64_t, 4> dims;
    for (ShapedType shapedType : op.getShapedOperandTypes())
      dims.append(shapedType.getShape().begin(), shapedType.getShape().end());

    // Find all the reduction iterators. Those need some special consideration
    // (see below).
    auto getLoopDimsOfType =
        [&](StringRef iteratorTypeName) -> SmallVector<unsigned, 4> {
      SmallVector<AffineExpr> dimExprs;
      getDimsOfType(op, iteratorTypeName, dimExprs);
      return llvm::to_vector<4>(llvm::map_range(dimExprs, [](AffineExpr expr) {
        return expr.cast<AffineDimExpr>().getPosition();
      }));
    };
    auto reductionDims = getLoopDimsOfType(getReductionIteratorTypeName());

    DenseSet<unsigned> unitDims;
    SmallVector<unsigned, 4> unitDimsReductionLoops;
    ArrayAttr iteratorTypes = op.iterator_types();
    for (auto expr : enumerate(invertedMap.getResults())) {
      if (AffineDimExpr dimExpr = expr.value().dyn_cast<AffineDimExpr>())
        if (dims[dimExpr.getPosition()] == 1) {
          if (isParallelIterator(iteratorTypes[expr.index()]))
            unitDims.insert(expr.index());
          else if (isReductionIterator(iteratorTypes[expr.index()]))
            unitDimsReductionLoops.push_back(expr.index());
        }
    }

    // A unit-trip reduction loop can only be dropped if at least one other
    // reduction loop is kept, since the remaining reduction loop accounts for
    // the initial value read in the reduction.
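    // For instance, reducing a tensor<1x1xf32> into a 0-d result uses two
    // reduction loops that both have unit trip count; only the first one is
    // dropped so that the body still performs the read of the initial value.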
    if (!unitDimsReductionLoops.empty() && reductionDims.size() > 1) {
      if (unitDimsReductionLoops.size() == reductionDims.size())
        unitDims.insert(reductionDims.begin(), std::prev(reductionDims.end()));
      else
        unitDims.insert(unitDimsReductionLoops.begin(),
                        unitDimsReductionLoops.end());
    }

    if (unitDims.empty())
      return failure();

    // Compute the modified indexing maps.
    MLIRContext *context = rewriter.getContext();
    ArrayAttr newIndexingMapAttr =
        replaceUnitDims(unitDims, indexingMaps, context);
    if (!newIndexingMapAttr)
      return op.emitError("unable to compute modified indexing_maps");

    // Compute the iterator types of the modified op by dropping the one-trip
    // count loops.
    SmallVector<Attribute, 4> newIteratorTypes;
    for (auto attr : llvm::enumerate(iteratorTypes)) {
      if (!unitDims.count(attr.index()))
        newIteratorTypes.push_back(attr.value());
    }

    rewriter.startRootUpdate(op);
    op.indexing_mapsAttr(newIndexingMapAttr);
    op.iterator_typesAttr(ArrayAttr::get(context, newIteratorTypes));
    (void)replaceBlockArgForUnitDimLoops(op, unitDims, rewriter);
    replaceUnitDimIndexOps(op, unitDims, rewriter);
    rewriter.finalizeRootUpdate(op);
    return success();
  }
};

struct UnitExtentReplacementInfo {
  RankedTensorType type;
  AffineMap indexMap;
  ArrayAttr reassociation;
};
} // namespace

/// Utility function for replacing operands/results of a linalg generic
/// operation on tensors with unit-extent dimensions. These can be replaced
/// with an operand/result that has the unit-extent dimension removed. This is
/// only done if the indexing map expression used to access that dimension is
/// an AffineConstantExpr of value 0. Given the `type` of a result/operand of a
/// Linalg op, and its `indexMap`, the utility function returns:
/// - the new type with dimensions of size 1 removed;
/// - the modified index map that can be used to access the replaced
///   result/operand;
/// - the reassociation that converts from the original tensor type to the
///   modified tensor type.
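///
/// For example (an illustrative case): for an operand of type
/// tensor<1x5x1xf32> accessed through affine_map<(d0, d1) -> (0, d1, 0)>, this
/// returns the type tensor<5xf32>, the index map affine_map<(d0, d1) -> (d1)>,
/// and a reassociation that collapses all three original dimensions into one.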
static UnitExtentReplacementInfo replaceUnitExtents(AffineMap indexMap,
                                                    RankedTensorType type,
                                                    MLIRContext *context) {
  ArrayRef<int64_t> shape = type.getShape();
  ArrayRef<AffineExpr> exprs = indexMap.getResults();
  SmallVector<AffineExpr, 2> reassociations;
  SmallVector<Attribute, 4> reassociationMaps;
  SmallVector<AffineExpr, 4> newIndexExprs;
  SmallVector<int64_t, 4> newShape;

  int64_t origRank = type.getRank();
  AffineExpr zeroExpr = getAffineConstantExpr(0, context);
  auto isUnitExtent = [&](int64_t dim) -> bool {
    return shape[dim] == 1 && exprs[dim] == zeroExpr;
  };

  unsigned dim = 0;
  // Fold dimensions that are unit-extent at the beginning of the tensor.
  while (dim < origRank && isUnitExtent(dim))
    reassociations.push_back(getAffineDimExpr(dim++, context));
  while (dim < origRank) {
    reassociations.push_back(getAffineDimExpr(dim, context));
    newIndexExprs.push_back(exprs[dim]);
    newShape.push_back(shape[dim]);
    // Fold all following dimensions that are unit-extent.
    while (dim + 1 < origRank && isUnitExtent(dim + 1)) {
      ++dim;
      reassociations.push_back(getAffineDimExpr(dim, context));
    }
    reassociationMaps.push_back(AffineMapAttr::get(AffineMap::get(
        origRank, /*symbolCount = */ 0, reassociations, context)));
    reassociations.clear();
    ++dim;
  }
  UnitExtentReplacementInfo info = {
      RankedTensorType::get(newShape, type.getElementType()),
      AffineMap::get(indexMap.getNumDims(), indexMap.getNumSymbols(),
                     newIndexExprs, context),
      ArrayAttr::get(context, reassociationMaps)};
  return info;
}

namespace {

SmallVector<ReassociationExprs, 2>
convertAffineMapArrayToExprs(ArrayAttr affineMapArrayAttr) {
  SmallVector<ReassociationExprs, 2> reassociationExprs;
  for (auto attr : affineMapArrayAttr)
    reassociationExprs.push_back(
        llvm::to_vector<4>(attr.cast<AffineMapAttr>().getValue().getResults()));
  return reassociationExprs;
}

/// Pattern to replace tensor operands/results that have unit-extent dimensions.
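/// As a sketch: an operand of type tensor<1x5xf32> accessed through
/// affine_map<(d0, d1) -> (0, d1)> is replaced by a linalg.tensor_reshape of it
/// to tensor<5xf32>, the generic op is rebuilt on the reshaped operands, and
/// reshapes back to the original result types are inserted after it.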
template <typename GenericOpTy>
struct ReplaceUnitExtentTensors : public OpRewritePattern<GenericOpTy> {
  using OpRewritePattern<GenericOpTy>::OpRewritePattern;
  LogicalResult matchAndRewrite(GenericOpTy op,
                                PatternRewriter &rewriter) const override {
    if (!op.hasTensorSemantics())
      return failure();

    MLIRContext *context = rewriter.getContext();
    Location loc = op.getLoc();

    SmallVector<AffineMap, 4> newIndexingMaps;
    SmallVector<ArrayAttr, 4> reassociationMaps;
    SmallVector<ShapedType, 4> newInputOutputTypes;
    bool doCanonicalization = false;
    for (auto it :
         llvm::zip(op.getIndexingMaps(), op.getShapedOperandTypes())) {
      auto replacementInfo = replaceUnitExtents(
          std::get<0>(it), std::get<1>(it).template cast<RankedTensorType>(),
          context);
      reassociationMaps.push_back(replacementInfo.reassociation);
      newIndexingMaps.push_back(replacementInfo.indexMap);
      newInputOutputTypes.push_back(replacementInfo.type);
      doCanonicalization |= replacementInfo.type != std::get<1>(it);
    }

    // If the indexing maps of the result operation are not invertible (i.e. not
    // legal), abort.
    if (!doCanonicalization ||
        !inversePermutation(concatAffineMaps(newIndexingMaps)))
      return failure();

    // If any operand type changes, insert a reshape to convert from the
    // original type to the new type.
    // TODO: get rid of flattenedIdx which assumes operand order and contiguity.
    unsigned flattenedIdx = 0;
    auto insertReshapes = [&](ValueRange values) {
      SmallVector<Value, 4> res;
      res.reserve(values.size());
      for (auto operand : llvm::enumerate(values)) {
        if (operand.value().getType() == newInputOutputTypes[flattenedIdx])
          res.push_back(operand.value());
        else
          res.push_back(rewriter.create<linalg::TensorReshapeOp>(
              loc, newInputOutputTypes[flattenedIdx], operand.value(),
              convertAffineMapArrayToExprs(reassociationMaps[flattenedIdx])));
        ++flattenedIdx;
      }
      return res;
    };

    SmallVector<Value, 4> newInputs = insertReshapes(op.inputs());
    SmallVector<Value, 4> newOutputs = insertReshapes(op.outputs());

    // Compute the result types of the replacement op from the (potentially
    // rank-reduced) types computed above.
    SmallVector<Type, 4> resultTypes;
    resultTypes.reserve(op.getNumResults());
    for (unsigned i : llvm::seq<unsigned>(0, op.getNumResults()))
      resultTypes.push_back(newInputOutputTypes[i + op.getNumInputs()]);
    GenericOpTy replacementOp = rewriter.create<GenericOpTy>(
        loc, resultTypes, newInputs, newOutputs, newIndexingMaps,
        llvm::to_vector<4>(
            op.iterator_types().template getAsValueRange<StringAttr>()));
    rewriter.inlineRegionBefore(op.region(), replacementOp.region(),
                                replacementOp.region().begin());

    // If any result tensor has a modified shape, then add reshape to recover
    // the original shape.
    SmallVector<Value, 4> resultReplacements;
    for (auto result : llvm::enumerate(replacementOp.getResults())) {
      unsigned index = result.index() + replacementOp.getNumInputs();
      RankedTensorType origResultType = op.getResult(result.index())
                                            .getType()
                                            .template cast<RankedTensorType>();
      if (origResultType != result.value().getType())
        resultReplacements.push_back(rewriter.create<linalg::TensorReshapeOp>(
            loc, origResultType, result.value(),
            convertAffineMapArrayToExprs(reassociationMaps[index])));
      else
        resultReplacements.push_back(result.value());
    }
    rewriter.replaceOp(op, resultReplacements);
    return success();
  }
};
} // namespace

/// Get the reassociation maps to fold the result of a subtensor (or the source
/// of a subtensor_insert) operation with the given offsets and sizes to its
/// rank-reduced version. This is only done for the cases where the size is 1
/// and the offset is 0. Strictly speaking, the zero offset is not required in
/// general, but non-zero offsets are not handled by the SPIR-V backend at this
/// point (and potentially cannot be handled).
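/// For example, sizes [1, 256, 1, 64] produce the reassociation
/// [[0, 1], [2, 3]]: each unit size is folded into the following (or, for
/// trailing unit sizes, the preceding) non-unit dimension.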
static Optional<SmallVector<ReassociationIndices>>
getReassociationMapForFoldingUnitDims(ArrayRef<OpFoldResult> mixedSizes) {
  SmallVector<ReassociationIndices> reassociation;
  ReassociationIndices curr;
  for (auto it : llvm::enumerate(mixedSizes)) {
    auto dim = it.index();
    auto size = it.value();
    curr.push_back(dim);
    auto attr = size.dyn_cast<Attribute>();
    if (attr && attr.cast<IntegerAttr>().getInt() == 1)
      continue;
    reassociation.emplace_back(ReassociationIndices{});
    std::swap(reassociation.back(), curr);
  }
  // If every size is unit, no group exists to absorb `curr`; bail out.
  if (!curr.empty() && reassociation.empty())
    return llvm::None;
  if (!curr.empty())
    reassociation.back().append(curr.begin(), curr.end());
  return reassociation;
}

namespace {
/// Convert `subtensor` operations to rank-reduced versions.
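/// As a sketch (types chosen for illustration): a subtensor yielding
/// tensor<1x16x1xf32> is rewritten into a subtensor yielding the rank-reduced
/// tensor<16xf32>, followed by a linalg.tensor_reshape that expands the result
/// back to tensor<1x16x1xf32>.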
struct UseRankReducedSubTensorOp : public OpRewritePattern<SubTensorOp> {
  using OpRewritePattern<SubTensorOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubTensorOp subTensorOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType resultType = subTensorOp.getType();
    SmallVector<OpFoldResult> offsets = subTensorOp.getMixedOffsets();
    SmallVector<OpFoldResult> sizes = subTensorOp.getMixedSizes();
    SmallVector<OpFoldResult> strides = subTensorOp.getMixedStrides();
    auto reassociation = getReassociationMapForFoldingUnitDims(sizes);
    if (!reassociation ||
        reassociation->size() == static_cast<size_t>(resultType.getRank()))
      return failure();
    auto rankReducedType =
        SubTensorOp::inferRankReducedResultType(reassociation->size(),
                                                subTensorOp.getSourceType(),
                                                offsets, sizes, strides)
            .cast<RankedTensorType>();

    Location loc = subTensorOp.getLoc();
    Value newSubTensor = rewriter.create<SubTensorOp>(
        loc, rankReducedType, subTensorOp.source(), offsets, sizes, strides);
    rewriter.replaceOpWithNewOp<TensorReshapeOp>(subTensorOp, resultType,
                                                 newSubTensor, *reassociation);
    return success();
  }
};

/// Convert `subtensor_insert` operations to rank-reduced versions.
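/// As a sketch: inserting a tensor<1x16x1xf32> source is rewritten into a
/// linalg.tensor_reshape collapsing the source to tensor<16xf32>, followed by a
/// subtensor_insert of the collapsed source using the original offsets, sizes,
/// and strides.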
struct UseRankReducedSubTensorInsertOp
    : public OpRewritePattern<SubTensorInsertOp> {
  using OpRewritePattern<SubTensorInsertOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubTensorInsertOp insertOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType sourceType = insertOp.getSourceType();
    SmallVector<OpFoldResult> offsets = insertOp.getMixedOffsets();
    SmallVector<OpFoldResult> sizes = insertOp.getMixedSizes();
    SmallVector<OpFoldResult> strides = insertOp.getMixedStrides();
    auto reassociation = getReassociationMapForFoldingUnitDims(sizes);
    if (!reassociation ||
        reassociation->size() == static_cast<size_t>(sourceType.getRank()))
      return failure();
    Location loc = insertOp.getLoc();
    auto reshapedSource = rewriter.create<TensorReshapeOp>(
        loc, insertOp.source(), *reassociation);
    rewriter.replaceOpWithNewOp<SubTensorInsertOp>(
        insertOp, reshapedSource, insertOp.dest(), insertOp.getMixedOffsets(),
        insertOp.getMixedSizes(), insertOp.getMixedStrides());
    return success();
  }
};
} // namespace

/// Patterns that are used to canonicalize the use of unit-extent dims for
/// broadcasting.
void mlir::linalg::populateFoldUnitExtentDimsPatterns(
    RewritePatternSet &patterns) {
  auto *context = patterns.getContext();
  patterns.add<FoldUnitDimLoops<GenericOp>, FoldUnitDimLoops<IndexedGenericOp>,
               ReplaceUnitExtentTensors<GenericOp>,
               ReplaceUnitExtentTensors<IndexedGenericOp>,
               UseRankReducedSubTensorOp, UseRankReducedSubTensorInsertOp>(
      context);
  TensorReshapeOp::getCanonicalizationPatterns(patterns, context);
}

namespace {
/// Pass that removes unit-extent dims within generic ops.
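/// The pass and its options are declared in the Linalg passes tablegen; it is
/// typically exercised from `mlir-opt` as `-linalg-fold-unit-extent-dims`,
/// with the option backing `foldOneTripLoopsOnly` restricting it to the
/// FoldUnitDimLoops patterns (exact names come from that tablegen definition).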
struct LinalgFoldUnitExtentDimsPass
    : public LinalgFoldUnitExtentDimsBase<LinalgFoldUnitExtentDimsPass> {
  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    MLIRContext *context = funcOp.getContext();
    RewritePatternSet patterns(context);
    if (foldOneTripLoopsOnly)
      patterns
          .add<FoldUnitDimLoops<GenericOp>, FoldUnitDimLoops<IndexedGenericOp>>(
              context);
    else
      populateFoldUnitExtentDimsPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(funcOp.getBody(), std::move(patterns));
  }
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgFoldUnitExtentDimsPass() {
  return std::make_unique<LinalgFoldUnitExtentDimsPass>();
}