//===- DropUnitDims.cpp - Pass to drop use of unit-extent for broadcasting ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements patterns and a pass that remove the use of unit-extent
// dimensions for specifying broadcasting, in favor of a more canonical
// representation of the computation.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "linalg-drop-unit-dims"

using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;

/// Implements a pass that canonicalizes the uses of unit-extent dimensions for
/// broadcasting. For example,
///
/// ```mlir
/// #accesses = [
///   affine_map<(d0, d1) -> (0, d1)>,
///   affine_map<(d0, d1) -> (d0, 0)>,
///   affine_map<(d0, d1) -> (d0, d1)>
/// ]
///
/// #trait = {
///   args_in = 2,
///   args_out = 1,
///   indexing_maps = #accesses,
///   iterator_types = ["parallel", "parallel"],
///   library_call = "some_external_fn"
/// }
///
/// func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) ->
///     tensor<5x5xf32>
/// {
///   %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>] :
///        tensor<5xf32> into tensor<1x5xf32>
///   %1 = linalg.tensor_reshape %arg1 [affine_map<(d0, d1) -> (d0, d1)>] :
///        tensor<5xf32> into tensor<5x1xf32>
///   %2 = linalg.generic #trait %0, %1 {
///     ^bb0(%arg2: f32, %arg3: f32):
///       %3 = addf %arg2, %arg3 : f32
///       linalg.yield %3 : f32
///   } : tensor<1x5xf32>, tensor<5x1xf32> -> tensor<5x5xf32>
///   return %2 : tensor<5x5xf32>
/// }
/// ```
///
/// would canonicalize to
///
/// ```mlir
/// #accesses = [
///   affine_map<(d0, d1) -> (d1)>,
///   affine_map<(d0, d1) -> (d0)>,
///   affine_map<(d0, d1) -> (d0, d1)>
/// ]
///
/// #trait = {
///   args_in = 2,
///   args_out = 1,
///   indexing_maps = #accesses,
///   iterator_types = ["parallel", "parallel"],
///   library_call = "some_external_fn"
/// }
///
/// func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) ->
///     tensor<5x5xf32>
/// {
///   %0 = linalg.generic #trait %arg0, %arg1 {
///     ^bb0(%arg2: f32, %arg3: f32):
///       %3 = addf %arg2, %arg3 : f32
///       linalg.yield %3 : f32
///   } : tensor<5xf32>, tensor<5xf32> -> tensor<5x5xf32>
///   return %0 : tensor<5x5xf32>
/// }
/// ```

/// Given the dims of the iteration space of a structured op that are known to
/// have a single trip count (`unitDims`), return the indexing maps to use in
/// the canonicalized op with these dims removed, given the original
/// `indexingMaps`.
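/// For example (an illustrative sketch, assuming `unitDims = {1}` in a
/// three-dimensional iteration space), the map
///
///   affine_map<(d0, d1, d2) -> (d0, d1, d2)>
///
/// becomes
///
///   affine_map<(d0, d1) -> (d0, 0, d1)>
///
/// i.e. the unit dim d1 is replaced by the constant 0 and the remaining dims
/// are renumbered.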
static ArrayAttr replaceUnitDims(DenseSet<unsigned> &unitDims,
                                 ArrayRef<AffineMap> indexingMaps,
                                 MLIRContext *context) {
  if (indexingMaps.empty())
    return nullptr;
  unsigned numIterationDims = indexingMaps.front().getNumDims();
  unsigned numSymbols = indexingMaps.front().getNumSymbols();

  // Compute the replacement for each dim expr.
  SmallVector<AffineExpr, 4> dimReplacements;
  dimReplacements.reserve(numIterationDims);
  unsigned numKeptDims = 0;
  for (unsigned dim : llvm::seq<unsigned>(0, numIterationDims)) {
    if (unitDims.count(dim))
      dimReplacements.push_back(getAffineConstantExpr(0, context));
    else
      dimReplacements.push_back(getAffineDimExpr(numKeptDims++, context));
  }

  // Symbols remain the same.
  SmallVector<AffineExpr, 4> symReplacements;
  symReplacements.reserve(numSymbols);
  for (unsigned symbol : llvm::seq<unsigned>(0, numSymbols))
    symReplacements.push_back(getAffineSymbolExpr(symbol, context));

  SmallVector<AffineMap, 4> newIndexingMaps;
  newIndexingMaps.reserve(indexingMaps.size());
  for (AffineMap operandMap : indexingMaps) {
    // The indexing maps are expected to have no symbols.
    if (operandMap.getNumSymbols())
      return nullptr;
    newIndexingMaps.push_back(simplifyAffineMap(
        operandMap.replaceDimsAndSymbols(dimReplacements, symReplacements,
                                         numIterationDims - unitDims.size(),
                                         numSymbols)));
  }

  // Check that the new index maps are invertible. If not, something went
  // wrong, so abort.
  if (!inversePermutation(concatAffineMaps(newIndexingMaps)))
    return nullptr;
  return ArrayAttr::get(context,
                        llvm::to_vector<4>(llvm::map_range(
                            newIndexingMaps, [](AffineMap map) -> Attribute {
                              return AffineMapAttr::get(map);
                            })));
}

/// Modify the region of an indexed generic op to drop arguments corresponding
/// to loops that have a unit trip count.
template <typename OpTy>
static LogicalResult
replaceBlockArgForUnitDimLoops(OpTy op, const DenseSet<unsigned> &unitDims,
                               PatternRewriter &rewriter) {
  return success();
}

template <>
LogicalResult replaceBlockArgForUnitDimLoops<IndexedGenericOp>(
    IndexedGenericOp op, const DenseSet<unsigned> &unitDims,
    PatternRewriter &rewriter) {
  OpBuilder::InsertionGuard guard(rewriter);
  Block *entryBlock = &op->getRegion(0).front();
  rewriter.setInsertionPointToStart(entryBlock);
  Value zero = rewriter.create<ConstantIndexOp>(op.getLoc(), 0);
  for (unsigned unitDimLoop : unitDims) {
    entryBlock->getArgument(unitDimLoop).replaceAllUsesWith(zero);
  }
  SmallVector<unsigned, 8> unitDimsToErase(unitDims.begin(), unitDims.end());
  entryBlock->eraseArguments(unitDimsToErase);
  return success();
}

namespace {
/// Pattern to fold unit-trip count loops in GenericOps.
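/// For example (an illustrative sketch), a generic op over iteration space
/// (d0, d1) whose operands are all of type tensor<4x1xf32> accessed via
///
///   indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
///                    affine_map<(d0, d1) -> (d0, d1)>]
///   iterator_types = ["parallel", "parallel"]
///
/// has a unit trip count along d1 and would be rewritten in place to use a
/// single loop:
///
///   indexing_maps = [affine_map<(d0) -> (d0, 0)>,
///                    affine_map<(d0) -> (d0, 0)>]
///   iterator_types = ["parallel"]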
template <typename GenericOpTy>
struct FoldUnitDimLoops : public OpRewritePattern<GenericOpTy> {
  using OpRewritePattern<GenericOpTy>::OpRewritePattern;
  LogicalResult matchAndRewrite(GenericOpTy op,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap, 4> indexingMaps = op.getIndexingMaps();
    if (indexingMaps.empty())
      return failure();

    // Check if any of the iteration dimensions are unit-trip count. They will
    // end up being unit-trip count if they are used to index into a unit-dim
    // tensor/memref.
    AffineMap invertedMap = inversePermutation(concatAffineMaps(indexingMaps));
    if (!invertedMap)
      return failure();
    SmallVector<int64_t, 4> dims;
    for (ShapedType shapedType : op.getShapedOperandTypes())
      dims.append(shapedType.getShape().begin(), shapedType.getShape().end());

    // Find all the reduction iterators. Those need some special consideration
    // (see below).
    auto getLoopDimsOfType =
        [&](StringRef iteratorTypeName) -> SmallVector<unsigned, 4> {
      SmallVector<AffineExpr> dimExprs;
      getDimsOfType(op, iteratorTypeName, dimExprs);
      return llvm::to_vector<4>(llvm::map_range(dimExprs, [](AffineExpr expr) {
        return expr.cast<AffineDimExpr>().getPosition();
      }));
    };
    auto reductionDims = getLoopDimsOfType(getReductionIteratorTypeName());

    DenseSet<unsigned> unitDims;
    SmallVector<unsigned, 4> unitDimsReductionLoops;
    ArrayAttr iteratorTypes = op.iterator_types();
    for (auto expr : enumerate(invertedMap.getResults())) {
      if (AffineDimExpr dimExpr = expr.value().dyn_cast<AffineDimExpr>())
        if (dims[dimExpr.getPosition()] == 1) {
          if (isParallelIterator(iteratorTypes[expr.index()]))
            unitDims.insert(expr.index());
          else if (isReductionIterator(iteratorTypes[expr.index()]))
            unitDimsReductionLoops.push_back(expr.index());
        }
    }

    // Reduction loops can be dropped if there is at least one other reduction
    // loop that is not dropped. This accounts for the initial value read in
    // the reduction loop.
    if (!unitDimsReductionLoops.empty() && reductionDims.size() > 1) {
      if (unitDimsReductionLoops.size() == reductionDims.size())
        unitDims.insert(reductionDims.begin(), std::prev(reductionDims.end()));
      else
        unitDims.insert(unitDimsReductionLoops.begin(),
                        unitDimsReductionLoops.end());
    }

    if (unitDims.empty())
      return failure();

    // Compute the modified indexing maps.
    MLIRContext *context = rewriter.getContext();
    ArrayAttr newIndexingMapAttr =
        replaceUnitDims(unitDims, indexingMaps, context);
    if (!newIndexingMapAttr)
      return op.emitError("unable to compute modified indexing_maps");

    // Compute the iterator types of the modified op by dropping the one-trip
    // count loops.
    SmallVector<Attribute, 4> newIteratorTypes;
    for (auto attr : llvm::enumerate(iteratorTypes)) {
      if (!unitDims.count(attr.index()))
        newIteratorTypes.push_back(attr.value());
    }

    rewriter.startRootUpdate(op);
    op.indexing_mapsAttr(newIndexingMapAttr);
    op.iterator_typesAttr(ArrayAttr::get(context, newIteratorTypes));
    (void)replaceBlockArgForUnitDimLoops(op, unitDims, rewriter);
    rewriter.finalizeRootUpdate(op);
    return success();
  }
};

struct UnitExtentReplacementInfo {
  RankedTensorType type;
  AffineMap indexMap;
  ArrayAttr reassociation;
};
} // namespace

/// Utility function for replacing operands/results to a linalg generic
/// operation on tensors with unit-extent dimensions. These can be replaced
/// with an operand/result with the unit-extent dimension removed. This is only
/// done if the indexing map used to access that dimension has an
/// AffineConstantExpr of value 0. Given the `type` of a result/operand of a
/// Linalg op, and its `indexMap`, the utility function returns:
/// - the new type with dimensions of size 1 removed.
/// - modified index map that can be used to access the replaced result/operand
/// - the reassociation that converts from the original tensor type to the
///   modified tensor type.
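/// For example (an illustrative sketch), given the type tensor<1x5x1xf32> and
/// the index map affine_map<(d0, d1) -> (0, d1, 0)>, the utility returns:
/// - the type tensor<5xf32>,
/// - the index map affine_map<(d0, d1) -> (d1)>,
/// - the reassociation [affine_map<(d0, d1, d2) -> (d0, d1, d2)>], i.e. all
///   three original dimensions collapsed into one.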
static UnitExtentReplacementInfo replaceUnitExtents(AffineMap indexMap,
                                                    RankedTensorType type,
                                                    MLIRContext *context) {
  ArrayRef<int64_t> shape = type.getShape();
  ArrayRef<AffineExpr> exprs = indexMap.getResults();
  SmallVector<AffineExpr, 2> reassociations;
  SmallVector<Attribute, 4> reassociationMaps;
  SmallVector<AffineExpr, 4> newIndexExprs;
  SmallVector<int64_t, 4> newShape;

  int64_t origRank = type.getRank();
  AffineExpr zeroExpr = getAffineConstantExpr(0, context);
  auto isUnitExtent = [&](int64_t dim) -> bool {
    return shape[dim] == 1 && exprs[dim] == zeroExpr;
  };

  unsigned dim = 0;
  // Fold dimensions that are unit-extent at the beginning of the tensor.
  while (dim < origRank && isUnitExtent(dim))
    reassociations.push_back(getAffineDimExpr(dim++, context));
  while (dim < origRank) {
    reassociations.push_back(getAffineDimExpr(dim, context));
    newIndexExprs.push_back(exprs[dim]);
    newShape.push_back(shape[dim]);
    // Fold all following dimensions that are unit-extent.
    while (dim + 1 < origRank && isUnitExtent(dim + 1)) {
      ++dim;
      reassociations.push_back(getAffineDimExpr(dim, context));
    }
    reassociationMaps.push_back(AffineMapAttr::get(
        AffineMap::get(origRank, /*numSymbols=*/0, reassociations, context)));
    reassociations.clear();
    ++dim;
  }
  UnitExtentReplacementInfo info = {
      RankedTensorType::get(newShape, type.getElementType()),
      AffineMap::get(indexMap.getNumDims(), indexMap.getNumSymbols(),
                     newIndexExprs, context),
      ArrayAttr::get(context, reassociationMaps)};
  return info;
}

namespace {

/// Pattern to replace tensor operands/results of a generic op that have
/// unit-extent dimensions.
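/// For example (an illustrative sketch), an operand of type tensor<1x5xf32>
/// accessed via affine_map<(d0, d1) -> (0, d1)> is replaced by a
/// tensor<5xf32> operand accessed via affine_map<(d0, d1) -> (d1)>, with
/// linalg.tensor_reshape ops inserted to convert between the original and the
/// reduced types where needed.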
template <typename GenericOpTy>
struct ReplaceUnitExtentTensors : public OpRewritePattern<GenericOpTy> {
  using OpRewritePattern<GenericOpTy>::OpRewritePattern;
  LogicalResult matchAndRewrite(GenericOpTy op,
                                PatternRewriter &rewriter) const override {
    if (!op.hasTensorSemantics())
      return failure();

    MLIRContext *context = rewriter.getContext();
    Location loc = op.getLoc();

    SmallVector<AffineMap, 4> newIndexingMaps;
    SmallVector<ArrayAttr, 4> reassociationMaps;
    SmallVector<ShapedType, 4> newInputOutputTypes;
    bool doCanonicalization = false;
    for (auto it :
         llvm::zip(op.getIndexingMaps(), op.getShapedOperandTypes())) {
      auto replacementInfo = replaceUnitExtents(
          std::get<0>(it), std::get<1>(it).template cast<RankedTensorType>(),
          context);
      reassociationMaps.push_back(replacementInfo.reassociation);
      newIndexingMaps.push_back(replacementInfo.indexMap);
      newInputOutputTypes.push_back(replacementInfo.type);
      doCanonicalization |= replacementInfo.type != std::get<1>(it);
    }

    // If the indexing maps of the result operation are not invertible (i.e.
    // not legal), abort.
    if (!doCanonicalization ||
        !inversePermutation(concatAffineMaps(newIndexingMaps)))
      return failure();

    // If any operand type changes, insert a reshape to convert from the
    // original type to the new type.
    // TODO: get rid of flattenedIdx which assumes operand order and
    // contiguity.
    unsigned flattenedIdx = 0;
    auto insertReshapes = [&](ValueRange values) {
      SmallVector<Value, 4> res;
      res.reserve(values.size());
      for (auto operand : llvm::enumerate(values)) {
        if (operand.value().getType() == newInputOutputTypes[flattenedIdx])
          res.push_back(operand.value());
        else
          res.push_back(rewriter.create<linalg::TensorReshapeOp>(
              loc, newInputOutputTypes[flattenedIdx], operand.value(),
              reassociationMaps[flattenedIdx]));
        ++flattenedIdx;
      }
      return res;
    };

    SmallVector<Value, 4> newInputs = insertReshapes(op.inputs());
    SmallVector<Value, 4> newOutputs = insertReshapes(op.outputs());

    // If any result type changes, insert a reshape to convert from the
    // original type to the new type.
    SmallVector<Type, 4> resultTypes;
    resultTypes.reserve(op.getNumResults());
    for (unsigned i : llvm::seq<unsigned>(0, op.getNumResults()))
      resultTypes.push_back(newInputOutputTypes[i + op.getNumInputs()]);
    GenericOpTy replacementOp = rewriter.create<GenericOpTy>(
        loc, resultTypes, newInputs, newOutputs, newIndexingMaps,
        llvm::to_vector<4>(
            op.iterator_types().template getAsValueRange<StringAttr>()));
    rewriter.inlineRegionBefore(op.region(), replacementOp.region(),
                                replacementOp.region().begin());

    // If any result tensor has a modified shape, add a reshape to recover the
    // original shape.
    SmallVector<Value, 4> resultReplacements;
    for (auto result : llvm::enumerate(replacementOp.getResults())) {
      unsigned index = result.index() + replacementOp.getNumInputs();
      RankedTensorType origResultType = op.getResult(result.index())
                                            .getType()
                                            .template cast<RankedTensorType>();
      if (origResultType != result.value().getType())
        resultReplacements.push_back(rewriter.create<linalg::TensorReshapeOp>(
            loc, origResultType, result.value(), reassociationMaps[index]));
      else
        resultReplacements.push_back(result.value());
    }
    rewriter.replaceOp(op, resultReplacements);
    return success();
  }
};

/// Pattern to fold a pair of reshape ops where the intermediate type has
/// unit-extent dimensions. For example:
///
/// %0 = linalg.tensor_reshape %arg0
///        [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
///      : tensor<2048xf32> into tensor<1x4x1x512xf32>
/// %1 = linalg.tensor_reshape %0
///        [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2, d3) -> (d3)>]
///      : tensor<1x4x1x512xf32> into tensor<4x512xf32>
///
/// can be replaced with
///
/// %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>]
///      : tensor<2048xf32> into tensor<4x512xf32>
///
/// Similarly,
///
/// %0 = linalg.tensor_reshape %arg0
///        [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>,
///         affine_map<(d0, d1, d2, d3) -> (d3)>]
///      : tensor<4x512xf32> into tensor<1x4x1x512xf32>
/// %1 = linalg.tensor_reshape %0
///        [affine_map<(d0, d1, d2, d3) -> (d0, d1, d2, d3)>]
///      : tensor<1x4x1x512xf32> into tensor<2048xf32>
///
/// can be replaced with
///
/// %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>]
///      : tensor<4x512xf32> into tensor<2048xf32>
struct FoldReshapeOpWithUnitExtent : OpRewritePattern<TensorReshapeOp> {
  using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                PatternRewriter &rewriter) const override {
    // Check that the source operand is created from a reshape as well.
    TensorReshapeOp parentReshapeOp =
        reshapeOp.src().getDefiningOp<TensorReshapeOp>();
    if (!parentReshapeOp)
      return failure();

    RankedTensorType srcType = reshapeOp.getSrcType(),
                     dstType = reshapeOp.getResultType(),
                     parentSrcType = parentReshapeOp.getSrcType();
    if (!srcType.hasStaticShape() || !dstType.hasStaticShape() ||
        !parentSrcType.hasStaticShape() ||
        srcType.getRank() < dstType.getRank() ||
        parentSrcType.getRank() == dstType.getRank())
      return failure();

    // Check whether the resulting tensor_reshape is folding or expanding once
    // reshapeOp and parentReshapeOp are combined. If the combined
    // tensor_reshape is folding, parentReshapeOp is introducing unit-dims and
    // reshapeOp does the actual reshape. If the combined tensor_reshape is
    // expanding, reshapeOp is introducing unit-dims and parentReshapeOp does
    // the actual reshape.
    bool isFoldingPattern = parentSrcType.getRank() > dstType.getRank();
    ArrayRef<int64_t> expandedShape =
        isFoldingPattern ? parentSrcType.getShape() : dstType.getShape();
    ArrayRef<int64_t> foldedShape =
        isFoldingPattern ? dstType.getShape() : parentSrcType.getShape();

    unsigned expandedDim = 0, foldedDim = 0;
    SmallVector<SmallVector<AffineExpr, 4>, 4> reassociationExprs(
        foldedShape.size());
    while (expandedDim < expandedShape.size() &&
           foldedDim < foldedShape.size()) {
      int64_t dstSize = foldedShape[foldedDim];
      int64_t srcSize = expandedShape[expandedDim];
      while (srcSize < dstSize && expandedDim < expandedShape.size()) {
        reassociationExprs[foldedDim].push_back(
            rewriter.getAffineDimExpr(expandedDim++));
        srcSize *= expandedShape[expandedDim];
      }
      if (srcSize == dstSize) {
        reassociationExprs[foldedDim].push_back(
            rewriter.getAffineDimExpr(expandedDim++));
        // If the next dim in foldedShape is not 1, treat subsequent dims in
        // expandedShape which are 1 to be collapsed.
        if (foldedDim == foldedShape.size() - 1 ||
            foldedShape[foldedDim + 1] != 1) {
          while (expandedDim < expandedShape.size() &&
                 expandedShape[expandedDim] == 1) {
            reassociationExprs[foldedDim].push_back(
                rewriter.getAffineDimExpr(expandedDim++));
          }
        }
      } else {
        return failure();
      }
      foldedDim++;
    }
    if (expandedDim != expandedShape.size())
      return failure();

    SmallVector<AffineMap, 4> reassociationMaps =
        llvm::to_vector<4>(llvm::map_range(
            reassociationExprs, [&](ArrayRef<AffineExpr> exprs) -> AffineMap {
              return AffineMap::get(expandedShape.size(), 0, exprs,
                                    rewriter.getContext());
            }));
    rewriter.replaceOpWithNewOp<TensorReshapeOp>(
        reshapeOp, dstType, parentReshapeOp.src(),
        rewriter.getAffineMapArrayAttr(reassociationMaps));
    return success();
  }
};

/// Pattern to fold subtensors that are just taking a slice of a
/// unit-dimension tensor. For example
///
/// %1 = subtensor %0[0, %o1, 0] [1, %s1, 1] [1, 1, 1]
///     : tensor<1x?x1xf32> to tensor<1x?x1xf32>
///
/// can be replaced with
///
/// %1 = linalg.tensor_reshape %0 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>]
///     : tensor<1x?x1xf32> into tensor<?xf32>
/// %2 = subtensor %1[%o1] [%s1] [1] : tensor<?xf32> to tensor<?xf32>
/// %3 = linalg.tensor_reshape %2 [affine_map<(d0, d1, d2) -> (d0, d1, d2)>]
///     : tensor<?xf32> into tensor<1x?x1xf32>
///
/// The additional tensor_reshapes will hopefully get canonicalized away with
/// other reshapes that drop unit dimensions. There are three conditions to
/// fold a dimension:
/// - The offset must be 0.
/// - The size must be 1.
/// - The dimension of the source type must be 1.
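/// For the example above (an illustrative note), all three source dimensions
/// fall into a single reassociation group, [[0, 1, 2]], which is exactly what
/// the single-map reassociation on the tensor_reshapes expresses.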
struct FoldUnitDimSubTensorOp : public OpRewritePattern<SubTensorOp> {
  using OpRewritePattern<SubTensorOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubTensorOp subTensorOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<OpFoldResult> mixedOffsets = subTensorOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = subTensorOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = subTensorOp.getMixedStrides();
    auto hasValue = [](OpFoldResult valueOrAttr, int64_t val) {
      auto attr = valueOrAttr.dyn_cast<Attribute>();
      return attr && attr.cast<IntegerAttr>().getInt() == val;
    };

    if (llvm::any_of(mixedStrides, [&](OpFoldResult valueOrAttr) {
          return !hasValue(valueOrAttr, 1);
        }))
      return failure();

    // Find the expanded unit dimensions.
    SmallVector<ReassociationIndices> reassociation;
    SmallVector<OpFoldResult> newOffsets, newSizes;
    ArrayRef<int64_t> sourceShape = subTensorOp.getSourceType().getShape();
    ReassociationIndices curr;
    for (int64_t dim : llvm::seq<int64_t>(0, mixedOffsets.size())) {
      curr.push_back(dim);
      if (sourceShape[dim] == 1 && hasValue(mixedOffsets[dim], 0) &&
          hasValue(mixedSizes[dim], 1)) {
        continue;
      }
      newOffsets.push_back(mixedOffsets[dim]);
      newSizes.push_back(mixedSizes[dim]);
      reassociation.emplace_back(ReassociationIndices{});
      std::swap(reassociation.back(), curr);
    }
    if (newOffsets.size() == mixedOffsets.size())
      return failure();
    reassociation.back().append(curr.begin(), curr.end());
    SmallVector<OpFoldResult> newStrides(newOffsets.size(),
                                         rewriter.getI64IntegerAttr(1));
    Location loc = subTensorOp->getLoc();
    auto srcReshape = rewriter.create<TensorReshapeOp>(
        loc, subTensorOp.source(), reassociation);
    auto newSubTensorOp = rewriter.create<SubTensorOp>(
        loc, srcReshape, newOffsets, newSizes, newStrides);
    rewriter.replaceOpWithNewOp<TensorReshapeOp>(
        subTensorOp, subTensorOp.getType(), newSubTensorOp, reassociation);
    return success();
  }
};

} // namespace

/// Patterns that are used to canonicalize the use of unit-extent dims for
/// broadcasting.
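/// A minimal usage sketch (illustrative; assumes a `FuncOp funcOp` and the
/// includes above):
///
///   RewritePatternSet patterns(funcOp.getContext());
///   populateFoldUnitExtentDimsPatterns(patterns);
///   (void)applyPatternsAndFoldGreedily(funcOp.getBody(),
///                                      std::move(patterns));
///
/// This mirrors what the LinalgFoldUnitExtentDimsPass below does.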
void mlir::linalg::populateFoldUnitExtentDimsPatterns(
    RewritePatternSet &patterns) {
  auto *context = patterns.getContext();
  patterns.add<FoldUnitDimLoops<GenericOp>, FoldUnitDimLoops<IndexedGenericOp>,
               FoldUnitDimSubTensorOp, ReplaceUnitExtentTensors<GenericOp>,
               ReplaceUnitExtentTensors<IndexedGenericOp>>(context);
  TensorReshapeOp::getCanonicalizationPatterns(patterns, context);
  patterns.add<FoldReshapeOpWithUnitExtent>(context);
}

namespace {
/// Pass that removes unit-extent dims within generic ops.
struct LinalgFoldUnitExtentDimsPass
    : public LinalgFoldUnitExtentDimsBase<LinalgFoldUnitExtentDimsPass> {
  void runOnFunction() override {
    FuncOp funcOp = getFunction();
    MLIRContext *context = funcOp.getContext();
    RewritePatternSet patterns(context);
    if (foldOneTripLoopsOnly)
      patterns
          .add<FoldUnitDimLoops<GenericOp>, FoldUnitDimLoops<IndexedGenericOp>>(
              context);
    else
      populateFoldUnitExtentDimsPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(funcOp.getBody(), std::move(patterns));
  }
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgFoldUnitExtentDimsPass() {
  return std::make_unique<LinalgFoldUnitExtentDimsPass>();
}
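// An illustrative invocation through the standard driver (assuming an
// mlir-opt build with the Linalg passes registered):
//
//   mlir-opt -linalg-fold-unit-extent-dims input.mlir
//
// The `foldOneTripLoopsOnly` option restricts the pass to the
// FoldUnitDimLoops patterns, leaving operand/result types unchanged.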