1 //===----------------------------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" 10 #include "mlir/Dialect/MemRef/IR/MemRef.h" 11 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h" 12 #include "mlir/Dialect/StandardOps/IR/Ops.h" 13 #include "mlir/Dialect/StandardOps/Utils/Utils.h" 14 #include "mlir/Dialect/Utils/StaticValueUtils.h" 15 #include "mlir/IR/AffineMap.h" 16 #include "mlir/IR/Builders.h" 17 #include "mlir/IR/BuiltinTypes.h" 18 #include "mlir/IR/Matchers.h" 19 #include "mlir/IR/PatternMatch.h" 20 #include "mlir/IR/TypeUtilities.h" 21 #include "mlir/Interfaces/InferTypeOpInterface.h" 22 #include "mlir/Interfaces/ViewLikeInterface.h" 23 #include "llvm/ADT/STLExtras.h" 24 25 using namespace mlir; 26 using namespace mlir::memref; 27 28 /// Materialize a single constant operation from a given attribute value with 29 /// the desired resultant type. 30 Operation *MemRefDialect::materializeConstant(OpBuilder &builder, 31 Attribute value, Type type, 32 Location loc) { 33 if (arith::ConstantOp::isBuildableWith(value, type)) 34 return builder.create<arith::ConstantOp>(loc, value, type); 35 if (ConstantOp::isBuildableWith(value, type)) 36 return builder.create<ConstantOp>(loc, value, type); 37 return nullptr; 38 } 39 40 //===----------------------------------------------------------------------===// 41 // Common canonicalization pattern support logic 42 //===----------------------------------------------------------------------===// 43 44 /// This is a common class used for patterns of the form 45 /// "someop(memrefcast) -> someop". It folds the source of any memref.cast 46 /// into the root operation directly. 47 LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) { 48 bool folded = false; 49 for (OpOperand &operand : op->getOpOperands()) { 50 auto cast = operand.get().getDefiningOp<CastOp>(); 51 if (cast && operand.get() != inner && 52 !cast.getOperand().getType().isa<UnrankedMemRefType>()) { 53 operand.set(cast.getOperand()); 54 folded = true; 55 } 56 } 57 return success(folded); 58 } 59 60 /// Return an unranked/ranked tensor type for the given unranked/ranked memref 61 /// type. 
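/// A rough illustration of the mapping (editor's sketch, not exhaustive):
///   memref<4x?xf32>  -> tensor<4x?xf32>
///   memref<*xi8>     -> tensor<*xi8>
///   any other type   -> none (NoneType)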
62 Type mlir::memref::getTensorTypeFromMemRefType(Type type) { 63 if (auto memref = type.dyn_cast<MemRefType>()) 64 return RankedTensorType::get(memref.getShape(), memref.getElementType()); 65 if (auto memref = type.dyn_cast<UnrankedMemRefType>()) 66 return UnrankedTensorType::get(memref.getElementType()); 67 return NoneType::get(type.getContext()); 68 } 69 70 //===----------------------------------------------------------------------===// 71 // AllocOp / AllocaOp 72 //===----------------------------------------------------------------------===// 73 74 template <typename AllocLikeOp> 75 static LogicalResult verifyAllocLikeOp(AllocLikeOp op) { 76 static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value, 77 "applies to only alloc or alloca"); 78 auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>(); 79 if (!memRefType) 80 return op.emitOpError("result must be a memref"); 81 82 if (static_cast<int64_t>(op.dynamicSizes().size()) != 83 memRefType.getNumDynamicDims()) 84 return op.emitOpError("dimension operand count does not equal memref " 85 "dynamic dimension count"); 86 87 unsigned numSymbols = 0; 88 if (!memRefType.getLayout().isIdentity()) 89 numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols(); 90 if (op.symbolOperands().size() != numSymbols) 91 return op.emitOpError("symbol operand count does not equal memref symbol " 92 "count: expected ") 93 << numSymbols << ", got " << op.symbolOperands().size(); 94 95 return success(); 96 } 97 98 static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); } 99 100 static LogicalResult verify(AllocaOp op) { 101 // An alloca op needs to have an ancestor with an allocation scope trait. 102 if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>()) 103 return op.emitOpError( 104 "requires an ancestor op with AutomaticAllocationScope trait"); 105 106 return verifyAllocLikeOp(op); 107 } 108 109 namespace { 110 /// Fold constant dimensions into an alloc like operation. 111 template <typename AllocLikeOp> 112 struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> { 113 using OpRewritePattern<AllocLikeOp>::OpRewritePattern; 114 115 LogicalResult matchAndRewrite(AllocLikeOp alloc, 116 PatternRewriter &rewriter) const override { 117 // Check to see if any dimensions operands are constants. If so, we can 118 // substitute and drop them. 119 if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) { 120 return matchPattern(operand, matchConstantIndex()); 121 })) 122 return failure(); 123 124 auto memrefType = alloc.getType(); 125 126 // Ok, we have one or more constant operands. Collect the non-constant ones 127 // and keep track of the resultant memref type to build. 128 SmallVector<int64_t, 4> newShapeConstants; 129 newShapeConstants.reserve(memrefType.getRank()); 130 SmallVector<Value, 4> dynamicSizes; 131 132 unsigned dynamicDimPos = 0; 133 for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) { 134 int64_t dimSize = memrefType.getDimSize(dim); 135 // If this is already static dimension, keep it. 136 if (dimSize != -1) { 137 newShapeConstants.push_back(dimSize); 138 continue; 139 } 140 auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos]; 141 auto *defOp = dynamicSize.getDefiningOp(); 142 if (auto constantIndexOp = 143 dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) { 144 // Dynamic shape dimension will be folded. 145 newShapeConstants.push_back(constantIndexOp.value()); 146 } else { 147 // Dynamic shape dimension not folded; copy dynamicSize from old memref. 
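        // (-1 is the ShapedType::kDynamicSize sentinel, so this dimension
        // remains dynamic in the rebuilt type and keeps its size operand;
        // editor's note.)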
148 newShapeConstants.push_back(-1); 149 dynamicSizes.push_back(dynamicSize); 150 } 151 dynamicDimPos++; 152 } 153 154 // Create new memref type (which will have fewer dynamic dimensions). 155 MemRefType newMemRefType = 156 MemRefType::Builder(memrefType).setShape(newShapeConstants); 157 assert(static_cast<int64_t>(dynamicSizes.size()) == 158 newMemRefType.getNumDynamicDims()); 159 160 // Create and insert the alloc op for the new memref. 161 auto newAlloc = rewriter.create<AllocLikeOp>( 162 alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(), 163 alloc.alignmentAttr()); 164 // Insert a cast so we have the same type as the old alloc. 165 auto resultCast = 166 rewriter.create<CastOp>(alloc.getLoc(), newAlloc, alloc.getType()); 167 168 rewriter.replaceOp(alloc, {resultCast}); 169 return success(); 170 } 171 }; 172 173 /// Fold alloc operations with no users or only store and dealloc uses. 174 template <typename T> 175 struct SimplifyDeadAlloc : public OpRewritePattern<T> { 176 using OpRewritePattern<T>::OpRewritePattern; 177 178 LogicalResult matchAndRewrite(T alloc, 179 PatternRewriter &rewriter) const override { 180 if (llvm::any_of(alloc->getUsers(), [&](Operation *op) { 181 if (auto storeOp = dyn_cast<StoreOp>(op)) 182 return storeOp.value() == alloc; 183 return !isa<DeallocOp>(op); 184 })) 185 return failure(); 186 187 for (Operation *user : llvm::make_early_inc_range(alloc->getUsers())) 188 rewriter.eraseOp(user); 189 190 rewriter.eraseOp(alloc); 191 return success(); 192 } 193 }; 194 } // end anonymous namespace. 195 196 void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results, 197 MLIRContext *context) { 198 results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context); 199 } 200 201 void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results, 202 MLIRContext *context) { 203 results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>( 204 context); 205 } 206 207 //===----------------------------------------------------------------------===// 208 // AllocaScopeOp 209 //===----------------------------------------------------------------------===// 210 211 static void print(OpAsmPrinter &p, AllocaScopeOp &op) { 212 bool printBlockTerminators = false; 213 214 p << " "; 215 if (!op.results().empty()) { 216 p << " -> (" << op.getResultTypes() << ")"; 217 printBlockTerminators = true; 218 } 219 p.printRegion(op.bodyRegion(), 220 /*printEntryBlockArgs=*/false, 221 /*printBlockTerminators=*/printBlockTerminators); 222 p.printOptionalAttrDict(op->getAttrs()); 223 } 224 225 static ParseResult parseAllocaScopeOp(OpAsmParser &parser, 226 OperationState &result) { 227 // Create a region for the body. 228 result.regions.reserve(1); 229 Region *bodyRegion = result.addRegion(); 230 231 // Parse optional results type list. 232 if (parser.parseOptionalArrowTypeList(result.types)) 233 return failure(); 234 235 // Parse the body region. 236 if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{})) 237 return failure(); 238 AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(), 239 result.location); 240 241 // Parse the optional attribute list. 
  if (parser.parseOptionalAttrDict(result.attributes))
    return failure();

  return success();
}

static LogicalResult verify(AllocaScopeOp op) {
  if (failed(RegionBranchOpInterface::verifyTypes(op)))
    return failure();

  return success();
}

void AllocaScopeOp::getSuccessorRegions(
    Optional<unsigned> index, ArrayRef<Attribute> operands,
    SmallVectorImpl<RegionSuccessor> &regions) {
  if (index.hasValue()) {
    regions.push_back(RegionSuccessor(getResults()));
    return;
  }

  regions.push_back(RegionSuccessor(&bodyRegion()));
}

//===----------------------------------------------------------------------===//
// AssumeAlignmentOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(AssumeAlignmentOp op) {
  unsigned alignment = op.alignment();
  if (!llvm::isPowerOf2_32(alignment))
    return op.emitOpError("alignment must be power of 2");
  return success();
}

//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//

/// Determines whether MemRef_CastOp casts to a more dynamic version of the
/// source memref. This is useful to fold a memref.cast into a consuming op
/// and implement canonicalization patterns for ops in different dialects that
/// may consume the results of memref.cast operations. Such foldable
/// memref.cast operations are typically inserted as `view` and `subview` ops
/// are canonicalized, to preserve the type compatibility of their uses.
///
/// Returns true when all conditions are met:
/// 1. source and result are ranked memrefs with strided semantics and the
///    same element type and rank.
/// 2. each of the source's size, offset and strides has at least as much
///    static information as the corresponding result's size, offset and
///    strides.
///
/// Example 1:
/// ```mlir
///   %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
///   %2 = consumer %1 ... : memref<?x?xf32> ...
/// ```
///
/// may fold into:
///
/// ```mlir
///   %2 = consumer %0 ... : memref<8x16xf32> ...
/// ```
///
/// Example 2:
/// ```
///   %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
///          to memref<?x?xf32>
///   consumer %1 : memref<?x?xf32> ...
/// ```
///
/// may fold into:
///
/// ```
///   consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
/// ```
bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
  MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
  MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();

  // Requires ranked MemRefType.
  if (!sourceType || !resultType)
    return false;

  // Requires same element type.
  if (sourceType.getElementType() != resultType.getElementType())
    return false;

  // Requires same rank.
  if (sourceType.getRank() != resultType.getRank())
    return false;

  // Only fold casts between strided memref forms.
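  // (Editor's illustrative note: a layout that is not in strided form, e.g. a
  // tiled map such as affine_map<(d0, d1) -> (d0 floordiv 32, d0 mod 32, d1)>,
  // makes getStridesAndOffset fail below, so such casts are conservatively not
  // folded.)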
335 int64_t sourceOffset, resultOffset; 336 SmallVector<int64_t, 4> sourceStrides, resultStrides; 337 if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) || 338 failed(getStridesAndOffset(resultType, resultStrides, resultOffset))) 339 return false; 340 341 // If cast is towards more static sizes along any dimension, don't fold. 342 for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) { 343 auto ss = std::get<0>(it), st = std::get<1>(it); 344 if (ss != st) 345 if (MemRefType::isDynamic(ss) && !MemRefType::isDynamic(st)) 346 return false; 347 } 348 349 // If cast is towards more static offset along any dimension, don't fold. 350 if (sourceOffset != resultOffset) 351 if (MemRefType::isDynamicStrideOrOffset(sourceOffset) && 352 !MemRefType::isDynamicStrideOrOffset(resultOffset)) 353 return false; 354 355 // If cast is towards more static strides along any dimension, don't fold. 356 for (auto it : llvm::zip(sourceStrides, resultStrides)) { 357 auto ss = std::get<0>(it), st = std::get<1>(it); 358 if (ss != st) 359 if (MemRefType::isDynamicStrideOrOffset(ss) && 360 !MemRefType::isDynamicStrideOrOffset(st)) 361 return false; 362 } 363 364 return true; 365 } 366 367 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) { 368 if (inputs.size() != 1 || outputs.size() != 1) 369 return false; 370 Type a = inputs.front(), b = outputs.front(); 371 auto aT = a.dyn_cast<MemRefType>(); 372 auto bT = b.dyn_cast<MemRefType>(); 373 374 auto uaT = a.dyn_cast<UnrankedMemRefType>(); 375 auto ubT = b.dyn_cast<UnrankedMemRefType>(); 376 377 if (aT && bT) { 378 if (aT.getElementType() != bT.getElementType()) 379 return false; 380 if (aT.getLayout() != bT.getLayout()) { 381 int64_t aOffset, bOffset; 382 SmallVector<int64_t, 4> aStrides, bStrides; 383 if (failed(getStridesAndOffset(aT, aStrides, aOffset)) || 384 failed(getStridesAndOffset(bT, bStrides, bOffset)) || 385 aStrides.size() != bStrides.size()) 386 return false; 387 388 // Strides along a dimension/offset are compatible if the value in the 389 // source memref is static and the value in the target memref is the 390 // same. They are also compatible if either one is dynamic (see 391 // description of MemRefCastOp for details). 392 auto checkCompatible = [](int64_t a, int64_t b) { 393 return (a == MemRefType::getDynamicStrideOrOffset() || 394 b == MemRefType::getDynamicStrideOrOffset() || a == b); 395 }; 396 if (!checkCompatible(aOffset, bOffset)) 397 return false; 398 for (auto aStride : enumerate(aStrides)) 399 if (!checkCompatible(aStride.value(), bStrides[aStride.index()])) 400 return false; 401 } 402 if (aT.getMemorySpace() != bT.getMemorySpace()) 403 return false; 404 405 // They must have the same rank, and any specified dimensions must match. 406 if (aT.getRank() != bT.getRank()) 407 return false; 408 409 for (unsigned i = 0, e = aT.getRank(); i != e; ++i) { 410 int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i); 411 if (aDim != -1 && bDim != -1 && aDim != bDim) 412 return false; 413 } 414 return true; 415 } else { 416 if (!aT && !uaT) 417 return false; 418 if (!bT && !ubT) 419 return false; 420 // Unranked to unranked casting is unsupported 421 if (uaT && ubT) 422 return false; 423 424 auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType(); 425 auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType(); 426 if (aEltType != bEltType) 427 return false; 428 429 auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace(); 430 auto bMemSpace = (bT) ? 
bT.getMemorySpace() : ubT.getMemorySpace(); 431 if (aMemSpace != bMemSpace) 432 return false; 433 434 return true; 435 } 436 437 return false; 438 } 439 440 OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) { 441 return succeeded(foldMemRefCast(*this)) ? getResult() : Value(); 442 } 443 444 //===----------------------------------------------------------------------===// 445 // DeallocOp 446 //===----------------------------------------------------------------------===// 447 448 LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands, 449 SmallVectorImpl<OpFoldResult> &results) { 450 /// dealloc(memrefcast) -> dealloc 451 return foldMemRefCast(*this); 452 } 453 454 //===----------------------------------------------------------------------===// 455 // DimOp 456 //===----------------------------------------------------------------------===// 457 458 void DimOp::build(OpBuilder &builder, OperationState &result, Value source, 459 int64_t index) { 460 auto loc = result.location; 461 Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index); 462 build(builder, result, source, indexValue); 463 } 464 465 void DimOp::build(OpBuilder &builder, OperationState &result, Value source, 466 Value index) { 467 auto indexTy = builder.getIndexType(); 468 build(builder, result, indexTy, source, index); 469 } 470 471 Optional<int64_t> DimOp::getConstantIndex() { 472 if (auto constantOp = index().getDefiningOp<arith::ConstantOp>()) 473 return constantOp.getValue().cast<IntegerAttr>().getInt(); 474 return {}; 475 } 476 477 static LogicalResult verify(DimOp op) { 478 // Assume unknown index to be in range. 479 Optional<int64_t> index = op.getConstantIndex(); 480 if (!index.hasValue()) 481 return success(); 482 483 // Check that constant index is not knowingly out of range. 484 auto type = op.source().getType(); 485 if (auto memrefType = type.dyn_cast<MemRefType>()) { 486 if (index.getValue() >= memrefType.getRank()) 487 return op.emitOpError("index is out of range"); 488 } else if (type.isa<UnrankedMemRefType>()) { 489 // Assume index to be in range. 490 } else { 491 llvm_unreachable("expected operand with memref type"); 492 } 493 return success(); 494 } 495 496 /// Return a map with key being elements in `vals` and data being number of 497 /// occurences of it. Use std::map, since the `vals` here are strides and the 498 /// dynamic stride value is the same as the tombstone value for 499 /// `DenseMap<int64_t>`. 500 static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) { 501 std::map<int64_t, unsigned> numOccurences; 502 for (auto val : vals) 503 numOccurences[val]++; 504 return numOccurences; 505 } 506 507 /// Given the type of the un-rank reduced subview result type and the 508 /// rank-reduced result type, computes the dropped dimensions. This accounts for 509 /// cases where there are multiple unit-dims, but only a subset of those are 510 /// dropped. For MemRefTypes these can be disambiguated using the strides. If a 511 /// dimension is dropped the stride must be dropped too. 
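/// Editor's sketch of the ambiguity this resolves: reducing
/// memref<1x4x1xf32> (strides [4, 1, 1]) to memref<4x1xf32> (strides [1, 1])
/// can only have dropped the unit dimension whose stride (4) no longer
/// appears, i.e. dim 0, even though dims 0 and 2 both have size 1.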
static llvm::Optional<llvm::SmallDenseSet<unsigned>>
computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
                               ArrayAttr staticSizes) {
  llvm::SmallDenseSet<unsigned> unusedDims;
  if (originalType.getRank() == reducedType.getRank())
    return unusedDims;

  for (auto dim : llvm::enumerate(staticSizes))
    if (dim.value().cast<IntegerAttr>().getInt() == 1)
      unusedDims.insert(dim.index());
  SmallVector<int64_t> originalStrides, candidateStrides;
  int64_t originalOffset, candidateOffset;
  if (failed(
          getStridesAndOffset(originalType, originalStrides, originalOffset)) ||
      failed(
          getStridesAndOffset(reducedType, candidateStrides, candidateOffset)))
    return llvm::None;

  // For memrefs, a dimension is truly dropped if its corresponding stride is
  // also dropped. This is particularly important when more than one of the
  // dims is 1. Track the number of occurrences of the strides in the original
  // type and the candidate type. For each unused dim, that stride should not
  // be present in the candidate type. Note that there could be multiple
  // dimensions that have the same size. We do not need to figure out exactly
  // which dim corresponds to which stride; we just need to verify that the
  // number of repetitions of a stride in the original + the number of unused
  // dims with that stride == the number of repetitions of that stride in the
  // candidate.
  std::map<int64_t, unsigned> currUnaccountedStrides =
      getNumOccurences(originalStrides);
  std::map<int64_t, unsigned> candidateStridesNumOccurences =
      getNumOccurences(candidateStrides);
  llvm::SmallDenseSet<unsigned> prunedUnusedDims;
  for (unsigned dim : unusedDims) {
    int64_t originalStride = originalStrides[dim];
    if (currUnaccountedStrides[originalStride] >
        candidateStridesNumOccurences[originalStride]) {
      // This dim can be treated as dropped.
      currUnaccountedStrides[originalStride]--;
      continue;
    }
    if (currUnaccountedStrides[originalStride] ==
        candidateStridesNumOccurences[originalStride]) {
      // The stride for this dim is not dropped. Keep it as is.
      prunedUnusedDims.insert(dim);
      continue;
    }
    if (currUnaccountedStrides[originalStride] <
        candidateStridesNumOccurences[originalStride]) {
      // This should never happen: the reduced-rank type cannot have a stride
      // that was not in the original type.
      return llvm::None;
    }
  }

  for (auto prunedDim : prunedUnusedDims)
    unusedDims.erase(prunedDim);
  if (unusedDims.size() + reducedType.getRank() != originalType.getRank())
    return llvm::None;
  return unusedDims;
}

llvm::SmallDenseSet<unsigned> SubViewOp::getDroppedDims() {
  MemRefType sourceType = getSourceType();
  MemRefType resultType = getType();
  llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
      computeMemRefRankReductionMask(sourceType, resultType, static_sizes());
  assert(unusedDims && "unable to find unused dims of subview");
  return *unusedDims;
}

OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  // All forms of folding require a known index.
  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
  if (!index)
    return {};

  // Folding for unranked types (UnrankedMemRefType) is not supported.
  auto memrefType = source().getType().dyn_cast<MemRefType>();
  if (!memrefType)
    return {};

  // Fold if the shape extent along the given index is known.
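  // For example (editor's sketch): `memref.dim %m, %c0 : memref<4x?xf32>`
  // folds to the constant index 4, while the dynamic size of dimension 1 is
  // traced through the defining op below.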
594 if (!memrefType.isDynamicDim(index.getInt())) { 595 Builder builder(getContext()); 596 return builder.getIndexAttr(memrefType.getShape()[index.getInt()]); 597 } 598 599 // The size at the given index is now known to be a dynamic size. 600 unsigned unsignedIndex = index.getValue().getZExtValue(); 601 602 // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`. 603 Operation *definingOp = source().getDefiningOp(); 604 605 if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp)) 606 return *(alloc.getDynamicSizes().begin() + 607 memrefType.getDynamicDimIndex(unsignedIndex)); 608 609 if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp)) 610 return *(alloca.getDynamicSizes().begin() + 611 memrefType.getDynamicDimIndex(unsignedIndex)); 612 613 if (auto view = dyn_cast_or_null<ViewOp>(definingOp)) 614 return *(view.getDynamicSizes().begin() + 615 memrefType.getDynamicDimIndex(unsignedIndex)); 616 617 if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) { 618 llvm::SmallDenseSet<unsigned> unusedDims = subview.getDroppedDims(); 619 unsigned resultIndex = 0; 620 unsigned sourceRank = subview.getSourceType().getRank(); 621 unsigned sourceIndex = 0; 622 for (auto i : llvm::seq<unsigned>(0, sourceRank)) { 623 if (unusedDims.count(i)) 624 continue; 625 if (resultIndex == unsignedIndex) { 626 sourceIndex = i; 627 break; 628 } 629 resultIndex++; 630 } 631 assert(subview.isDynamicSize(sourceIndex) && 632 "expected dynamic subview size"); 633 return subview.getDynamicSize(sourceIndex); 634 } 635 636 if (auto sizeInterface = 637 dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) { 638 assert(sizeInterface.isDynamicSize(unsignedIndex) && 639 "Expected dynamic subview size"); 640 return sizeInterface.getDynamicSize(unsignedIndex); 641 } 642 643 // dim(memrefcast) -> dim 644 if (succeeded(foldMemRefCast(*this))) 645 return getResult(); 646 647 return {}; 648 } 649 650 namespace { 651 /// Fold dim of a memref reshape operation to a load into the reshape's shape 652 /// operand. 653 struct DimOfMemRefReshape : public OpRewritePattern<DimOp> { 654 using OpRewritePattern<DimOp>::OpRewritePattern; 655 656 LogicalResult matchAndRewrite(DimOp dim, 657 PatternRewriter &rewriter) const override { 658 auto reshape = dim.source().getDefiningOp<ReshapeOp>(); 659 660 if (!reshape) 661 return failure(); 662 663 // Place the load directly after the reshape to ensure that the shape memref 664 // was not mutated. 665 rewriter.setInsertionPointAfter(reshape); 666 Location loc = dim.getLoc(); 667 Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index()); 668 if (load.getType() != dim.getType()) 669 load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load); 670 rewriter.replaceOp(dim, load); 671 return success(); 672 } 673 }; 674 675 } // end anonymous namespace. 
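// Editor's sketch of the DimOfMemRefReshape fold above (names are
// illustrative):
//
//   %r = memref.reshape %src(%shape)
//        : (memref<*xf32>, memref<?xindex>) -> memref<*xf32>
//   %d = memref.dim %r, %i : memref<*xf32>
//
// becomes a load from the shape operand:
//
//   %d = memref.load %shape[%i] : memref<?xindex>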
676 677 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results, 678 MLIRContext *context) { 679 results.add<DimOfMemRefReshape>(context); 680 } 681 682 // --------------------------------------------------------------------------- 683 // DmaStartOp 684 // --------------------------------------------------------------------------- 685 686 void DmaStartOp::build(OpBuilder &builder, OperationState &result, 687 Value srcMemRef, ValueRange srcIndices, Value destMemRef, 688 ValueRange destIndices, Value numElements, 689 Value tagMemRef, ValueRange tagIndices, Value stride, 690 Value elementsPerStride) { 691 result.addOperands(srcMemRef); 692 result.addOperands(srcIndices); 693 result.addOperands(destMemRef); 694 result.addOperands(destIndices); 695 result.addOperands({numElements, tagMemRef}); 696 result.addOperands(tagIndices); 697 if (stride) 698 result.addOperands({stride, elementsPerStride}); 699 } 700 701 static void print(OpAsmPrinter &p, DmaStartOp op) { 702 p << " " << op.getSrcMemRef() << '[' << op.getSrcIndices() << "], " 703 << op.getDstMemRef() << '[' << op.getDstIndices() << "], " 704 << op.getNumElements() << ", " << op.getTagMemRef() << '[' 705 << op.getTagIndices() << ']'; 706 if (op.isStrided()) 707 p << ", " << op.getStride() << ", " << op.getNumElementsPerStride(); 708 709 p.printOptionalAttrDict(op->getAttrs()); 710 p << " : " << op.getSrcMemRef().getType() << ", " 711 << op.getDstMemRef().getType() << ", " << op.getTagMemRef().getType(); 712 } 713 714 // Parse DmaStartOp. 715 // Ex: 716 // %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size, 717 // %tag[%index], %stride, %num_elt_per_stride : 718 // : memref<3076 x f32, 0>, 719 // memref<1024 x f32, 2>, 720 // memref<1 x i32> 721 // 722 static ParseResult parseDmaStartOp(OpAsmParser &parser, 723 OperationState &result) { 724 OpAsmParser::OperandType srcMemRefInfo; 725 SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos; 726 OpAsmParser::OperandType dstMemRefInfo; 727 SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos; 728 OpAsmParser::OperandType numElementsInfo; 729 OpAsmParser::OperandType tagMemrefInfo; 730 SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos; 731 SmallVector<OpAsmParser::OperandType, 2> strideInfo; 732 733 SmallVector<Type, 3> types; 734 auto indexType = parser.getBuilder().getIndexType(); 735 736 // Parse and resolve the following list of operands: 737 // *) source memref followed by its indices (in square brackets). 738 // *) destination memref followed by its indices (in square brackets). 739 // *) dma size in KiB. 740 if (parser.parseOperand(srcMemRefInfo) || 741 parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) || 742 parser.parseComma() || parser.parseOperand(dstMemRefInfo) || 743 parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) || 744 parser.parseComma() || parser.parseOperand(numElementsInfo) || 745 parser.parseComma() || parser.parseOperand(tagMemrefInfo) || 746 parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square)) 747 return failure(); 748 749 // Parse optional stride and elements per stride. 
750 if (parser.parseTrailingOperandList(strideInfo)) 751 return failure(); 752 753 bool isStrided = strideInfo.size() == 2; 754 if (!strideInfo.empty() && !isStrided) { 755 return parser.emitError(parser.getNameLoc(), 756 "expected two stride related operands"); 757 } 758 759 if (parser.parseColonTypeList(types)) 760 return failure(); 761 if (types.size() != 3) 762 return parser.emitError(parser.getNameLoc(), "fewer/more types expected"); 763 764 if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) || 765 parser.resolveOperands(srcIndexInfos, indexType, result.operands) || 766 parser.resolveOperand(dstMemRefInfo, types[1], result.operands) || 767 parser.resolveOperands(dstIndexInfos, indexType, result.operands) || 768 // size should be an index. 769 parser.resolveOperand(numElementsInfo, indexType, result.operands) || 770 parser.resolveOperand(tagMemrefInfo, types[2], result.operands) || 771 // tag indices should be index. 772 parser.resolveOperands(tagIndexInfos, indexType, result.operands)) 773 return failure(); 774 775 if (isStrided) { 776 if (parser.resolveOperands(strideInfo, indexType, result.operands)) 777 return failure(); 778 } 779 780 return success(); 781 } 782 783 static LogicalResult verify(DmaStartOp op) { 784 unsigned numOperands = op.getNumOperands(); 785 786 // Mandatory non-variadic operands are: src memref, dst memref, tag memref and 787 // the number of elements. 788 if (numOperands < 4) 789 return op.emitOpError("expected at least 4 operands"); 790 791 // Check types of operands. The order of these calls is important: the later 792 // calls rely on some type properties to compute the operand position. 793 // 1. Source memref. 794 if (!op.getSrcMemRef().getType().isa<MemRefType>()) 795 return op.emitOpError("expected source to be of memref type"); 796 if (numOperands < op.getSrcMemRefRank() + 4) 797 return op.emitOpError() 798 << "expected at least " << op.getSrcMemRefRank() + 4 << " operands"; 799 if (!op.getSrcIndices().empty() && 800 !llvm::all_of(op.getSrcIndices().getTypes(), 801 [](Type t) { return t.isIndex(); })) 802 return op.emitOpError("expected source indices to be of index type"); 803 804 // 2. Destination memref. 805 if (!op.getDstMemRef().getType().isa<MemRefType>()) 806 return op.emitOpError("expected destination to be of memref type"); 807 unsigned numExpectedOperands = 808 op.getSrcMemRefRank() + op.getDstMemRefRank() + 4; 809 if (numOperands < numExpectedOperands) 810 return op.emitOpError() 811 << "expected at least " << numExpectedOperands << " operands"; 812 if (!op.getDstIndices().empty() && 813 !llvm::all_of(op.getDstIndices().getTypes(), 814 [](Type t) { return t.isIndex(); })) 815 return op.emitOpError("expected destination indices to be of index type"); 816 817 // 3. Number of elements. 818 if (!op.getNumElements().getType().isIndex()) 819 return op.emitOpError("expected num elements to be of index type"); 820 821 // 4. Tag memref. 822 if (!op.getTagMemRef().getType().isa<MemRefType>()) 823 return op.emitOpError("expected tag to be of memref type"); 824 numExpectedOperands += op.getTagMemRefRank(); 825 if (numOperands < numExpectedOperands) 826 return op.emitOpError() 827 << "expected at least " << numExpectedOperands << " operands"; 828 if (!op.getTagIndices().empty() && 829 !llvm::all_of(op.getTagIndices().getTypes(), 830 [](Type t) { return t.isIndex(); })) 831 return op.emitOpError("expected tag indices to be of index type"); 832 833 // Optional stride-related operands must be either both present or both 834 // absent. 
835 if (numOperands != numExpectedOperands && 836 numOperands != numExpectedOperands + 2) 837 return op.emitOpError("incorrect number of operands"); 838 839 // 5. Strides. 840 if (op.isStrided()) { 841 if (!op.getStride().getType().isIndex() || 842 !op.getNumElementsPerStride().getType().isIndex()) 843 return op.emitOpError( 844 "expected stride and num elements per stride to be of type index"); 845 } 846 847 return success(); 848 } 849 850 LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands, 851 SmallVectorImpl<OpFoldResult> &results) { 852 /// dma_start(memrefcast) -> dma_start 853 return foldMemRefCast(*this); 854 } 855 856 // --------------------------------------------------------------------------- 857 // DmaWaitOp 858 // --------------------------------------------------------------------------- 859 860 LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands, 861 SmallVectorImpl<OpFoldResult> &results) { 862 /// dma_wait(memrefcast) -> dma_wait 863 return foldMemRefCast(*this); 864 } 865 866 static LogicalResult verify(DmaWaitOp op) { 867 // Check that the number of tag indices matches the tagMemRef rank. 868 unsigned numTagIndices = op.tagIndices().size(); 869 unsigned tagMemRefRank = op.getTagMemRefRank(); 870 if (numTagIndices != tagMemRefRank) 871 return op.emitOpError() << "expected tagIndices to have the same number of " 872 "elements as the tagMemRef rank, expected " 873 << tagMemRefRank << ", but got " << numTagIndices; 874 return success(); 875 } 876 877 //===----------------------------------------------------------------------===// 878 // GlobalOp 879 //===----------------------------------------------------------------------===// 880 881 static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, 882 TypeAttr type, 883 Attribute initialValue) { 884 p << type; 885 if (!op.isExternal()) { 886 p << " = "; 887 if (op.isUninitialized()) 888 p << "uninitialized"; 889 else 890 p.printAttributeWithoutType(initialValue); 891 } 892 } 893 894 static ParseResult 895 parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, 896 Attribute &initialValue) { 897 Type type; 898 if (parser.parseType(type)) 899 return failure(); 900 901 auto memrefType = type.dyn_cast<MemRefType>(); 902 if (!memrefType || !memrefType.hasStaticShape()) 903 return parser.emitError(parser.getNameLoc()) 904 << "type should be static shaped memref, but got " << type; 905 typeAttr = TypeAttr::get(type); 906 907 if (parser.parseOptionalEqual()) 908 return success(); 909 910 if (succeeded(parser.parseOptionalKeyword("uninitialized"))) { 911 initialValue = UnitAttr::get(parser.getContext()); 912 return success(); 913 } 914 915 Type tensorType = getTensorTypeFromMemRefType(memrefType); 916 if (parser.parseAttribute(initialValue, tensorType)) 917 return failure(); 918 if (!initialValue.isa<ElementsAttr>()) 919 return parser.emitError(parser.getNameLoc()) 920 << "initial value should be a unit or elements attribute"; 921 return success(); 922 } 923 924 static LogicalResult verify(GlobalOp op) { 925 auto memrefType = op.type().dyn_cast<MemRefType>(); 926 if (!memrefType || !memrefType.hasStaticShape()) 927 return op.emitOpError("type should be static shaped memref, but got ") 928 << op.type(); 929 930 // Verify that the initial value, if present, is either a unit attribute or 931 // an elements attribute. 
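  // A few illustrative forms (editor's sketch):
  //   memref.global "private" @a : memref<2xf32> = dense<0.0>    // ElementsAttr
  //   memref.global "private" @b : memref<2xf32> = uninitialized // UnitAttr
  //   memref.global @c : memref<2xf32>                            // external, no value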
932 if (op.initial_value().hasValue()) { 933 Attribute initValue = op.initial_value().getValue(); 934 if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>()) 935 return op.emitOpError("initial value should be a unit or elements " 936 "attribute, but got ") 937 << initValue; 938 939 // Check that the type of the initial value is compatible with the type of 940 // the global variable. 941 if (initValue.isa<ElementsAttr>()) { 942 Type initType = initValue.getType(); 943 Type tensorType = getTensorTypeFromMemRefType(memrefType); 944 if (initType != tensorType) 945 return op.emitOpError("initial value expected to be of type ") 946 << tensorType << ", but was of type " << initType; 947 } 948 } 949 950 if (Optional<uint64_t> alignAttr = op.alignment()) { 951 uint64_t alignment = alignAttr.getValue(); 952 953 if (!llvm::isPowerOf2_64(alignment)) 954 return op->emitError() << "alignment attribute value " << alignment 955 << " is not a power of 2"; 956 } 957 958 // TODO: verify visibility for declarations. 959 return success(); 960 } 961 962 //===----------------------------------------------------------------------===// 963 // GetGlobalOp 964 //===----------------------------------------------------------------------===// 965 966 LogicalResult 967 GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { 968 // Verify that the result type is same as the type of the referenced 969 // memref.global op. 970 auto global = 971 symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr()); 972 if (!global) 973 return emitOpError("'") 974 << name() << "' does not reference a valid global memref"; 975 976 Type resultType = result().getType(); 977 if (global.type() != resultType) 978 return emitOpError("result type ") 979 << resultType << " does not match type " << global.type() 980 << " of the global memref @" << name(); 981 return success(); 982 } 983 984 //===----------------------------------------------------------------------===// 985 // LoadOp 986 //===----------------------------------------------------------------------===// 987 988 static LogicalResult verify(LoadOp op) { 989 if (op.getNumOperands() != 1 + op.getMemRefType().getRank()) 990 return op.emitOpError("incorrect number of indices for load"); 991 return success(); 992 } 993 994 OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) { 995 /// load(memrefcast) -> load 996 if (succeeded(foldMemRefCast(*this))) 997 return getResult(); 998 return OpFoldResult(); 999 } 1000 1001 //===----------------------------------------------------------------------===// 1002 // PrefetchOp 1003 //===----------------------------------------------------------------------===// 1004 1005 static void print(OpAsmPrinter &p, PrefetchOp op) { 1006 p << " " << op.memref() << '['; 1007 p.printOperands(op.indices()); 1008 p << ']' << ", " << (op.isWrite() ? "write" : "read"); 1009 p << ", locality<" << op.localityHint(); 1010 p << ">, " << (op.isDataCache() ? 
"data" : "instr"); 1011 p.printOptionalAttrDict( 1012 op->getAttrs(), 1013 /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"}); 1014 p << " : " << op.getMemRefType(); 1015 } 1016 1017 static ParseResult parsePrefetchOp(OpAsmParser &parser, 1018 OperationState &result) { 1019 OpAsmParser::OperandType memrefInfo; 1020 SmallVector<OpAsmParser::OperandType, 4> indexInfo; 1021 IntegerAttr localityHint; 1022 MemRefType type; 1023 StringRef readOrWrite, cacheType; 1024 1025 auto indexTy = parser.getBuilder().getIndexType(); 1026 auto i32Type = parser.getBuilder().getIntegerType(32); 1027 if (parser.parseOperand(memrefInfo) || 1028 parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) || 1029 parser.parseComma() || parser.parseKeyword(&readOrWrite) || 1030 parser.parseComma() || parser.parseKeyword("locality") || 1031 parser.parseLess() || 1032 parser.parseAttribute(localityHint, i32Type, "localityHint", 1033 result.attributes) || 1034 parser.parseGreater() || parser.parseComma() || 1035 parser.parseKeyword(&cacheType) || parser.parseColonType(type) || 1036 parser.resolveOperand(memrefInfo, type, result.operands) || 1037 parser.resolveOperands(indexInfo, indexTy, result.operands)) 1038 return failure(); 1039 1040 if (!readOrWrite.equals("read") && !readOrWrite.equals("write")) 1041 return parser.emitError(parser.getNameLoc(), 1042 "rw specifier has to be 'read' or 'write'"); 1043 result.addAttribute( 1044 PrefetchOp::getIsWriteAttrName(), 1045 parser.getBuilder().getBoolAttr(readOrWrite.equals("write"))); 1046 1047 if (!cacheType.equals("data") && !cacheType.equals("instr")) 1048 return parser.emitError(parser.getNameLoc(), 1049 "cache type has to be 'data' or 'instr'"); 1050 1051 result.addAttribute( 1052 PrefetchOp::getIsDataCacheAttrName(), 1053 parser.getBuilder().getBoolAttr(cacheType.equals("data"))); 1054 1055 return success(); 1056 } 1057 1058 static LogicalResult verify(PrefetchOp op) { 1059 if (op.getNumOperands() != 1 + op.getMemRefType().getRank()) 1060 return op.emitOpError("too few indices"); 1061 1062 return success(); 1063 } 1064 1065 LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands, 1066 SmallVectorImpl<OpFoldResult> &results) { 1067 // prefetch(memrefcast) -> prefetch 1068 return foldMemRefCast(*this); 1069 } 1070 1071 //===----------------------------------------------------------------------===// 1072 // ReinterpretCastOp 1073 //===----------------------------------------------------------------------===// 1074 1075 /// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`, 1076 /// `staticSizes` and `staticStrides` are automatically filled with 1077 /// source-memref-rank sentinel values that encode dynamic entries. 
1078 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result, 1079 MemRefType resultType, Value source, 1080 OpFoldResult offset, ArrayRef<OpFoldResult> sizes, 1081 ArrayRef<OpFoldResult> strides, 1082 ArrayRef<NamedAttribute> attrs) { 1083 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides; 1084 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides; 1085 dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets, 1086 ShapedType::kDynamicStrideOrOffset); 1087 dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, 1088 ShapedType::kDynamicSize); 1089 dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, 1090 ShapedType::kDynamicStrideOrOffset); 1091 build(b, result, resultType, source, dynamicOffsets, dynamicSizes, 1092 dynamicStrides, b.getI64ArrayAttr(staticOffsets), 1093 b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); 1094 result.addAttributes(attrs); 1095 } 1096 1097 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result, 1098 MemRefType resultType, Value source, 1099 int64_t offset, ArrayRef<int64_t> sizes, 1100 ArrayRef<int64_t> strides, 1101 ArrayRef<NamedAttribute> attrs) { 1102 SmallVector<OpFoldResult> sizeValues = 1103 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult { 1104 return b.getI64IntegerAttr(v); 1105 })); 1106 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>( 1107 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult { 1108 return b.getI64IntegerAttr(v); 1109 })); 1110 build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues, 1111 strideValues, attrs); 1112 } 1113 1114 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result, 1115 MemRefType resultType, Value source, Value offset, 1116 ValueRange sizes, ValueRange strides, 1117 ArrayRef<NamedAttribute> attrs) { 1118 SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>( 1119 llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; })); 1120 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>( 1121 llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; })); 1122 build(b, result, resultType, source, offset, sizeValues, strideValues, attrs); 1123 } 1124 1125 // TODO: ponder whether we want to allow missing trailing sizes/strides that are 1126 // completed automatically, like we have for subview and extract_slice. 1127 static LogicalResult verify(ReinterpretCastOp op) { 1128 // The source and result memrefs should be in the same memory space. 1129 auto srcType = op.source().getType().cast<BaseMemRefType>(); 1130 auto resultType = op.getType().cast<MemRefType>(); 1131 if (srcType.getMemorySpace() != resultType.getMemorySpace()) 1132 return op.emitError("different memory spaces specified for source type ") 1133 << srcType << " and result memref type " << resultType; 1134 if (srcType.getElementType() != resultType.getElementType()) 1135 return op.emitError("different element types specified for source type ") 1136 << srcType << " and result memref type " << resultType; 1137 1138 // Match sizes in result memref type and in static_sizes attribute. 
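  // (Editor's note: the match below is element-wise, so a result type such as
  // memref<4x?xf32> must come with static_sizes = [4, kDynamicSize].)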
  for (auto &en :
       llvm::enumerate(llvm::zip(resultType.getShape(),
                                 extractFromI64ArrayAttr(op.static_sizes())))) {
    int64_t resultSize = std::get<0>(en.value());
    int64_t expectedSize = std::get<1>(en.value());
    if (resultSize != expectedSize)
      return op.emitError("expected result type with size = ")
             << expectedSize << " instead of " << resultSize
             << " in dim = " << en.index();
  }

  // Match offset and strides in static_offsets and static_strides attributes
  // if the result memref type has a non-identity layout.
  if (!resultType.getLayout().isIdentity()) {
    int64_t resultOffset;
    SmallVector<int64_t, 4> resultStrides;
    if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
      return failure();

    // Match offset in result memref type and in static_offsets attribute.
    int64_t expectedOffset =
        extractFromI64ArrayAttr(op.static_offsets()).front();
    if (resultOffset != expectedOffset)
      return op.emitError("expected result type with offset = ")
             << expectedOffset << " instead of " << resultOffset;

    // Match strides in result memref type and in static_strides attribute.
    for (auto &en : llvm::enumerate(llvm::zip(
             resultStrides, extractFromI64ArrayAttr(op.static_strides())))) {
      int64_t resultStride = std::get<0>(en.value());
      int64_t expectedStride = std::get<1>(en.value());
      if (resultStride != expectedStride)
        return op.emitError("expected result type with stride = ")
               << expectedStride << " instead of " << resultStride
               << " in dim = " << en.index();
    }
  }
  return success();
}

//===----------------------------------------------------------------------===//
// Reassociative reshape ops
//===----------------------------------------------------------------------===//

SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

static void print(OpAsmPrinter &p, ExpandShapeOp op) {
  ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
}

static void print(OpAsmPrinter &p, CollapseShapeOp op) {
  ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
}

/// Detect whether memref dims [dim, dim + extent) can be reshaped without
/// copies.
static bool isReshapableDimBand(unsigned dim, unsigned extent,
                                ArrayRef<int64_t> sizes,
                                ArrayRef<AffineExpr> strides) {
  assert(sizes.size() == strides.size() && "mismatched ranks");
  // Off-by-one indexing (idx + 1) below avoids running out of bounds.
  for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) {
    // Only bands of static shapes are reshapable. This is due to the fact
    // that there is no relation between dynamic sizes and dynamic strides: we
    // do not have enough information to know whether a "-1" size corresponds
    // to the proper symbol in the AffineExpr of a stride.
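    // (Editor's example of a reshapable band: sizes [2, 3, 4] with strides
    // [12, 4, 1] pass the checks in this loop, since 12 == 4 * 3 and
    // 4 == 1 * 4, i.e. the band is contiguous.)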
    if (ShapedType::isDynamic(sizes[dim + 1]))
      return false;
    // TODO: Refine this by passing the proper nDims and nSymbols so we can
    // simplify on the fly and catch more reshapable cases.
    if (strides[idx] != strides[idx + 1] * sizes[idx + 1])
      return false;
  }
  return true;
}

/// Compute the MemRefType obtained by applying the `reassociation` (which is
/// expected to be valid) to `type`.
/// If `type` is a contiguous MemRefType, this always produces a contiguous
/// MemRefType.
static MemRefType
computeReshapeCollapsedType(MemRefType type,
                            ArrayRef<AffineMap> reassociation) {
  auto sizes = type.getShape();
  AffineExpr offset;
  SmallVector<AffineExpr, 4> strides;
  auto status = getStridesAndOffset(type, strides, offset);
  (void)status;
  assert(succeeded(status) && "expected strided memref");

  SmallVector<int64_t, 4> newSizes;
  newSizes.reserve(reassociation.size());
  SmallVector<AffineExpr, 4> newStrides;
  newStrides.reserve(reassociation.size());

  // Use the fact that reassociation is valid to simplify the logic: only use
  // each map's rank.
  assert(isReassociationValid(reassociation) && "invalid reassociation");
  unsigned currentDim = 0;
  for (AffineMap m : reassociation) {
    unsigned dim = m.getNumResults();
    int64_t size = 1;
    AffineExpr stride = strides[currentDim + dim - 1];
    if (!isReshapableDimBand(currentDim, dim, sizes, strides)) {
      size = ShapedType::kDynamicSize;
      stride = AffineExpr();
    } else {
      for (unsigned d = 0; d < dim; ++d)
        size *= sizes[currentDim + d];
    }
    newSizes.push_back(size);
    newStrides.push_back(stride);
    currentDim += dim;
  }

  // Early-exit: if `type` is contiguous, the result must be contiguous.
  if (canonicalizeStridedLayout(type).getLayout().isIdentity())
    return MemRefType::Builder(type).setShape(newSizes).setLayout({});

  // Convert back to int64_t because we don't have enough information to
  // create new strided layouts from AffineExpr only. This corresponds to a
  // case where copies may be necessary.
1276 int64_t intOffset = ShapedType::kDynamicStrideOrOffset; 1277 if (auto o = offset.dyn_cast<AffineConstantExpr>()) 1278 intOffset = o.getValue(); 1279 SmallVector<int64_t, 4> intStrides; 1280 intStrides.reserve(strides.size()); 1281 for (auto stride : newStrides) { 1282 if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>()) 1283 intStrides.push_back(cst.getValue()); 1284 else 1285 intStrides.push_back(ShapedType::kDynamicStrideOrOffset); 1286 } 1287 auto layout = 1288 makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext()); 1289 return canonicalizeStridedLayout( 1290 MemRefType::Builder(type).setShape(newSizes).setLayout( 1291 AffineMapAttr::get(layout))); 1292 } 1293 1294 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src, 1295 ArrayRef<ReassociationIndices> reassociation, 1296 ArrayRef<NamedAttribute> attrs) { 1297 auto memRefType = src.getType().cast<MemRefType>(); 1298 auto resultType = computeReshapeCollapsedType( 1299 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs( 1300 b.getContext(), reassociation))); 1301 build(b, result, resultType, src, attrs); 1302 result.addAttribute(getReassociationAttrName(), 1303 getReassociationIndicesAttribute(b, reassociation)); 1304 } 1305 1306 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src, 1307 ArrayRef<ReassociationIndices> reassociation, 1308 ArrayRef<NamedAttribute> attrs) { 1309 auto memRefType = src.getType().cast<MemRefType>(); 1310 auto resultType = computeReshapeCollapsedType( 1311 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs( 1312 b.getContext(), reassociation))); 1313 build(b, result, resultType, src, attrs); 1314 result.addAttribute(getReassociationAttrName(), 1315 getReassociationIndicesAttribute(b, reassociation)); 1316 } 1317 1318 template <typename ReshapeOp, 1319 bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value> 1320 static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType, 1321 MemRefType collapsedType) { 1322 if (failed( 1323 verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion))) 1324 return failure(); 1325 auto maps = op.getReassociationMaps(); 1326 MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps); 1327 if (collapsedType != expectedType) 1328 return op.emitOpError("expected collapsed type to be ") 1329 << expectedType << ", but got " << collapsedType; 1330 return success(); 1331 } 1332 1333 static LogicalResult verify(ExpandShapeOp op) { 1334 return verifyReshapeOp(op, op.getResultType(), op.getSrcType()); 1335 } 1336 1337 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results, 1338 MLIRContext *context) { 1339 results.add<CollapseReshapeOps<ExpandShapeOp>, 1340 CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context); 1341 } 1342 1343 static LogicalResult verify(CollapseShapeOp op) { 1344 return verifyReshapeOp(op, op.getSrcType(), op.getResultType()); 1345 } 1346 1347 struct CollapseShapeOpMemRefCastFolder 1348 : public OpRewritePattern<CollapseShapeOp> { 1349 public: 1350 using OpRewritePattern<CollapseShapeOp>::OpRewritePattern; 1351 1352 LogicalResult matchAndRewrite(CollapseShapeOp op, 1353 PatternRewriter &rewriter) const override { 1354 auto cast = op.getOperand().getDefiningOp<CastOp>(); 1355 if (!cast) 1356 return failure(); 1357 1358 if (!CastOp::canFoldIntoConsumerOp(cast)) 1359 return failure(); 1360 1361 Type newResultType = computeReshapeCollapsedType( 1362 
cast.getOperand().getType().cast<MemRefType>(), 1363 op.getReassociationMaps()); 1364 1365 if (newResultType == op.getResultType()) { 1366 rewriter.updateRootInPlace( 1367 op, [&]() { op.srcMutable().assign(cast.source()); }); 1368 } else { 1369 Value newOp = rewriter.create<CollapseShapeOp>( 1370 op->getLoc(), cast.source(), op.getReassociationIndices()); 1371 rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp); 1372 } 1373 return success(); 1374 } 1375 }; 1376 1377 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results, 1378 MLIRContext *context) { 1379 results.add<CollapseReshapeOps<CollapseShapeOp>, 1380 CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>, 1381 CollapseShapeOpMemRefCastFolder>(context); 1382 } 1383 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) { 1384 return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands); 1385 } 1386 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) { 1387 return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands); 1388 } 1389 1390 //===----------------------------------------------------------------------===// 1391 // ReshapeOp 1392 //===----------------------------------------------------------------------===// 1393 1394 static LogicalResult verify(ReshapeOp op) { 1395 Type operandType = op.source().getType(); 1396 Type resultType = op.result().getType(); 1397 1398 Type operandElementType = operandType.cast<ShapedType>().getElementType(); 1399 Type resultElementType = resultType.cast<ShapedType>().getElementType(); 1400 if (operandElementType != resultElementType) 1401 return op.emitOpError("element types of source and destination memref " 1402 "types should be the same"); 1403 1404 if (auto operandMemRefType = operandType.dyn_cast<MemRefType>()) 1405 if (!operandMemRefType.getLayout().isIdentity()) 1406 return op.emitOpError( 1407 "source memref type should have identity affine map"); 1408 1409 int64_t shapeSize = op.shape().getType().cast<MemRefType>().getDimSize(0); 1410 auto resultMemRefType = resultType.dyn_cast<MemRefType>(); 1411 if (resultMemRefType) { 1412 if (!resultMemRefType.getLayout().isIdentity()) 1413 return op.emitOpError( 1414 "result memref type should have identity affine map"); 1415 if (shapeSize == ShapedType::kDynamicSize) 1416 return op.emitOpError("cannot use shape operand with dynamic length to " 1417 "reshape to statically-ranked memref type"); 1418 if (shapeSize != resultMemRefType.getRank()) 1419 return op.emitOpError( 1420 "length of shape operand differs from the result's memref rank"); 1421 } 1422 return success(); 1423 } 1424 1425 //===----------------------------------------------------------------------===// 1426 // StoreOp 1427 //===----------------------------------------------------------------------===// 1428 1429 static LogicalResult verify(StoreOp op) { 1430 if (op.getNumOperands() != 2 + op.getMemRefType().getRank()) 1431 return op.emitOpError("store index operand count not equal to memref rank"); 1432 1433 return success(); 1434 } 1435 1436 LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands, 1437 SmallVectorImpl<OpFoldResult> &results) { 1438 /// store(memrefcast) -> store 1439 return foldMemRefCast(*this, getValueToStore()); 1440 } 1441 1442 //===----------------------------------------------------------------------===// 1443 // SubViewOp 1444 //===----------------------------------------------------------------------===// 1445 1446 namespace { 1447 /// Helpers to write more idiomatic operations. 
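/// (Editor's note: `Wrapper` below implements saturating index arithmetic:
/// as soon as one operand is the dynamic stride/offset sentinel, the result
/// stays the sentinel rather than a meaningless sum or product.)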
1448 namespace saturated_arith { 1449 struct Wrapper { 1450 explicit Wrapper(int64_t v) : v(v) {} 1451 operator int64_t() { return v; } 1452 int64_t v; 1453 }; 1454 Wrapper operator+(Wrapper a, int64_t b) { 1455 if (ShapedType::isDynamicStrideOrOffset(a) || 1456 ShapedType::isDynamicStrideOrOffset(b)) 1457 return Wrapper(ShapedType::kDynamicStrideOrOffset); 1458 return Wrapper(a.v + b); 1459 } 1460 Wrapper operator*(Wrapper a, int64_t b) { 1461 if (ShapedType::isDynamicStrideOrOffset(a) || 1462 ShapedType::isDynamicStrideOrOffset(b)) 1463 return Wrapper(ShapedType::kDynamicStrideOrOffset); 1464 return Wrapper(a.v * b); 1465 } 1466 } // end namespace saturated_arith 1467 } // end namespace 1468 1469 /// A subview result type can be fully inferred from the source type and the 1470 /// static representation of offsets, sizes and strides. Special sentinels 1471 /// encode the dynamic case. 1472 Type SubViewOp::inferResultType(MemRefType sourceMemRefType, 1473 ArrayRef<int64_t> leadingStaticOffsets, 1474 ArrayRef<int64_t> leadingStaticSizes, 1475 ArrayRef<int64_t> leadingStaticStrides) { 1476 // A subview may specify only a leading subset of offset/sizes/strides in 1477 // which case we complete with offset=0, sizes from memref type and strides=1. 1478 unsigned rank = sourceMemRefType.getRank(); 1479 assert(leadingStaticOffsets.size() <= rank && 1480 "unexpected leadingStaticOffsets overflow"); 1481 assert(leadingStaticSizes.size() <= rank && 1482 "unexpected leadingStaticSizes overflow"); 1483 assert(leadingStaticStrides.size() <= rank && 1484 "unexpected leadingStaticStrides overflow"); 1485 auto staticOffsets = llvm::to_vector<4>(leadingStaticOffsets); 1486 auto staticSizes = llvm::to_vector<4>(leadingStaticSizes); 1487 auto staticStrides = llvm::to_vector<4>(leadingStaticStrides); 1488 unsigned numTrailingOffsets = rank - staticOffsets.size(); 1489 unsigned numTrailingSizes = rank - staticSizes.size(); 1490 unsigned numTrailingStrides = rank - staticStrides.size(); 1491 staticOffsets.append(numTrailingOffsets, 0); 1492 llvm::append_range(staticSizes, 1493 sourceMemRefType.getShape().take_back(numTrailingSizes)); 1494 staticStrides.append(numTrailingStrides, 1); 1495 1496 // Extract source offset and strides. 1497 int64_t sourceOffset; 1498 SmallVector<int64_t, 4> sourceStrides; 1499 auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset); 1500 assert(succeeded(res) && "SubViewOp expected strided memref type"); 1501 (void)res; 1502 1503 // Compute target offset whose value is: 1504 // `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`. 1505 int64_t targetOffset = sourceOffset; 1506 for (auto it : llvm::zip(staticOffsets, sourceStrides)) { 1507 auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it); 1508 using namespace saturated_arith; 1509 targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * targetStride; 1510 } 1511 1512 // Compute target stride whose value is: 1513 // `sourceStrides_i * staticStrides_i`. 1514 SmallVector<int64_t, 4> targetStrides; 1515 targetStrides.reserve(staticOffsets.size()); 1516 for (auto it : llvm::zip(sourceStrides, staticStrides)) { 1517 auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it); 1518 using namespace saturated_arith; 1519 targetStrides.push_back(Wrapper(sourceStride) * staticStride); 1520 } 1521 1522 // The type is now known. 
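// For illustration: a subview of memref<8x16xf32> (strides [16, 1], offset 0)
// with offsets [2, 4], sizes [4, 4] and strides [1, 2] yields
// targetOffset = 0 + 2 * 16 + 4 * 1 = 36 and targetStrides = [16, 2], i.e. a
// memref<4x4xf32, offset: 36, strides: [16, 2]> result type.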
1523 return MemRefType::get( 1524 staticSizes, sourceMemRefType.getElementType(), 1525 makeStridedLinearLayoutMap(targetStrides, targetOffset, 1526 sourceMemRefType.getContext()), 1527 sourceMemRefType.getMemorySpace()); 1528 } 1529 1530 Type SubViewOp::inferResultType(MemRefType sourceMemRefType, 1531 ArrayRef<OpFoldResult> leadingStaticOffsets, 1532 ArrayRef<OpFoldResult> leadingStaticSizes, 1533 ArrayRef<OpFoldResult> leadingStaticStrides) { 1534 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides; 1535 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides; 1536 dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets, 1537 staticOffsets, ShapedType::kDynamicStrideOrOffset); 1538 dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes, 1539 ShapedType::kDynamicSize); 1540 dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides, 1541 staticStrides, ShapedType::kDynamicStrideOrOffset); 1542 return SubViewOp::inferResultType(sourceMemRefType, staticOffsets, 1543 staticSizes, staticStrides) 1544 .cast<MemRefType>(); 1545 } 1546 1547 Type SubViewOp::inferRankReducedResultType( 1548 unsigned resultRank, MemRefType sourceRankedTensorType, 1549 ArrayRef<int64_t> leadingStaticOffsets, 1550 ArrayRef<int64_t> leadingStaticSizes, 1551 ArrayRef<int64_t> leadingStaticStrides) { 1552 auto inferredType = 1553 inferResultType(sourceRankedTensorType, leadingStaticOffsets, 1554 leadingStaticSizes, leadingStaticStrides) 1555 .cast<MemRefType>(); 1556 assert(inferredType.getRank() >= resultRank && "expected "); 1557 int rankDiff = inferredType.getRank() - resultRank; 1558 if (rankDiff > 0) { 1559 auto shape = inferredType.getShape(); 1560 llvm::SmallDenseSet<unsigned> dimsToProject; 1561 mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject); 1562 SmallVector<int64_t> projectedShape; 1563 for (unsigned pos = 0, e = shape.size(); pos < e; ++pos) 1564 if (!dimsToProject.contains(pos)) 1565 projectedShape.push_back(shape[pos]); 1566 1567 AffineMap map = inferredType.getLayout().getAffineMap(); 1568 if (!map.isIdentity()) 1569 map = getProjectedMap(map, dimsToProject); 1570 inferredType = 1571 MemRefType::get(projectedShape, inferredType.getElementType(), map, 1572 inferredType.getMemorySpace()); 1573 } 1574 return inferredType; 1575 } 1576 1577 Type SubViewOp::inferRankReducedResultType( 1578 unsigned resultRank, MemRefType sourceRankedTensorType, 1579 ArrayRef<OpFoldResult> leadingStaticOffsets, 1580 ArrayRef<OpFoldResult> leadingStaticSizes, 1581 ArrayRef<OpFoldResult> leadingStaticStrides) { 1582 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides; 1583 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides; 1584 dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets, 1585 staticOffsets, ShapedType::kDynamicStrideOrOffset); 1586 dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes, 1587 ShapedType::kDynamicSize); 1588 dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides, 1589 staticStrides, ShapedType::kDynamicStrideOrOffset); 1590 return SubViewOp::inferRankReducedResultType( 1591 resultRank, sourceRankedTensorType, staticOffsets, staticSizes, 1592 staticStrides); 1593 } 1594 // Build a SubViewOp with mixed static and dynamic entries and custom result 1595 // type. If the type passed is nullptr, it is inferred. 
1596 void SubViewOp::build(OpBuilder &b, OperationState &result,
1597 MemRefType resultType, Value source,
1598 ArrayRef<OpFoldResult> offsets,
1599 ArrayRef<OpFoldResult> sizes,
1600 ArrayRef<OpFoldResult> strides,
1601 ArrayRef<NamedAttribute> attrs) {
1602 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1603 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1604 dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1605 ShapedType::kDynamicStrideOrOffset);
1606 dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1607 ShapedType::kDynamicSize);
1608 dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1609 ShapedType::kDynamicStrideOrOffset);
1610 auto sourceMemRefType = source.getType().cast<MemRefType>();
1611 // Structuring implementation this way avoids duplication between builders.
1612 if (!resultType) {
1613 resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1614 staticSizes, staticStrides)
1615 .cast<MemRefType>();
1616 }
1617 build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1618 dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1619 b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1620 result.addAttributes(attrs);
1621 }
1622
1623 // Build a SubViewOp with mixed static and dynamic entries and inferred result
1624 // type.
1625 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1626 ArrayRef<OpFoldResult> offsets,
1627 ArrayRef<OpFoldResult> sizes,
1628 ArrayRef<OpFoldResult> strides,
1629 ArrayRef<NamedAttribute> attrs) {
1630 build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1631 }
1632
1633 // Build a SubViewOp with static entries and inferred result type.
1634 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1635 ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1636 ArrayRef<int64_t> strides,
1637 ArrayRef<NamedAttribute> attrs) {
1638 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1639 llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1640 return b.getI64IntegerAttr(v);
1641 }));
1642 SmallVector<OpFoldResult> sizeValues =
1643 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1644 return b.getI64IntegerAttr(v);
1645 }));
1646 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1647 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1648 return b.getI64IntegerAttr(v);
1649 }));
1650 build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
1651 }
1652
1653 // Build a SubViewOp with static entries and custom result type. If the
1654 // type passed is nullptr, it is inferred.
1655 void SubViewOp::build(OpBuilder &b, OperationState &result,
1656 MemRefType resultType, Value source,
1657 ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1658 ArrayRef<int64_t> strides,
1659 ArrayRef<NamedAttribute> attrs) {
1660 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1661 llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1662 return b.getI64IntegerAttr(v);
1663 }));
1664 SmallVector<OpFoldResult> sizeValues =
1665 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1666 return b.getI64IntegerAttr(v);
1667 }));
1668 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1669 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1670 return b.getI64IntegerAttr(v);
1671 }));
1672 build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
1673 attrs);
1674 }
1675
1676 // Build a SubViewOp with dynamic entries and custom result type. If the type
1677 // passed is nullptr, it is inferred.
1678 void SubViewOp::build(OpBuilder &b, OperationState &result,
1679 MemRefType resultType, Value source, ValueRange offsets,
1680 ValueRange sizes, ValueRange strides,
1681 ArrayRef<NamedAttribute> attrs) {
1682 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1683 llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1684 SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1685 llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1686 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1687 llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1688 build(b, result, resultType, source, offsetValues, sizeValues, strideValues, attrs);
1689 }
1690
1691 // Build a SubViewOp with dynamic entries and inferred result type.
1692 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1693 ValueRange offsets, ValueRange sizes, ValueRange strides,
1694 ArrayRef<NamedAttribute> attrs) {
1695 build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1696 }
1697
1698 /// For ViewLikeOpInterface.
1699 Value SubViewOp::getViewSource() { return source(); }
1700
1701 enum SubViewVerificationResult {
1702 Success,
1703 RankTooLarge,
1704 SizeMismatch,
1705 ElemTypeMismatch,
1706 MemSpaceMismatch,
1707 AffineMapMismatch
1708 };
1709
1710 /// Checks if the `original` type can be rank-reduced to the `reduced` type.
1711 /// This function is a slight variant of the `is subsequence` algorithm, where
1712 /// every non-matching dimension must be 1.
1713 static SubViewVerificationResult
1714 isRankReducedType(Type originalType, Type candidateReducedType,
1715 ArrayAttr staticSizes, std::string *errMsg = nullptr) {
1716 if (originalType == candidateReducedType)
1717 return SubViewVerificationResult::Success;
1718 if (!originalType.isa<MemRefType>())
1719 return SubViewVerificationResult::Success;
1720 if (originalType.isa<MemRefType>() && !candidateReducedType.isa<MemRefType>())
1721 return SubViewVerificationResult::Success;
1722
1723 ShapedType originalShapedType = originalType.cast<ShapedType>();
1724 ShapedType candidateReducedShapedType =
1725 candidateReducedType.cast<ShapedType>();
1726
1727 // Rank and size logic is valid for all ShapedTypes.
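// For illustration: memref<1x4x1x8xf32> can be rank-reduced to
// memref<4x8xf32> by dropping its two unit dimensions, whereas a candidate
// such as memref<4x4xf32> admits no matching size assignment and fails below.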
1728 ArrayRef<int64_t> originalShape = originalShapedType.getShape(); 1729 ArrayRef<int64_t> candidateReducedShape = 1730 candidateReducedShapedType.getShape(); 1731 unsigned originalRank = originalShape.size(), 1732 candidateReducedRank = candidateReducedShape.size(); 1733 if (candidateReducedRank > originalRank) 1734 return SubViewVerificationResult::RankTooLarge; 1735 1736 MemRefType original = originalType.cast<MemRefType>(); 1737 MemRefType candidateReduced = candidateReducedType.cast<MemRefType>(); 1738 1739 auto optionalUnusedDimsMask = 1740 computeMemRefRankReductionMask(original, candidateReduced, staticSizes); 1741 1742 // Sizes cannot be matched in case empty vector is returned. 1743 if (!optionalUnusedDimsMask.hasValue()) 1744 return SubViewVerificationResult::SizeMismatch; 1745 1746 if (originalShapedType.getElementType() != 1747 candidateReducedShapedType.getElementType()) 1748 return SubViewVerificationResult::ElemTypeMismatch; 1749 1750 // Strided layout logic is relevant for MemRefType only. 1751 if (original.getMemorySpace() != candidateReduced.getMemorySpace()) 1752 return SubViewVerificationResult::MemSpaceMismatch; 1753 return SubViewVerificationResult::Success; 1754 } 1755 1756 template <typename OpTy> 1757 static LogicalResult produceSubViewErrorMsg(SubViewVerificationResult result, 1758 OpTy op, Type expectedType, 1759 StringRef errMsg = "") { 1760 auto memrefType = expectedType.cast<ShapedType>(); 1761 switch (result) { 1762 case SubViewVerificationResult::Success: 1763 return success(); 1764 case SubViewVerificationResult::RankTooLarge: 1765 return op.emitError("expected result rank to be smaller or equal to ") 1766 << "the source rank. " << errMsg; 1767 case SubViewVerificationResult::SizeMismatch: 1768 return op.emitError("expected result type to be ") 1769 << expectedType 1770 << " or a rank-reduced version. (mismatch of result sizes) " 1771 << errMsg; 1772 case SubViewVerificationResult::ElemTypeMismatch: 1773 return op.emitError("expected result element type to be ") 1774 << memrefType.getElementType() << errMsg; 1775 case SubViewVerificationResult::MemSpaceMismatch: 1776 return op.emitError("expected result and source memory spaces to match.") 1777 << errMsg; 1778 case SubViewVerificationResult::AffineMapMismatch: 1779 return op.emitError("expected result type to be ") 1780 << expectedType 1781 << " or a rank-reduced version. (mismatch of result affine map) " 1782 << errMsg; 1783 } 1784 llvm_unreachable("unexpected subview verification result"); 1785 } 1786 1787 /// Verifier for SubViewOp. 1788 static LogicalResult verify(SubViewOp op) { 1789 MemRefType baseType = op.getSourceType(); 1790 MemRefType subViewType = op.getType(); 1791 1792 // The base memref and the view memref should be in the same memory space. 1793 if (baseType.getMemorySpace() != subViewType.getMemorySpace()) 1794 return op.emitError("different memory spaces specified for base memref " 1795 "type ") 1796 << baseType << " and subview memref type " << subViewType; 1797 1798 // Verify that the base memref type has a strided layout map. 1799 if (!isStrided(baseType)) 1800 return op.emitError("base type ") << baseType << " is not strided"; 1801 1802 // Verify result type against inferred type. 
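// For illustration: with static offsets [0, 0], sizes [1, 4] and strides
// [1, 1] on a memref<8x16xf32> base, the inferred type is
// memref<1x4xf32, offset: 0, strides: [16, 1]>; the declared result type must
// be that type or a rank-reduced version of it with unit dimensions dropped.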
1803 auto expectedType = SubViewOp::inferResultType(
1804 baseType, extractFromI64ArrayAttr(op.static_offsets()),
1805 extractFromI64ArrayAttr(op.static_sizes()),
1806 extractFromI64ArrayAttr(op.static_strides()));
1807
1808 std::string errMsg;
1809 auto result =
1810 isRankReducedType(expectedType, subViewType, op.static_sizes(), &errMsg);
1811 return produceSubViewErrorMsg(result, op, expectedType, errMsg);
1812 }
1813
1814 raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) {
1815 return os << "range " << range.offset << ":" << range.size << ":"
1816 << range.stride;
1817 }
1818
1819 /// Return the list of Range (i.e. offset, size, stride). Each Range
1820 /// entry contains either the dynamic value or a ConstantIndexOp constructed
1821 /// with `b` at location `loc`.
1822 SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
1823 OpBuilder &b, Location loc) {
1824 std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
1825 assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
1826 assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
1827 SmallVector<Range, 8> res;
1828 unsigned rank = ranks[0];
1829 res.reserve(rank);
1830 for (unsigned idx = 0; idx < rank; ++idx) {
1831 Value offset =
1832 op.isDynamicOffset(idx)
1833 ? op.getDynamicOffset(idx)
1834 : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
1835 Value size =
1836 op.isDynamicSize(idx)
1837 ? op.getDynamicSize(idx)
1838 : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
1839 Value stride =
1840 op.isDynamicStride(idx)
1841 ? op.getDynamicStride(idx)
1842 : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
1843 res.emplace_back(Range{offset, size, stride});
1844 }
1845 return res;
1846 }
1847
1848 /// Infer the canonical type of the result of a subview operation. Returns the
1849 /// rank-reduced type with rank `resultRank` when rank reduction is possible,
1850 /// and the non-rank-reduced inferred type otherwise.
1851 static MemRefType
1852 getCanonicalSubViewResultType(unsigned resultRank, MemRefType sourceType,
1853 ArrayRef<OpFoldResult> mixedOffsets,
1854 ArrayRef<OpFoldResult> mixedSizes,
1855 ArrayRef<OpFoldResult> mixedStrides) {
1856 auto resultType =
1857 SubViewOp::inferRankReducedResultType(
1858 resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1859 .cast<MemRefType>();
1860 if (resultType.getRank() != resultRank) {
1861 resultType = SubViewOp::inferResultType(sourceType, mixedOffsets,
1862 mixedSizes, mixedStrides)
1863 .cast<MemRefType>();
1864 }
1865 return resultType;
1866 }
1867
1868 namespace {
1869 /// Pattern to rewrite a subview op with MemRefCast arguments.
1870 /// This essentially pushes memref.cast past its consuming subview when
1871 /// `canFoldIntoConsumerOp` is true.
1872 ///
1873 /// Example:
1874 /// ```
1875 /// %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
1876 /// %1 = memref.subview %0[0, 0][3, 4][1, 1] :
1877 /// memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
1878 /// ```
1879 /// is rewritten into:
1880 /// ```
1881 /// %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
1882 /// %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
1883 /// memref<3x4xf32, offset:?, strides:[?, 1]>
1884 /// ```
1885 class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
1886 public:
1887 using OpRewritePattern<SubViewOp>::OpRewritePattern;
1888
1889 LogicalResult matchAndRewrite(SubViewOp subViewOp,
1890 PatternRewriter &rewriter) const override {
1891 // If any operand is a constant, just return and let SubViewOpConstantFolder kick in.
1892 if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
1893 return matchPattern(operand, matchConstantIndex());
1894 }))
1895 return failure();
1896
1897 auto castOp = subViewOp.source().getDefiningOp<CastOp>();
1898 if (!castOp)
1899 return failure();
1900
1901 if (!CastOp::canFoldIntoConsumerOp(castOp))
1902 return failure();
1903
1904 /// Deduce the result type of the SubViewOp using `getCanonicalSubViewResultType`
1905 /// on the cast source operand type and the SubViewOp static information. This
1906 /// is the type that would result if the memref.cast were folded.
1907 auto resultType = getCanonicalSubViewResultType(
1908 subViewOp.getType().getRank(),
1909 castOp.source().getType().cast<MemRefType>(),
1910 subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
1911 subViewOp.getMixedStrides());
1912 Value newSubView = rewriter.create<SubViewOp>(
1913 subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
1914 subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
1915 subViewOp.static_sizes(), subViewOp.static_strides());
1916 rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
1917 newSubView);
1918 return success();
1919 }
1920 };
1921 } // namespace
1922
1923 /// Return the canonical type of the result of a subview.
1924 struct SubViewReturnTypeCanonicalizer {
1925 MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
1926 ArrayRef<OpFoldResult> mixedSizes,
1927 ArrayRef<OpFoldResult> mixedStrides) {
1928 return getCanonicalSubViewResultType(op.getType().getRank(),
1929 op.getSourceType(), mixedOffsets,
1930 mixedSizes, mixedStrides);
1931 }
1932 };
1933
1934 /// A canonicalizer wrapper to replace SubViewOps.
1935 struct SubViewCanonicalizer {
1936 void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
1937 rewriter.replaceOpWithNewOp<CastOp>(op, newOp, op.getType());
1938 }
1939 };
1940
1941 void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
1942 MLIRContext *context) {
1943 results
1944 .add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
1945 SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
1946 SubViewOpMemRefCastFolder>(context);
1947 }
1948
1949 OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
1950 auto resultShapedType = getResult().getType().cast<ShapedType>();
1951 auto sourceShapedType = source().getType().cast<ShapedType>();
1952
1953 if (resultShapedType.hasStaticShape() &&
1954 resultShapedType == sourceShapedType) {
1955 return getViewSource();
1956 }
1957
1958 return {};
1959 }
1960
1961 //===----------------------------------------------------------------------===//
1962 // TransposeOp
1963 //===----------------------------------------------------------------------===//
1964
1965 /// Build a strided memref type by applying `permutationMap` to `memRefType`.
1966 static MemRefType inferTransposeResultType(MemRefType memRefType,
1967 AffineMap permutationMap) {
1968 auto rank = memRefType.getRank();
1969 auto originalSizes = memRefType.getShape();
1970 // Compute permuted sizes.
1971 SmallVector<int64_t, 4> sizes(rank, 0);
1972 for (auto en : llvm::enumerate(permutationMap.getResults()))
1973 sizes[en.index()] =
1974 originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
1975
1976 // Compute permuted strides.
1977 int64_t offset;
1978 SmallVector<int64_t, 4> strides;
1979 auto res = getStridesAndOffset(memRefType, strides, offset);
1980 assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
1981 (void)res;
1982 auto map =
1983 makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
1984 map = permutationMap ? map.compose(permutationMap) : map;
1985 return MemRefType::Builder(memRefType)
1986 .setShape(sizes)
1987 .setLayout(AffineMapAttr::get(map));
1988 }
1989
1990 void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
1991 AffineMapAttr permutation,
1992 ArrayRef<NamedAttribute> attrs) {
1993 auto permutationMap = permutation.getValue();
1994 assert(permutationMap);
1995
1996 auto memRefType = in.getType().cast<MemRefType>();
1997 // Compute result type.
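// For illustration: transposing memref<3x4xf32> with the permutation
// (d0, d1) -> (d1, d0) yields a 4x3 shape whose strides [4, 1] become [1, 4],
// i.e. a layout map of (d0, d1) -> (d1 * 4 + d0).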
1998 MemRefType resultType = inferTransposeResultType(memRefType, permutationMap); 1999 2000 build(b, result, resultType, in, attrs); 2001 result.addAttribute(TransposeOp::getPermutationAttrName(), permutation); 2002 } 2003 2004 // transpose $in $permutation attr-dict : type($in) `to` type(results) 2005 static void print(OpAsmPrinter &p, TransposeOp op) { 2006 p << " " << op.in() << " " << op.permutation(); 2007 p.printOptionalAttrDict(op->getAttrs(), 2008 {TransposeOp::getPermutationAttrName()}); 2009 p << " : " << op.in().getType() << " to " << op.getType(); 2010 } 2011 2012 static ParseResult parseTransposeOp(OpAsmParser &parser, 2013 OperationState &result) { 2014 OpAsmParser::OperandType in; 2015 AffineMap permutation; 2016 MemRefType srcType, dstType; 2017 if (parser.parseOperand(in) || parser.parseAffineMap(permutation) || 2018 parser.parseOptionalAttrDict(result.attributes) || 2019 parser.parseColonType(srcType) || 2020 parser.resolveOperand(in, srcType, result.operands) || 2021 parser.parseKeywordType("to", dstType) || 2022 parser.addTypeToList(dstType, result.types)) 2023 return failure(); 2024 2025 result.addAttribute(TransposeOp::getPermutationAttrName(), 2026 AffineMapAttr::get(permutation)); 2027 return success(); 2028 } 2029 2030 static LogicalResult verify(TransposeOp op) { 2031 if (!op.permutation().isPermutation()) 2032 return op.emitOpError("expected a permutation map"); 2033 if (op.permutation().getNumDims() != op.getShapedType().getRank()) 2034 return op.emitOpError( 2035 "expected a permutation map of same rank as the input"); 2036 2037 auto srcType = op.in().getType().cast<MemRefType>(); 2038 auto dstType = op.getType().cast<MemRefType>(); 2039 auto transposedType = inferTransposeResultType(srcType, op.permutation()); 2040 if (dstType != transposedType) 2041 return op.emitOpError("output type ") 2042 << dstType << " does not match transposed input type " << srcType 2043 << ", " << transposedType; 2044 return success(); 2045 } 2046 2047 OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) { 2048 if (succeeded(foldMemRefCast(*this))) 2049 return getResult(); 2050 return {}; 2051 } 2052 2053 //===----------------------------------------------------------------------===// 2054 // ViewOp 2055 //===----------------------------------------------------------------------===// 2056 2057 static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) { 2058 OpAsmParser::OperandType srcInfo; 2059 SmallVector<OpAsmParser::OperandType, 1> offsetInfo; 2060 SmallVector<OpAsmParser::OperandType, 4> sizesInfo; 2061 auto indexType = parser.getBuilder().getIndexType(); 2062 Type srcType, dstType; 2063 llvm::SMLoc offsetLoc; 2064 if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) || 2065 parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square)) 2066 return failure(); 2067 2068 if (offsetInfo.size() != 1) 2069 return parser.emitError(offsetLoc) << "expects 1 offset operand"; 2070 2071 return failure( 2072 parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) || 2073 parser.parseOptionalAttrDict(result.attributes) || 2074 parser.parseColonType(srcType) || 2075 parser.resolveOperand(srcInfo, srcType, result.operands) || 2076 parser.resolveOperands(offsetInfo, indexType, result.operands) || 2077 parser.resolveOperands(sizesInfo, indexType, result.operands) || 2078 parser.parseKeywordType("to", dstType) || 2079 parser.addTypeToList(dstType, result.types)); 2080 } 2081 2082 static void print(OpAsmPrinter &p, ViewOp op) { 2083 p << ' ' 
<< op.getOperand(0) << '['; 2084 p.printOperand(op.byte_shift()); 2085 p << "][" << op.sizes() << ']'; 2086 p.printOptionalAttrDict(op->getAttrs()); 2087 p << " : " << op.getOperand(0).getType() << " to " << op.getType(); 2088 } 2089 2090 static LogicalResult verify(ViewOp op) { 2091 auto baseType = op.getOperand(0).getType().cast<MemRefType>(); 2092 auto viewType = op.getType(); 2093 2094 // The base memref should have identity layout map (or none). 2095 if (!baseType.getLayout().isIdentity()) 2096 return op.emitError("unsupported map for base memref type ") << baseType; 2097 2098 // The result memref should have identity layout map (or none). 2099 if (!viewType.getLayout().isIdentity()) 2100 return op.emitError("unsupported map for result memref type ") << viewType; 2101 2102 // The base memref and the view memref should be in the same memory space. 2103 if (baseType.getMemorySpace() != viewType.getMemorySpace()) 2104 return op.emitError("different memory spaces specified for base memref " 2105 "type ") 2106 << baseType << " and view memref type " << viewType; 2107 2108 // Verify that we have the correct number of sizes for the result type. 2109 unsigned numDynamicDims = viewType.getNumDynamicDims(); 2110 if (op.sizes().size() != numDynamicDims) 2111 return op.emitError("incorrect number of size operands for type ") 2112 << viewType; 2113 2114 return success(); 2115 } 2116 2117 Value ViewOp::getViewSource() { return source(); } 2118 2119 namespace { 2120 2121 struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> { 2122 using OpRewritePattern<ViewOp>::OpRewritePattern; 2123 2124 LogicalResult matchAndRewrite(ViewOp viewOp, 2125 PatternRewriter &rewriter) const override { 2126 // Return if none of the operands are constants. 2127 if (llvm::none_of(viewOp.getOperands(), [](Value operand) { 2128 return matchPattern(operand, matchConstantIndex()); 2129 })) 2130 return failure(); 2131 2132 // Get result memref type. 2133 auto memrefType = viewOp.getType(); 2134 2135 // Get offset from old memref view type 'memRefType'. 2136 int64_t oldOffset; 2137 SmallVector<int64_t, 4> oldStrides; 2138 if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset))) 2139 return failure(); 2140 assert(oldOffset == 0 && "Expected 0 offset"); 2141 2142 SmallVector<Value, 4> newOperands; 2143 2144 // Offset cannot be folded into result type. 2145 2146 // Fold any dynamic dim operands which are produced by a constant. 2147 SmallVector<int64_t, 4> newShapeConstants; 2148 newShapeConstants.reserve(memrefType.getRank()); 2149 2150 unsigned dynamicDimPos = 0; 2151 unsigned rank = memrefType.getRank(); 2152 for (unsigned dim = 0, e = rank; dim < e; ++dim) { 2153 int64_t dimSize = memrefType.getDimSize(dim); 2154 // If this is already static dimension, keep it. 2155 if (!ShapedType::isDynamic(dimSize)) { 2156 newShapeConstants.push_back(dimSize); 2157 continue; 2158 } 2159 auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp(); 2160 if (auto constantIndexOp = 2161 dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) { 2162 // Dynamic shape dimension will be folded. 2163 newShapeConstants.push_back(constantIndexOp.value()); 2164 } else { 2165 // Dynamic shape dimension not folded; copy operand from old memref. 2166 newShapeConstants.push_back(dimSize); 2167 newOperands.push_back(viewOp.sizes()[dynamicDimPos]); 2168 } 2169 dynamicDimPos++; 2170 } 2171 2172 // Create new memref type with constant folded dims. 
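// For illustration: a view producing memref<?x4xf32> whose dynamic size
// operand is a constant 16 is rewritten to produce memref<16x4xf32>, and a
// memref.cast back to the original memref<?x4xf32> type replaces the old op.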
2173 MemRefType newMemRefType = 2174 MemRefType::Builder(memrefType).setShape(newShapeConstants); 2175 // Nothing new, don't fold. 2176 if (newMemRefType == memrefType) 2177 return failure(); 2178 2179 // Create new ViewOp. 2180 auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType, 2181 viewOp.getOperand(0), 2182 viewOp.byte_shift(), newOperands); 2183 // Insert a cast so we have the same type as the old memref type. 2184 rewriter.replaceOpWithNewOp<CastOp>(viewOp, newViewOp, viewOp.getType()); 2185 return success(); 2186 } 2187 }; 2188 2189 struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> { 2190 using OpRewritePattern<ViewOp>::OpRewritePattern; 2191 2192 LogicalResult matchAndRewrite(ViewOp viewOp, 2193 PatternRewriter &rewriter) const override { 2194 Value memrefOperand = viewOp.getOperand(0); 2195 CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>(); 2196 if (!memrefCastOp) 2197 return failure(); 2198 Value allocOperand = memrefCastOp.getOperand(); 2199 AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>(); 2200 if (!allocOp) 2201 return failure(); 2202 rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand, 2203 viewOp.byte_shift(), viewOp.sizes()); 2204 return success(); 2205 } 2206 }; 2207 2208 } // end anonymous namespace 2209 2210 void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results, 2211 MLIRContext *context) { 2212 results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context); 2213 } 2214 2215 //===----------------------------------------------------------------------===// 2216 // TableGen'd op method definitions 2217 //===----------------------------------------------------------------------===// 2218 2219 #define GET_OP_CLASSES 2220 #include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc" 2221