//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/StandardOps/Utils/Utils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "mlir/Interfaces/ViewLikeInterface.h"
#include "llvm/ADT/STLExtras.h"

using namespace mlir;
using namespace mlir::memref;

/// Materialize a single constant operation from a given attribute value with
/// the desired resultant type.
Operation *MemRefDialect::materializeConstant(OpBuilder &builder,
                                              Attribute value, Type type,
                                              Location loc) {
  if (arith::ConstantOp::isBuildableWith(value, type))
    return builder.create<arith::ConstantOp>(loc, value, type);
  if (ConstantOp::isBuildableWith(value, type))
    return builder.create<ConstantOp>(loc, value, type);
  return nullptr;
}

//===----------------------------------------------------------------------===//
// Common canonicalization pattern support logic
//===----------------------------------------------------------------------===//

/// This is a common utility used for patterns of the form
/// "someop(memrefcast) -> someop". It folds the source of any memref.cast
/// into the root operation directly.
LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) {
  bool folded = false;
  for (OpOperand &operand : op->getOpOperands()) {
    auto cast = operand.get().getDefiningOp<CastOp>();
    if (cast && operand.get() != inner &&
        !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
      operand.set(cast.getOperand());
      folded = true;
    }
  }
  return success(folded);
}

/// Return an unranked/ranked tensor type for the given unranked/ranked memref
/// type.
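/// For example, `memref<4x?xf32>` maps to `tensor<4x?xf32>`, an unranked
/// `memref<*xf32>` maps to `tensor<*xf32>`, and any non-memref type maps to
/// `none`.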
Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
  if (auto memref = type.dyn_cast<MemRefType>())
    return RankedTensorType::get(memref.getShape(), memref.getElementType());
  if (auto memref = type.dyn_cast<UnrankedMemRefType>())
    return UnrankedTensorType::get(memref.getElementType());
  return NoneType::get(type.getContext());
}

//===----------------------------------------------------------------------===//
// AllocOp / AllocaOp
//===----------------------------------------------------------------------===//

template <typename AllocLikeOp>
static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
  static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
                "applies to only alloc or alloca");
  auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
  if (!memRefType)
    return op.emitOpError("result must be a memref");

  if (static_cast<int64_t>(op.dynamicSizes().size()) !=
      memRefType.getNumDynamicDims())
    return op.emitOpError("dimension operand count does not equal memref "
                          "dynamic dimension count");

  unsigned numSymbols = 0;
  if (!memRefType.getLayout().isIdentity())
    numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols();
  if (op.symbolOperands().size() != numSymbols)
    return op.emitOpError("symbol operand count does not equal memref symbol "
                          "count: expected ")
           << numSymbols << ", got " << op.symbolOperands().size();

  return success();
}

static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); }

static LogicalResult verify(AllocaOp op) {
  // An alloca op needs to have an ancestor with an allocation scope trait.
  if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
    return op.emitOpError(
        "requires an ancestor op with AutomaticAllocationScope trait");

  return verifyAllocLikeOp(op);
}

namespace {
/// Fold constant dimensions into an alloc-like operation.
template <typename AllocLikeOp>
struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
  using OpRewritePattern<AllocLikeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocLikeOp alloc,
                                PatternRewriter &rewriter) const override {
    // Check to see if any dimension operands are constants. If so, we can
    // substitute and drop them.
    if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto memrefType = alloc.getType();

    // Ok, we have one or more constant operands. Collect the non-constant ones
    // and keep track of the resultant memref type to build.
    SmallVector<int64_t, 4> newShapeConstants;
    newShapeConstants.reserve(memrefType.getRank());
    SmallVector<Value, 4> dynamicSizes;

    unsigned dynamicDimPos = 0;
    for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
      int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
      if (dimSize != -1) {
        newShapeConstants.push_back(dimSize);
        continue;
      }
      auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos];
      auto *defOp = dynamicSize.getDefiningOp();
      if (auto constantIndexOp =
              dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
        // Dynamic shape dimension will be folded.
        newShapeConstants.push_back(constantIndexOp.value());
      } else {
        // Dynamic shape dimension not folded; copy dynamicSize from old memref.
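        // Keep the dimension dynamic (-1, i.e. ShapedType::kDynamicSize) and
        // forward the SSA size operand to the new op.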
        newShapeConstants.push_back(-1);
        dynamicSizes.push_back(dynamicSize);
      }
      dynamicDimPos++;
    }

    // Create new memref type (which will have fewer dynamic dimensions).
    MemRefType newMemRefType =
        MemRefType::Builder(memrefType).setShape(newShapeConstants);
    assert(static_cast<int64_t>(dynamicSizes.size()) ==
           newMemRefType.getNumDynamicDims());

    // Create and insert the alloc op for the new memref.
    auto newAlloc = rewriter.create<AllocLikeOp>(
        alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(),
        alloc.alignmentAttr());
    // Insert a cast so we have the same type as the old alloc.
    auto resultCast =
        rewriter.create<CastOp>(alloc.getLoc(), newAlloc, alloc.getType());

    rewriter.replaceOp(alloc, {resultCast});
    return success();
  }
};

/// Fold alloc operations with no users or only store and dealloc uses.
template <typename T>
struct SimplifyDeadAlloc : public OpRewritePattern<T> {
  using OpRewritePattern<T>::OpRewritePattern;

  LogicalResult matchAndRewrite(T alloc,
                                PatternRewriter &rewriter) const override {
    if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
          if (auto storeOp = dyn_cast<StoreOp>(op))
            return storeOp.value() == alloc;
          return !isa<DeallocOp>(op);
        }))
      return failure();

    for (Operation *user : llvm::make_early_inc_range(alloc->getUsers()))
      rewriter.eraseOp(user);

    rewriter.eraseOp(alloc);
    return success();
  }
};
} // namespace

void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context);
}

void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                           MLIRContext *context) {
  results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>(
      context);
}

//===----------------------------------------------------------------------===//
// AllocaScopeOp
//===----------------------------------------------------------------------===//

static void print(OpAsmPrinter &p, AllocaScopeOp &op) {
  bool printBlockTerminators = false;

  p << ' ';
  if (!op.results().empty()) {
    p << " -> (" << op.getResultTypes() << ")";
    printBlockTerminators = true;
  }
  p << ' ';
  p.printRegion(op.bodyRegion(),
                /*printEntryBlockArgs=*/false,
                /*printBlockTerminators=*/printBlockTerminators);
  p.printOptionalAttrDict(op->getAttrs());
}

static ParseResult parseAllocaScopeOp(OpAsmParser &parser,
                                      OperationState &result) {
  // Create a region for the body.
  result.regions.reserve(1);
  Region *bodyRegion = result.addRegion();

  // Parse optional results type list.
  if (parser.parseOptionalArrowTypeList(result.types))
    return failure();

  // Parse the body region.
  if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{}))
    return failure();
  AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(),
                                  result.location);

  // Parse the optional attribute list.
  if (parser.parseOptionalAttrDict(result.attributes))
    return failure();

  return success();
}

static LogicalResult verify(AllocaScopeOp op) {
  if (failed(RegionBranchOpInterface::verifyTypes(op)))
    return failure();

  return success();
}

void AllocaScopeOp::getSuccessorRegions(
    Optional<unsigned> index, ArrayRef<Attribute> operands,
    SmallVectorImpl<RegionSuccessor> &regions) {
  if (index.hasValue()) {
    regions.push_back(RegionSuccessor(getResults()));
    return;
  }

  regions.push_back(RegionSuccessor(&bodyRegion()));
}

//===----------------------------------------------------------------------===//
// AssumeAlignmentOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(AssumeAlignmentOp op) {
  unsigned alignment = op.alignment();
  if (!llvm::isPowerOf2_32(alignment))
    return op.emitOpError("alignment must be power of 2");
  return success();
}

//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//

/// Determines whether MemRef_CastOp casts to a more dynamic version of the
/// source memref. This is useful to fold a memref.cast into a consuming op
/// and implement canonicalization patterns for ops in different dialects that
/// may consume the results of memref.cast operations. Such foldable memref.cast
/// operations are typically inserted as `view` and `subview` ops are
/// canonicalized, to preserve the type compatibility of their uses.
///
/// Returns true when all conditions are met:
/// 1. source and result are ranked memrefs with strided semantics and same
///    element type and rank.
/// 2. each of the source's size, offset or stride has more static information
///    than the corresponding result's size, offset or stride.
///
/// Example 1:
/// ```mlir
///   %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
///   %2 = consumer %1 ... : memref<?x?xf32> ...
/// ```
///
/// may fold into:
///
/// ```mlir
///   %2 = consumer %0 ... : memref<8x16xf32> ...
/// ```
///
/// Example 2:
/// ```
///   %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
///          to memref<?x?xf32>
///   consumer %1 : memref<?x?xf32> ...
/// ```
///
/// may fold into:
///
/// ```
///   consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
/// ```
bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
  MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
  MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();

  // Requires ranked MemRefType.
  if (!sourceType || !resultType)
    return false;

  // Requires same elemental type.
  if (sourceType.getElementType() != resultType.getElementType())
    return false;

  // Requires same rank.
  if (sourceType.getRank() != resultType.getRank())
    return false;

  // Only fold casts between strided memref forms.
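  // Both types must expose strides and an offset; otherwise they are not in
  // strided form and the cast cannot be folded this way.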
  int64_t sourceOffset, resultOffset;
  SmallVector<int64_t, 4> sourceStrides, resultStrides;
  if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
      failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
    return false;

  // If cast is towards more static sizes along any dimension, don't fold.
  for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
    auto ss = std::get<0>(it), st = std::get<1>(it);
    if (ss != st)
      if (ShapedType::isDynamic(ss) && !ShapedType::isDynamic(st))
        return false;
  }

  // If cast is towards more static offset along any dimension, don't fold.
  if (sourceOffset != resultOffset)
    if (ShapedType::isDynamicStrideOrOffset(sourceOffset) &&
        !ShapedType::isDynamicStrideOrOffset(resultOffset))
      return false;

  // If cast is towards more static strides along any dimension, don't fold.
  for (auto it : llvm::zip(sourceStrides, resultStrides)) {
    auto ss = std::get<0>(it), st = std::get<1>(it);
    if (ss != st)
      if (ShapedType::isDynamicStrideOrOffset(ss) &&
          !ShapedType::isDynamicStrideOrOffset(st))
        return false;
  }

  return true;
}

bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
  if (inputs.size() != 1 || outputs.size() != 1)
    return false;
  Type a = inputs.front(), b = outputs.front();
  auto aT = a.dyn_cast<MemRefType>();
  auto bT = b.dyn_cast<MemRefType>();

  auto uaT = a.dyn_cast<UnrankedMemRefType>();
  auto ubT = b.dyn_cast<UnrankedMemRefType>();

  if (aT && bT) {
    if (aT.getElementType() != bT.getElementType())
      return false;
    if (aT.getLayout() != bT.getLayout()) {
      int64_t aOffset, bOffset;
      SmallVector<int64_t, 4> aStrides, bStrides;
      if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
          failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
          aStrides.size() != bStrides.size())
        return false;

      // Strides along a dimension/offset are compatible if the value in the
      // source memref is static and the value in the target memref is the
      // same. They are also compatible if either one is dynamic (see
      // description of MemRefCastOp for details).
      auto checkCompatible = [](int64_t a, int64_t b) {
        return (a == MemRefType::getDynamicStrideOrOffset() ||
                b == MemRefType::getDynamicStrideOrOffset() || a == b);
      };
      if (!checkCompatible(aOffset, bOffset))
        return false;
      for (const auto &aStride : enumerate(aStrides))
        if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
          return false;
    }
    if (aT.getMemorySpace() != bT.getMemorySpace())
      return false;

    // They must have the same rank, and any specified dimensions must match.
    if (aT.getRank() != bT.getRank())
      return false;

    for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
      int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
      if (aDim != -1 && bDim != -1 && aDim != bDim)
        return false;
    }
    return true;
  } else {
    if (!aT && !uaT)
      return false;
    if (!bT && !ubT)
      return false;
    // Unranked to unranked casting is unsupported.
    if (uaT && ubT)
      return false;

    auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
    auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
    if (aEltType != bEltType)
      return false;

    auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
    auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
    return aMemSpace == bMemSpace;
  }

  return false;
}

OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
  return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
}

//===----------------------------------------------------------------------===//
// CopyOp
//===----------------------------------------------------------------------===//

namespace {
/// If the source/target of a CopyOp is a CastOp that does not modify the shape
/// and element type, the cast can be skipped. Such CastOps only cast the layout
/// of the type.
struct FoldCopyOfCast : public OpRewritePattern<CopyOp> {
  using OpRewritePattern<CopyOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CopyOp copyOp,
                                PatternRewriter &rewriter) const override {
    bool modified = false;

    // Check source.
    if (auto castOp = copyOp.source().getDefiningOp<CastOp>()) {
      auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
      auto toType = castOp.getType().dyn_cast<MemRefType>();

      if (fromType && toType) {
        if (fromType.getShape() == toType.getShape() &&
            fromType.getElementType() == toType.getElementType()) {
          rewriter.updateRootInPlace(
              copyOp, [&] { copyOp.sourceMutable().assign(castOp.source()); });
          modified = true;
        }
      }
    }

    // Check target.
    if (auto castOp = copyOp.target().getDefiningOp<CastOp>()) {
      auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
      auto toType = castOp.getType().dyn_cast<MemRefType>();

      if (fromType && toType) {
        if (fromType.getShape() == toType.getShape() &&
            fromType.getElementType() == toType.getElementType()) {
          rewriter.updateRootInPlace(
              copyOp, [&] { copyOp.targetMutable().assign(castOp.source()); });
          modified = true;
        }
      }
    }

    return success(modified);
  }
};

/// Fold memref.copy(%x, %x).
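/// Such a copy reads from and writes to the same memref, so it has no
/// observable effect and can simply be erased.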
struct FoldSelfCopy : public OpRewritePattern<CopyOp> {
  using OpRewritePattern<CopyOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CopyOp copyOp,
                                PatternRewriter &rewriter) const override {
    if (copyOp.source() != copyOp.target())
      return failure();

    rewriter.eraseOp(copyOp);
    return success();
  }
};
} // namespace

void CopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
  results.add<FoldCopyOfCast, FoldSelfCopy>(context);
}

//===----------------------------------------------------------------------===//
// DeallocOp
//===----------------------------------------------------------------------===//

LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dealloc(memrefcast) -> dealloc
  return foldMemRefCast(*this);
}

//===----------------------------------------------------------------------===//
// DimOp
//===----------------------------------------------------------------------===//

void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                  int64_t index) {
  auto loc = result.location;
  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
  build(builder, result, source, indexValue);
}

void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                  Value index) {
  auto indexTy = builder.getIndexType();
  build(builder, result, indexTy, source, index);
}

Optional<int64_t> DimOp::getConstantIndex() {
  if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
    return constantOp.getValue().cast<IntegerAttr>().getInt();
  return {};
}

static LogicalResult verify(DimOp op) {
  // Assume unknown index to be in range.
  Optional<int64_t> index = op.getConstantIndex();
  if (!index.hasValue())
    return success();

  // Check that constant index is not knowingly out of range.
  auto type = op.source().getType();
  if (auto memrefType = type.dyn_cast<MemRefType>()) {
    if (index.getValue() >= memrefType.getRank())
      return op.emitOpError("index is out of range");
  } else if (type.isa<UnrankedMemRefType>()) {
    // Assume index to be in range.
  } else {
    llvm_unreachable("expected operand with memref type");
  }
  return success();
}

/// Return a map with each element of `vals` as the key and the number of its
/// occurrences as the value. Use std::map, since the `vals` here are strides
/// and the dynamic stride value is the same as the tombstone value for
/// `DenseMap<int64_t>`.
static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
  std::map<int64_t, unsigned> numOccurences;
  for (auto val : vals)
    numOccurences[val]++;
  return numOccurences;
}

/// Given the `originalType` and a `candidateReducedType` whose shape is assumed
/// to be a subset of `originalType` with some `1` entries erased, return the
/// set of indices that specifies which of the entries of `originalShape` are
/// dropped to obtain `reducedShape`.
/// This accounts for cases where there are multiple unit-dims, but only a
/// subset of those are dropped. For MemRefTypes these can be disambiguated
/// using the strides. If a dimension is dropped the stride must be dropped too.
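/// For example, reducing a `memref<1x1x4xf32>` with strides `[8, 4, 1]` to a
/// rank-2 type keeps the unit dimension whose stride survives: a result with
/// strides `[4, 1]` drops dimension 0, while one with strides `[8, 1]` drops
/// dimension 1, even though both results have shape `1x4`.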
static llvm::Optional<llvm::SmallDenseSet<unsigned>>
computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
                               ArrayRef<OpFoldResult> sizes) {
  llvm::SmallDenseSet<unsigned> unusedDims;
  if (originalType.getRank() == reducedType.getRank())
    return unusedDims;

  for (const auto &dim : llvm::enumerate(sizes))
    if (auto attr = dim.value().dyn_cast<Attribute>())
      if (attr.cast<IntegerAttr>().getInt() == 1)
        unusedDims.insert(dim.index());

  SmallVector<int64_t> originalStrides, candidateStrides;
  int64_t originalOffset, candidateOffset;
  if (failed(
          getStridesAndOffset(originalType, originalStrides, originalOffset)) ||
      failed(
          getStridesAndOffset(reducedType, candidateStrides, candidateOffset)))
    return llvm::None;

  // For memrefs, a dimension is truly dropped if its corresponding stride is
  // also dropped. This is particularly important when more than one of the
  // dims is 1. Track the number of occurrences of the strides in the original
  // type and the candidate type. For each unused dim that stride should not be
  // present in the candidate type. Note that there could be multiple
  // dimensions that have the same size. We don't need to figure out exactly
  // which dim corresponds to which stride, we just need to verify that the
  // number of repetitions of a stride in the original + the number of unused
  // dims with that stride == the number of repetitions of that stride in the
  // candidate.
  std::map<int64_t, unsigned> currUnaccountedStrides =
      getNumOccurences(originalStrides);
  std::map<int64_t, unsigned> candidateStridesNumOccurences =
      getNumOccurences(candidateStrides);
  llvm::SmallDenseSet<unsigned> prunedUnusedDims;
  for (unsigned dim : unusedDims) {
    int64_t originalStride = originalStrides[dim];
    if (currUnaccountedStrides[originalStride] >
        candidateStridesNumOccurences[originalStride]) {
      // This dim can be treated as dropped.
      currUnaccountedStrides[originalStride]--;
      continue;
    }
    if (currUnaccountedStrides[originalStride] ==
        candidateStridesNumOccurences[originalStride]) {
      // The stride for this is not dropped. Keep as is.
      prunedUnusedDims.insert(dim);
      continue;
    }
    if (currUnaccountedStrides[originalStride] <
        candidateStridesNumOccurences[originalStride]) {
      // This should never happen. Can't have a stride in the reduced rank type
      // that wasn't in the original one.
      return llvm::None;
    }
  }

  for (auto prunedDim : prunedUnusedDims)
    unusedDims.erase(prunedDim);
  if (unusedDims.size() + reducedType.getRank() != originalType.getRank())
    return llvm::None;
  return unusedDims;
}

llvm::SmallDenseSet<unsigned> SubViewOp::getDroppedDims() {
  MemRefType sourceType = getSourceType();
  MemRefType resultType = getType();
  llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
      computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
  assert(unusedDims && "unable to find unused dims of subview");
  return *unusedDims;
}

OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  // All forms of folding require a known index.
  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
  if (!index)
    return {};

  // Folding for unranked types (UnrankedMemRefType) is not supported.
  auto memrefType = source().getType().dyn_cast<MemRefType>();
  if (!memrefType)
    return {};

  // Fold if the shape extent along the given index is known.
  if (!memrefType.isDynamicDim(index.getInt())) {
    Builder builder(getContext());
    return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
  }

  // The size at the given index is now known to be a dynamic size.
  unsigned unsignedIndex = index.getValue().getZExtValue();

  // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
  Operation *definingOp = source().getDefiningOp();

  if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
    return *(alloc.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp))
    return *(alloca.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
    return *(view.getDynamicSizes().begin() +
             memrefType.getDynamicDimIndex(unsignedIndex));

  if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) {
    llvm::SmallDenseSet<unsigned> unusedDims = subview.getDroppedDims();
    unsigned resultIndex = 0;
    unsigned sourceRank = subview.getSourceType().getRank();
    unsigned sourceIndex = 0;
    for (auto i : llvm::seq<unsigned>(0, sourceRank)) {
      if (unusedDims.count(i))
        continue;
      if (resultIndex == unsignedIndex) {
        sourceIndex = i;
        break;
      }
      resultIndex++;
    }
    assert(subview.isDynamicSize(sourceIndex) &&
           "expected dynamic subview size");
    return subview.getDynamicSize(sourceIndex);
  }

  if (auto sizeInterface =
          dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
    assert(sizeInterface.isDynamicSize(unsignedIndex) &&
           "Expected dynamic subview size");
    return sizeInterface.getDynamicSize(unsignedIndex);
  }

  // dim(memrefcast) -> dim
  if (succeeded(foldMemRefCast(*this)))
    return getResult();

  return {};
}

namespace {
/// Fold dim of a memref reshape operation to a load from the reshape's shape
/// operand.
struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
  using OpRewritePattern<DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DimOp dim,
                                PatternRewriter &rewriter) const override {
    auto reshape = dim.source().getDefiningOp<ReshapeOp>();

    if (!reshape)
      return failure();

    // Place the load directly after the reshape to ensure that the shape
    // memref was not mutated.
    rewriter.setInsertionPointAfter(reshape);
    Location loc = dim.getLoc();
    Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index());
    if (load.getType() != dim.getType())
      load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load);
    rewriter.replaceOp(dim, load);
    return success();
  }
};

} // namespace

void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<DimOfMemRefReshape>(context);
}

// ---------------------------------------------------------------------------
// DmaStartOp
// ---------------------------------------------------------------------------

void DmaStartOp::build(OpBuilder &builder, OperationState &result,
                       Value srcMemRef, ValueRange srcIndices, Value destMemRef,
                       ValueRange destIndices, Value numElements,
                       Value tagMemRef, ValueRange tagIndices, Value stride,
                       Value elementsPerStride) {
  result.addOperands(srcMemRef);
  result.addOperands(srcIndices);
  result.addOperands(destMemRef);
  result.addOperands(destIndices);
  result.addOperands({numElements, tagMemRef});
  result.addOperands(tagIndices);
  if (stride)
    result.addOperands({stride, elementsPerStride});
}

static void print(OpAsmPrinter &p, DmaStartOp op) {
  p << " " << op.getSrcMemRef() << '[' << op.getSrcIndices() << "], "
    << op.getDstMemRef() << '[' << op.getDstIndices() << "], "
    << op.getNumElements() << ", " << op.getTagMemRef() << '['
    << op.getTagIndices() << ']';
  if (op.isStrided())
    p << ", " << op.getStride() << ", " << op.getNumElementsPerStride();

  p.printOptionalAttrDict(op->getAttrs());
  p << " : " << op.getSrcMemRef().getType() << ", "
    << op.getDstMemRef().getType() << ", " << op.getTagMemRef().getType();
}

// Parse DmaStartOp.
// Ex:
//   %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size,
//                       %tag[%index], %stride, %num_elt_per_stride :
//                     : memref<3076 x f32, 0>,
//                       memref<1024 x f32, 2>,
//                       memref<1 x i32>
//
static ParseResult parseDmaStartOp(OpAsmParser &parser,
                                   OperationState &result) {
  OpAsmParser::OperandType srcMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
  OpAsmParser::OperandType dstMemRefInfo;
  SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
  OpAsmParser::OperandType numElementsInfo;
  OpAsmParser::OperandType tagMemrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
  SmallVector<OpAsmParser::OperandType, 2> strideInfo;

  SmallVector<Type, 3> types;
  auto indexType = parser.getBuilder().getIndexType();

  // Parse and resolve the following list of operands:
  // *) source memref followed by its indices (in square brackets).
  // *) destination memref followed by its indices (in square brackets).
  // *) dma size (number of elements to transfer).
  if (parser.parseOperand(srcMemRefInfo) ||
      parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
      parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseOperand(numElementsInfo) ||
      parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
      parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
    return failure();

  // Parse optional stride and elements per stride.
  if (parser.parseTrailingOperandList(strideInfo))
    return failure();

  bool isStrided = strideInfo.size() == 2;
  if (!strideInfo.empty() && !isStrided) {
    return parser.emitError(parser.getNameLoc(),
                            "expected two stride related operands");
  }

  if (parser.parseColonTypeList(types))
    return failure();
  if (types.size() != 3)
    return parser.emitError(parser.getNameLoc(), "fewer/more types expected");

  if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
      parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
      parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
      parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
      // size should be an index.
      parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
      parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
      // tag indices should be index.
      parser.resolveOperands(tagIndexInfos, indexType, result.operands))
    return failure();

  if (isStrided) {
    if (parser.resolveOperands(strideInfo, indexType, result.operands))
      return failure();
  }

  return success();
}

static LogicalResult verify(DmaStartOp op) {
  unsigned numOperands = op.getNumOperands();

  // Mandatory non-variadic operands are: src memref, dst memref, tag memref and
  // the number of elements.
  if (numOperands < 4)
    return op.emitOpError("expected at least 4 operands");

  // Check types of operands. The order of these calls is important: the later
  // calls rely on some type properties to compute the operand position.
  // 1. Source memref.
  if (!op.getSrcMemRef().getType().isa<MemRefType>())
    return op.emitOpError("expected source to be of memref type");
  if (numOperands < op.getSrcMemRefRank() + 4)
    return op.emitOpError()
           << "expected at least " << op.getSrcMemRefRank() + 4 << " operands";
  if (!op.getSrcIndices().empty() &&
      !llvm::all_of(op.getSrcIndices().getTypes(),
                    [](Type t) { return t.isIndex(); }))
    return op.emitOpError("expected source indices to be of index type");

  // 2. Destination memref.
  if (!op.getDstMemRef().getType().isa<MemRefType>())
    return op.emitOpError("expected destination to be of memref type");
  unsigned numExpectedOperands =
      op.getSrcMemRefRank() + op.getDstMemRefRank() + 4;
  if (numOperands < numExpectedOperands)
    return op.emitOpError()
           << "expected at least " << numExpectedOperands << " operands";
  if (!op.getDstIndices().empty() &&
      !llvm::all_of(op.getDstIndices().getTypes(),
                    [](Type t) { return t.isIndex(); }))
    return op.emitOpError("expected destination indices to be of index type");

  // 3. Number of elements.
  if (!op.getNumElements().getType().isIndex())
    return op.emitOpError("expected num elements to be of index type");

  // 4. Tag memref.
  if (!op.getTagMemRef().getType().isa<MemRefType>())
    return op.emitOpError("expected tag to be of memref type");
  numExpectedOperands += op.getTagMemRefRank();
  if (numOperands < numExpectedOperands)
    return op.emitOpError()
           << "expected at least " << numExpectedOperands << " operands";
  if (!op.getTagIndices().empty() &&
      !llvm::all_of(op.getTagIndices().getTypes(),
                    [](Type t) { return t.isIndex(); }))
    return op.emitOpError("expected tag indices to be of index type");

  // Optional stride-related operands must be either both present or both
  // absent.
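  // That is, numOperands must be either numExpectedOperands (non-strided form)
  // or numExpectedOperands + 2 (strided form).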
  if (numOperands != numExpectedOperands &&
      numOperands != numExpectedOperands + 2)
    return op.emitOpError("incorrect number of operands");

  // 5. Strides.
  if (op.isStrided()) {
    if (!op.getStride().getType().isIndex() ||
        !op.getNumElementsPerStride().getType().isIndex())
      return op.emitOpError(
          "expected stride and num elements per stride to be of type index");
  }

  return success();
}

LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  /// dma_start(memrefcast) -> dma_start
  return foldMemRefCast(*this);
}

// ---------------------------------------------------------------------------
// DmaWaitOp
// ---------------------------------------------------------------------------

LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
                              SmallVectorImpl<OpFoldResult> &results) {
  /// dma_wait(memrefcast) -> dma_wait
  return foldMemRefCast(*this);
}

static LogicalResult verify(DmaWaitOp op) {
  // Check that the number of tag indices matches the tagMemRef rank.
  unsigned numTagIndices = op.tagIndices().size();
  unsigned tagMemRefRank = op.getTagMemRefRank();
  if (numTagIndices != tagMemRefRank)
    return op.emitOpError() << "expected tagIndices to have the same number of "
                               "elements as the tagMemRef rank, expected "
                            << tagMemRefRank << ", but got " << numTagIndices;
  return success();
}

//===----------------------------------------------------------------------===//
// GlobalOp
//===----------------------------------------------------------------------===//

static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
                                                   TypeAttr type,
                                                   Attribute initialValue) {
  p << type;
  if (!op.isExternal()) {
    p << " = ";
    if (op.isUninitialized())
      p << "uninitialized";
    else
      p.printAttributeWithoutType(initialValue);
  }
}

static ParseResult
parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
                                       Attribute &initialValue) {
  Type type;
  if (parser.parseType(type))
    return failure();

  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType || !memrefType.hasStaticShape())
    return parser.emitError(parser.getNameLoc())
           << "type should be static shaped memref, but got " << type;
  typeAttr = TypeAttr::get(type);

  if (parser.parseOptionalEqual())
    return success();

  if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
    initialValue = UnitAttr::get(parser.getContext());
    return success();
  }

  Type tensorType = getTensorTypeFromMemRefType(memrefType);
  if (parser.parseAttribute(initialValue, tensorType))
    return failure();
  if (!initialValue.isa<ElementsAttr>())
    return parser.emitError(parser.getNameLoc())
           << "initial value should be a unit or elements attribute";
  return success();
}

static LogicalResult verify(GlobalOp op) {
  auto memrefType = op.type().dyn_cast<MemRefType>();
  if (!memrefType || !memrefType.hasStaticShape())
    return op.emitOpError("type should be static shaped memref, but got ")
           << op.type();

  // Verify that the initial value, if present, is either a unit attribute or
  // an elements attribute.
  if (op.initial_value().hasValue()) {
    Attribute initValue = op.initial_value().getValue();
    if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
      return op.emitOpError("initial value should be a unit or elements "
                            "attribute, but got ")
             << initValue;

    // Check that the type of the initial value is compatible with the type of
    // the global variable.
    if (initValue.isa<ElementsAttr>()) {
      Type initType = initValue.getType();
      Type tensorType = getTensorTypeFromMemRefType(memrefType);
      if (initType != tensorType)
        return op.emitOpError("initial value expected to be of type ")
               << tensorType << ", but was of type " << initType;
    }
  }

  if (Optional<uint64_t> alignAttr = op.alignment()) {
    uint64_t alignment = alignAttr.getValue();

    if (!llvm::isPowerOf2_64(alignment))
      return op->emitError() << "alignment attribute value " << alignment
                             << " is not a power of 2";
  }

  // TODO: verify visibility for declarations.
  return success();
}

//===----------------------------------------------------------------------===//
// GetGlobalOp
//===----------------------------------------------------------------------===//

LogicalResult
GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
  // Verify that the result type is the same as the type of the referenced
  // memref.global op.
  auto global =
      symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr());
  if (!global)
    return emitOpError("'")
           << name() << "' does not reference a valid global memref";

  Type resultType = result().getType();
  if (global.type() != resultType)
    return emitOpError("result type ")
           << resultType << " does not match type " << global.type()
           << " of the global memref @" << name();
  return success();
}

//===----------------------------------------------------------------------===//
// LoadOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(LoadOp op) {
  if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
    return op.emitOpError("incorrect number of indices for load");
  return success();
}

OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
  /// load(memrefcast) -> load
  if (succeeded(foldMemRefCast(*this)))
    return getResult();
  return OpFoldResult();
}

//===----------------------------------------------------------------------===//
// PrefetchOp
//===----------------------------------------------------------------------===//

static void print(OpAsmPrinter &p, PrefetchOp op) {
  p << " " << op.memref() << '[';
  p.printOperands(op.indices());
  p << ']' << ", " << (op.isWrite() ? "write" : "read");
  p << ", locality<" << op.localityHint();
  p << ">, " << (op.isDataCache() ? "data" : "instr");
  p.printOptionalAttrDict(
      op->getAttrs(),
      /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
  p << " : " << op.getMemRefType();
}

static ParseResult parsePrefetchOp(OpAsmParser &parser,
                                   OperationState &result) {
  OpAsmParser::OperandType memrefInfo;
  SmallVector<OpAsmParser::OperandType, 4> indexInfo;
  IntegerAttr localityHint;
  MemRefType type;
  StringRef readOrWrite, cacheType;

  auto indexTy = parser.getBuilder().getIndexType();
  auto i32Type = parser.getBuilder().getIntegerType(32);
  if (parser.parseOperand(memrefInfo) ||
      parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
      parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
      parser.parseComma() || parser.parseKeyword("locality") ||
      parser.parseLess() ||
      parser.parseAttribute(localityHint, i32Type, "localityHint",
                            result.attributes) ||
      parser.parseGreater() || parser.parseComma() ||
      parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
      parser.resolveOperand(memrefInfo, type, result.operands) ||
      parser.resolveOperands(indexInfo, indexTy, result.operands))
    return failure();

  if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
    return parser.emitError(parser.getNameLoc(),
                            "rw specifier has to be 'read' or 'write'");
  result.addAttribute(
      PrefetchOp::getIsWriteAttrName(),
      parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));

  if (!cacheType.equals("data") && !cacheType.equals("instr"))
    return parser.emitError(parser.getNameLoc(),
                            "cache type has to be 'data' or 'instr'");

  result.addAttribute(
      PrefetchOp::getIsDataCacheAttrName(),
      parser.getBuilder().getBoolAttr(cacheType.equals("data")));

  return success();
}

static LogicalResult verify(PrefetchOp op) {
  if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
    return op.emitOpError("too few indices");

  return success();
}

LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
                               SmallVectorImpl<OpFoldResult> &results) {
  // prefetch(memrefcast) -> prefetch
  return foldMemRefCast(*this);
}

//===----------------------------------------------------------------------===//
// RankOp
//===----------------------------------------------------------------------===//

OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
  // Constant fold rank when the rank of the operand is known.
  auto type = getOperand().getType();
  auto shapedType = type.dyn_cast<ShapedType>();
  if (shapedType && shapedType.hasRank())
    return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
  return IntegerAttr();
}

//===----------------------------------------------------------------------===//
// ReinterpretCastOp
//===----------------------------------------------------------------------===//

/// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`,
/// `staticSizes` and `staticStrides` are automatically filled with
/// source-memref-rank sentinel values that encode dynamic entries.
void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
                              MemRefType resultType, Value source,
                              OpFoldResult offset, ArrayRef<OpFoldResult> sizes,
                              ArrayRef<OpFoldResult> strides,
                              ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
                              MemRefType resultType, Value source,
                              int64_t offset, ArrayRef<int64_t> sizes,
                              ArrayRef<int64_t> strides,
                              ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> sizeValues =
      llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
        strideValues, attrs);
}

void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
                              MemRefType resultType, Value source, Value offset,
                              ValueRange sizes, ValueRange strides,
                              ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, resultType, source, offset, sizeValues, strideValues, attrs);
}

// TODO: ponder whether we want to allow missing trailing sizes/strides that are
// completed automatically, like we have for subview and extract_slice.
static LogicalResult verify(ReinterpretCastOp op) {
  // The source and result memrefs should be in the same memory space.
  auto srcType = op.source().getType().cast<BaseMemRefType>();
  auto resultType = op.getType().cast<MemRefType>();
  if (srcType.getMemorySpace() != resultType.getMemorySpace())
    return op.emitError("different memory spaces specified for source type ")
           << srcType << " and result memref type " << resultType;
  if (srcType.getElementType() != resultType.getElementType())
    return op.emitError("different element types specified for source type ")
           << srcType << " and result memref type " << resultType;

  // Match sizes in result memref type and in static_sizes attribute.
  for (auto &en :
       llvm::enumerate(llvm::zip(resultType.getShape(),
                                 extractFromI64ArrayAttr(op.static_sizes())))) {
    int64_t resultSize = std::get<0>(en.value());
    int64_t expectedSize = std::get<1>(en.value());
    if (!ShapedType::isDynamic(resultSize) &&
        !ShapedType::isDynamic(expectedSize) && resultSize != expectedSize)
      return op.emitError("expected result type with size = ")
             << expectedSize << " instead of " << resultSize
             << " in dim = " << en.index();
  }

  // Match offset and strides in static_offsets and static_strides attributes.
  // If the result memref type has no affine map specified, this will assume an
  // identity layout.
  int64_t resultOffset;
  SmallVector<int64_t, 4> resultStrides;
  if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
    return op.emitError(
               "expected result type to have strided layout but found ")
           << resultType;

  // Match offset in result memref type and in static_offsets attribute.
  int64_t expectedOffset = extractFromI64ArrayAttr(op.static_offsets()).front();
  if (!ShapedType::isDynamicStrideOrOffset(resultOffset) &&
      !ShapedType::isDynamicStrideOrOffset(expectedOffset) &&
      resultOffset != expectedOffset)
    return op.emitError("expected result type with offset = ")
           << resultOffset << " instead of " << expectedOffset;

  // Match strides in result memref type and in static_strides attribute.
  for (auto &en : llvm::enumerate(llvm::zip(
           resultStrides, extractFromI64ArrayAttr(op.static_strides())))) {
    int64_t resultStride = std::get<0>(en.value());
    int64_t expectedStride = std::get<1>(en.value());
    if (!ShapedType::isDynamicStrideOrOffset(resultStride) &&
        !ShapedType::isDynamicStrideOrOffset(expectedStride) &&
        resultStride != expectedStride)
      return op.emitError("expected result type with stride = ")
             << expectedStride << " instead of " << resultStride
             << " in dim = " << en.index();
  }

  return success();
}

//===----------------------------------------------------------------------===//
// Reassociative reshape ops
//===----------------------------------------------------------------------===//

SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

static void print(OpAsmPrinter &p, ExpandShapeOp op) {
  ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
}

static void print(OpAsmPrinter &p, CollapseShapeOp op) {
  ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
}

/// Detect whether memref dims [dim, dim + extent) can be reshaped without
/// copies.
static bool isReshapableDimBand(unsigned dim, unsigned extent,
                                ArrayRef<int64_t> sizes,
                                ArrayRef<AffineExpr> strides) {
  // Bands of extent one can be reshaped, as they are not reshaped at all.
  if (extent == 1)
    return true;
  // Otherwise, the size of the first dimension needs to be known.
  if (ShapedType::isDynamic(sizes[dim]))
    return false;
  assert(sizes.size() == strides.size() && "mismatched ranks");
  // Use `idx + 1` to avoid indexing out of bounds while comparing each
  // dimension of the band with the next one.
  for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) {
    // Only bands of static shapes are reshapable. This is due to the fact that
    // there is no relation between dynamic sizes and dynamic strides: we do not
    // have enough information to know whether a "-1" size corresponds to the
    // proper symbol in the AffineExpr of a stride.
    if (ShapedType::isDynamic(sizes[idx + 1]))
      return false;
    // TODO: Refine this by passing the proper nDims and nSymbols so we can
    // simplify on the fly and catch more reshapable cases.
    if (strides[idx] != strides[idx + 1] * sizes[idx + 1])
      return false;
  }
  return true;
}

/// Compute the MemRefType obtained by applying the `reassociation` (which is
/// expected to be valid) to `type`.
/// If `type` is a contiguous MemRefType, this always produces a contiguous
/// MemRefType.
static MemRefType
computeReshapeCollapsedType(MemRefType type,
                            ArrayRef<AffineMap> reassociation) {
  auto sizes = type.getShape();
  AffineExpr offset;
  SmallVector<AffineExpr, 4> strides;
  auto status = getStridesAndOffset(type, strides, offset);
  auto isIdentityLayout = type.getLayout().isIdentity();
  (void)status;
  assert(succeeded(status) && "expected strided memref");

  SmallVector<int64_t, 4> newSizes;
  newSizes.reserve(reassociation.size());
  SmallVector<AffineExpr, 4> newStrides;
  newStrides.reserve(reassociation.size());

  // Use the fact that reassociation is valid to simplify the logic: only use
  // each map's rank.
  assert(isReassociationValid(reassociation) && "invalid reassociation");
  unsigned currentDim = 0;
  for (AffineMap m : reassociation) {
    unsigned dim = m.getNumResults();
    int64_t size = 1;
    AffineExpr stride = strides[currentDim + dim - 1];
    if (isIdentityLayout ||
        isReshapableDimBand(currentDim, dim, sizes, strides)) {
      for (unsigned d = 0; d < dim; ++d) {
        int64_t currentSize = sizes[currentDim + d];
        if (ShapedType::isDynamic(currentSize)) {
          size = ShapedType::kDynamicSize;
          break;
        }
        size *= currentSize;
      }
    } else {
      size = ShapedType::kDynamicSize;
      stride = AffineExpr();
    }
    newSizes.push_back(size);
    newStrides.push_back(stride);
    currentDim += dim;
  }

  // Early-exit: if `type` is contiguous, the result must be contiguous.
  if (canonicalizeStridedLayout(type).getLayout().isIdentity())
    return MemRefType::Builder(type).setShape(newSizes).setLayout({});

  // Convert back to int64_t because we don't have enough information to create
  // new strided layouts from AffineExpr only. This corresponds to a case where
  // copies may be necessary.
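  // Any offset or stride that did not simplify to a constant expression is
  // conservatively treated as dynamic below.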
1380 int64_t intOffset = ShapedType::kDynamicStrideOrOffset; 1381 if (auto o = offset.dyn_cast<AffineConstantExpr>()) 1382 intOffset = o.getValue(); 1383 SmallVector<int64_t, 4> intStrides; 1384 intStrides.reserve(strides.size()); 1385 for (auto stride : newStrides) { 1386 if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>()) 1387 intStrides.push_back(cst.getValue()); 1388 else 1389 intStrides.push_back(ShapedType::kDynamicStrideOrOffset); 1390 } 1391 auto layout = 1392 makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext()); 1393 return canonicalizeStridedLayout( 1394 MemRefType::Builder(type).setShape(newSizes).setLayout( 1395 AffineMapAttr::get(layout))); 1396 } 1397 1398 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src, 1399 ArrayRef<ReassociationIndices> reassociation, 1400 ArrayRef<NamedAttribute> attrs) { 1401 auto memRefType = src.getType().cast<MemRefType>(); 1402 auto resultType = computeReshapeCollapsedType( 1403 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs( 1404 b.getContext(), reassociation))); 1405 build(b, result, resultType, src, attrs); 1406 result.addAttribute(getReassociationAttrName(), 1407 getReassociationIndicesAttribute(b, reassociation)); 1408 } 1409 1410 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src, 1411 ArrayRef<ReassociationIndices> reassociation, 1412 ArrayRef<NamedAttribute> attrs) { 1413 auto memRefType = src.getType().cast<MemRefType>(); 1414 auto resultType = computeReshapeCollapsedType( 1415 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs( 1416 b.getContext(), reassociation))); 1417 build(b, result, resultType, src, attrs); 1418 result.addAttribute(getReassociationAttrName(), 1419 getReassociationIndicesAttribute(b, reassociation)); 1420 } 1421 1422 template <typename ReshapeOp, 1423 bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value> 1424 static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType, 1425 MemRefType collapsedType) { 1426 if (failed( 1427 verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion))) 1428 return failure(); 1429 auto maps = op.getReassociationMaps(); 1430 MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps); 1431 if (collapsedType != expectedType) 1432 return op.emitOpError("expected collapsed type to be ") 1433 << expectedType << ", but got " << collapsedType; 1434 return success(); 1435 } 1436 1437 static LogicalResult verify(ExpandShapeOp op) { 1438 return verifyReshapeOp(op, op.getResultType(), op.getSrcType()); 1439 } 1440 1441 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results, 1442 MLIRContext *context) { 1443 results.add<CollapseReshapeOps<ExpandShapeOp>, 1444 CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context); 1445 } 1446 1447 static LogicalResult verify(CollapseShapeOp op) { 1448 return verifyReshapeOp(op, op.getSrcType(), op.getResultType()); 1449 } 1450 1451 struct CollapseShapeOpMemRefCastFolder 1452 : public OpRewritePattern<CollapseShapeOp> { 1453 public: 1454 using OpRewritePattern<CollapseShapeOp>::OpRewritePattern; 1455 1456 LogicalResult matchAndRewrite(CollapseShapeOp op, 1457 PatternRewriter &rewriter) const override { 1458 auto cast = op.getOperand().getDefiningOp<CastOp>(); 1459 if (!cast) 1460 return failure(); 1461 1462 if (!CastOp::canFoldIntoConsumerOp(cast)) 1463 return failure(); 1464 1465 Type newResultType = computeReshapeCollapsedType( 1466 
        cast.getOperand().getType().cast<MemRefType>(),
        op.getReassociationMaps());

    if (newResultType == op.getResultType()) {
      rewriter.updateRootInPlace(
          op, [&]() { op.srcMutable().assign(cast.source()); });
    } else {
      Value newOp = rewriter.create<CollapseShapeOp>(
          op->getLoc(), cast.source(), op.getReassociationIndices());
      rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
    }
    return success();
  }
};

void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                  MLIRContext *context) {
  results.add<CollapseReshapeOps<CollapseShapeOp>,
              CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
              CollapseShapeOpMemRefCastFolder>(context);
}

OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
}

OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
}

//===----------------------------------------------------------------------===//
// ReshapeOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(ReshapeOp op) {
  Type operandType = op.source().getType();
  Type resultType = op.result().getType();

  Type operandElementType = operandType.cast<ShapedType>().getElementType();
  Type resultElementType = resultType.cast<ShapedType>().getElementType();
  if (operandElementType != resultElementType)
    return op.emitOpError("element types of source and destination memref "
                          "types should be the same");

  if (auto operandMemRefType = operandType.dyn_cast<MemRefType>())
    if (!operandMemRefType.getLayout().isIdentity())
      return op.emitOpError(
          "source memref type should have identity affine map");

  int64_t shapeSize = op.shape().getType().cast<MemRefType>().getDimSize(0);
  auto resultMemRefType = resultType.dyn_cast<MemRefType>();
  if (resultMemRefType) {
    if (!resultMemRefType.getLayout().isIdentity())
      return op.emitOpError(
          "result memref type should have identity affine map");
    if (shapeSize == ShapedType::kDynamicSize)
      return op.emitOpError("cannot use shape operand with dynamic length to "
                            "reshape to statically-ranked memref type");
    if (shapeSize != resultMemRefType.getRank())
      return op.emitOpError(
          "length of shape operand differs from the result's memref rank");
  }
  return success();
}

//===----------------------------------------------------------------------===//
// StoreOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(StoreOp op) {
  if (op.getNumOperands() != 2 + op.getMemRefType().getRank())
    return op.emitOpError("store index operand count not equal to memref rank");

  return success();
}

LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
                            SmallVectorImpl<OpFoldResult> &results) {
  /// store(memrefcast) -> store
  return foldMemRefCast(*this, getValueToStore());
}

//===----------------------------------------------------------------------===//
// SubViewOp
//===----------------------------------------------------------------------===//

namespace {
/// Helpers to write more idiomatic operations.
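/// The `Wrapper` below implements saturating arithmetic over the dynamic
/// sentinel: combining any value with ShapedType::kDynamicStrideOrOffset
/// yields the sentinel again. For example (illustrative):
///   Wrapper(4) * 2 + 3                                  // 11
///   Wrapper(ShapedType::kDynamicStrideOrOffset) * 2 + 3 // dynamic sentinel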
namespace saturated_arith {
struct Wrapper {
  explicit Wrapper(int64_t v) : v(v) {}
  operator int64_t() { return v; }
  int64_t v;
};
Wrapper operator+(Wrapper a, int64_t b) {
  if (ShapedType::isDynamicStrideOrOffset(a) ||
      ShapedType::isDynamicStrideOrOffset(b))
    return Wrapper(ShapedType::kDynamicStrideOrOffset);
  return Wrapper(a.v + b);
}
Wrapper operator*(Wrapper a, int64_t b) {
  if (ShapedType::isDynamicStrideOrOffset(a) ||
      ShapedType::isDynamicStrideOrOffset(b))
    return Wrapper(ShapedType::kDynamicStrideOrOffset);
  return Wrapper(a.v * b);
}
} // namespace saturated_arith
} // namespace

/// A subview result type can be fully inferred from the source type and the
/// static representation of offsets, sizes and strides. Special sentinels
/// encode the dynamic case.
Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
                                ArrayRef<int64_t> staticOffsets,
                                ArrayRef<int64_t> staticSizes,
                                ArrayRef<int64_t> staticStrides) {
  unsigned rank = sourceMemRefType.getRank();
  (void)rank;
  assert(staticOffsets.size() == rank && "staticOffsets length mismatch");
  assert(staticSizes.size() == rank && "staticSizes length mismatch");
  assert(staticStrides.size() == rank && "staticStrides length mismatch");

  // Extract source offset and strides.
  int64_t sourceOffset;
  SmallVector<int64_t, 4> sourceStrides;
  auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
  assert(succeeded(res) && "SubViewOp expected strided memref type");
  (void)res;

  // Compute target offset whose value is:
  //   `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
  int64_t targetOffset = sourceOffset;
  for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
    auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it);
    using namespace saturated_arith;
    targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * targetStride;
  }

  // Compute target stride whose value is:
  //   `sourceStrides_i * staticStrides_i`.
  SmallVector<int64_t, 4> targetStrides;
  targetStrides.reserve(staticOffsets.size());
  for (auto it : llvm::zip(sourceStrides, staticStrides)) {
    auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
    using namespace saturated_arith;
    targetStrides.push_back(Wrapper(sourceStride) * staticStride);
  }

  // The type is now known.
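  // For example (illustrative): for a contiguous source memref<8x16xf32>
  // (strides [16, 1], offset 0) with static offsets [2, 3], sizes [4, 4] and
  // strides [1, 2], the computation above yields
  //   targetOffset  = 0 + 2 * 16 + 3 * 1 = 35
  //   targetStrides = [16 * 1, 1 * 2]    = [16, 2]
  // i.e. the inferred type is
  //   memref<4x4xf32, affine_map<(d0, d1) -> (d0 * 16 + d1 * 2 + 35)>>.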
  return MemRefType::get(
      staticSizes, sourceMemRefType.getElementType(),
      makeStridedLinearLayoutMap(targetStrides, targetOffset,
                                 sourceMemRefType.getContext()),
      sourceMemRefType.getMemorySpace());
}

Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
                                ArrayRef<OpFoldResult> offsets,
                                ArrayRef<OpFoldResult> sizes,
                                ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
                                    staticSizes, staticStrides);
}

Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
                                           MemRefType sourceRankedTensorType,
                                           ArrayRef<int64_t> offsets,
                                           ArrayRef<int64_t> sizes,
                                           ArrayRef<int64_t> strides) {
  auto inferredType =
      inferResultType(sourceRankedTensorType, offsets, sizes, strides)
          .cast<MemRefType>();
  assert(inferredType.getRank() >= resultRank &&
         "expected the inferred rank to be greater than or equal to the "
         "result rank");
  int rankDiff = inferredType.getRank() - resultRank;
  if (rankDiff > 0) {
    auto shape = inferredType.getShape();
    llvm::SmallDenseSet<unsigned> dimsToProject;
    mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
    SmallVector<int64_t> projectedShape;
    for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
      if (!dimsToProject.contains(pos))
        projectedShape.push_back(shape[pos]);

    AffineMap map = inferredType.getLayout().getAffineMap();
    if (!map.isIdentity())
      map = getProjectedMap(map, dimsToProject);
    inferredType =
        MemRefType::get(projectedShape, inferredType.getElementType(), map,
                        inferredType.getMemorySpace());
  }
  return inferredType;
}

Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
                                           MemRefType sourceRankedTensorType,
                                           ArrayRef<OpFoldResult> offsets,
                                           ArrayRef<OpFoldResult> sizes,
                                           ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return SubViewOp::inferRankReducedResultType(
      resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
      staticStrides);
}

// Build a SubViewOp with mixed static and dynamic entries and custom result
// type. If the type passed is nullptr, it is inferred.
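// Note: the `dispatchIndexOpFoldResults` helper used below splits each
// OpFoldResult into either a static entry (when it holds an IntegerAttr) or a
// dynamic Value, recording the given sentinel (kDynamicSize or
// kDynamicStrideOrOffset) in the static array for dynamic entries.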
void SubViewOp::build(OpBuilder &b, OperationState &result,
                      MemRefType resultType, Value source,
                      ArrayRef<OpFoldResult> offsets,
                      ArrayRef<OpFoldResult> sizes,
                      ArrayRef<OpFoldResult> strides,
                      ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  auto sourceMemRefType = source.getType().cast<MemRefType>();
  // Structuring implementation this way avoids duplication between builders.
  if (!resultType) {
    resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
                                            staticSizes, staticStrides)
                     .cast<MemRefType>();
  }
  build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

// Build a SubViewOp with mixed static and dynamic entries and inferred result
// type.
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
                      ArrayRef<OpFoldResult> offsets,
                      ArrayRef<OpFoldResult> sizes,
                      ArrayRef<OpFoldResult> strides,
                      ArrayRef<NamedAttribute> attrs) {
  build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
}

// Build a SubViewOp with static entries and inferred result type.
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
                      ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
                      ArrayRef<int64_t> strides,
                      ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  SmallVector<OpFoldResult> sizeValues =
      llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
}

// Build a SubViewOp with static entries and custom result type. If the
// type passed is nullptr, it is inferred.
void SubViewOp::build(OpBuilder &b, OperationState &result,
                      MemRefType resultType, Value source,
                      ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
                      ArrayRef<int64_t> strides,
                      ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  SmallVector<OpFoldResult> sizeValues =
      llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
        return b.getI64IntegerAttr(v);
      }));
  build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
        attrs);
}

// Build a SubViewOp with dynamic entries and custom result type. If the type
// passed is nullptr, it is inferred.
void SubViewOp::build(OpBuilder &b, OperationState &result,
                      MemRefType resultType, Value source, ValueRange offsets,
                      ValueRange sizes, ValueRange strides,
                      ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
}

// Build a SubViewOp with dynamic entries and inferred result type.
void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
                      ValueRange offsets, ValueRange sizes, ValueRange strides,
                      ArrayRef<NamedAttribute> attrs) {
  build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
}

/// For ViewLikeOpInterface.
Value SubViewOp::getViewSource() { return source(); }

/// Return true if t1 and t2 have equal offsets (both dynamic or of same static
/// value).
static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) {
  AffineExpr t1Offset, t2Offset;
  SmallVector<AffineExpr> t1Strides, t2Strides;
  auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset);
  auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset);
  return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset;
}

/// Checks if `originalType` can be rank-reduced to `candidateRankReducedType`.
/// This function is a slight variant of the "is subsequence" algorithm where
/// the non-matching dimensions must be 1.
static SliceVerificationResult
isRankReducedMemRefType(MemRefType originalType,
                        MemRefType candidateRankReducedType,
                        ArrayRef<OpFoldResult> sizes) {
  auto partialRes = isRankReducedType(originalType, candidateRankReducedType);
  if (partialRes != SliceVerificationResult::Success)
    return partialRes;

  auto optionalUnusedDimsMask = computeMemRefRankReductionMask(
      originalType, candidateRankReducedType, sizes);

  // If no rank-reduction mask could be computed, the sizes cannot be matched.
  if (!optionalUnusedDimsMask.hasValue())
    return SliceVerificationResult::LayoutMismatch;

  if (originalType.getMemorySpace() !=
      candidateRankReducedType.getMemorySpace())
    return SliceVerificationResult::MemSpaceMismatch;

  // No amount of stride dropping can reconcile incompatible offsets.
  if (!haveCompatibleOffsets(originalType, candidateRankReducedType))
    return SliceVerificationResult::LayoutMismatch;

  return SliceVerificationResult::Success;
}

template <typename OpTy>
static LogicalResult produceSubViewErrorMsg(SliceVerificationResult result,
                                            OpTy op, Type expectedType) {
  auto memrefType = expectedType.cast<ShapedType>();
  switch (result) {
  case SliceVerificationResult::Success:
    return success();
  case SliceVerificationResult::RankTooLarge:
    return op.emitError("expected result rank to be smaller or equal to ")
           << "the source rank. ";
  case SliceVerificationResult::SizeMismatch:
    return op.emitError("expected result type to be ")
           << expectedType
           << " or a rank-reduced version. (mismatch of result sizes) ";
  case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected result element type to be ")
           << memrefType.getElementType();
  case SliceVerificationResult::MemSpaceMismatch:
    return op.emitError("expected result and source memory spaces to match.");
  case SliceVerificationResult::LayoutMismatch:
    return op.emitError("expected result type to be ")
           << expectedType
           << " or a rank-reduced version. (mismatch of result layout) ";
  }
  llvm_unreachable("unexpected subview verification result");
}

/// Verifier for SubViewOp.
static LogicalResult verify(SubViewOp op) {
  MemRefType baseType = op.getSourceType();
  MemRefType subViewType = op.getType();

  // The base memref and the view memref should be in the same memory space.
  if (baseType.getMemorySpace() != subViewType.getMemorySpace())
    return op.emitError("different memory spaces specified for base memref "
                        "type ")
           << baseType << " and subview memref type " << subViewType;

  // Verify that the base memref type has a strided layout map.
  if (!isStrided(baseType))
    return op.emitError("base type ") << baseType << " is not strided";

  // Verify result type against inferred type.
  auto expectedType = SubViewOp::inferResultType(
      baseType, extractFromI64ArrayAttr(op.static_offsets()),
      extractFromI64ArrayAttr(op.static_sizes()),
      extractFromI64ArrayAttr(op.static_strides()));

  auto result = isRankReducedMemRefType(expectedType.cast<MemRefType>(),
                                        subViewType, op.getMixedSizes());
  return produceSubViewErrorMsg(result, op, expectedType);
}

raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) {
  return os << "range " << range.offset << ":" << range.size << ":"
            << range.stride;
}

/// Return the list of Range (i.e. offset, size, stride). Each Range
/// entry contains either the dynamic value or a ConstantIndexOp constructed
/// with `b` at location `loc`.
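/// For instance (illustrative), for an op with mixed entries
/// `%src[%o, 4] [8, %s] [1, 1]`, the returned ranges would be
/// {%o, c8, c1} and {c4, %s, c1}, where `cN` denotes a newly created
/// `arith.constant N : index`.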
SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
                                              OpBuilder &b, Location loc) {
  std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
  assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
  assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
  SmallVector<Range, 8> res;
  unsigned rank = ranks[0];
  res.reserve(rank);
  for (unsigned idx = 0; idx < rank; ++idx) {
    Value offset =
        op.isDynamicOffset(idx)
            ? op.getDynamicOffset(idx)
            : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
    Value size =
        op.isDynamicSize(idx)
            ? op.getDynamicSize(idx)
            : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
    Value stride =
        op.isDynamicStride(idx)
            ? op.getDynamicStride(idx)
            : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
    res.emplace_back(Range{offset, size, stride});
  }
  return res;
}

/// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
/// deduce the result type for the given `sourceType`. Additionally, reduce the
/// rank of the inferred result type if `currentResultType` is lower rank than
/// `currentSourceType`. Use this signature if `sourceType` is updated together
/// with the result type. In this case, it is important to compute the dropped
/// dimensions using `currentSourceType` whose strides align with
/// `currentResultType`.
static MemRefType getCanonicalSubViewResultType(
    MemRefType currentResultType, MemRefType currentSourceType,
    MemRefType sourceType, ArrayRef<OpFoldResult> mixedOffsets,
    ArrayRef<OpFoldResult> mixedSizes, ArrayRef<OpFoldResult> mixedStrides) {
  auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
                                                       mixedSizes, mixedStrides)
                                .cast<MemRefType>();
  llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
      computeMemRefRankReductionMask(currentSourceType, currentResultType,
                                     mixedSizes);
  // Return nullptr as failure mode.
  if (!unusedDims)
    return nullptr;
  SmallVector<int64_t> shape;
  for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
    if (unusedDims->count(sizes.index()))
      continue;
    shape.push_back(sizes.value());
  }
  AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
  if (!layoutMap.isIdentity())
    layoutMap = getProjectedMap(layoutMap, unusedDims.getValue());
  return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
                         nonRankReducedType.getMemorySpace());
}

/// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
/// deduce the result type. Additionally, reduce the rank of the inferred result
/// type if `currentResultType` is lower rank than `sourceType`.
static MemRefType getCanonicalSubViewResultType(
    MemRefType currentResultType, MemRefType sourceType,
    ArrayRef<OpFoldResult> mixedOffsets, ArrayRef<OpFoldResult> mixedSizes,
    ArrayRef<OpFoldResult> mixedStrides) {
  return getCanonicalSubViewResultType(currentResultType, sourceType,
                                       sourceType, mixedOffsets, mixedSizes,
                                       mixedStrides);
}

/// Helper method to check if a `subview` operation is trivially a no-op.
/// This is the case if all offsets are zero, all strides are 1, and the source
/// shape is the same as the size of the subview. In such cases, the subview
/// can be folded into its source.
static bool isTrivialSubViewOp(SubViewOp subViewOp) {
  if (subViewOp.getSourceType().getRank() != subViewOp.getType().getRank())
    return false;

  auto mixedOffsets = subViewOp.getMixedOffsets();
  auto mixedSizes = subViewOp.getMixedSizes();
  auto mixedStrides = subViewOp.getMixedStrides();

  // Check offsets are zero.
  if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
        Optional<int64_t> intValue = getConstantIntValue(ofr);
        return !intValue || intValue.getValue() != 0;
      }))
    return false;

  // Check strides are one.
  if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
        Optional<int64_t> intValue = getConstantIntValue(ofr);
        return !intValue || intValue.getValue() != 1;
      }))
    return false;

  // Check all size values are static and match the (static) source shape.
  ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
  for (const auto &size : llvm::enumerate(mixedSizes)) {
    Optional<int64_t> intValue = getConstantIntValue(size.value());
    if (!intValue || intValue.getValue() != sourceShape[size.index()])
      return false;
  }
  // All conditions met. The `SubViewOp` is foldable as a no-op.
  return true;
}

namespace {
/// Pattern to rewrite a subview op with MemRefCast arguments.
/// This essentially pushes memref.cast past its consuming subview when
/// `canFoldIntoConsumerOp` is true.
///
/// Example:
/// ```
///   %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
///   %1 = memref.subview %0[0, 0][3, 4][1, 1] :
///     memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
/// ```
/// is rewritten into:
/// ```
///   %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
///   %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
///     memref<3x4xf32, offset:?, strides:[?, 1]>
/// ```
class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    // If any operand is constant, just return to let the
    // SubViewOpConstantFolder kick in.
    if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto castOp = subViewOp.source().getDefiningOp<CastOp>();
    if (!castOp)
      return failure();

    if (!CastOp::canFoldIntoConsumerOp(castOp))
      return failure();

    // Compute the SubViewOp result type after folding the MemRefCastOp. Use
    // the MemRefCastOp source operand type to infer the result type and the
    // current SubViewOp source operand type to compute the dropped dimensions
    // if the operation is rank-reducing.
    auto resultType = getCanonicalSubViewResultType(
        subViewOp.getType(), subViewOp.getSourceType(),
        castOp.source().getType().cast<MemRefType>(),
        subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
        subViewOp.getMixedStrides());
    if (!resultType)
      return failure();

    Value newSubView = rewriter.create<SubViewOp>(
        subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
        subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
        subViewOp.static_sizes(), subViewOp.static_strides());
    rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
                                        newSubView);
    return success();
  }
};

/// Canonicalize subview ops that are no-ops. When the source type and the
/// result type differ only in their layout (e.g. due to the use of an
/// `affine_map`), replace the subview with a cast.
class TrivialSubViewOpFolder final : public OpRewritePattern<SubViewOp> {
public:
  using OpRewritePattern<SubViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(SubViewOp subViewOp,
                                PatternRewriter &rewriter) const override {
    if (!isTrivialSubViewOp(subViewOp))
      return failure();
    if (subViewOp.getSourceType() == subViewOp.getType()) {
      rewriter.replaceOp(subViewOp, subViewOp.source());
      return success();
    }
    rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.source(),
                                        subViewOp.getType());
    return success();
  }
};
} // namespace

/// Return the canonical type of the result of a subview.
struct SubViewReturnTypeCanonicalizer {
  MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
                        ArrayRef<OpFoldResult> mixedSizes,
                        ArrayRef<OpFoldResult> mixedStrides) {
    return getCanonicalSubViewResultType(op.getType(), op.getSourceType(),
                                         mixedOffsets, mixedSizes,
                                         mixedStrides);
  }
};

/// A canonicalizer wrapper to replace SubViewOps.
struct SubViewCanonicalizer {
  void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
    rewriter.replaceOpWithNewOp<CastOp>(op, newOp, op.getType());
  }
};

void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                            MLIRContext *context) {
  results
      .add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
               SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
           SubViewOpMemRefCastFolder, TrivialSubViewOpFolder>(context);
}

OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
  auto resultShapedType = getResult().getType().cast<ShapedType>();
  auto sourceShapedType = source().getType().cast<ShapedType>();

  if (resultShapedType.hasStaticShape() &&
      resultShapedType == sourceShapedType) {
    return getViewSource();
  }

  return {};
}

//===----------------------------------------------------------------------===//
// TransposeOp
//===----------------------------------------------------------------------===//

/// Build a strided memref type by applying `permutationMap` to `memRefType`.
static MemRefType inferTransposeResultType(MemRefType memRefType,
                                           AffineMap permutationMap) {
  auto rank = memRefType.getRank();
  auto originalSizes = memRefType.getShape();
  // Compute permuted sizes.
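  // For example (illustrative): with originalSizes = [4, 8] and permutationMap
  // (d0, d1) -> (d1, d0), the permuted sizes below become [8, 4].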
  SmallVector<int64_t, 4> sizes(rank, 0);
  for (const auto &en : llvm::enumerate(permutationMap.getResults()))
    sizes[en.index()] =
        originalSizes[en.value().cast<AffineDimExpr>().getPosition()];

  // Compute permuted strides.
  int64_t offset;
  SmallVector<int64_t, 4> strides;
  auto res = getStridesAndOffset(memRefType, strides, offset);
  assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
  (void)res;
  auto map =
      makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
  map = permutationMap ? map.compose(permutationMap) : map;
  return MemRefType::Builder(memRefType)
      .setShape(sizes)
      .setLayout(AffineMapAttr::get(map));
}

void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
                        AffineMapAttr permutation,
                        ArrayRef<NamedAttribute> attrs) {
  auto permutationMap = permutation.getValue();
  assert(permutationMap);

  auto memRefType = in.getType().cast<MemRefType>();
  // Compute result type.
  MemRefType resultType = inferTransposeResultType(memRefType, permutationMap);

  build(b, result, resultType, in, attrs);
  result.addAttribute(TransposeOp::getPermutationAttrName(), permutation);
}

// transpose $in $permutation attr-dict : type($in) `to` type(results)
static void print(OpAsmPrinter &p, TransposeOp op) {
  p << " " << op.in() << " " << op.permutation();
  p.printOptionalAttrDict(op->getAttrs(),
                          {TransposeOp::getPermutationAttrName()});
  p << " : " << op.in().getType() << " to " << op.getType();
}

static ParseResult parseTransposeOp(OpAsmParser &parser,
                                    OperationState &result) {
  OpAsmParser::OperandType in;
  AffineMap permutation;
  MemRefType srcType, dstType;
  if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(srcType) ||
      parser.resolveOperand(in, srcType, result.operands) ||
      parser.parseKeywordType("to", dstType) ||
      parser.addTypeToList(dstType, result.types))
    return failure();

  result.addAttribute(TransposeOp::getPermutationAttrName(),
                      AffineMapAttr::get(permutation));
  return success();
}

static LogicalResult verify(TransposeOp op) {
  if (!op.permutation().isPermutation())
    return op.emitOpError("expected a permutation map");
  if (op.permutation().getNumDims() != op.getShapedType().getRank())
    return op.emitOpError(
        "expected a permutation map of same rank as the input");

  auto srcType = op.in().getType().cast<MemRefType>();
  auto dstType = op.getType().cast<MemRefType>();
  auto transposedType = inferTransposeResultType(srcType, op.permutation());
  if (dstType != transposedType)
    return op.emitOpError("output type ")
           << dstType << " does not match transposed input type " << srcType
           << ", " << transposedType;
  return success();
}

OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
  if (succeeded(foldMemRefCast(*this)))
    return getResult();
  return {};
}

//===----------------------------------------------------------------------===//
// ViewOp
//===----------------------------------------------------------------------===//

static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType srcInfo;
  SmallVector<OpAsmParser::OperandType, 1> offsetInfo;
  SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
  auto indexType = parser.getBuilder().getIndexType();
  Type srcType, dstType;
  llvm::SMLoc offsetLoc;
  if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) ||
      parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square))
    return failure();

  if (offsetInfo.size() != 1)
    return parser.emitError(offsetLoc) << "expects 1 offset operand";

  return failure(
      parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
      parser.parseOptionalAttrDict(result.attributes) ||
      parser.parseColonType(srcType) ||
      parser.resolveOperand(srcInfo, srcType, result.operands) ||
      parser.resolveOperands(offsetInfo, indexType, result.operands) ||
      parser.resolveOperands(sizesInfo, indexType, result.operands) ||
      parser.parseKeywordType("to", dstType) ||
      parser.addTypeToList(dstType, result.types));
}

static void print(OpAsmPrinter &p, ViewOp op) {
  p << ' ' << op.getOperand(0) << '[';
  p.printOperand(op.byte_shift());
  p << "][" << op.sizes() << ']';
  p.printOptionalAttrDict(op->getAttrs());
  p << " : " << op.getOperand(0).getType() << " to " << op.getType();
}

static LogicalResult verify(ViewOp op) {
  auto baseType = op.getOperand(0).getType().cast<MemRefType>();
  auto viewType = op.getType();

  // The base memref should have identity layout map (or none).
  if (!baseType.getLayout().isIdentity())
    return op.emitError("unsupported map for base memref type ") << baseType;

  // The result memref should have identity layout map (or none).
  if (!viewType.getLayout().isIdentity())
    return op.emitError("unsupported map for result memref type ") << viewType;

  // The base memref and the view memref should be in the same memory space.
  if (baseType.getMemorySpace() != viewType.getMemorySpace())
    return op.emitError("different memory spaces specified for base memref "
                        "type ")
           << baseType << " and view memref type " << viewType;

  // Verify that we have the correct number of sizes for the result type.
  unsigned numDynamicDims = viewType.getNumDynamicDims();
  if (op.sizes().size() != numDynamicDims)
    return op.emitError("incorrect number of size operands for type ")
           << viewType;

  return success();
}

Value ViewOp::getViewSource() { return source(); }

namespace {

struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
  using OpRewritePattern<ViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ViewOp viewOp,
                                PatternRewriter &rewriter) const override {
    // Return if none of the operands are constants.
    if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    // Get result memref type.
    auto memrefType = viewOp.getType();

    // Get offset from old memref view type 'memRefType'.
    int64_t oldOffset;
    SmallVector<int64_t, 4> oldStrides;
    if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
      return failure();
    assert(oldOffset == 0 && "Expected 0 offset");

    SmallVector<Value, 4> newOperands;

    // Offset cannot be folded into result type.

    // Fold any dynamic dim operands which are produced by a constant.
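    // For example (illustrative): if the view type is memref<?x4xf32> and its
    // single size operand is defined by `arith.constant 8 : index`, the new
    // shape below becomes [8, 4] and the constant operand is dropped from the
    // new ViewOp.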
    SmallVector<int64_t, 4> newShapeConstants;
    newShapeConstants.reserve(memrefType.getRank());

    unsigned dynamicDimPos = 0;
    unsigned rank = memrefType.getRank();
    for (unsigned dim = 0, e = rank; dim < e; ++dim) {
      int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
      if (!ShapedType::isDynamic(dimSize)) {
        newShapeConstants.push_back(dimSize);
        continue;
      }
      auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp();
      if (auto constantIndexOp =
              dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
        // Dynamic shape dimension will be folded.
        newShapeConstants.push_back(constantIndexOp.value());
      } else {
        // Dynamic shape dimension not folded; copy operand from old memref.
        newShapeConstants.push_back(dimSize);
        newOperands.push_back(viewOp.sizes()[dynamicDimPos]);
      }
      dynamicDimPos++;
    }

    // Create new memref type with constant folded dims.
    MemRefType newMemRefType =
        MemRefType::Builder(memrefType).setShape(newShapeConstants);
    // Nothing new, don't fold.
    if (newMemRefType == memrefType)
      return failure();

    // Create new ViewOp.
    auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
                                             viewOp.getOperand(0),
                                             viewOp.byte_shift(), newOperands);
    // Insert a cast so we have the same type as the old memref type.
    rewriter.replaceOpWithNewOp<CastOp>(viewOp, newViewOp, viewOp.getType());
    return success();
  }
};

struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
  using OpRewritePattern<ViewOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ViewOp viewOp,
                                PatternRewriter &rewriter) const override {
    Value memrefOperand = viewOp.getOperand(0);
    CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>();
    if (!memrefCastOp)
      return failure();
    Value allocOperand = memrefCastOp.getOperand();
    AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>();
    if (!allocOp)
      return failure();
    rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
                                        viewOp.byte_shift(), viewOp.sizes());
    return success();
  }
};

} // namespace

void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
  results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
}

//===----------------------------------------------------------------------===//
// AtomicRMWOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(AtomicRMWOp op) {
  if (op.getMemRefType().getRank() != op.getNumOperands() - 2)
    return op.emitOpError(
        "expects the number of subscripts to be equal to memref rank");
  switch (op.kind()) {
  case arith::AtomicRMWKind::addf:
  case arith::AtomicRMWKind::maxf:
  case arith::AtomicRMWKind::minf:
  case arith::AtomicRMWKind::mulf:
    if (!op.value().getType().isa<FloatType>())
      return op.emitOpError()
             << "with kind '" << arith::stringifyAtomicRMWKind(op.kind())
             << "' expects a floating-point type";
    break;
  case arith::AtomicRMWKind::addi:
  case arith::AtomicRMWKind::maxs:
  case arith::AtomicRMWKind::maxu:
  case arith::AtomicRMWKind::mins:
  case arith::AtomicRMWKind::minu:
  case arith::AtomicRMWKind::muli:
  case arith::AtomicRMWKind::ori:
  case arith::AtomicRMWKind::andi:
    if (!op.value().getType().isa<IntegerType>())
      return op.emitOpError()
             << "with kind '" << arith::stringifyAtomicRMWKind(op.kind())
             << "' expects an integer type";
    break;
  default:
    break;
  }
  return success();
}

OpFoldResult AtomicRMWOp::fold(ArrayRef<Attribute> operands) {
  /// atomicrmw(memrefcast) -> atomicrmw
  if (succeeded(foldMemRefCast(*this, value())))
    return getResult();
  return OpFoldResult();
}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"