1 //===----------------------------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" 10 #include "mlir/Dialect/MemRef/IR/MemRef.h" 11 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h" 12 #include "mlir/Dialect/StandardOps/IR/Ops.h" 13 #include "mlir/Dialect/StandardOps/Utils/Utils.h" 14 #include "mlir/Dialect/Utils/StaticValueUtils.h" 15 #include "mlir/IR/AffineMap.h" 16 #include "mlir/IR/Builders.h" 17 #include "mlir/IR/BuiltinTypes.h" 18 #include "mlir/IR/Matchers.h" 19 #include "mlir/IR/PatternMatch.h" 20 #include "mlir/IR/TypeUtilities.h" 21 #include "mlir/Interfaces/InferTypeOpInterface.h" 22 #include "mlir/Interfaces/ViewLikeInterface.h" 23 #include "llvm/ADT/STLExtras.h" 24 25 using namespace mlir; 26 using namespace mlir::memref; 27 28 /// Materialize a single constant operation from a given attribute value with 29 /// the desired resultant type. 30 Operation *MemRefDialect::materializeConstant(OpBuilder &builder, 31 Attribute value, Type type, 32 Location loc) { 33 if (arith::ConstantOp::isBuildableWith(value, type)) 34 return builder.create<arith::ConstantOp>(loc, value, type); 35 if (ConstantOp::isBuildableWith(value, type)) 36 return builder.create<ConstantOp>(loc, value, type); 37 return nullptr; 38 } 39 40 //===----------------------------------------------------------------------===// 41 // Common canonicalization pattern support logic 42 //===----------------------------------------------------------------------===// 43 44 /// This is a common class used for patterns of the form 45 /// "someop(memrefcast) -> someop". It folds the source of any memref.cast 46 /// into the root operation directly. 47 LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) { 48 bool folded = false; 49 for (OpOperand &operand : op->getOpOperands()) { 50 auto cast = operand.get().getDefiningOp<CastOp>(); 51 if (cast && operand.get() != inner && 52 !cast.getOperand().getType().isa<UnrankedMemRefType>()) { 53 operand.set(cast.getOperand()); 54 folded = true; 55 } 56 } 57 return success(folded); 58 } 59 60 /// Return an unranked/ranked tensor type for the given unranked/ranked memref 61 /// type. 
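///
/// For illustration, the mapping sketched by this helper is:
///   memref<4x?xf32>   maps to tensor<4x?xf32>
///   memref<*xi8>      maps to tensor<*xi8>
///   any non-memref    maps to the `none` type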
62 Type mlir::memref::getTensorTypeFromMemRefType(Type type) { 63 if (auto memref = type.dyn_cast<MemRefType>()) 64 return RankedTensorType::get(memref.getShape(), memref.getElementType()); 65 if (auto memref = type.dyn_cast<UnrankedMemRefType>()) 66 return UnrankedTensorType::get(memref.getElementType()); 67 return NoneType::get(type.getContext()); 68 } 69 70 //===----------------------------------------------------------------------===// 71 // AllocOp / AllocaOp 72 //===----------------------------------------------------------------------===// 73 74 template <typename AllocLikeOp> 75 static LogicalResult verifyAllocLikeOp(AllocLikeOp op) { 76 static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value, 77 "applies to only alloc or alloca"); 78 auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>(); 79 if (!memRefType) 80 return op.emitOpError("result must be a memref"); 81 82 if (static_cast<int64_t>(op.dynamicSizes().size()) != 83 memRefType.getNumDynamicDims()) 84 return op.emitOpError("dimension operand count does not equal memref " 85 "dynamic dimension count"); 86 87 unsigned numSymbols = 0; 88 if (!memRefType.getLayout().isIdentity()) 89 numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols(); 90 if (op.symbolOperands().size() != numSymbols) 91 return op.emitOpError("symbol operand count does not equal memref symbol " 92 "count: expected ") 93 << numSymbols << ", got " << op.symbolOperands().size(); 94 95 return success(); 96 } 97 98 static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); } 99 100 static LogicalResult verify(AllocaOp op) { 101 // An alloca op needs to have an ancestor with an allocation scope trait. 102 if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>()) 103 return op.emitOpError( 104 "requires an ancestor op with AutomaticAllocationScope trait"); 105 106 return verifyAllocLikeOp(op); 107 } 108 109 namespace { 110 /// Fold constant dimensions into an alloc like operation. 111 template <typename AllocLikeOp> 112 struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> { 113 using OpRewritePattern<AllocLikeOp>::OpRewritePattern; 114 115 LogicalResult matchAndRewrite(AllocLikeOp alloc, 116 PatternRewriter &rewriter) const override { 117 // Check to see if any dimensions operands are constants. If so, we can 118 // substitute and drop them. 119 if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) { 120 return matchPattern(operand, matchConstantIndex()); 121 })) 122 return failure(); 123 124 auto memrefType = alloc.getType(); 125 126 // Ok, we have one or more constant operands. Collect the non-constant ones 127 // and keep track of the resultant memref type to build. 128 SmallVector<int64_t, 4> newShapeConstants; 129 newShapeConstants.reserve(memrefType.getRank()); 130 SmallVector<Value, 4> dynamicSizes; 131 132 unsigned dynamicDimPos = 0; 133 for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) { 134 int64_t dimSize = memrefType.getDimSize(dim); 135 // If this is already static dimension, keep it. 136 if (dimSize != -1) { 137 newShapeConstants.push_back(dimSize); 138 continue; 139 } 140 auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos]; 141 auto *defOp = dynamicSize.getDefiningOp(); 142 if (auto constantIndexOp = 143 dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) { 144 // Dynamic shape dimension will be folded. 145 newShapeConstants.push_back(constantIndexOp.value()); 146 } else { 147 // Dynamic shape dimension not folded; copy dynamicSize from old memref. 
148 newShapeConstants.push_back(-1); 149 dynamicSizes.push_back(dynamicSize); 150 } 151 dynamicDimPos++; 152 } 153 154 // Create new memref type (which will have fewer dynamic dimensions). 155 MemRefType newMemRefType = 156 MemRefType::Builder(memrefType).setShape(newShapeConstants); 157 assert(static_cast<int64_t>(dynamicSizes.size()) == 158 newMemRefType.getNumDynamicDims()); 159 160 // Create and insert the alloc op for the new memref. 161 auto newAlloc = rewriter.create<AllocLikeOp>( 162 alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(), 163 alloc.alignmentAttr()); 164 // Insert a cast so we have the same type as the old alloc. 165 auto resultCast = 166 rewriter.create<CastOp>(alloc.getLoc(), newAlloc, alloc.getType()); 167 168 rewriter.replaceOp(alloc, {resultCast}); 169 return success(); 170 } 171 }; 172 173 /// Fold alloc operations with no users or only store and dealloc uses. 174 template <typename T> 175 struct SimplifyDeadAlloc : public OpRewritePattern<T> { 176 using OpRewritePattern<T>::OpRewritePattern; 177 178 LogicalResult matchAndRewrite(T alloc, 179 PatternRewriter &rewriter) const override { 180 if (llvm::any_of(alloc->getUsers(), [&](Operation *op) { 181 if (auto storeOp = dyn_cast<StoreOp>(op)) 182 return storeOp.value() == alloc; 183 return !isa<DeallocOp>(op); 184 })) 185 return failure(); 186 187 for (Operation *user : llvm::make_early_inc_range(alloc->getUsers())) 188 rewriter.eraseOp(user); 189 190 rewriter.eraseOp(alloc); 191 return success(); 192 } 193 }; 194 } // namespace 195 196 void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results, 197 MLIRContext *context) { 198 results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context); 199 } 200 201 void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results, 202 MLIRContext *context) { 203 results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>( 204 context); 205 } 206 207 //===----------------------------------------------------------------------===// 208 // AllocaScopeOp 209 //===----------------------------------------------------------------------===// 210 211 static void print(OpAsmPrinter &p, AllocaScopeOp &op) { 212 bool printBlockTerminators = false; 213 214 p << ' '; 215 if (!op.results().empty()) { 216 p << " -> (" << op.getResultTypes() << ")"; 217 printBlockTerminators = true; 218 } 219 p << ' '; 220 p.printRegion(op.bodyRegion(), 221 /*printEntryBlockArgs=*/false, 222 /*printBlockTerminators=*/printBlockTerminators); 223 p.printOptionalAttrDict(op->getAttrs()); 224 } 225 226 static ParseResult parseAllocaScopeOp(OpAsmParser &parser, 227 OperationState &result) { 228 // Create a region for the body. 229 result.regions.reserve(1); 230 Region *bodyRegion = result.addRegion(); 231 232 // Parse optional results type list. 233 if (parser.parseOptionalArrowTypeList(result.types)) 234 return failure(); 235 236 // Parse the body region. 237 if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{})) 238 return failure(); 239 AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(), 240 result.location); 241 242 // Parse the optional attribute list. 
243 if (parser.parseOptionalAttrDict(result.attributes)) 244 return failure(); 245 246 return success(); 247 } 248 249 static LogicalResult verify(AllocaScopeOp op) { 250 if (failed(RegionBranchOpInterface::verifyTypes(op))) 251 return failure(); 252 253 return success(); 254 } 255 256 void AllocaScopeOp::getSuccessorRegions( 257 Optional<unsigned> index, ArrayRef<Attribute> operands, 258 SmallVectorImpl<RegionSuccessor> ®ions) { 259 if (index.hasValue()) { 260 regions.push_back(RegionSuccessor(getResults())); 261 return; 262 } 263 264 regions.push_back(RegionSuccessor(&bodyRegion())); 265 } 266 267 //===----------------------------------------------------------------------===// 268 // AssumeAlignmentOp 269 //===----------------------------------------------------------------------===// 270 271 static LogicalResult verify(AssumeAlignmentOp op) { 272 unsigned alignment = op.alignment(); 273 if (!llvm::isPowerOf2_32(alignment)) 274 return op.emitOpError("alignment must be power of 2"); 275 return success(); 276 } 277 278 //===----------------------------------------------------------------------===// 279 // CastOp 280 //===----------------------------------------------------------------------===// 281 282 /// Determines whether MemRef_CastOp casts to a more dynamic version of the 283 /// source memref. This is useful to to fold a memref.cast into a consuming op 284 /// and implement canonicalization patterns for ops in different dialects that 285 /// may consume the results of memref.cast operations. Such foldable memref.cast 286 /// operations are typically inserted as `view` and `subview` ops are 287 /// canonicalized, to preserve the type compatibility of their uses. 288 /// 289 /// Returns true when all conditions are met: 290 /// 1. source and result are ranked memrefs with strided semantics and same 291 /// element type and rank. 292 /// 2. each of the source's size, offset or stride has more static information 293 /// than the corresponding result's size, offset or stride. 294 /// 295 /// Example 1: 296 /// ```mlir 297 /// %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32> 298 /// %2 = consumer %1 ... : memref<?x?xf32> ... 299 /// ``` 300 /// 301 /// may fold into: 302 /// 303 /// ```mlir 304 /// %2 = consumer %0 ... : memref<8x16xf32> ... 305 /// ``` 306 /// 307 /// Example 2: 308 /// ``` 309 /// %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>> 310 /// to memref<?x?xf32> 311 /// consumer %1 : memref<?x?xf32> ... 312 /// ``` 313 /// 314 /// may fold into: 315 /// 316 /// ``` 317 /// consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>> 318 /// ``` 319 bool CastOp::canFoldIntoConsumerOp(CastOp castOp) { 320 MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>(); 321 MemRefType resultType = castOp.getType().dyn_cast<MemRefType>(); 322 323 // Requires ranked MemRefType. 324 if (!sourceType || !resultType) 325 return false; 326 327 // Requires same elemental type. 328 if (sourceType.getElementType() != resultType.getElementType()) 329 return false; 330 331 // Requires same rank. 332 if (sourceType.getRank() != resultType.getRank()) 333 return false; 334 335 // Only fold casts between strided memref forms. 
336 int64_t sourceOffset, resultOffset; 337 SmallVector<int64_t, 4> sourceStrides, resultStrides; 338 if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) || 339 failed(getStridesAndOffset(resultType, resultStrides, resultOffset))) 340 return false; 341 342 // If cast is towards more static sizes along any dimension, don't fold. 343 for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) { 344 auto ss = std::get<0>(it), st = std::get<1>(it); 345 if (ss != st) 346 if (ShapedType::isDynamic(ss) && !ShapedType::isDynamic(st)) 347 return false; 348 } 349 350 // If cast is towards more static offset along any dimension, don't fold. 351 if (sourceOffset != resultOffset) 352 if (ShapedType::isDynamicStrideOrOffset(sourceOffset) && 353 !ShapedType::isDynamicStrideOrOffset(resultOffset)) 354 return false; 355 356 // If cast is towards more static strides along any dimension, don't fold. 357 for (auto it : llvm::zip(sourceStrides, resultStrides)) { 358 auto ss = std::get<0>(it), st = std::get<1>(it); 359 if (ss != st) 360 if (ShapedType::isDynamicStrideOrOffset(ss) && 361 !ShapedType::isDynamicStrideOrOffset(st)) 362 return false; 363 } 364 365 return true; 366 } 367 368 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) { 369 if (inputs.size() != 1 || outputs.size() != 1) 370 return false; 371 Type a = inputs.front(), b = outputs.front(); 372 auto aT = a.dyn_cast<MemRefType>(); 373 auto bT = b.dyn_cast<MemRefType>(); 374 375 auto uaT = a.dyn_cast<UnrankedMemRefType>(); 376 auto ubT = b.dyn_cast<UnrankedMemRefType>(); 377 378 if (aT && bT) { 379 if (aT.getElementType() != bT.getElementType()) 380 return false; 381 if (aT.getLayout() != bT.getLayout()) { 382 int64_t aOffset, bOffset; 383 SmallVector<int64_t, 4> aStrides, bStrides; 384 if (failed(getStridesAndOffset(aT, aStrides, aOffset)) || 385 failed(getStridesAndOffset(bT, bStrides, bOffset)) || 386 aStrides.size() != bStrides.size()) 387 return false; 388 389 // Strides along a dimension/offset are compatible if the value in the 390 // source memref is static and the value in the target memref is the 391 // same. They are also compatible if either one is dynamic (see 392 // description of MemRefCastOp for details). 393 auto checkCompatible = [](int64_t a, int64_t b) { 394 return (a == MemRefType::getDynamicStrideOrOffset() || 395 b == MemRefType::getDynamicStrideOrOffset() || a == b); 396 }; 397 if (!checkCompatible(aOffset, bOffset)) 398 return false; 399 for (const auto &aStride : enumerate(aStrides)) 400 if (!checkCompatible(aStride.value(), bStrides[aStride.index()])) 401 return false; 402 } 403 if (aT.getMemorySpace() != bT.getMemorySpace()) 404 return false; 405 406 // They must have the same rank, and any specified dimensions must match. 407 if (aT.getRank() != bT.getRank()) 408 return false; 409 410 for (unsigned i = 0, e = aT.getRank(); i != e; ++i) { 411 int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i); 412 if (aDim != -1 && bDim != -1 && aDim != bDim) 413 return false; 414 } 415 return true; 416 } else { 417 if (!aT && !uaT) 418 return false; 419 if (!bT && !ubT) 420 return false; 421 // Unranked to unranked casting is unsupported 422 if (uaT && ubT) 423 return false; 424 425 auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType(); 426 auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType(); 427 if (aEltType != bEltType) 428 return false; 429 430 auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace(); 431 auto bMemSpace = (bT) ? 
                          bT.getMemorySpace() : ubT.getMemorySpace();
    return aMemSpace == bMemSpace;
  }

  return false;
}

OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
  return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
}

//===----------------------------------------------------------------------===//
// CopyOp
//===----------------------------------------------------------------------===//

namespace {
/// If the source/target of a CopyOp is a CastOp that does not modify the shape
/// and element type, the cast can be skipped. Such CastOps only cast the layout
/// of the type.
struct FoldCopyOfCast : public OpRewritePattern<CopyOp> {
  using OpRewritePattern<CopyOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CopyOp copyOp,
                                PatternRewriter &rewriter) const override {
    bool modified = false;

    // Check source. Compare the type before the cast with the type the cast
    // produces; the cast can only be skipped when shape and element type are
    // unchanged.
    if (auto castOp = copyOp.source().getDefiningOp<CastOp>()) {
      auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
      auto toType = castOp.getType().dyn_cast<MemRefType>();

      if (fromType && toType) {
        if (fromType.getShape() == toType.getShape() &&
            fromType.getElementType() == toType.getElementType()) {
          rewriter.updateRootInPlace(
              copyOp, [&] { copyOp.sourceMutable().assign(castOp.source()); });
          modified = true;
        }
      }
    }

    // Check target.
    if (auto castOp = copyOp.target().getDefiningOp<CastOp>()) {
      auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
      auto toType = castOp.getType().dyn_cast<MemRefType>();

      if (fromType && toType) {
        if (fromType.getShape() == toType.getShape() &&
            fromType.getElementType() == toType.getElementType()) {
          rewriter.updateRootInPlace(
              copyOp, [&] { copyOp.targetMutable().assign(castOp.source()); });
          modified = true;
        }
      }
    }

    return success(modified);
  }
};

/// Fold memref.copy(%x, %x).
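///
/// For illustration, a self-copy such as
///   memref.copy %buf, %buf : memref<16xf32> to memref<16xf32>
/// has no observable effect, so the pattern below simply erases it.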
492 struct FoldSelfCopy : public OpRewritePattern<CopyOp> { 493 using OpRewritePattern<CopyOp>::OpRewritePattern; 494 495 LogicalResult matchAndRewrite(CopyOp copyOp, 496 PatternRewriter &rewriter) const override { 497 if (copyOp.source() != copyOp.target()) 498 return failure(); 499 500 rewriter.eraseOp(copyOp); 501 return success(); 502 } 503 }; 504 } // namespace 505 506 void CopyOp::getCanonicalizationPatterns(RewritePatternSet &results, 507 MLIRContext *context) { 508 results.add<FoldCopyOfCast, FoldSelfCopy>(context); 509 } 510 511 //===----------------------------------------------------------------------===// 512 // DeallocOp 513 //===----------------------------------------------------------------------===// 514 515 LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands, 516 SmallVectorImpl<OpFoldResult> &results) { 517 /// dealloc(memrefcast) -> dealloc 518 return foldMemRefCast(*this); 519 } 520 521 //===----------------------------------------------------------------------===// 522 // DimOp 523 //===----------------------------------------------------------------------===// 524 525 void DimOp::build(OpBuilder &builder, OperationState &result, Value source, 526 int64_t index) { 527 auto loc = result.location; 528 Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index); 529 build(builder, result, source, indexValue); 530 } 531 532 void DimOp::build(OpBuilder &builder, OperationState &result, Value source, 533 Value index) { 534 auto indexTy = builder.getIndexType(); 535 build(builder, result, indexTy, source, index); 536 } 537 538 Optional<int64_t> DimOp::getConstantIndex() { 539 if (auto constantOp = index().getDefiningOp<arith::ConstantOp>()) 540 return constantOp.getValue().cast<IntegerAttr>().getInt(); 541 return {}; 542 } 543 544 static LogicalResult verify(DimOp op) { 545 // Assume unknown index to be in range. 546 Optional<int64_t> index = op.getConstantIndex(); 547 if (!index.hasValue()) 548 return success(); 549 550 // Check that constant index is not knowingly out of range. 551 auto type = op.source().getType(); 552 if (auto memrefType = type.dyn_cast<MemRefType>()) { 553 if (index.getValue() >= memrefType.getRank()) 554 return op.emitOpError("index is out of range"); 555 } else if (type.isa<UnrankedMemRefType>()) { 556 // Assume index to be in range. 557 } else { 558 llvm_unreachable("expected operand with memref type"); 559 } 560 return success(); 561 } 562 563 /// Return a map with key being elements in `vals` and data being number of 564 /// occurences of it. Use std::map, since the `vals` here are strides and the 565 /// dynamic stride value is the same as the tombstone value for 566 /// `DenseMap<int64_t>`. 567 static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) { 568 std::map<int64_t, unsigned> numOccurences; 569 for (auto val : vals) 570 numOccurences[val]++; 571 return numOccurences; 572 } 573 574 /// Given the `originalType` and a `candidateReducedType` whose shape is assumed 575 /// to be a subset of `originalType` with some `1` entries erased, return the 576 /// set of indices that specifies which of the entries of `originalShape` are 577 /// dropped to obtain `reducedShape`. 578 /// This accounts for cases where there are multiple unit-dims, but only a 579 /// subset of those are dropped. For MemRefTypes these can be disambiguated 580 /// using the strides. If a dimension is dropped the stride must be dropped too. 
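///
/// Illustrative sketch: if the original strides are [8, 8, 1] and the
/// candidate strides are [8, 1], one occurrence of stride 8 disappears, so
/// exactly one unit dimension carrying stride 8 is reported as dropped; a unit
/// dimension whose stride occurs equally often in both types is kept.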
581 static llvm::Optional<llvm::SmallDenseSet<unsigned>> 582 computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType, 583 ArrayRef<OpFoldResult> sizes) { 584 llvm::SmallDenseSet<unsigned> unusedDims; 585 if (originalType.getRank() == reducedType.getRank()) 586 return unusedDims; 587 588 for (const auto &dim : llvm::enumerate(sizes)) 589 if (auto attr = dim.value().dyn_cast<Attribute>()) 590 if (attr.cast<IntegerAttr>().getInt() == 1) 591 unusedDims.insert(dim.index()); 592 593 SmallVector<int64_t> originalStrides, candidateStrides; 594 int64_t originalOffset, candidateOffset; 595 if (failed( 596 getStridesAndOffset(originalType, originalStrides, originalOffset)) || 597 failed( 598 getStridesAndOffset(reducedType, candidateStrides, candidateOffset))) 599 return llvm::None; 600 601 // For memrefs, a dimension is truly dropped if its corresponding stride is 602 // also dropped. This is particularly important when more than one of the dims 603 // is 1. Track the number of occurences of the strides in the original type 604 // and the candidate type. For each unused dim that stride should not be 605 // present in the candidate type. Note that there could be multiple dimensions 606 // that have the same size. We dont need to exactly figure out which dim 607 // corresponds to which stride, we just need to verify that the number of 608 // reptitions of a stride in the original + number of unused dims with that 609 // stride == number of repititions of a stride in the candidate. 610 std::map<int64_t, unsigned> currUnaccountedStrides = 611 getNumOccurences(originalStrides); 612 std::map<int64_t, unsigned> candidateStridesNumOccurences = 613 getNumOccurences(candidateStrides); 614 llvm::SmallDenseSet<unsigned> prunedUnusedDims; 615 for (unsigned dim : unusedDims) { 616 int64_t originalStride = originalStrides[dim]; 617 if (currUnaccountedStrides[originalStride] > 618 candidateStridesNumOccurences[originalStride]) { 619 // This dim can be treated as dropped. 620 currUnaccountedStrides[originalStride]--; 621 continue; 622 } 623 if (currUnaccountedStrides[originalStride] == 624 candidateStridesNumOccurences[originalStride]) { 625 // The stride for this is not dropped. Keep as is. 626 prunedUnusedDims.insert(dim); 627 continue; 628 } 629 if (currUnaccountedStrides[originalStride] < 630 candidateStridesNumOccurences[originalStride]) { 631 // This should never happen. Cant have a stride in the reduced rank type 632 // that wasnt in the original one. 633 return llvm::None; 634 } 635 } 636 637 for (auto prunedDim : prunedUnusedDims) 638 unusedDims.erase(prunedDim); 639 if (unusedDims.size() + reducedType.getRank() != originalType.getRank()) 640 return llvm::None; 641 return unusedDims; 642 } 643 644 llvm::SmallDenseSet<unsigned> SubViewOp::getDroppedDims() { 645 MemRefType sourceType = getSourceType(); 646 MemRefType resultType = getType(); 647 llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims = 648 computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes()); 649 assert(unusedDims && "unable to find unused dims of subview"); 650 return *unusedDims; 651 } 652 653 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) { 654 // All forms of folding require a known index. 655 auto index = operands[1].dyn_cast_or_null<IntegerAttr>(); 656 if (!index) 657 return {}; 658 659 // Folding for unranked types (UnrankedMemRefType) is not supported. 
660 auto memrefType = source().getType().dyn_cast<MemRefType>(); 661 if (!memrefType) 662 return {}; 663 664 // Fold if the shape extent along the given index is known. 665 if (!memrefType.isDynamicDim(index.getInt())) { 666 Builder builder(getContext()); 667 return builder.getIndexAttr(memrefType.getShape()[index.getInt()]); 668 } 669 670 // The size at the given index is now known to be a dynamic size. 671 unsigned unsignedIndex = index.getValue().getZExtValue(); 672 673 // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`. 674 Operation *definingOp = source().getDefiningOp(); 675 676 if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp)) 677 return *(alloc.getDynamicSizes().begin() + 678 memrefType.getDynamicDimIndex(unsignedIndex)); 679 680 if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp)) 681 return *(alloca.getDynamicSizes().begin() + 682 memrefType.getDynamicDimIndex(unsignedIndex)); 683 684 if (auto view = dyn_cast_or_null<ViewOp>(definingOp)) 685 return *(view.getDynamicSizes().begin() + 686 memrefType.getDynamicDimIndex(unsignedIndex)); 687 688 if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) { 689 llvm::SmallDenseSet<unsigned> unusedDims = subview.getDroppedDims(); 690 unsigned resultIndex = 0; 691 unsigned sourceRank = subview.getSourceType().getRank(); 692 unsigned sourceIndex = 0; 693 for (auto i : llvm::seq<unsigned>(0, sourceRank)) { 694 if (unusedDims.count(i)) 695 continue; 696 if (resultIndex == unsignedIndex) { 697 sourceIndex = i; 698 break; 699 } 700 resultIndex++; 701 } 702 assert(subview.isDynamicSize(sourceIndex) && 703 "expected dynamic subview size"); 704 return subview.getDynamicSize(sourceIndex); 705 } 706 707 if (auto sizeInterface = 708 dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) { 709 assert(sizeInterface.isDynamicSize(unsignedIndex) && 710 "Expected dynamic subview size"); 711 return sizeInterface.getDynamicSize(unsignedIndex); 712 } 713 714 // dim(memrefcast) -> dim 715 if (succeeded(foldMemRefCast(*this))) 716 return getResult(); 717 718 return {}; 719 } 720 721 namespace { 722 /// Fold dim of a memref reshape operation to a load into the reshape's shape 723 /// operand. 724 struct DimOfMemRefReshape : public OpRewritePattern<DimOp> { 725 using OpRewritePattern<DimOp>::OpRewritePattern; 726 727 LogicalResult matchAndRewrite(DimOp dim, 728 PatternRewriter &rewriter) const override { 729 auto reshape = dim.source().getDefiningOp<ReshapeOp>(); 730 731 if (!reshape) 732 return failure(); 733 734 // Place the load directly after the reshape to ensure that the shape memref 735 // was not mutated. 
736 rewriter.setInsertionPointAfter(reshape); 737 Location loc = dim.getLoc(); 738 Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index()); 739 if (load.getType() != dim.getType()) 740 load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load); 741 rewriter.replaceOp(dim, load); 742 return success(); 743 } 744 }; 745 746 } // namespace 747 748 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results, 749 MLIRContext *context) { 750 results.add<DimOfMemRefReshape>(context); 751 } 752 753 // --------------------------------------------------------------------------- 754 // DmaStartOp 755 // --------------------------------------------------------------------------- 756 757 void DmaStartOp::build(OpBuilder &builder, OperationState &result, 758 Value srcMemRef, ValueRange srcIndices, Value destMemRef, 759 ValueRange destIndices, Value numElements, 760 Value tagMemRef, ValueRange tagIndices, Value stride, 761 Value elementsPerStride) { 762 result.addOperands(srcMemRef); 763 result.addOperands(srcIndices); 764 result.addOperands(destMemRef); 765 result.addOperands(destIndices); 766 result.addOperands({numElements, tagMemRef}); 767 result.addOperands(tagIndices); 768 if (stride) 769 result.addOperands({stride, elementsPerStride}); 770 } 771 772 static void print(OpAsmPrinter &p, DmaStartOp op) { 773 p << " " << op.getSrcMemRef() << '[' << op.getSrcIndices() << "], " 774 << op.getDstMemRef() << '[' << op.getDstIndices() << "], " 775 << op.getNumElements() << ", " << op.getTagMemRef() << '[' 776 << op.getTagIndices() << ']'; 777 if (op.isStrided()) 778 p << ", " << op.getStride() << ", " << op.getNumElementsPerStride(); 779 780 p.printOptionalAttrDict(op->getAttrs()); 781 p << " : " << op.getSrcMemRef().getType() << ", " 782 << op.getDstMemRef().getType() << ", " << op.getTagMemRef().getType(); 783 } 784 785 // Parse DmaStartOp. 786 // Ex: 787 // %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size, 788 // %tag[%index], %stride, %num_elt_per_stride : 789 // : memref<3076 x f32, 0>, 790 // memref<1024 x f32, 2>, 791 // memref<1 x i32> 792 // 793 static ParseResult parseDmaStartOp(OpAsmParser &parser, 794 OperationState &result) { 795 OpAsmParser::OperandType srcMemRefInfo; 796 SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos; 797 OpAsmParser::OperandType dstMemRefInfo; 798 SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos; 799 OpAsmParser::OperandType numElementsInfo; 800 OpAsmParser::OperandType tagMemrefInfo; 801 SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos; 802 SmallVector<OpAsmParser::OperandType, 2> strideInfo; 803 804 SmallVector<Type, 3> types; 805 auto indexType = parser.getBuilder().getIndexType(); 806 807 // Parse and resolve the following list of operands: 808 // *) source memref followed by its indices (in square brackets). 809 // *) destination memref followed by its indices (in square brackets). 810 // *) dma size in KiB. 811 if (parser.parseOperand(srcMemRefInfo) || 812 parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) || 813 parser.parseComma() || parser.parseOperand(dstMemRefInfo) || 814 parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) || 815 parser.parseComma() || parser.parseOperand(numElementsInfo) || 816 parser.parseComma() || parser.parseOperand(tagMemrefInfo) || 817 parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square)) 818 return failure(); 819 820 // Parse optional stride and elements per stride. 
821 if (parser.parseTrailingOperandList(strideInfo)) 822 return failure(); 823 824 bool isStrided = strideInfo.size() == 2; 825 if (!strideInfo.empty() && !isStrided) { 826 return parser.emitError(parser.getNameLoc(), 827 "expected two stride related operands"); 828 } 829 830 if (parser.parseColonTypeList(types)) 831 return failure(); 832 if (types.size() != 3) 833 return parser.emitError(parser.getNameLoc(), "fewer/more types expected"); 834 835 if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) || 836 parser.resolveOperands(srcIndexInfos, indexType, result.operands) || 837 parser.resolveOperand(dstMemRefInfo, types[1], result.operands) || 838 parser.resolveOperands(dstIndexInfos, indexType, result.operands) || 839 // size should be an index. 840 parser.resolveOperand(numElementsInfo, indexType, result.operands) || 841 parser.resolveOperand(tagMemrefInfo, types[2], result.operands) || 842 // tag indices should be index. 843 parser.resolveOperands(tagIndexInfos, indexType, result.operands)) 844 return failure(); 845 846 if (isStrided) { 847 if (parser.resolveOperands(strideInfo, indexType, result.operands)) 848 return failure(); 849 } 850 851 return success(); 852 } 853 854 static LogicalResult verify(DmaStartOp op) { 855 unsigned numOperands = op.getNumOperands(); 856 857 // Mandatory non-variadic operands are: src memref, dst memref, tag memref and 858 // the number of elements. 859 if (numOperands < 4) 860 return op.emitOpError("expected at least 4 operands"); 861 862 // Check types of operands. The order of these calls is important: the later 863 // calls rely on some type properties to compute the operand position. 864 // 1. Source memref. 865 if (!op.getSrcMemRef().getType().isa<MemRefType>()) 866 return op.emitOpError("expected source to be of memref type"); 867 if (numOperands < op.getSrcMemRefRank() + 4) 868 return op.emitOpError() 869 << "expected at least " << op.getSrcMemRefRank() + 4 << " operands"; 870 if (!op.getSrcIndices().empty() && 871 !llvm::all_of(op.getSrcIndices().getTypes(), 872 [](Type t) { return t.isIndex(); })) 873 return op.emitOpError("expected source indices to be of index type"); 874 875 // 2. Destination memref. 876 if (!op.getDstMemRef().getType().isa<MemRefType>()) 877 return op.emitOpError("expected destination to be of memref type"); 878 unsigned numExpectedOperands = 879 op.getSrcMemRefRank() + op.getDstMemRefRank() + 4; 880 if (numOperands < numExpectedOperands) 881 return op.emitOpError() 882 << "expected at least " << numExpectedOperands << " operands"; 883 if (!op.getDstIndices().empty() && 884 !llvm::all_of(op.getDstIndices().getTypes(), 885 [](Type t) { return t.isIndex(); })) 886 return op.emitOpError("expected destination indices to be of index type"); 887 888 // 3. Number of elements. 889 if (!op.getNumElements().getType().isIndex()) 890 return op.emitOpError("expected num elements to be of index type"); 891 892 // 4. Tag memref. 893 if (!op.getTagMemRef().getType().isa<MemRefType>()) 894 return op.emitOpError("expected tag to be of memref type"); 895 numExpectedOperands += op.getTagMemRefRank(); 896 if (numOperands < numExpectedOperands) 897 return op.emitOpError() 898 << "expected at least " << numExpectedOperands << " operands"; 899 if (!op.getTagIndices().empty() && 900 !llvm::all_of(op.getTagIndices().getTypes(), 901 [](Type t) { return t.isIndex(); })) 902 return op.emitOpError("expected tag indices to be of index type"); 903 904 // Optional stride-related operands must be either both present or both 905 // absent. 
906 if (numOperands != numExpectedOperands && 907 numOperands != numExpectedOperands + 2) 908 return op.emitOpError("incorrect number of operands"); 909 910 // 5. Strides. 911 if (op.isStrided()) { 912 if (!op.getStride().getType().isIndex() || 913 !op.getNumElementsPerStride().getType().isIndex()) 914 return op.emitOpError( 915 "expected stride and num elements per stride to be of type index"); 916 } 917 918 return success(); 919 } 920 921 LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands, 922 SmallVectorImpl<OpFoldResult> &results) { 923 /// dma_start(memrefcast) -> dma_start 924 return foldMemRefCast(*this); 925 } 926 927 // --------------------------------------------------------------------------- 928 // DmaWaitOp 929 // --------------------------------------------------------------------------- 930 931 LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands, 932 SmallVectorImpl<OpFoldResult> &results) { 933 /// dma_wait(memrefcast) -> dma_wait 934 return foldMemRefCast(*this); 935 } 936 937 static LogicalResult verify(DmaWaitOp op) { 938 // Check that the number of tag indices matches the tagMemRef rank. 939 unsigned numTagIndices = op.tagIndices().size(); 940 unsigned tagMemRefRank = op.getTagMemRefRank(); 941 if (numTagIndices != tagMemRefRank) 942 return op.emitOpError() << "expected tagIndices to have the same number of " 943 "elements as the tagMemRef rank, expected " 944 << tagMemRefRank << ", but got " << numTagIndices; 945 return success(); 946 } 947 948 //===----------------------------------------------------------------------===// 949 // GlobalOp 950 //===----------------------------------------------------------------------===// 951 952 static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op, 953 TypeAttr type, 954 Attribute initialValue) { 955 p << type; 956 if (!op.isExternal()) { 957 p << " = "; 958 if (op.isUninitialized()) 959 p << "uninitialized"; 960 else 961 p.printAttributeWithoutType(initialValue); 962 } 963 } 964 965 static ParseResult 966 parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr, 967 Attribute &initialValue) { 968 Type type; 969 if (parser.parseType(type)) 970 return failure(); 971 972 auto memrefType = type.dyn_cast<MemRefType>(); 973 if (!memrefType || !memrefType.hasStaticShape()) 974 return parser.emitError(parser.getNameLoc()) 975 << "type should be static shaped memref, but got " << type; 976 typeAttr = TypeAttr::get(type); 977 978 if (parser.parseOptionalEqual()) 979 return success(); 980 981 if (succeeded(parser.parseOptionalKeyword("uninitialized"))) { 982 initialValue = UnitAttr::get(parser.getContext()); 983 return success(); 984 } 985 986 Type tensorType = getTensorTypeFromMemRefType(memrefType); 987 if (parser.parseAttribute(initialValue, tensorType)) 988 return failure(); 989 if (!initialValue.isa<ElementsAttr>()) 990 return parser.emitError(parser.getNameLoc()) 991 << "initial value should be a unit or elements attribute"; 992 return success(); 993 } 994 995 static LogicalResult verify(GlobalOp op) { 996 auto memrefType = op.type().dyn_cast<MemRefType>(); 997 if (!memrefType || !memrefType.hasStaticShape()) 998 return op.emitOpError("type should be static shaped memref, but got ") 999 << op.type(); 1000 1001 // Verify that the initial value, if present, is either a unit attribute or 1002 // an elements attribute. 
1003 if (op.initial_value().hasValue()) { 1004 Attribute initValue = op.initial_value().getValue(); 1005 if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>()) 1006 return op.emitOpError("initial value should be a unit or elements " 1007 "attribute, but got ") 1008 << initValue; 1009 1010 // Check that the type of the initial value is compatible with the type of 1011 // the global variable. 1012 if (initValue.isa<ElementsAttr>()) { 1013 Type initType = initValue.getType(); 1014 Type tensorType = getTensorTypeFromMemRefType(memrefType); 1015 if (initType != tensorType) 1016 return op.emitOpError("initial value expected to be of type ") 1017 << tensorType << ", but was of type " << initType; 1018 } 1019 } 1020 1021 if (Optional<uint64_t> alignAttr = op.alignment()) { 1022 uint64_t alignment = alignAttr.getValue(); 1023 1024 if (!llvm::isPowerOf2_64(alignment)) 1025 return op->emitError() << "alignment attribute value " << alignment 1026 << " is not a power of 2"; 1027 } 1028 1029 // TODO: verify visibility for declarations. 1030 return success(); 1031 } 1032 1033 //===----------------------------------------------------------------------===// 1034 // GetGlobalOp 1035 //===----------------------------------------------------------------------===// 1036 1037 LogicalResult 1038 GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) { 1039 // Verify that the result type is same as the type of the referenced 1040 // memref.global op. 1041 auto global = 1042 symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr()); 1043 if (!global) 1044 return emitOpError("'") 1045 << name() << "' does not reference a valid global memref"; 1046 1047 Type resultType = result().getType(); 1048 if (global.type() != resultType) 1049 return emitOpError("result type ") 1050 << resultType << " does not match type " << global.type() 1051 << " of the global memref @" << name(); 1052 return success(); 1053 } 1054 1055 //===----------------------------------------------------------------------===// 1056 // LoadOp 1057 //===----------------------------------------------------------------------===// 1058 1059 static LogicalResult verify(LoadOp op) { 1060 if (op.getNumOperands() != 1 + op.getMemRefType().getRank()) 1061 return op.emitOpError("incorrect number of indices for load"); 1062 return success(); 1063 } 1064 1065 OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) { 1066 /// load(memrefcast) -> load 1067 if (succeeded(foldMemRefCast(*this))) 1068 return getResult(); 1069 return OpFoldResult(); 1070 } 1071 1072 //===----------------------------------------------------------------------===// 1073 // PrefetchOp 1074 //===----------------------------------------------------------------------===// 1075 1076 static void print(OpAsmPrinter &p, PrefetchOp op) { 1077 p << " " << op.memref() << '['; 1078 p.printOperands(op.indices()); 1079 p << ']' << ", " << (op.isWrite() ? "write" : "read"); 1080 p << ", locality<" << op.localityHint(); 1081 p << ">, " << (op.isDataCache() ? 
"data" : "instr"); 1082 p.printOptionalAttrDict( 1083 op->getAttrs(), 1084 /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"}); 1085 p << " : " << op.getMemRefType(); 1086 } 1087 1088 static ParseResult parsePrefetchOp(OpAsmParser &parser, 1089 OperationState &result) { 1090 OpAsmParser::OperandType memrefInfo; 1091 SmallVector<OpAsmParser::OperandType, 4> indexInfo; 1092 IntegerAttr localityHint; 1093 MemRefType type; 1094 StringRef readOrWrite, cacheType; 1095 1096 auto indexTy = parser.getBuilder().getIndexType(); 1097 auto i32Type = parser.getBuilder().getIntegerType(32); 1098 if (parser.parseOperand(memrefInfo) || 1099 parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) || 1100 parser.parseComma() || parser.parseKeyword(&readOrWrite) || 1101 parser.parseComma() || parser.parseKeyword("locality") || 1102 parser.parseLess() || 1103 parser.parseAttribute(localityHint, i32Type, "localityHint", 1104 result.attributes) || 1105 parser.parseGreater() || parser.parseComma() || 1106 parser.parseKeyword(&cacheType) || parser.parseColonType(type) || 1107 parser.resolveOperand(memrefInfo, type, result.operands) || 1108 parser.resolveOperands(indexInfo, indexTy, result.operands)) 1109 return failure(); 1110 1111 if (!readOrWrite.equals("read") && !readOrWrite.equals("write")) 1112 return parser.emitError(parser.getNameLoc(), 1113 "rw specifier has to be 'read' or 'write'"); 1114 result.addAttribute( 1115 PrefetchOp::getIsWriteAttrName(), 1116 parser.getBuilder().getBoolAttr(readOrWrite.equals("write"))); 1117 1118 if (!cacheType.equals("data") && !cacheType.equals("instr")) 1119 return parser.emitError(parser.getNameLoc(), 1120 "cache type has to be 'data' or 'instr'"); 1121 1122 result.addAttribute( 1123 PrefetchOp::getIsDataCacheAttrName(), 1124 parser.getBuilder().getBoolAttr(cacheType.equals("data"))); 1125 1126 return success(); 1127 } 1128 1129 static LogicalResult verify(PrefetchOp op) { 1130 if (op.getNumOperands() != 1 + op.getMemRefType().getRank()) 1131 return op.emitOpError("too few indices"); 1132 1133 return success(); 1134 } 1135 1136 LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands, 1137 SmallVectorImpl<OpFoldResult> &results) { 1138 // prefetch(memrefcast) -> prefetch 1139 return foldMemRefCast(*this); 1140 } 1141 1142 //===----------------------------------------------------------------------===// 1143 // RankOp 1144 //===----------------------------------------------------------------------===// 1145 1146 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) { 1147 // Constant fold rank when the rank of the operand is known. 1148 auto type = getOperand().getType(); 1149 auto shapedType = type.dyn_cast<ShapedType>(); 1150 if (shapedType && shapedType.hasRank()) 1151 return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank()); 1152 return IntegerAttr(); 1153 } 1154 1155 //===----------------------------------------------------------------------===// 1156 // ReinterpretCastOp 1157 //===----------------------------------------------------------------------===// 1158 1159 /// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`, 1160 /// `staticSizes` and `staticStrides` are automatically filled with 1161 /// source-memref-rank sentinel values that encode dynamic entries. 
1162 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result, 1163 MemRefType resultType, Value source, 1164 OpFoldResult offset, ArrayRef<OpFoldResult> sizes, 1165 ArrayRef<OpFoldResult> strides, 1166 ArrayRef<NamedAttribute> attrs) { 1167 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides; 1168 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides; 1169 dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets, 1170 ShapedType::kDynamicStrideOrOffset); 1171 dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, 1172 ShapedType::kDynamicSize); 1173 dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, 1174 ShapedType::kDynamicStrideOrOffset); 1175 build(b, result, resultType, source, dynamicOffsets, dynamicSizes, 1176 dynamicStrides, b.getI64ArrayAttr(staticOffsets), 1177 b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides)); 1178 result.addAttributes(attrs); 1179 } 1180 1181 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result, 1182 MemRefType resultType, Value source, 1183 int64_t offset, ArrayRef<int64_t> sizes, 1184 ArrayRef<int64_t> strides, 1185 ArrayRef<NamedAttribute> attrs) { 1186 SmallVector<OpFoldResult> sizeValues = 1187 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult { 1188 return b.getI64IntegerAttr(v); 1189 })); 1190 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>( 1191 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult { 1192 return b.getI64IntegerAttr(v); 1193 })); 1194 build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues, 1195 strideValues, attrs); 1196 } 1197 1198 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result, 1199 MemRefType resultType, Value source, Value offset, 1200 ValueRange sizes, ValueRange strides, 1201 ArrayRef<NamedAttribute> attrs) { 1202 SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>( 1203 llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; })); 1204 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>( 1205 llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; })); 1206 build(b, result, resultType, source, offset, sizeValues, strideValues, attrs); 1207 } 1208 1209 // TODO: ponder whether we want to allow missing trailing sizes/strides that are 1210 // completed automatically, like we have for subview and extract_slice. 1211 static LogicalResult verify(ReinterpretCastOp op) { 1212 // The source and result memrefs should be in the same memory space. 1213 auto srcType = op.source().getType().cast<BaseMemRefType>(); 1214 auto resultType = op.getType().cast<MemRefType>(); 1215 if (srcType.getMemorySpace() != resultType.getMemorySpace()) 1216 return op.emitError("different memory spaces specified for source type ") 1217 << srcType << " and result memref type " << resultType; 1218 if (srcType.getElementType() != resultType.getElementType()) 1219 return op.emitError("different element types specified for source type ") 1220 << srcType << " and result memref type " << resultType; 1221 1222 // Match sizes in result memref type and in static_sizes attribute. 
1223 for (auto &en : 1224 llvm::enumerate(llvm::zip(resultType.getShape(), 1225 extractFromI64ArrayAttr(op.static_sizes())))) { 1226 int64_t resultSize = std::get<0>(en.value()); 1227 int64_t expectedSize = std::get<1>(en.value()); 1228 if (!ShapedType::isDynamic(resultSize) && 1229 !ShapedType::isDynamic(expectedSize) && resultSize != expectedSize) 1230 return op.emitError("expected result type with size = ") 1231 << expectedSize << " instead of " << resultSize 1232 << " in dim = " << en.index(); 1233 } 1234 1235 // Match offset and strides in static_offset and static_strides attributes. If 1236 // result memref type has no affine map specified, this will assume an 1237 // identity layout. 1238 int64_t resultOffset; 1239 SmallVector<int64_t, 4> resultStrides; 1240 if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset))) 1241 return op.emitError( 1242 "expected result type to have strided layout but found ") 1243 << resultType; 1244 1245 // Match offset in result memref type and in static_offsets attribute. 1246 int64_t expectedOffset = extractFromI64ArrayAttr(op.static_offsets()).front(); 1247 if (!ShapedType::isDynamicStrideOrOffset(resultOffset) && 1248 !ShapedType::isDynamicStrideOrOffset(expectedOffset) && 1249 resultOffset != expectedOffset) 1250 return op.emitError("expected result type with offset = ") 1251 << resultOffset << " instead of " << expectedOffset; 1252 1253 // Match strides in result memref type and in static_strides attribute. 1254 for (auto &en : llvm::enumerate(llvm::zip( 1255 resultStrides, extractFromI64ArrayAttr(op.static_strides())))) { 1256 int64_t resultStride = std::get<0>(en.value()); 1257 int64_t expectedStride = std::get<1>(en.value()); 1258 if (!ShapedType::isDynamicStrideOrOffset(resultStride) && 1259 !ShapedType::isDynamicStrideOrOffset(expectedStride) && 1260 resultStride != expectedStride) 1261 return op.emitError("expected result type with stride = ") 1262 << expectedStride << " instead of " << resultStride 1263 << " in dim = " << en.index(); 1264 } 1265 1266 return success(); 1267 } 1268 1269 //===----------------------------------------------------------------------===// 1270 // Reassociative reshape ops 1271 //===----------------------------------------------------------------------===// 1272 1273 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() { 1274 return getSymbolLessAffineMaps(getReassociationExprs()); 1275 } 1276 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() { 1277 return convertReassociationIndicesToExprs(getContext(), 1278 getReassociationIndices()); 1279 } 1280 1281 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() { 1282 return getSymbolLessAffineMaps(getReassociationExprs()); 1283 } 1284 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() { 1285 return convertReassociationIndicesToExprs(getContext(), 1286 getReassociationIndices()); 1287 } 1288 1289 static void print(OpAsmPrinter &p, ExpandShapeOp op) { 1290 ::mlir::printReshapeOp<ExpandShapeOp>(p, op); 1291 } 1292 1293 static void print(OpAsmPrinter &p, CollapseShapeOp op) { 1294 ::mlir::printReshapeOp<CollapseShapeOp>(p, op); 1295 } 1296 1297 /// Detect whether memref dims [dim, dim + extent) can be reshaped without 1298 /// copies. 1299 static bool isReshapableDimBand(unsigned dim, unsigned extent, 1300 ArrayRef<int64_t> sizes, 1301 ArrayRef<AffineExpr> strides) { 1302 // Bands of extent one can be reshaped, as they are not reshaped at all. 
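  // For larger bands, the loop below checks that consecutive dims are
  // contiguous. Illustrative sketch: with sizes [2, 3, 4] and strides
  // [12, 4, 1], the band covering dims [1, 3) is reshapable since
  // strides[1] == strides[2] * sizes[2] (4 == 1 * 4); the band is rejected as
  // soon as that relation fails or a trailing size is dynamic.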
1303 if (extent == 1) 1304 return true; 1305 // Otherwise, the size of the first dimension needs to be known. 1306 if (ShapedType::isDynamic(sizes[dim])) 1307 return false; 1308 assert(sizes.size() == strides.size() && "mismatched ranks"); 1309 // off by 1 indexing to avoid out of bounds 1310 // V 1311 for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) { 1312 // Only bands of static shapes are reshapable. This is due to the fact that 1313 // there is no relation between dynamic sizes and dynamic strides: we do not 1314 // have enough information to know whether a "-1" size corresponds to the 1315 // proper symbol in the AffineExpr of a stride. 1316 if (ShapedType::isDynamic(sizes[idx + 1])) 1317 return false; 1318 // TODO: Refine this by passing the proper nDims and nSymbols so we can 1319 // simplify on the fly and catch more reshapable cases. 1320 if (strides[idx] != strides[idx + 1] * sizes[idx + 1]) 1321 return false; 1322 } 1323 return true; 1324 } 1325 1326 /// Compute the MemRefType obtained by applying the `reassociation` (which is 1327 /// expected to be valid) to `type`. 1328 /// If `type` is Contiguous MemRefType, this always produce a contiguous 1329 /// MemRefType. 1330 static MemRefType 1331 computeReshapeCollapsedType(MemRefType type, 1332 ArrayRef<AffineMap> reassociation) { 1333 auto sizes = type.getShape(); 1334 AffineExpr offset; 1335 SmallVector<AffineExpr, 4> strides; 1336 auto status = getStridesAndOffset(type, strides, offset); 1337 (void)status; 1338 assert(succeeded(status) && "expected strided memref"); 1339 1340 SmallVector<int64_t, 4> newSizes; 1341 newSizes.reserve(reassociation.size()); 1342 SmallVector<AffineExpr, 4> newStrides; 1343 newStrides.reserve(reassociation.size()); 1344 1345 // Use the fact that reassociation is valid to simplify the logic: only use 1346 // each map's rank. 1347 assert(isReassociationValid(reassociation) && "invalid reassociation"); 1348 unsigned currentDim = 0; 1349 for (AffineMap m : reassociation) { 1350 unsigned dim = m.getNumResults(); 1351 int64_t size = 1; 1352 AffineExpr stride = strides[currentDim + dim - 1]; 1353 if (!isReshapableDimBand(currentDim, dim, sizes, strides)) { 1354 size = ShapedType::kDynamicSize; 1355 stride = AffineExpr(); 1356 } else { 1357 for (unsigned d = 0; d < dim; ++d) 1358 size *= sizes[currentDim + d]; 1359 } 1360 newSizes.push_back(size); 1361 newStrides.push_back(stride); 1362 currentDim += dim; 1363 } 1364 1365 // Early-exit: if `type` is contiguous, the result must be contiguous. 1366 if (canonicalizeStridedLayout(type).getLayout().isIdentity()) 1367 return MemRefType::Builder(type).setShape(newSizes).setLayout({}); 1368 1369 // Convert back to int64_t because we don't have enough information to create 1370 // new strided layouts from AffineExpr only. This corresponds to a case where 1371 // copies may be necessary. 
1372 int64_t intOffset = ShapedType::kDynamicStrideOrOffset; 1373 if (auto o = offset.dyn_cast<AffineConstantExpr>()) 1374 intOffset = o.getValue(); 1375 SmallVector<int64_t, 4> intStrides; 1376 intStrides.reserve(strides.size()); 1377 for (auto stride : newStrides) { 1378 if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>()) 1379 intStrides.push_back(cst.getValue()); 1380 else 1381 intStrides.push_back(ShapedType::kDynamicStrideOrOffset); 1382 } 1383 auto layout = 1384 makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext()); 1385 return canonicalizeStridedLayout( 1386 MemRefType::Builder(type).setShape(newSizes).setLayout( 1387 AffineMapAttr::get(layout))); 1388 } 1389 1390 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src, 1391 ArrayRef<ReassociationIndices> reassociation, 1392 ArrayRef<NamedAttribute> attrs) { 1393 auto memRefType = src.getType().cast<MemRefType>(); 1394 auto resultType = computeReshapeCollapsedType( 1395 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs( 1396 b.getContext(), reassociation))); 1397 build(b, result, resultType, src, attrs); 1398 result.addAttribute(getReassociationAttrName(), 1399 getReassociationIndicesAttribute(b, reassociation)); 1400 } 1401 1402 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src, 1403 ArrayRef<ReassociationIndices> reassociation, 1404 ArrayRef<NamedAttribute> attrs) { 1405 auto memRefType = src.getType().cast<MemRefType>(); 1406 auto resultType = computeReshapeCollapsedType( 1407 memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs( 1408 b.getContext(), reassociation))); 1409 build(b, result, resultType, src, attrs); 1410 result.addAttribute(getReassociationAttrName(), 1411 getReassociationIndicesAttribute(b, reassociation)); 1412 } 1413 1414 template <typename ReshapeOp, 1415 bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value> 1416 static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType, 1417 MemRefType collapsedType) { 1418 if (failed( 1419 verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion))) 1420 return failure(); 1421 auto maps = op.getReassociationMaps(); 1422 MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps); 1423 if (collapsedType != expectedType) 1424 return op.emitOpError("expected collapsed type to be ") 1425 << expectedType << ", but got " << collapsedType; 1426 return success(); 1427 } 1428 1429 static LogicalResult verify(ExpandShapeOp op) { 1430 return verifyReshapeOp(op, op.getResultType(), op.getSrcType()); 1431 } 1432 1433 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results, 1434 MLIRContext *context) { 1435 results.add<CollapseReshapeOps<ExpandShapeOp>, 1436 CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context); 1437 } 1438 1439 static LogicalResult verify(CollapseShapeOp op) { 1440 return verifyReshapeOp(op, op.getSrcType(), op.getResultType()); 1441 } 1442 1443 struct CollapseShapeOpMemRefCastFolder 1444 : public OpRewritePattern<CollapseShapeOp> { 1445 public: 1446 using OpRewritePattern<CollapseShapeOp>::OpRewritePattern; 1447 1448 LogicalResult matchAndRewrite(CollapseShapeOp op, 1449 PatternRewriter &rewriter) const override { 1450 auto cast = op.getOperand().getDefiningOp<CastOp>(); 1451 if (!cast) 1452 return failure(); 1453 1454 if (!CastOp::canFoldIntoConsumerOp(cast)) 1455 return failure(); 1456 1457 Type newResultType = computeReshapeCollapsedType( 1458 
cast.getOperand().getType().cast<MemRefType>(), 1459 op.getReassociationMaps()); 1460 1461 if (newResultType == op.getResultType()) { 1462 rewriter.updateRootInPlace( 1463 op, [&]() { op.srcMutable().assign(cast.source()); }); 1464 } else { 1465 Value newOp = rewriter.create<CollapseShapeOp>( 1466 op->getLoc(), cast.source(), op.getReassociationIndices()); 1467 rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp); 1468 } 1469 return success(); 1470 } 1471 }; 1472 1473 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results, 1474 MLIRContext *context) { 1475 results.add<CollapseReshapeOps<CollapseShapeOp>, 1476 CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>, 1477 CollapseShapeOpMemRefCastFolder>(context); 1478 } 1479 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) { 1480 return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands); 1481 } 1482 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) { 1483 return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands); 1484 } 1485 1486 //===----------------------------------------------------------------------===// 1487 // ReshapeOp 1488 //===----------------------------------------------------------------------===// 1489 1490 static LogicalResult verify(ReshapeOp op) { 1491 Type operandType = op.source().getType(); 1492 Type resultType = op.result().getType(); 1493 1494 Type operandElementType = operandType.cast<ShapedType>().getElementType(); 1495 Type resultElementType = resultType.cast<ShapedType>().getElementType(); 1496 if (operandElementType != resultElementType) 1497 return op.emitOpError("element types of source and destination memref " 1498 "types should be the same"); 1499 1500 if (auto operandMemRefType = operandType.dyn_cast<MemRefType>()) 1501 if (!operandMemRefType.getLayout().isIdentity()) 1502 return op.emitOpError( 1503 "source memref type should have identity affine map"); 1504 1505 int64_t shapeSize = op.shape().getType().cast<MemRefType>().getDimSize(0); 1506 auto resultMemRefType = resultType.dyn_cast<MemRefType>(); 1507 if (resultMemRefType) { 1508 if (!resultMemRefType.getLayout().isIdentity()) 1509 return op.emitOpError( 1510 "result memref type should have identity affine map"); 1511 if (shapeSize == ShapedType::kDynamicSize) 1512 return op.emitOpError("cannot use shape operand with dynamic length to " 1513 "reshape to statically-ranked memref type"); 1514 if (shapeSize != resultMemRefType.getRank()) 1515 return op.emitOpError( 1516 "length of shape operand differs from the result's memref rank"); 1517 } 1518 return success(); 1519 } 1520 1521 //===----------------------------------------------------------------------===// 1522 // StoreOp 1523 //===----------------------------------------------------------------------===// 1524 1525 static LogicalResult verify(StoreOp op) { 1526 if (op.getNumOperands() != 2 + op.getMemRefType().getRank()) 1527 return op.emitOpError("store index operand count not equal to memref rank"); 1528 1529 return success(); 1530 } 1531 1532 LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands, 1533 SmallVectorImpl<OpFoldResult> &results) { 1534 /// store(memrefcast) -> store 1535 return foldMemRefCast(*this, getValueToStore()); 1536 } 1537 1538 //===----------------------------------------------------------------------===// 1539 // SubViewOp 1540 //===----------------------------------------------------------------------===// 1541 1542 namespace { 1543 /// Helpers to write more idiomatic operations. 
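/// The `saturated_arith::Wrapper` below treats the dynamic stride/offset
/// sentinel as absorbing: if either operand of `+` or `*` equals
/// `ShapedType::kDynamicStrideOrOffset`, the result is that sentinel again.
/// For example (illustrative values), `Wrapper(8) * 2` yields 16, while
/// `Wrapper(ShapedType::kDynamicStrideOrOffset) * 2` stays dynamic.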
1544 namespace saturated_arith { 1545 struct Wrapper { 1546 explicit Wrapper(int64_t v) : v(v) {} 1547 operator int64_t() { return v; } 1548 int64_t v; 1549 }; 1550 Wrapper operator+(Wrapper a, int64_t b) { 1551 if (ShapedType::isDynamicStrideOrOffset(a) || 1552 ShapedType::isDynamicStrideOrOffset(b)) 1553 return Wrapper(ShapedType::kDynamicStrideOrOffset); 1554 return Wrapper(a.v + b); 1555 } 1556 Wrapper operator*(Wrapper a, int64_t b) { 1557 if (ShapedType::isDynamicStrideOrOffset(a) || 1558 ShapedType::isDynamicStrideOrOffset(b)) 1559 return Wrapper(ShapedType::kDynamicStrideOrOffset); 1560 return Wrapper(a.v * b); 1561 } 1562 } // namespace saturated_arith 1563 } // namespace 1564 1565 /// A subview result type can be fully inferred from the source type and the 1566 /// static representation of offsets, sizes and strides. Special sentinels 1567 /// encode the dynamic case. 1568 Type SubViewOp::inferResultType(MemRefType sourceMemRefType, 1569 ArrayRef<int64_t> staticOffsets, 1570 ArrayRef<int64_t> staticSizes, 1571 ArrayRef<int64_t> staticStrides) { 1572 unsigned rank = sourceMemRefType.getRank(); 1573 (void)rank; 1574 assert(staticOffsets.size() == rank && "staticOffsets length mismatch"); 1575 assert(staticSizes.size() == rank && "staticSizes length mismatch"); 1576 assert(staticStrides.size() == rank && "staticStrides length mismatch"); 1577 1578 // Extract source offset and strides. 1579 int64_t sourceOffset; 1580 SmallVector<int64_t, 4> sourceStrides; 1581 auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset); 1582 assert(succeeded(res) && "SubViewOp expected strided memref type"); 1583 (void)res; 1584 1585 // Compute target offset whose value is: 1586 // `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`. 1587 int64_t targetOffset = sourceOffset; 1588 for (auto it : llvm::zip(staticOffsets, sourceStrides)) { 1589 auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it); 1590 using namespace saturated_arith; 1591 targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * targetStride; 1592 } 1593 1594 // Compute target stride whose value is: 1595 // `sourceStrides_i * staticStrides_i`. 1596 SmallVector<int64_t, 4> targetStrides; 1597 targetStrides.reserve(staticOffsets.size()); 1598 for (auto it : llvm::zip(sourceStrides, staticStrides)) { 1599 auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it); 1600 using namespace saturated_arith; 1601 targetStrides.push_back(Wrapper(sourceStride) * staticStride); 1602 } 1603 1604 // The type is now known. 
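// Worked example (hypothetical operands): for a source `memref<8x16xf32>`
// (offset 0, strides [16, 1]) with static offsets [2, 4], sizes [4, 8] and
// strides [1, 2], the computation above yields
//   targetOffset  = 0 + 2 * 16 + 4 * 1 = 36
//   targetStrides = [16 * 1, 1 * 2] = [16, 2]
// so the inferred type is `memref<4x8xf32, offset: 36, strides: [16, 2]>`.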
1605 return MemRefType::get( 1606 staticSizes, sourceMemRefType.getElementType(), 1607 makeStridedLinearLayoutMap(targetStrides, targetOffset, 1608 sourceMemRefType.getContext()), 1609 sourceMemRefType.getMemorySpace()); 1610 } 1611 1612 Type SubViewOp::inferResultType(MemRefType sourceMemRefType, 1613 ArrayRef<OpFoldResult> offsets, 1614 ArrayRef<OpFoldResult> sizes, 1615 ArrayRef<OpFoldResult> strides) { 1616 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides; 1617 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides; 1618 dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, 1619 ShapedType::kDynamicStrideOrOffset); 1620 dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, 1621 ShapedType::kDynamicSize); 1622 dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, 1623 ShapedType::kDynamicStrideOrOffset); 1624 return SubViewOp::inferResultType(sourceMemRefType, staticOffsets, 1625 staticSizes, staticStrides); 1626 } 1627 1628 Type SubViewOp::inferRankReducedResultType(unsigned resultRank, 1629 MemRefType sourceRankedTensorType, 1630 ArrayRef<int64_t> offsets, 1631 ArrayRef<int64_t> sizes, 1632 ArrayRef<int64_t> strides) { 1633 auto inferredType = 1634 inferResultType(sourceRankedTensorType, offsets, sizes, strides) 1635 .cast<MemRefType>(); 1636 assert(inferredType.getRank() >= resultRank && "expected "); 1637 int rankDiff = inferredType.getRank() - resultRank; 1638 if (rankDiff > 0) { 1639 auto shape = inferredType.getShape(); 1640 llvm::SmallDenseSet<unsigned> dimsToProject; 1641 mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject); 1642 SmallVector<int64_t> projectedShape; 1643 for (unsigned pos = 0, e = shape.size(); pos < e; ++pos) 1644 if (!dimsToProject.contains(pos)) 1645 projectedShape.push_back(shape[pos]); 1646 1647 AffineMap map = inferredType.getLayout().getAffineMap(); 1648 if (!map.isIdentity()) 1649 map = getProjectedMap(map, dimsToProject); 1650 inferredType = 1651 MemRefType::get(projectedShape, inferredType.getElementType(), map, 1652 inferredType.getMemorySpace()); 1653 } 1654 return inferredType; 1655 } 1656 1657 Type SubViewOp::inferRankReducedResultType(unsigned resultRank, 1658 MemRefType sourceRankedTensorType, 1659 ArrayRef<OpFoldResult> offsets, 1660 ArrayRef<OpFoldResult> sizes, 1661 ArrayRef<OpFoldResult> strides) { 1662 SmallVector<int64_t> staticOffsets, staticSizes, staticStrides; 1663 SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides; 1664 dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets, 1665 ShapedType::kDynamicStrideOrOffset); 1666 dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes, 1667 ShapedType::kDynamicSize); 1668 dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides, 1669 ShapedType::kDynamicStrideOrOffset); 1670 return SubViewOp::inferRankReducedResultType( 1671 resultRank, sourceRankedTensorType, staticOffsets, staticSizes, 1672 staticStrides); 1673 } 1674 // Build a SubViewOp with mixed static and dynamic entries and custom result 1675 // type. If the type passed is nullptr, it is inferred. 
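// Each entry in `offsets`, `sizes` and `strides` is either a Value (a dynamic
// entry) or an IntegerAttr (a static entry); dispatchIndexOpFoldResults below
// splits them into dynamic operands and static arrays in which dynamic
// entries are encoded by the ShapedType sentinels. For instance (hypothetical
// entries), offsets {b.getI64IntegerAttr(0), %i} become static offsets
// [0, kDynamicStrideOrOffset] plus the single dynamic offset operand %i.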
1676 void SubViewOp::build(OpBuilder &b, OperationState &result,
1677                       MemRefType resultType, Value source,
1678                       ArrayRef<OpFoldResult> offsets,
1679                       ArrayRef<OpFoldResult> sizes,
1680                       ArrayRef<OpFoldResult> strides,
1681                       ArrayRef<NamedAttribute> attrs) {
1682   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1683   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1684   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1685                              ShapedType::kDynamicStrideOrOffset);
1686   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1687                              ShapedType::kDynamicSize);
1688   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1689                              ShapedType::kDynamicStrideOrOffset);
1690   auto sourceMemRefType = source.getType().cast<MemRefType>();
1691   // Structuring implementation this way avoids duplication between builders.
1692   if (!resultType) {
1693     resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1694                                             staticSizes, staticStrides)
1695                      .cast<MemRefType>();
1696   }
1697   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1698         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1699         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1700   result.addAttributes(attrs);
1701 }
1702
1703 // Build a SubViewOp with mixed static and dynamic entries and inferred result
1704 // type.
1705 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1706                       ArrayRef<OpFoldResult> offsets,
1707                       ArrayRef<OpFoldResult> sizes,
1708                       ArrayRef<OpFoldResult> strides,
1709                       ArrayRef<NamedAttribute> attrs) {
1710   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1711 }
1712
1713 // Build a SubViewOp with static entries and inferred result type.
1714 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1715                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1716                       ArrayRef<int64_t> strides,
1717                       ArrayRef<NamedAttribute> attrs) {
1718   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1719       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1720         return b.getI64IntegerAttr(v);
1721       }));
1722   SmallVector<OpFoldResult> sizeValues =
1723       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1724         return b.getI64IntegerAttr(v);
1725       }));
1726   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1727       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1728         return b.getI64IntegerAttr(v);
1729       }));
1730   build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
1731 }
1732
1733 // Build a SubViewOp with static entries and custom result type. If the
1734 // type passed is nullptr, it is inferred.
1735 void SubViewOp::build(OpBuilder &b, OperationState &result, 1736 MemRefType resultType, Value source, 1737 ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes, 1738 ArrayRef<int64_t> strides, 1739 ArrayRef<NamedAttribute> attrs) { 1740 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>( 1741 llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult { 1742 return b.getI64IntegerAttr(v); 1743 })); 1744 SmallVector<OpFoldResult> sizeValues = 1745 llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult { 1746 return b.getI64IntegerAttr(v); 1747 })); 1748 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>( 1749 llvm::map_range(strides, [&](int64_t v) -> OpFoldResult { 1750 return b.getI64IntegerAttr(v); 1751 })); 1752 build(b, result, resultType, source, offsetValues, sizeValues, strideValues, 1753 attrs); 1754 } 1755 1756 // Build a SubViewOp with dynamic entries and custom result type. If the type 1757 // passed is nullptr, it is inferred. 1758 void SubViewOp::build(OpBuilder &b, OperationState &result, 1759 MemRefType resultType, Value source, ValueRange offsets, 1760 ValueRange sizes, ValueRange strides, 1761 ArrayRef<NamedAttribute> attrs) { 1762 SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>( 1763 llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; })); 1764 SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>( 1765 llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; })); 1766 SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>( 1767 llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; })); 1768 build(b, result, resultType, source, offsetValues, sizeValues, strideValues); 1769 } 1770 1771 // Build a SubViewOp with dynamic entries and inferred result type. 1772 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source, 1773 ValueRange offsets, ValueRange sizes, ValueRange strides, 1774 ArrayRef<NamedAttribute> attrs) { 1775 build(b, result, MemRefType(), source, offsets, sizes, strides, attrs); 1776 } 1777 1778 /// For ViewLikeOpInterface. 1779 Value SubViewOp::getViewSource() { return source(); } 1780 1781 /// Return true if t1 and t2 have equal offsets (both dynamic or of same static 1782 /// value). 1783 static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) { 1784 AffineExpr t1Offset, t2Offset; 1785 SmallVector<AffineExpr> t1Strides, t2Strides; 1786 auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset); 1787 auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset); 1788 return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset; 1789 } 1790 1791 /// Checks if `original` Type type can be rank reduced to `reduced` type. 1792 /// This function is slight variant of `is subsequence` algorithm where 1793 /// not matching dimension must be 1. 1794 static SliceVerificationResult 1795 isRankReducedMemRefType(MemRefType originalType, 1796 MemRefType candidateRankReducedType, 1797 ArrayRef<OpFoldResult> sizes) { 1798 auto partialRes = isRankReducedType(originalType, candidateRankReducedType); 1799 if (partialRes != SliceVerificationResult::Success) 1800 return partialRes; 1801 1802 auto optionalUnusedDimsMask = computeMemRefRankReductionMask( 1803 originalType, candidateRankReducedType, sizes); 1804 1805 // Sizes cannot be matched in case empty vector is returned. 
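// For illustration (hypothetical types): a subview with sizes [4, 1, 8] may
// rank-reduce `memref<4x1x8xf32>` to `memref<4x8xf32>`; the mask then marks
// dimension 1, the dropped unit dimension, as unused. When no consistent set
// of dropped unit dimensions explains the candidate type, no mask is returned
// and the mismatch is reported as a layout mismatch below.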
1806 if (!optionalUnusedDimsMask.hasValue()) 1807 return SliceVerificationResult::LayoutMismatch; 1808 1809 if (originalType.getMemorySpace() != 1810 candidateRankReducedType.getMemorySpace()) 1811 return SliceVerificationResult::MemSpaceMismatch; 1812 1813 // No amount of stride dropping can reconcile incompatible offsets. 1814 if (!haveCompatibleOffsets(originalType, candidateRankReducedType)) 1815 return SliceVerificationResult::LayoutMismatch; 1816 1817 return SliceVerificationResult::Success; 1818 } 1819 1820 template <typename OpTy> 1821 static LogicalResult produceSubViewErrorMsg(SliceVerificationResult result, 1822 OpTy op, Type expectedType) { 1823 auto memrefType = expectedType.cast<ShapedType>(); 1824 switch (result) { 1825 case SliceVerificationResult::Success: 1826 return success(); 1827 case SliceVerificationResult::RankTooLarge: 1828 return op.emitError("expected result rank to be smaller or equal to ") 1829 << "the source rank. "; 1830 case SliceVerificationResult::SizeMismatch: 1831 return op.emitError("expected result type to be ") 1832 << expectedType 1833 << " or a rank-reduced version. (mismatch of result sizes) "; 1834 case SliceVerificationResult::ElemTypeMismatch: 1835 return op.emitError("expected result element type to be ") 1836 << memrefType.getElementType(); 1837 case SliceVerificationResult::MemSpaceMismatch: 1838 return op.emitError("expected result and source memory spaces to match."); 1839 case SliceVerificationResult::LayoutMismatch: 1840 return op.emitError("expected result type to be ") 1841 << expectedType 1842 << " or a rank-reduced version. (mismatch of result layout) "; 1843 } 1844 llvm_unreachable("unexpected subview verification result"); 1845 } 1846 1847 /// Verifier for SubViewOp. 1848 static LogicalResult verify(SubViewOp op) { 1849 MemRefType baseType = op.getSourceType(); 1850 MemRefType subViewType = op.getType(); 1851 1852 // The base memref and the view memref should be in the same memory space. 1853 if (baseType.getMemorySpace() != subViewType.getMemorySpace()) 1854 return op.emitError("different memory spaces specified for base memref " 1855 "type ") 1856 << baseType << " and subview memref type " << subViewType; 1857 1858 // Verify that the base memref type has a strided layout map. 1859 if (!isStrided(baseType)) 1860 return op.emitError("base type ") << baseType << " is not strided"; 1861 1862 // Verify result type against inferred type. 1863 auto expectedType = SubViewOp::inferResultType( 1864 baseType, extractFromI64ArrayAttr(op.static_offsets()), 1865 extractFromI64ArrayAttr(op.static_sizes()), 1866 extractFromI64ArrayAttr(op.static_strides())); 1867 1868 auto result = isRankReducedMemRefType(expectedType.cast<MemRefType>(), 1869 subViewType, op.getMixedSizes()); 1870 return produceSubViewErrorMsg(result, op, expectedType); 1871 } 1872 1873 raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) { 1874 return os << "range " << range.offset << ":" << range.size << ":" 1875 << range.stride; 1876 } 1877 1878 /// Return the list of Range (i.e. offset, size, stride). Each Range 1879 /// entry contains either the dynamic value or a ConstantIndexOp constructed 1880 /// with `b` at location `loc`. 
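/// For example (illustrative entries): an op with offsets [%i, 0], sizes
/// [4, %n] and strides [1, 1] yields the ranges {%i, %c4, %c1} and
/// {%c0, %n, %c1}, where %c0, %c1 and %c4 are `arith.constant ... : index`
/// ops materialized at `loc`.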
1881 SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op, 1882 OpBuilder &b, Location loc) { 1883 std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks(); 1884 assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks"); 1885 assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks"); 1886 SmallVector<Range, 8> res; 1887 unsigned rank = ranks[0]; 1888 res.reserve(rank); 1889 for (unsigned idx = 0; idx < rank; ++idx) { 1890 Value offset = 1891 op.isDynamicOffset(idx) 1892 ? op.getDynamicOffset(idx) 1893 : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx)); 1894 Value size = 1895 op.isDynamicSize(idx) 1896 ? op.getDynamicSize(idx) 1897 : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx)); 1898 Value stride = 1899 op.isDynamicStride(idx) 1900 ? op.getDynamicStride(idx) 1901 : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx)); 1902 res.emplace_back(Range{offset, size, stride}); 1903 } 1904 return res; 1905 } 1906 1907 /// Compute the canonical result type of a SubViewOp. Call `inferResultType` to 1908 /// deduce the result type for the given `sourceType`. Additionally, reduce the 1909 /// rank of the inferred result type if `currentResultType` is lower rank than 1910 /// `currentSourceType`. Use this signature if `sourceType` is updated together 1911 /// with the result type. In this case, it is important to compute the dropped 1912 /// dimensions using `currentSourceType` whose strides align with 1913 /// `currentResultType`. 1914 static MemRefType getCanonicalSubViewResultType( 1915 MemRefType currentResultType, MemRefType currentSourceType, 1916 MemRefType sourceType, ArrayRef<OpFoldResult> mixedOffsets, 1917 ArrayRef<OpFoldResult> mixedSizes, ArrayRef<OpFoldResult> mixedStrides) { 1918 auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets, 1919 mixedSizes, mixedStrides) 1920 .cast<MemRefType>(); 1921 llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims = 1922 computeMemRefRankReductionMask(currentSourceType, currentResultType, 1923 mixedSizes); 1924 // Return nullptr as failure mode. 1925 if (!unusedDims) 1926 return nullptr; 1927 SmallVector<int64_t> shape; 1928 for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) { 1929 if (unusedDims->count(sizes.index())) 1930 continue; 1931 shape.push_back(sizes.value()); 1932 } 1933 AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap(); 1934 if (!layoutMap.isIdentity()) 1935 layoutMap = getProjectedMap(layoutMap, unusedDims.getValue()); 1936 return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap, 1937 nonRankReducedType.getMemorySpace()); 1938 } 1939 1940 /// Compute the canonical result type of a SubViewOp. Call `inferResultType` to 1941 /// deduce the result type. Additionally, reduce the rank of the inferred result 1942 /// type if `currentResultType` is lower rank than `sourceType`. 1943 static MemRefType getCanonicalSubViewResultType( 1944 MemRefType currentResultType, MemRefType sourceType, 1945 ArrayRef<OpFoldResult> mixedOffsets, ArrayRef<OpFoldResult> mixedSizes, 1946 ArrayRef<OpFoldResult> mixedStrides) { 1947 return getCanonicalSubViewResultType(currentResultType, sourceType, 1948 sourceType, mixedOffsets, mixedSizes, 1949 mixedStrides); 1950 } 1951 1952 /// Helper method to check if a `subview` operation is trivially a no-op. 
This
1953 /// is the case if all offsets are zero, all strides are 1, and the source
1954 /// shape is the same as the size of the subview. In such cases, the subview can be
1955 /// folded into its source.
1956 static bool isTrivialSubViewOp(SubViewOp subViewOp) {
1957   if (subViewOp.getSourceType().getRank() != subViewOp.getType().getRank())
1958     return false;
1959
1960   auto mixedOffsets = subViewOp.getMixedOffsets();
1961   auto mixedSizes = subViewOp.getMixedSizes();
1962   auto mixedStrides = subViewOp.getMixedStrides();
1963
1964   // Check offsets are zero.
1965   if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
1966         Optional<int64_t> intValue = getConstantIntValue(ofr);
1967         return !intValue || intValue.getValue() != 0;
1968       }))
1969     return false;
1970
1971   // Check strides are one.
1972   if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
1973         Optional<int64_t> intValue = getConstantIntValue(ofr);
1974         return !intValue || intValue.getValue() != 1;
1975       }))
1976     return false;
1977
1978   // Check that all size values are static and match the (static) source shape.
1979   ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
1980   for (const auto &size : llvm::enumerate(mixedSizes)) {
1981     Optional<int64_t> intValue = getConstantIntValue(size.value());
1982     if (!intValue || intValue.getValue() != sourceShape[size.index()])
1983       return false;
1984   }
1985   // All conditions met. The `SubViewOp` is foldable as a no-op.
1986   return true;
1987 }
1988
1989 namespace {
1990 /// Pattern to rewrite a subview op with MemRefCast arguments.
1991 /// This essentially pushes memref.cast past its consuming subview when
1992 /// `canFoldIntoConsumerOp` is true.
1993 ///
1994 /// Example:
1995 /// ```
1996 /// %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
1997 /// %1 = memref.subview %0[0, 0][3, 4][1, 1] :
1998 ///   memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
1999 /// ```
2000 /// is rewritten into:
2001 /// ```
2002 /// %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
2003 /// %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
2004 ///   memref<3x4xf32, offset:?, strides:[?, 1]>
2005 /// ```
2006 class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
2007 public:
2008   using OpRewritePattern<SubViewOp>::OpRewritePattern;
2009
2010   LogicalResult matchAndRewrite(SubViewOp subViewOp,
2011                                 PatternRewriter &rewriter) const override {
2012     // If any operand is constant, bail and let SubViewOpConstantFolder kick in.
2013     if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
2014           return matchPattern(operand, matchConstantIndex());
2015         }))
2016       return failure();
2017
2018     auto castOp = subViewOp.source().getDefiningOp<CastOp>();
2019     if (!castOp)
2020       return failure();
2021
2022     if (!CastOp::canFoldIntoConsumerOp(castOp))
2023       return failure();
2024
2025     // Compute the SubViewOp result type after folding the MemRefCastOp. Use the
2026     // MemRefCastOp source operand type to infer the result type and the current
2027     // SubViewOp source operand type to compute the dropped dimensions if the
2028     // operation is rank-reducing.
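// The two source types may disagree on which dimensions are static, e.g.
// (hypothetically) `memref<16x16xf32>` before the cast vs. `memref<?x?xf32>`
// after it: the cast's source gives the more precise inferred type, while the
// current source keeps the dropped-dimension computation consistent with the
// strides of the existing result type.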
2029 auto resultType = getCanonicalSubViewResultType( 2030 subViewOp.getType(), subViewOp.getSourceType(), 2031 castOp.source().getType().cast<MemRefType>(), 2032 subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(), 2033 subViewOp.getMixedStrides()); 2034 if (!resultType) 2035 return failure(); 2036 2037 Value newSubView = rewriter.create<SubViewOp>( 2038 subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(), 2039 subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(), 2040 subViewOp.static_sizes(), subViewOp.static_strides()); 2041 rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(), 2042 newSubView); 2043 return success(); 2044 } 2045 }; 2046 2047 /// Canonicalize subview ops that are no-ops. When the source shape is not same 2048 /// as a result shape due to use of `affine_map`. 2049 class TrivialSubViewOpFolder final : public OpRewritePattern<SubViewOp> { 2050 public: 2051 using OpRewritePattern<SubViewOp>::OpRewritePattern; 2052 2053 LogicalResult matchAndRewrite(SubViewOp subViewOp, 2054 PatternRewriter &rewriter) const override { 2055 if (!isTrivialSubViewOp(subViewOp)) 2056 return failure(); 2057 if (subViewOp.getSourceType() == subViewOp.getType()) { 2058 rewriter.replaceOp(subViewOp, subViewOp.source()); 2059 return success(); 2060 } 2061 rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.source(), 2062 subViewOp.getType()); 2063 return success(); 2064 } 2065 }; 2066 } // namespace 2067 2068 /// Return the canonical type of the result of a subview. 2069 struct SubViewReturnTypeCanonicalizer { 2070 MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets, 2071 ArrayRef<OpFoldResult> mixedSizes, 2072 ArrayRef<OpFoldResult> mixedStrides) { 2073 return getCanonicalSubViewResultType(op.getType(), op.getSourceType(), 2074 mixedOffsets, mixedSizes, 2075 mixedStrides); 2076 } 2077 }; 2078 2079 /// A canonicalizer wrapper to replace SubViewOps. 2080 struct SubViewCanonicalizer { 2081 void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) { 2082 rewriter.replaceOpWithNewOp<CastOp>(op, newOp, op.getType()); 2083 } 2084 }; 2085 2086 void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results, 2087 MLIRContext *context) { 2088 results 2089 .add<OpWithOffsetSizesAndStridesConstantArgumentFolder< 2090 SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>, 2091 SubViewOpMemRefCastFolder, TrivialSubViewOpFolder>(context); 2092 } 2093 2094 OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) { 2095 auto resultShapedType = getResult().getType().cast<ShapedType>(); 2096 auto sourceShapedType = source().getType().cast<ShapedType>(); 2097 2098 if (resultShapedType.hasStaticShape() && 2099 resultShapedType == sourceShapedType) { 2100 return getViewSource(); 2101 } 2102 2103 return {}; 2104 } 2105 2106 //===----------------------------------------------------------------------===// 2107 // TransposeOp 2108 //===----------------------------------------------------------------------===// 2109 2110 /// Build a strided memref type by applying `permutationMap` tp `memRefType`. 2111 static MemRefType inferTransposeResultType(MemRefType memRefType, 2112 AffineMap permutationMap) { 2113 auto rank = memRefType.getRank(); 2114 auto originalSizes = memRefType.getShape(); 2115 // Compute permuted sizes. 
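// For illustration (hypothetical input): applying (d0, d1) -> (d1, d0) to
// `memref<3x8xf32>` (offset 0, strides [8, 1]) permutes the sizes to [8, 3]
// and the strides to [1, 8], i.e. the result type is
// `memref<8x3xf32, affine_map<(d0, d1) -> (d0 + d1 * 8)>>`.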
2116 SmallVector<int64_t, 4> sizes(rank, 0); 2117 for (const auto &en : llvm::enumerate(permutationMap.getResults())) 2118 sizes[en.index()] = 2119 originalSizes[en.value().cast<AffineDimExpr>().getPosition()]; 2120 2121 // Compute permuted strides. 2122 int64_t offset; 2123 SmallVector<int64_t, 4> strides; 2124 auto res = getStridesAndOffset(memRefType, strides, offset); 2125 assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank)); 2126 (void)res; 2127 auto map = 2128 makeStridedLinearLayoutMap(strides, offset, memRefType.getContext()); 2129 map = permutationMap ? map.compose(permutationMap) : map; 2130 return MemRefType::Builder(memRefType) 2131 .setShape(sizes) 2132 .setLayout(AffineMapAttr::get(map)); 2133 } 2134 2135 void TransposeOp::build(OpBuilder &b, OperationState &result, Value in, 2136 AffineMapAttr permutation, 2137 ArrayRef<NamedAttribute> attrs) { 2138 auto permutationMap = permutation.getValue(); 2139 assert(permutationMap); 2140 2141 auto memRefType = in.getType().cast<MemRefType>(); 2142 // Compute result type. 2143 MemRefType resultType = inferTransposeResultType(memRefType, permutationMap); 2144 2145 build(b, result, resultType, in, attrs); 2146 result.addAttribute(TransposeOp::getPermutationAttrName(), permutation); 2147 } 2148 2149 // transpose $in $permutation attr-dict : type($in) `to` type(results) 2150 static void print(OpAsmPrinter &p, TransposeOp op) { 2151 p << " " << op.in() << " " << op.permutation(); 2152 p.printOptionalAttrDict(op->getAttrs(), 2153 {TransposeOp::getPermutationAttrName()}); 2154 p << " : " << op.in().getType() << " to " << op.getType(); 2155 } 2156 2157 static ParseResult parseTransposeOp(OpAsmParser &parser, 2158 OperationState &result) { 2159 OpAsmParser::OperandType in; 2160 AffineMap permutation; 2161 MemRefType srcType, dstType; 2162 if (parser.parseOperand(in) || parser.parseAffineMap(permutation) || 2163 parser.parseOptionalAttrDict(result.attributes) || 2164 parser.parseColonType(srcType) || 2165 parser.resolveOperand(in, srcType, result.operands) || 2166 parser.parseKeywordType("to", dstType) || 2167 parser.addTypeToList(dstType, result.types)) 2168 return failure(); 2169 2170 result.addAttribute(TransposeOp::getPermutationAttrName(), 2171 AffineMapAttr::get(permutation)); 2172 return success(); 2173 } 2174 2175 static LogicalResult verify(TransposeOp op) { 2176 if (!op.permutation().isPermutation()) 2177 return op.emitOpError("expected a permutation map"); 2178 if (op.permutation().getNumDims() != op.getShapedType().getRank()) 2179 return op.emitOpError( 2180 "expected a permutation map of same rank as the input"); 2181 2182 auto srcType = op.in().getType().cast<MemRefType>(); 2183 auto dstType = op.getType().cast<MemRefType>(); 2184 auto transposedType = inferTransposeResultType(srcType, op.permutation()); 2185 if (dstType != transposedType) 2186 return op.emitOpError("output type ") 2187 << dstType << " does not match transposed input type " << srcType 2188 << ", " << transposedType; 2189 return success(); 2190 } 2191 2192 OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) { 2193 if (succeeded(foldMemRefCast(*this))) 2194 return getResult(); 2195 return {}; 2196 } 2197 2198 //===----------------------------------------------------------------------===// 2199 // ViewOp 2200 //===----------------------------------------------------------------------===// 2201 2202 static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) { 2203 OpAsmParser::OperandType srcInfo; 2204 
SmallVector<OpAsmParser::OperandType, 1> offsetInfo; 2205 SmallVector<OpAsmParser::OperandType, 4> sizesInfo; 2206 auto indexType = parser.getBuilder().getIndexType(); 2207 Type srcType, dstType; 2208 llvm::SMLoc offsetLoc; 2209 if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) || 2210 parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square)) 2211 return failure(); 2212 2213 if (offsetInfo.size() != 1) 2214 return parser.emitError(offsetLoc) << "expects 1 offset operand"; 2215 2216 return failure( 2217 parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) || 2218 parser.parseOptionalAttrDict(result.attributes) || 2219 parser.parseColonType(srcType) || 2220 parser.resolveOperand(srcInfo, srcType, result.operands) || 2221 parser.resolveOperands(offsetInfo, indexType, result.operands) || 2222 parser.resolveOperands(sizesInfo, indexType, result.operands) || 2223 parser.parseKeywordType("to", dstType) || 2224 parser.addTypeToList(dstType, result.types)); 2225 } 2226 2227 static void print(OpAsmPrinter &p, ViewOp op) { 2228 p << ' ' << op.getOperand(0) << '['; 2229 p.printOperand(op.byte_shift()); 2230 p << "][" << op.sizes() << ']'; 2231 p.printOptionalAttrDict(op->getAttrs()); 2232 p << " : " << op.getOperand(0).getType() << " to " << op.getType(); 2233 } 2234 2235 static LogicalResult verify(ViewOp op) { 2236 auto baseType = op.getOperand(0).getType().cast<MemRefType>(); 2237 auto viewType = op.getType(); 2238 2239 // The base memref should have identity layout map (or none). 2240 if (!baseType.getLayout().isIdentity()) 2241 return op.emitError("unsupported map for base memref type ") << baseType; 2242 2243 // The result memref should have identity layout map (or none). 2244 if (!viewType.getLayout().isIdentity()) 2245 return op.emitError("unsupported map for result memref type ") << viewType; 2246 2247 // The base memref and the view memref should be in the same memory space. 2248 if (baseType.getMemorySpace() != viewType.getMemorySpace()) 2249 return op.emitError("different memory spaces specified for base memref " 2250 "type ") 2251 << baseType << " and view memref type " << viewType; 2252 2253 // Verify that we have the correct number of sizes for the result type. 2254 unsigned numDynamicDims = viewType.getNumDynamicDims(); 2255 if (op.sizes().size() != numDynamicDims) 2256 return op.emitError("incorrect number of size operands for type ") 2257 << viewType; 2258 2259 return success(); 2260 } 2261 2262 Value ViewOp::getViewSource() { return source(); } 2263 2264 namespace { 2265 2266 struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> { 2267 using OpRewritePattern<ViewOp>::OpRewritePattern; 2268 2269 LogicalResult matchAndRewrite(ViewOp viewOp, 2270 PatternRewriter &rewriter) const override { 2271 // Return if none of the operands are constants. 2272 if (llvm::none_of(viewOp.getOperands(), [](Value operand) { 2273 return matchPattern(operand, matchConstantIndex()); 2274 })) 2275 return failure(); 2276 2277 // Get result memref type. 2278 auto memrefType = viewOp.getType(); 2279 2280 // Get offset from old memref view type 'memRefType'. 2281 int64_t oldOffset; 2282 SmallVector<int64_t, 4> oldStrides; 2283 if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset))) 2284 return failure(); 2285 assert(oldOffset == 0 && "Expected 0 offset"); 2286 2287 SmallVector<Value, 4> newOperands; 2288 2289 // Offset cannot be folded into result type. 2290 2291 // Fold any dynamic dim operands which are produced by a constant. 
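// For example (hypothetical IR), with %c10 = arith.constant 10 : index:
// ```
//   %v = memref.view %buf[%shift][%c10, %n]
//       : memref<4096xi8> to memref<?x?xf32>
// ```
// folds to a view of type memref<10x?xf32> that keeps only %n as a size
// operand, followed by a memref.cast back to the original memref<?x?xf32>.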
2292 SmallVector<int64_t, 4> newShapeConstants; 2293 newShapeConstants.reserve(memrefType.getRank()); 2294 2295 unsigned dynamicDimPos = 0; 2296 unsigned rank = memrefType.getRank(); 2297 for (unsigned dim = 0, e = rank; dim < e; ++dim) { 2298 int64_t dimSize = memrefType.getDimSize(dim); 2299 // If this is already static dimension, keep it. 2300 if (!ShapedType::isDynamic(dimSize)) { 2301 newShapeConstants.push_back(dimSize); 2302 continue; 2303 } 2304 auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp(); 2305 if (auto constantIndexOp = 2306 dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) { 2307 // Dynamic shape dimension will be folded. 2308 newShapeConstants.push_back(constantIndexOp.value()); 2309 } else { 2310 // Dynamic shape dimension not folded; copy operand from old memref. 2311 newShapeConstants.push_back(dimSize); 2312 newOperands.push_back(viewOp.sizes()[dynamicDimPos]); 2313 } 2314 dynamicDimPos++; 2315 } 2316 2317 // Create new memref type with constant folded dims. 2318 MemRefType newMemRefType = 2319 MemRefType::Builder(memrefType).setShape(newShapeConstants); 2320 // Nothing new, don't fold. 2321 if (newMemRefType == memrefType) 2322 return failure(); 2323 2324 // Create new ViewOp. 2325 auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType, 2326 viewOp.getOperand(0), 2327 viewOp.byte_shift(), newOperands); 2328 // Insert a cast so we have the same type as the old memref type. 2329 rewriter.replaceOpWithNewOp<CastOp>(viewOp, newViewOp, viewOp.getType()); 2330 return success(); 2331 } 2332 }; 2333 2334 struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> { 2335 using OpRewritePattern<ViewOp>::OpRewritePattern; 2336 2337 LogicalResult matchAndRewrite(ViewOp viewOp, 2338 PatternRewriter &rewriter) const override { 2339 Value memrefOperand = viewOp.getOperand(0); 2340 CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>(); 2341 if (!memrefCastOp) 2342 return failure(); 2343 Value allocOperand = memrefCastOp.getOperand(); 2344 AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>(); 2345 if (!allocOp) 2346 return failure(); 2347 rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand, 2348 viewOp.byte_shift(), viewOp.sizes()); 2349 return success(); 2350 } 2351 }; 2352 2353 } // namespace 2354 2355 void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results, 2356 MLIRContext *context) { 2357 results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context); 2358 } 2359 2360 //===----------------------------------------------------------------------===// 2361 // AtomicRMWOp 2362 //===----------------------------------------------------------------------===// 2363 2364 static LogicalResult verify(AtomicRMWOp op) { 2365 if (op.getMemRefType().getRank() != op.getNumOperands() - 2) 2366 return op.emitOpError( 2367 "expects the number of subscripts to be equal to memref rank"); 2368 switch (op.kind()) { 2369 case arith::AtomicRMWKind::addf: 2370 case arith::AtomicRMWKind::maxf: 2371 case arith::AtomicRMWKind::minf: 2372 case arith::AtomicRMWKind::mulf: 2373 if (!op.value().getType().isa<FloatType>()) 2374 return op.emitOpError() 2375 << "with kind '" << arith::stringifyAtomicRMWKind(op.kind()) 2376 << "' expects a floating-point type"; 2377 break; 2378 case arith::AtomicRMWKind::addi: 2379 case arith::AtomicRMWKind::maxs: 2380 case arith::AtomicRMWKind::maxu: 2381 case arith::AtomicRMWKind::mins: 2382 case arith::AtomicRMWKind::minu: 2383 case arith::AtomicRMWKind::muli: 2384 case arith::AtomicRMWKind::ori: 2385 case 
arith::AtomicRMWKind::andi: 2386 if (!op.value().getType().isa<IntegerType>()) 2387 return op.emitOpError() 2388 << "with kind '" << arith::stringifyAtomicRMWKind(op.kind()) 2389 << "' expects an integer type"; 2390 break; 2391 default: 2392 break; 2393 } 2394 return success(); 2395 } 2396 2397 OpFoldResult AtomicRMWOp::fold(ArrayRef<Attribute> operands) { 2398 /// atomicrmw(memrefcast) -> atomicrmw 2399 if (succeeded(foldMemRefCast(*this, value()))) 2400 return getResult(); 2401 return OpFoldResult(); 2402 } 2403 2404 //===----------------------------------------------------------------------===// 2405 // TableGen'd op method definitions 2406 //===----------------------------------------------------------------------===// 2407 2408 #define GET_OP_CLASSES 2409 #include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc" 2410