1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/Arithmetic/Utils/Utils.h"
11 #include "mlir/Dialect/MemRef/IR/MemRef.h"
12 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
13 #include "mlir/Dialect/Utils/StaticValueUtils.h"
14 #include "mlir/IR/AffineMap.h"
15 #include "mlir/IR/Builders.h"
16 #include "mlir/IR/BuiltinTypes.h"
17 #include "mlir/IR/Matchers.h"
18 #include "mlir/IR/PatternMatch.h"
19 #include "mlir/IR/TypeUtilities.h"
20 #include "mlir/Interfaces/InferTypeOpInterface.h"
21 #include "mlir/Interfaces/SideEffectInterfaces.h"
22 #include "mlir/Interfaces/ViewLikeInterface.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SmallBitVector.h"
25 
26 using namespace mlir;
27 using namespace mlir::memref;
28 
29 /// Materialize a single constant operation from a given attribute value with
30 /// the desired resultant type.
31 Operation *MemRefDialect::materializeConstant(OpBuilder &builder,
32                                               Attribute value, Type type,
33                                               Location loc) {
34   if (arith::ConstantOp::isBuildableWith(value, type))
35     return builder.create<arith::ConstantOp>(loc, value, type);
36   return nullptr;
37 }
38 
39 //===----------------------------------------------------------------------===//
40 // Common canonicalization pattern support logic
41 //===----------------------------------------------------------------------===//
42 
/// This is a common helper used for patterns of the form
/// "someop(memrefcast) -> someop". It folds the source of any memref.cast
/// operand into the root operation directly.
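///
/// As an illustrative example (value names are arbitrary), a consumer such as
/// memref.dealloc that takes the result of a ranked memref.cast:
///
/// ```mlir
///   %0 = memref.cast %arg : memref<8xf32> to memref<?xf32>
///   memref.dealloc %0 : memref<?xf32>
/// ```
///
/// may be folded by this helper to:
///
/// ```mlir
///   memref.dealloc %arg : memref<8xf32>
/// ```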
46 LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) {
47   bool folded = false;
48   for (OpOperand &operand : op->getOpOperands()) {
49     auto cast = operand.get().getDefiningOp<CastOp>();
50     if (cast && operand.get() != inner &&
51         !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
52       operand.set(cast.getOperand());
53       folded = true;
54     }
55   }
56   return success(folded);
57 }
58 
59 /// Return an unranked/ranked tensor type for the given unranked/ranked memref
60 /// type.
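///
/// For example, `memref<4x?xf32>` maps to `tensor<4x?xf32>` and `memref<*xf32>`
/// maps to `tensor<*xf32>` (the layout and memory space are dropped); any other
/// type maps to `none`.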
61 Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
62   if (auto memref = type.dyn_cast<MemRefType>())
63     return RankedTensorType::get(memref.getShape(), memref.getElementType());
64   if (auto memref = type.dyn_cast<UnrankedMemRefType>())
65     return UnrankedTensorType::get(memref.getElementType());
66   return NoneType::get(type.getContext());
67 }
68 
69 //===----------------------------------------------------------------------===//
70 // AllocOp / AllocaOp
71 //===----------------------------------------------------------------------===//
72 
73 template <typename AllocLikeOp>
74 static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
75   static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
76                 "applies to only alloc or alloca");
77   auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
78   if (!memRefType)
79     return op.emitOpError("result must be a memref");
80 
81   if (static_cast<int64_t>(op.dynamicSizes().size()) !=
82       memRefType.getNumDynamicDims())
83     return op.emitOpError("dimension operand count does not equal memref "
84                           "dynamic dimension count");
85 
86   unsigned numSymbols = 0;
87   if (!memRefType.getLayout().isIdentity())
88     numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols();
89   if (op.symbolOperands().size() != numSymbols)
90     return op.emitOpError("symbol operand count does not equal memref symbol "
91                           "count: expected ")
92            << numSymbols << ", got " << op.symbolOperands().size();
93 
94   return success();
95 }
96 
97 LogicalResult AllocOp::verify() { return verifyAllocLikeOp(*this); }
98 
99 LogicalResult AllocaOp::verify() {
100   // An alloca op needs to have an ancestor with an allocation scope trait.
101   if (!(*this)->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
102     return emitOpError(
103         "requires an ancestor op with AutomaticAllocationScope trait");
104 
105   return verifyAllocLikeOp(*this);
106 }
107 
108 namespace {
/// Fold constant dimension operands into an alloc-like operation.
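///
/// As an illustrative example (values chosen arbitrarily), an allocation like:
///
/// ```mlir
///   %c8 = arith.constant 8 : index
///   %0 = memref.alloc(%c8, %n) : memref<?x?xf32>
/// ```
///
/// may be rewritten to:
///
/// ```mlir
///   %1 = memref.alloc(%n) : memref<8x?xf32>
///   %0 = memref.cast %1 : memref<8x?xf32> to memref<?x?xf32>
/// ```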
110 template <typename AllocLikeOp>
111 struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
112   using OpRewritePattern<AllocLikeOp>::OpRewritePattern;
113 
114   LogicalResult matchAndRewrite(AllocLikeOp alloc,
115                                 PatternRewriter &rewriter) const override {
    // Check to see if any dimension operands are constants.  If so, we can
    // substitute and drop them.
118     if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) {
119           return matchPattern(operand, matchConstantIndex());
120         }))
121       return failure();
122 
123     auto memrefType = alloc.getType();
124 
125     // Ok, we have one or more constant operands.  Collect the non-constant ones
126     // and keep track of the resultant memref type to build.
127     SmallVector<int64_t, 4> newShapeConstants;
128     newShapeConstants.reserve(memrefType.getRank());
129     SmallVector<Value, 4> dynamicSizes;
130 
131     unsigned dynamicDimPos = 0;
132     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
133       int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
135       if (dimSize != -1) {
136         newShapeConstants.push_back(dimSize);
137         continue;
138       }
139       auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos];
140       auto *defOp = dynamicSize.getDefiningOp();
141       if (auto constantIndexOp =
142               dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
143         // Dynamic shape dimension will be folded.
144         newShapeConstants.push_back(constantIndexOp.value());
145       } else {
146         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
147         newShapeConstants.push_back(-1);
148         dynamicSizes.push_back(dynamicSize);
149       }
150       dynamicDimPos++;
151     }
152 
153     // Create new memref type (which will have fewer dynamic dimensions).
154     MemRefType newMemRefType =
155         MemRefType::Builder(memrefType).setShape(newShapeConstants);
156     assert(static_cast<int64_t>(dynamicSizes.size()) ==
157            newMemRefType.getNumDynamicDims());
158 
159     // Create and insert the alloc op for the new memref.
160     auto newAlloc = rewriter.create<AllocLikeOp>(
161         alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(),
162         alloc.alignmentAttr());
163     // Insert a cast so we have the same type as the old alloc.
164     auto resultCast =
165         rewriter.create<CastOp>(alloc.getLoc(), alloc.getType(), newAlloc);
166 
167     rewriter.replaceOp(alloc, {resultCast});
168     return success();
169   }
170 };
171 
172 /// Fold alloc operations with no users or only store and dealloc uses.
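///
/// As an illustrative example (value names are arbitrary), the following
/// allocation is only stored into and deallocated, so all three operations may
/// be erased:
///
/// ```mlir
///   %0 = memref.alloc() : memref<4xf32>
///   memref.store %v, %0[%i] : memref<4xf32>
///   memref.dealloc %0 : memref<4xf32>
/// ```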
173 template <typename T>
174 struct SimplifyDeadAlloc : public OpRewritePattern<T> {
175   using OpRewritePattern<T>::OpRewritePattern;
176 
177   LogicalResult matchAndRewrite(T alloc,
178                                 PatternRewriter &rewriter) const override {
179     if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
180           if (auto storeOp = dyn_cast<StoreOp>(op))
181             return storeOp.value() == alloc;
182           return !isa<DeallocOp>(op);
183         }))
184       return failure();
185 
186     for (Operation *user : llvm::make_early_inc_range(alloc->getUsers()))
187       rewriter.eraseOp(user);
188 
189     rewriter.eraseOp(alloc);
190     return success();
191   }
192 };
193 } // namespace
194 
195 void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
196                                           MLIRContext *context) {
197   results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context);
198 }
199 
200 void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
201                                            MLIRContext *context) {
202   results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>(
203       context);
204 }
205 
206 //===----------------------------------------------------------------------===//
207 // AllocaScopeOp
208 //===----------------------------------------------------------------------===//
209 
210 void AllocaScopeOp::print(OpAsmPrinter &p) {
211   bool printBlockTerminators = false;
212 
213   p << ' ';
214   if (!results().empty()) {
215     p << " -> (" << getResultTypes() << ")";
216     printBlockTerminators = true;
217   }
218   p << ' ';
219   p.printRegion(bodyRegion(),
220                 /*printEntryBlockArgs=*/false,
221                 /*printBlockTerminators=*/printBlockTerminators);
222   p.printOptionalAttrDict((*this)->getAttrs());
223 }
224 
225 ParseResult AllocaScopeOp::parse(OpAsmParser &parser, OperationState &result) {
226   // Create a region for the body.
227   result.regions.reserve(1);
228   Region *bodyRegion = result.addRegion();
229 
230   // Parse optional results type list.
231   if (parser.parseOptionalArrowTypeList(result.types))
232     return failure();
233 
234   // Parse the body region.
235   if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{}))
236     return failure();
237   AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(),
238                                   result.location);
239 
240   // Parse the optional attribute list.
241   if (parser.parseOptionalAttrDict(result.attributes))
242     return failure();
243 
244   return success();
245 }
246 
247 void AllocaScopeOp::getSuccessorRegions(
248     Optional<unsigned> index, ArrayRef<Attribute> operands,
249     SmallVectorImpl<RegionSuccessor> &regions) {
250   if (index.hasValue()) {
251     regions.push_back(RegionSuccessor(getResults()));
252     return;
253   }
254 
255   regions.push_back(RegionSuccessor(&bodyRegion()));
256 }
257 
258 /// Given an operation, return whether this op is guaranteed to
/// allocate an AutomaticAllocationScopeResource.
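/// For example, memref.alloca advertises an Allocate effect on the
/// AutomaticAllocationScopeResource, whereas memref.alloc allocates on the
/// default (heap) resource and is therefore not matched here.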
260 static bool isGuaranteedAutomaticAllocation(Operation *op) {
261   MemoryEffectOpInterface interface = dyn_cast<MemoryEffectOpInterface>(op);
262   if (!interface)
263     return false;
264   for (auto res : op->getResults()) {
265     if (auto effect =
266             interface.getEffectOnValue<MemoryEffects::Allocate>(res)) {
267       if (isa<SideEffects::AutomaticAllocationScopeResource>(
268               effect->getResource()))
269         return true;
270     }
271   }
272   return false;
273 }
274 
275 /// Given an operation, return whether this op itself could
276 /// allocate an AutomaticAllocationScopeResource. Note that
277 /// this will not check whether an operation contained within
278 /// the op can allocate.
279 static bool isOpItselfPotentialAutomaticAllocation(Operation *op) {
  // An op with recursive side effects does not itself create a stack
  // allocation; any allocations inside its regions are handled separately.
282   if (op->hasTrait<OpTrait::HasRecursiveSideEffects>())
283     return false;
284   MemoryEffectOpInterface interface = dyn_cast<MemoryEffectOpInterface>(op);
285   if (!interface)
286     return true;
287   for (auto res : op->getResults()) {
288     if (auto effect =
289             interface.getEffectOnValue<MemoryEffects::Allocate>(res)) {
290       if (isa<SideEffects::AutomaticAllocationScopeResource>(
291               effect->getResource()))
292         return true;
293     }
294   }
295   return false;
296 }
297 
/// Return whether this op is the last non-terminator op in a region. That is
/// to say, it is in a single-block region and is only followed by the
/// terminator. This prevents extending the lifetime of allocations.
302 static bool lastNonTerminatorInRegion(Operation *op) {
303   return op->getNextNode() == op->getBlock()->getTerminator() &&
304          op->getParentRegion()->getBlocks().size() == 1;
305 }
306 
307 /// Inline an AllocaScopeOp if either the direct parent is an allocation scope
308 /// or it contains no allocation.
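///
/// As an illustrative example, a scope nested directly in an op that is already
/// an allocation scope (e.g. a function body):
///
/// ```mlir
///   memref.alloca_scope {
///     "some.op"() : () -> ()
///   }
/// ```
///
/// may be inlined into the parent block, leaving just:
///
/// ```mlir
///   "some.op"() : () -> ()
/// ```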
309 struct AllocaScopeInliner : public OpRewritePattern<AllocaScopeOp> {
310   using OpRewritePattern<AllocaScopeOp>::OpRewritePattern;
311 
312   LogicalResult matchAndRewrite(AllocaScopeOp op,
313                                 PatternRewriter &rewriter) const override {
314     if (!op->getParentOp()->hasTrait<OpTrait::AutomaticAllocationScope>()) {
315       bool hasPotentialAlloca =
316           op->walk([&](Operation *alloc) {
317               if (alloc == op)
318                 return WalkResult::advance();
319               if (isOpItselfPotentialAutomaticAllocation(alloc))
320                 return WalkResult::interrupt();
321               return WalkResult::advance();
322             }).wasInterrupted();
323       if (hasPotentialAlloca)
324         return failure();
325     }
326 
    // Only apply if this is the last non-terminator op in the block of a
    // single-block region (lest the lifetime of allocations be extended).
330     if (!lastNonTerminatorInRegion(op))
331       return failure();
332 
333     Block *block = &op.getRegion().front();
334     Operation *terminator = block->getTerminator();
335     ValueRange results = terminator->getOperands();
336     rewriter.mergeBlockBefore(block, op);
337     rewriter.replaceOp(op, results);
338     rewriter.eraseOp(terminator);
339     return success();
340   }
341 };
342 
343 /// Move allocations into an allocation scope, if it is legal to
344 /// move them (e.g. their operands are available at the location
345 /// the op would be moved to).
346 struct AllocaScopeHoister : public OpRewritePattern<AllocaScopeOp> {
347   using OpRewritePattern<AllocaScopeOp>::OpRewritePattern;
348 
349   LogicalResult matchAndRewrite(AllocaScopeOp op,
350                                 PatternRewriter &rewriter) const override {
351 
352     if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
353       return failure();
354 
355     Operation *lastParentWithoutScope = op->getParentOp();
356 
357     if (!lastParentWithoutScope ||
358         lastParentWithoutScope->hasTrait<OpTrait::AutomaticAllocationScope>())
359       return failure();
360 
    // Only apply if this is the last non-terminator op in the block of a
    // single-block region (lest the lifetime of allocations be extended).
364     if (!lastNonTerminatorInRegion(op) ||
365         !lastNonTerminatorInRegion(lastParentWithoutScope))
366       return failure();
367 
368     while (!lastParentWithoutScope->getParentOp()
369                 ->hasTrait<OpTrait::AutomaticAllocationScope>()) {
370       lastParentWithoutScope = lastParentWithoutScope->getParentOp();
371       if (!lastParentWithoutScope ||
372           !lastNonTerminatorInRegion(lastParentWithoutScope))
373         return failure();
374     }
375     assert(lastParentWithoutScope->getParentOp()
376                ->hasTrait<OpTrait::AutomaticAllocationScope>());
377 
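    // Find the region of `lastParentWithoutScope` that transitively contains
    // this alloca scope; allocations whose operands are defined inside that
    // region cannot be hoisted above it.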
378     Region *containingRegion = nullptr;
379     for (auto &r : lastParentWithoutScope->getRegions()) {
380       if (r.isAncestor(op->getParentRegion())) {
381         assert(containingRegion == nullptr &&
382                "only one region can contain the op");
383         containingRegion = &r;
384       }
385     }
386     assert(containingRegion && "op must be contained in a region");
387 
388     SmallVector<Operation *> toHoist;
389     op->walk([&](Operation *alloc) {
390       if (!isGuaranteedAutomaticAllocation(alloc))
391         return WalkResult::skip();
392 
393       // If any operand is not defined before the location of
394       // lastParentWithoutScope (i.e. where we would hoist to), skip.
395       if (llvm::any_of(alloc->getOperands(), [&](Value v) {
396             return containingRegion->isAncestor(v.getParentRegion());
397           }))
398         return WalkResult::skip();
399       toHoist.push_back(alloc);
400       return WalkResult::advance();
401     });
402 
403     if (toHoist.empty())
404       return failure();
405     rewriter.setInsertionPoint(lastParentWithoutScope);
406     for (auto *op : toHoist) {
407       auto *cloned = rewriter.clone(*op);
408       rewriter.replaceOp(op, cloned->getResults());
409     }
410     return success();
411   }
412 };
413 
414 void AllocaScopeOp::getCanonicalizationPatterns(RewritePatternSet &results,
415                                                 MLIRContext *context) {
416   results.add<AllocaScopeInliner, AllocaScopeHoister>(context);
417 }
418 
419 //===----------------------------------------------------------------------===//
420 // AssumeAlignmentOp
421 //===----------------------------------------------------------------------===//
422 
423 LogicalResult AssumeAlignmentOp::verify() {
424   if (!llvm::isPowerOf2_32(alignment()))
425     return emitOpError("alignment must be power of 2");
426   return success();
427 }
428 
429 //===----------------------------------------------------------------------===//
430 // CastOp
431 //===----------------------------------------------------------------------===//
432 
/// Determines whether MemRef_CastOp casts to a more dynamic version of the
/// source memref. This is useful to fold a memref.cast into a consuming op
/// and implement canonicalization patterns for ops in different dialects that
/// may consume the results of memref.cast operations. Such foldable memref.cast
/// operations are typically inserted as `view` and `subview` ops are
/// canonicalized, to preserve the type compatibility of their uses.
///
/// Returns true when all conditions are met:
/// 1. source and result are ranked memrefs with strided semantics, the same
///    element type, and the same rank.
/// 2. each of the source's sizes, offset, and strides has at least as much
///    static information as the corresponding size, offset, or stride of the
///    result.
445 ///
446 /// Example 1:
447 /// ```mlir
448 ///   %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
449 ///   %2 = consumer %1 ... : memref<?x?xf32> ...
450 /// ```
451 ///
452 /// may fold into:
453 ///
454 /// ```mlir
455 ///   %2 = consumer %0 ... : memref<8x16xf32> ...
456 /// ```
457 ///
458 /// Example 2:
459 /// ```
460 ///   %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
461 ///          to memref<?x?xf32>
462 ///   consumer %1 : memref<?x?xf32> ...
463 /// ```
464 ///
465 /// may fold into:
466 ///
467 /// ```
468 ///   consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
469 /// ```
470 bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
471   MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
472   MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();
473 
474   // Requires ranked MemRefType.
475   if (!sourceType || !resultType)
476     return false;
477 
478   // Requires same elemental type.
479   if (sourceType.getElementType() != resultType.getElementType())
480     return false;
481 
482   // Requires same rank.
483   if (sourceType.getRank() != resultType.getRank())
484     return false;
485 
486   // Only fold casts between strided memref forms.
487   int64_t sourceOffset, resultOffset;
488   SmallVector<int64_t, 4> sourceStrides, resultStrides;
489   if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
490       failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
491     return false;
492 
493   // If cast is towards more static sizes along any dimension, don't fold.
494   for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
495     auto ss = std::get<0>(it), st = std::get<1>(it);
496     if (ss != st)
497       if (ShapedType::isDynamic(ss) && !ShapedType::isDynamic(st))
498         return false;
499   }
500 
  // If cast is towards a more static offset, don't fold.
502   if (sourceOffset != resultOffset)
503     if (ShapedType::isDynamicStrideOrOffset(sourceOffset) &&
504         !ShapedType::isDynamicStrideOrOffset(resultOffset))
505       return false;
506 
507   // If cast is towards more static strides along any dimension, don't fold.
508   for (auto it : llvm::zip(sourceStrides, resultStrides)) {
509     auto ss = std::get<0>(it), st = std::get<1>(it);
510     if (ss != st)
511       if (ShapedType::isDynamicStrideOrOffset(ss) &&
512           !ShapedType::isDynamicStrideOrOffset(st))
513         return false;
514   }
515 
516   return true;
517 }
518 
519 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
520   if (inputs.size() != 1 || outputs.size() != 1)
521     return false;
522   Type a = inputs.front(), b = outputs.front();
523   auto aT = a.dyn_cast<MemRefType>();
524   auto bT = b.dyn_cast<MemRefType>();
525 
526   auto uaT = a.dyn_cast<UnrankedMemRefType>();
527   auto ubT = b.dyn_cast<UnrankedMemRefType>();
528 
529   if (aT && bT) {
530     if (aT.getElementType() != bT.getElementType())
531       return false;
532     if (aT.getLayout() != bT.getLayout()) {
533       int64_t aOffset, bOffset;
534       SmallVector<int64_t, 4> aStrides, bStrides;
535       if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
536           failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
537           aStrides.size() != bStrides.size())
538         return false;
539 
540       // Strides along a dimension/offset are compatible if the value in the
541       // source memref is static and the value in the target memref is the
542       // same. They are also compatible if either one is dynamic (see
543       // description of MemRefCastOp for details).
544       auto checkCompatible = [](int64_t a, int64_t b) {
545         return (a == MemRefType::getDynamicStrideOrOffset() ||
546                 b == MemRefType::getDynamicStrideOrOffset() || a == b);
547       };
548       if (!checkCompatible(aOffset, bOffset))
549         return false;
550       for (const auto &aStride : enumerate(aStrides))
551         if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
552           return false;
553     }
554     if (aT.getMemorySpace() != bT.getMemorySpace())
555       return false;
556 
557     // They must have the same rank, and any specified dimensions must match.
558     if (aT.getRank() != bT.getRank())
559       return false;
560 
561     for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
562       int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
563       if (aDim != -1 && bDim != -1 && aDim != bDim)
564         return false;
565     }
566     return true;
567   } else {
568     if (!aT && !uaT)
569       return false;
570     if (!bT && !ubT)
571       return false;
572     // Unranked to unranked casting is unsupported
573     if (uaT && ubT)
574       return false;
575 
576     auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
577     auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
578     if (aEltType != bEltType)
579       return false;
580 
581     auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
582     auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
583     return aMemSpace == bMemSpace;
584   }
585 
586   return false;
587 }
588 
589 OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
590   return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
591 }
592 
593 //===----------------------------------------------------------------------===//
594 // CopyOp
595 //===----------------------------------------------------------------------===//
596 
597 namespace {
598 /// If the source/target of a CopyOp is a CastOp that does not modify the shape
599 /// and element type, the cast can be skipped. Such CastOps only cast the layout
600 /// of the type.
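///
/// As an illustrative example (the strided layout is arbitrary), the cast in:
///
/// ```mlir
///   %0 = memref.cast %a
///     : memref<8xf32> to memref<8xf32, affine_map<(d0)[s0] -> (d0 + s0)>>
///   memref.copy %0, %b
///     : memref<8xf32, affine_map<(d0)[s0] -> (d0 + s0)>> to memref<8xf32>
/// ```
///
/// may be skipped so that the copy reads directly from %a.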
601 struct FoldCopyOfCast : public OpRewritePattern<CopyOp> {
602   using OpRewritePattern<CopyOp>::OpRewritePattern;
603 
604   LogicalResult matchAndRewrite(CopyOp copyOp,
605                                 PatternRewriter &rewriter) const override {
606     bool modified = false;
607 
608     // Check source.
609     if (auto castOp = copyOp.source().getDefiningOp<CastOp>()) {
      auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
      auto toType = copyOp.source().getType().dyn_cast<MemRefType>();
612 
613       if (fromType && toType) {
614         if (fromType.getShape() == toType.getShape() &&
615             fromType.getElementType() == toType.getElementType()) {
616           rewriter.updateRootInPlace(
617               copyOp, [&] { copyOp.sourceMutable().assign(castOp.source()); });
618           modified = true;
619         }
620       }
621     }
622 
623     // Check target.
624     if (auto castOp = copyOp.target().getDefiningOp<CastOp>()) {
      auto fromType = castOp.source().getType().dyn_cast<MemRefType>();
      auto toType = copyOp.target().getType().dyn_cast<MemRefType>();
627 
628       if (fromType && toType) {
629         if (fromType.getShape() == toType.getShape() &&
630             fromType.getElementType() == toType.getElementType()) {
631           rewriter.updateRootInPlace(
632               copyOp, [&] { copyOp.targetMutable().assign(castOp.source()); });
633           modified = true;
634         }
635       }
636     }
637 
638     return success(modified);
639   }
640 };
641 
642 /// Fold memref.copy(%x, %x).
643 struct FoldSelfCopy : public OpRewritePattern<CopyOp> {
644   using OpRewritePattern<CopyOp>::OpRewritePattern;
645 
646   LogicalResult matchAndRewrite(CopyOp copyOp,
647                                 PatternRewriter &rewriter) const override {
648     if (copyOp.source() != copyOp.target())
649       return failure();
650 
651     rewriter.eraseOp(copyOp);
652     return success();
653   }
654 };
655 } // namespace
656 
657 void CopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
658                                          MLIRContext *context) {
659   results.add<FoldCopyOfCast, FoldSelfCopy>(context);
660 }
661 
662 LogicalResult CopyOp::fold(ArrayRef<Attribute> cstOperands,
663                            SmallVectorImpl<OpFoldResult> &results) {
664   /// copy(memrefcast) -> copy
665   bool folded = false;
666   Operation *op = *this;
667   for (OpOperand &operand : op->getOpOperands()) {
668     auto castOp = operand.get().getDefiningOp<memref::CastOp>();
669     if (castOp && memref::CastOp::canFoldIntoConsumerOp(castOp)) {
670       operand.set(castOp.getOperand());
671       folded = true;
672     }
673   }
674   return success(folded);
675 }
676 
677 //===----------------------------------------------------------------------===//
678 // DeallocOp
679 //===----------------------------------------------------------------------===//
680 
681 LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
682                               SmallVectorImpl<OpFoldResult> &results) {
683   /// dealloc(memrefcast) -> dealloc
684   return foldMemRefCast(*this);
685 }
686 
687 //===----------------------------------------------------------------------===//
688 // DimOp
689 //===----------------------------------------------------------------------===//
690 
691 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
692                   int64_t index) {
693   auto loc = result.location;
694   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
695   build(builder, result, source, indexValue);
696 }
697 
698 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
699                   Value index) {
700   auto indexTy = builder.getIndexType();
701   build(builder, result, indexTy, source, index);
702 }
703 
704 Optional<int64_t> DimOp::getConstantIndex() {
705   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
706     return constantOp.getValue().cast<IntegerAttr>().getInt();
707   return {};
708 }
709 
710 LogicalResult DimOp::verify() {
711   // Assume unknown index to be in range.
712   Optional<int64_t> index = getConstantIndex();
713   if (!index.hasValue())
714     return success();
715 
716   // Check that constant index is not knowingly out of range.
717   auto type = source().getType();
718   if (auto memrefType = type.dyn_cast<MemRefType>()) {
719     if (index.getValue() >= memrefType.getRank())
720       return emitOpError("index is out of range");
721   } else if (type.isa<UnrankedMemRefType>()) {
722     // Assume index to be in range.
723   } else {
724     llvm_unreachable("expected operand with memref type");
725   }
726   return success();
727 }
728 
/// Return a map whose keys are the elements of `vals` and whose values are the
/// number of occurrences of each element. Use std::map, since the `vals` here
/// are strides and the dynamic stride value is the same as the tombstone value
/// for `DenseMap<int64_t>`.
733 static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
734   std::map<int64_t, unsigned> numOccurences;
735   for (auto val : vals)
736     numOccurences[val]++;
737   return numOccurences;
738 }
739 
/// Given an `originalType` and a `reducedType` whose shape is assumed to be a
/// subset of `originalType` with some `1` entries erased, return the set of
/// indices that specifies which of the entries of the original shape are
/// dropped to obtain the reduced shape.
/// This accounts for cases where there are multiple unit-dims, but only a
/// subset of those are dropped. For MemRefTypes these can be disambiguated
/// using the strides. If a dimension is dropped, its stride must be dropped
/// too.
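///
/// As an illustrative example, reducing `memref<1x4x1xf32>` (with subview sizes
/// `[1, 4, 1]`) to `memref<4xf32>` drops both unit dimensions, so the returned
/// mask is {0, 2}; reducing it to `memref<4x1xf32>` drops only one of them, and
/// the strides are used to disambiguate which one.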
747 static llvm::Optional<llvm::SmallBitVector>
748 computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
749                                ArrayRef<OpFoldResult> sizes) {
750   llvm::SmallBitVector unusedDims(originalType.getRank());
751   if (originalType.getRank() == reducedType.getRank())
752     return unusedDims;
753 
754   for (const auto &dim : llvm::enumerate(sizes))
755     if (auto attr = dim.value().dyn_cast<Attribute>())
756       if (attr.cast<IntegerAttr>().getInt() == 1)
757         unusedDims.set(dim.index());
758 
759   SmallVector<int64_t> originalStrides, candidateStrides;
760   int64_t originalOffset, candidateOffset;
761   if (failed(
762           getStridesAndOffset(originalType, originalStrides, originalOffset)) ||
763       failed(
764           getStridesAndOffset(reducedType, candidateStrides, candidateOffset)))
765     return llvm::None;
766 
  // For memrefs, a dimension is truly dropped if its corresponding stride is
  // also dropped. This is particularly important when more than one of the
  // dims is 1. Track the number of occurrences of the strides in the original
  // type and the candidate type. For each unused dim, that stride should not
  // be present in the candidate type. Note that there could be multiple
  // dimensions that have the same size. We don't need to figure out exactly
  // which dim corresponds to which stride; we just need to verify that the
  // number of repetitions of a stride in the original + the number of unused
  // dims with that stride == the number of repetitions of that stride in the
  // candidate.
776   std::map<int64_t, unsigned> currUnaccountedStrides =
777       getNumOccurences(originalStrides);
778   std::map<int64_t, unsigned> candidateStridesNumOccurences =
779       getNumOccurences(candidateStrides);
780   for (size_t dim = 0, e = unusedDims.size(); dim != e; ++dim) {
781     if (!unusedDims.test(dim))
782       continue;
783     int64_t originalStride = originalStrides[dim];
784     if (currUnaccountedStrides[originalStride] >
785         candidateStridesNumOccurences[originalStride]) {
786       // This dim can be treated as dropped.
787       currUnaccountedStrides[originalStride]--;
788       continue;
789     }
790     if (currUnaccountedStrides[originalStride] ==
791         candidateStridesNumOccurences[originalStride]) {
792       // The stride for this is not dropped. Keep as is.
793       unusedDims.reset(dim);
794       continue;
795     }
796     if (currUnaccountedStrides[originalStride] <
797         candidateStridesNumOccurences[originalStride]) {
      // This should never happen. We can't have a stride in the reduced-rank
      // type that wasn't in the original one.
800       return llvm::None;
801     }
802   }
803 
804   if ((int64_t)unusedDims.count() + reducedType.getRank() !=
805       originalType.getRank())
806     return llvm::None;
807   return unusedDims;
808 }
809 
810 llvm::SmallBitVector SubViewOp::getDroppedDims() {
811   MemRefType sourceType = getSourceType();
812   MemRefType resultType = getType();
813   llvm::Optional<llvm::SmallBitVector> unusedDims =
814       computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
815   assert(unusedDims && "unable to find unused dims of subview");
816   return *unusedDims;
817 }
818 
819 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
820   // All forms of folding require a known index.
821   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
822   if (!index)
823     return {};
824 
825   // Folding for unranked types (UnrankedMemRefType) is not supported.
826   auto memrefType = source().getType().dyn_cast<MemRefType>();
827   if (!memrefType)
828     return {};
829 
830   // Fold if the shape extent along the given index is known.
831   if (!memrefType.isDynamicDim(index.getInt())) {
832     Builder builder(getContext());
833     return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
834   }
835 
836   // The size at the given index is now known to be a dynamic size.
837   unsigned unsignedIndex = index.getValue().getZExtValue();
838 
  // Fold dim to the size argument for an `AllocOp`, `AllocaOp`, `ViewOp`, or
  // `SubViewOp`.
840   Operation *definingOp = source().getDefiningOp();
841 
842   if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
843     return *(alloc.getDynamicSizes().begin() +
844              memrefType.getDynamicDimIndex(unsignedIndex));
845 
846   if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp))
847     return *(alloca.getDynamicSizes().begin() +
848              memrefType.getDynamicDimIndex(unsignedIndex));
849 
850   if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
851     return *(view.getDynamicSizes().begin() +
852              memrefType.getDynamicDimIndex(unsignedIndex));
853 
854   if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) {
855     llvm::SmallBitVector unusedDims = subview.getDroppedDims();
856     unsigned resultIndex = 0;
857     unsigned sourceRank = subview.getSourceType().getRank();
858     unsigned sourceIndex = 0;
859     for (auto i : llvm::seq<unsigned>(0, sourceRank)) {
860       if (unusedDims.test(i))
861         continue;
862       if (resultIndex == unsignedIndex) {
863         sourceIndex = i;
864         break;
865       }
866       resultIndex++;
867     }
868     assert(subview.isDynamicSize(sourceIndex) &&
869            "expected dynamic subview size");
870     return subview.getDynamicSize(sourceIndex);
871   }
872 
873   if (auto sizeInterface =
874           dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
875     assert(sizeInterface.isDynamicSize(unsignedIndex) &&
876            "Expected dynamic subview size");
877     return sizeInterface.getDynamicSize(unsignedIndex);
878   }
879 
880   // dim(memrefcast) -> dim
881   if (succeeded(foldMemRefCast(*this)))
882     return getResult();
883 
884   return {};
885 }
886 
887 namespace {
/// Fold a dim of a memref reshape operation to a load from the reshape's shape
/// operand.
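///
/// As an illustrative example (the shape operand has element type index):
///
/// ```mlir
///   %r = memref.reshape %src(%shape)
///     : (memref<?x?xf32>, memref<2xindex>) -> memref<?x?xf32>
///   %d = memref.dim %r, %c1 : memref<?x?xf32>
/// ```
///
/// may be rewritten to:
///
/// ```mlir
///   %d = memref.load %shape[%c1] : memref<2xindex>
/// ```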
890 struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
891   using OpRewritePattern<DimOp>::OpRewritePattern;
892 
893   LogicalResult matchAndRewrite(DimOp dim,
894                                 PatternRewriter &rewriter) const override {
895     auto reshape = dim.source().getDefiningOp<ReshapeOp>();
896 
897     if (!reshape)
898       return failure();
899 
900     // Place the load directly after the reshape to ensure that the shape memref
901     // was not mutated.
902     rewriter.setInsertionPointAfter(reshape);
903     Location loc = dim.getLoc();
904     Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index());
905     if (load.getType() != dim.getType())
906       load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load);
907     rewriter.replaceOp(dim, load);
908     return success();
909   }
910 };
911 
912 } // namespace
913 
914 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
915                                         MLIRContext *context) {
916   results.add<DimOfMemRefReshape>(context);
917 }
918 
919 // ---------------------------------------------------------------------------
920 // DmaStartOp
921 // ---------------------------------------------------------------------------
922 
923 void DmaStartOp::build(OpBuilder &builder, OperationState &result,
924                        Value srcMemRef, ValueRange srcIndices, Value destMemRef,
925                        ValueRange destIndices, Value numElements,
926                        Value tagMemRef, ValueRange tagIndices, Value stride,
927                        Value elementsPerStride) {
928   result.addOperands(srcMemRef);
929   result.addOperands(srcIndices);
930   result.addOperands(destMemRef);
931   result.addOperands(destIndices);
932   result.addOperands({numElements, tagMemRef});
933   result.addOperands(tagIndices);
934   if (stride)
935     result.addOperands({stride, elementsPerStride});
936 }
937 
938 void DmaStartOp::print(OpAsmPrinter &p) {
939   p << " " << getSrcMemRef() << '[' << getSrcIndices() << "], "
940     << getDstMemRef() << '[' << getDstIndices() << "], " << getNumElements()
941     << ", " << getTagMemRef() << '[' << getTagIndices() << ']';
942   if (isStrided())
943     p << ", " << getStride() << ", " << getNumElementsPerStride();
944 
945   p.printOptionalAttrDict((*this)->getAttrs());
946   p << " : " << getSrcMemRef().getType() << ", " << getDstMemRef().getType()
947     << ", " << getTagMemRef().getType();
948 }
949 
// Parse DmaStartOp.
// Ex:
//   memref.dma_start %src[%i, %j], %dst[%k, %l], %size,
//                    %tag[%index], %stride, %num_elt_per_stride
//                      : memref<3076 x f32, 0>,
//                        memref<1024 x f32, 2>,
//                        memref<1 x i32>
//
958 ParseResult DmaStartOp::parse(OpAsmParser &parser, OperationState &result) {
959   OpAsmParser::OperandType srcMemRefInfo;
960   SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
961   OpAsmParser::OperandType dstMemRefInfo;
962   SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
963   OpAsmParser::OperandType numElementsInfo;
964   OpAsmParser::OperandType tagMemrefInfo;
965   SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
966   SmallVector<OpAsmParser::OperandType, 2> strideInfo;
967 
968   SmallVector<Type, 3> types;
969   auto indexType = parser.getBuilder().getIndexType();
970 
971   // Parse and resolve the following list of operands:
972   // *) source memref followed by its indices (in square brackets).
973   // *) destination memref followed by its indices (in square brackets).
  // *) dma size: the number of elements to transfer.
975   if (parser.parseOperand(srcMemRefInfo) ||
976       parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
977       parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
978       parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
979       parser.parseComma() || parser.parseOperand(numElementsInfo) ||
980       parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
981       parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
982     return failure();
983 
984   // Parse optional stride and elements per stride.
985   if (parser.parseTrailingOperandList(strideInfo))
986     return failure();
987 
988   bool isStrided = strideInfo.size() == 2;
989   if (!strideInfo.empty() && !isStrided) {
990     return parser.emitError(parser.getNameLoc(),
991                             "expected two stride related operands");
992   }
993 
994   if (parser.parseColonTypeList(types))
995     return failure();
996   if (types.size() != 3)
997     return parser.emitError(parser.getNameLoc(), "fewer/more types expected");
998 
999   if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
1000       parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
1001       parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
1002       parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
1003       // size should be an index.
1004       parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
1005       parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
1006       // tag indices should be index.
1007       parser.resolveOperands(tagIndexInfos, indexType, result.operands))
1008     return failure();
1009 
1010   if (isStrided) {
1011     if (parser.resolveOperands(strideInfo, indexType, result.operands))
1012       return failure();
1013   }
1014 
1015   return success();
1016 }
1017 
1018 LogicalResult DmaStartOp::verify() {
1019   unsigned numOperands = getNumOperands();
1020 
1021   // Mandatory non-variadic operands are: src memref, dst memref, tag memref and
1022   // the number of elements.
1023   if (numOperands < 4)
1024     return emitOpError("expected at least 4 operands");
1025 
1026   // Check types of operands. The order of these calls is important: the later
1027   // calls rely on some type properties to compute the operand position.
1028   // 1. Source memref.
1029   if (!getSrcMemRef().getType().isa<MemRefType>())
1030     return emitOpError("expected source to be of memref type");
1031   if (numOperands < getSrcMemRefRank() + 4)
1032     return emitOpError() << "expected at least " << getSrcMemRefRank() + 4
1033                          << " operands";
1034   if (!getSrcIndices().empty() &&
1035       !llvm::all_of(getSrcIndices().getTypes(),
1036                     [](Type t) { return t.isIndex(); }))
1037     return emitOpError("expected source indices to be of index type");
1038 
1039   // 2. Destination memref.
1040   if (!getDstMemRef().getType().isa<MemRefType>())
1041     return emitOpError("expected destination to be of memref type");
1042   unsigned numExpectedOperands = getSrcMemRefRank() + getDstMemRefRank() + 4;
1043   if (numOperands < numExpectedOperands)
1044     return emitOpError() << "expected at least " << numExpectedOperands
1045                          << " operands";
1046   if (!getDstIndices().empty() &&
1047       !llvm::all_of(getDstIndices().getTypes(),
1048                     [](Type t) { return t.isIndex(); }))
1049     return emitOpError("expected destination indices to be of index type");
1050 
1051   // 3. Number of elements.
1052   if (!getNumElements().getType().isIndex())
1053     return emitOpError("expected num elements to be of index type");
1054 
1055   // 4. Tag memref.
1056   if (!getTagMemRef().getType().isa<MemRefType>())
1057     return emitOpError("expected tag to be of memref type");
1058   numExpectedOperands += getTagMemRefRank();
1059   if (numOperands < numExpectedOperands)
1060     return emitOpError() << "expected at least " << numExpectedOperands
1061                          << " operands";
1062   if (!getTagIndices().empty() &&
1063       !llvm::all_of(getTagIndices().getTypes(),
1064                     [](Type t) { return t.isIndex(); }))
1065     return emitOpError("expected tag indices to be of index type");
1066 
1067   // Optional stride-related operands must be either both present or both
1068   // absent.
1069   if (numOperands != numExpectedOperands &&
1070       numOperands != numExpectedOperands + 2)
1071     return emitOpError("incorrect number of operands");
1072 
1073   // 5. Strides.
1074   if (isStrided()) {
1075     if (!getStride().getType().isIndex() ||
1076         !getNumElementsPerStride().getType().isIndex())
1077       return emitOpError(
1078           "expected stride and num elements per stride to be of type index");
1079   }
1080 
1081   return success();
1082 }
1083 
1084 LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
1085                                SmallVectorImpl<OpFoldResult> &results) {
1086   /// dma_start(memrefcast) -> dma_start
1087   return foldMemRefCast(*this);
1088 }
1089 
1090 // ---------------------------------------------------------------------------
1091 // DmaWaitOp
1092 // ---------------------------------------------------------------------------
1093 
1094 LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
1095                               SmallVectorImpl<OpFoldResult> &results) {
1096   /// dma_wait(memrefcast) -> dma_wait
1097   return foldMemRefCast(*this);
1098 }
1099 
1100 LogicalResult DmaWaitOp::verify() {
1101   // Check that the number of tag indices matches the tagMemRef rank.
1102   unsigned numTagIndices = tagIndices().size();
1103   unsigned tagMemRefRank = getTagMemRefRank();
1104   if (numTagIndices != tagMemRefRank)
1105     return emitOpError() << "expected tagIndices to have the same number of "
1106                             "elements as the tagMemRef rank, expected "
1107                          << tagMemRefRank << ", but got " << numTagIndices;
1108   return success();
1109 }
1110 
1111 //===----------------------------------------------------------------------===//
1112 // GenericAtomicRMWOp
1113 //===----------------------------------------------------------------------===//
1114 
1115 void GenericAtomicRMWOp::build(OpBuilder &builder, OperationState &result,
1116                                Value memref, ValueRange ivs) {
1117   result.addOperands(memref);
1118   result.addOperands(ivs);
1119 
1120   if (auto memrefType = memref.getType().dyn_cast<MemRefType>()) {
1121     Type elementType = memrefType.getElementType();
1122     result.addTypes(elementType);
1123 
1124     Region *bodyRegion = result.addRegion();
1125     bodyRegion->push_back(new Block());
1126     bodyRegion->addArgument(elementType, memref.getLoc());
1127   }
1128 }
1129 
1130 LogicalResult GenericAtomicRMWOp::verify() {
1131   auto &body = getRegion();
1132   if (body.getNumArguments() != 1)
    return emitOpError("expected a single entry block argument");
1134 
1135   if (getResult().getType() != body.getArgument(0).getType())
    return emitOpError(
        "expected block argument of the same type as the result type");
1137 
1138   bool hasSideEffects =
1139       body.walk([&](Operation *nestedOp) {
1140             if (MemoryEffectOpInterface::hasNoEffect(nestedOp))
1141               return WalkResult::advance();
1142             nestedOp->emitError(
1143                 "body of 'memref.generic_atomic_rmw' should contain "
1144                 "only operations with no side effects");
1145             return WalkResult::interrupt();
1146           })
1147           .wasInterrupted();
1148   return hasSideEffects ? failure() : success();
1149 }
1150 
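// Parse GenericAtomicRMWOp.
// Ex (illustrative):
//   %x = memref.generic_atomic_rmw %I[%i] : memref<10xf32> {
//     ^bb0(%current_value : f32):
//       %c1 = arith.constant 1.0 : f32
//       %new = arith.addf %current_value, %c1 : f32
//       memref.atomic_yield %new : f32
//   }
//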
1151 ParseResult GenericAtomicRMWOp::parse(OpAsmParser &parser,
1152                                       OperationState &result) {
1153   OpAsmParser::OperandType memref;
1154   Type memrefType;
1155   SmallVector<OpAsmParser::OperandType, 4> ivs;
1156 
1157   Type indexType = parser.getBuilder().getIndexType();
1158   if (parser.parseOperand(memref) ||
1159       parser.parseOperandList(ivs, OpAsmParser::Delimiter::Square) ||
1160       parser.parseColonType(memrefType) ||
1161       parser.resolveOperand(memref, memrefType, result.operands) ||
1162       parser.resolveOperands(ivs, indexType, result.operands))
1163     return failure();
1164 
1165   Region *body = result.addRegion();
1166   if (parser.parseRegion(*body, llvm::None, llvm::None) ||
1167       parser.parseOptionalAttrDict(result.attributes))
1168     return failure();
1169   result.types.push_back(memrefType.cast<MemRefType>().getElementType());
1170   return success();
1171 }
1172 
1173 void GenericAtomicRMWOp::print(OpAsmPrinter &p) {
1174   p << ' ' << memref() << "[" << indices() << "] : " << memref().getType()
1175     << ' ';
1176   p.printRegion(getRegion());
1177   p.printOptionalAttrDict((*this)->getAttrs());
1178 }
1179 
1180 //===----------------------------------------------------------------------===//
1181 // AtomicYieldOp
1182 //===----------------------------------------------------------------------===//
1183 
1184 LogicalResult AtomicYieldOp::verify() {
1185   Type parentType = (*this)->getParentOp()->getResultTypes().front();
1186   Type resultType = result().getType();
1187   if (parentType != resultType)
1188     return emitOpError() << "types mismatch between yield op: " << resultType
1189                          << " and its parent: " << parentType;
1190   return success();
1191 }
1192 
1193 //===----------------------------------------------------------------------===//
1194 // GlobalOp
1195 //===----------------------------------------------------------------------===//
1196 
1197 static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
1198                                                    TypeAttr type,
1199                                                    Attribute initialValue) {
1200   p << type;
1201   if (!op.isExternal()) {
1202     p << " = ";
1203     if (op.isUninitialized())
1204       p << "uninitialized";
1205     else
1206       p.printAttributeWithoutType(initialValue);
1207   }
1208 }
1209 
1210 static ParseResult
1211 parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
1212                                        Attribute &initialValue) {
1213   Type type;
1214   if (parser.parseType(type))
1215     return failure();
1216 
1217   auto memrefType = type.dyn_cast<MemRefType>();
1218   if (!memrefType || !memrefType.hasStaticShape())
1219     return parser.emitError(parser.getNameLoc())
1220            << "type should be static shaped memref, but got " << type;
1221   typeAttr = TypeAttr::get(type);
1222 
1223   if (parser.parseOptionalEqual())
1224     return success();
1225 
1226   if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
1227     initialValue = UnitAttr::get(parser.getContext());
1228     return success();
1229   }
1230 
1231   Type tensorType = getTensorTypeFromMemRefType(memrefType);
1232   if (parser.parseAttribute(initialValue, tensorType))
1233     return failure();
1234   if (!initialValue.isa<ElementsAttr>())
1235     return parser.emitError(parser.getNameLoc())
1236            << "initial value should be a unit or elements attribute";
1237   return success();
1238 }
1239 
1240 LogicalResult GlobalOp::verify() {
1241   auto memrefType = type().dyn_cast<MemRefType>();
1242   if (!memrefType || !memrefType.hasStaticShape())
1243     return emitOpError("type should be static shaped memref, but got ")
1244            << type();
1245 
1246   // Verify that the initial value, if present, is either a unit attribute or
1247   // an elements attribute.
1248   if (initial_value().hasValue()) {
1249     Attribute initValue = initial_value().getValue();
1250     if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
1251       return emitOpError("initial value should be a unit or elements "
1252                          "attribute, but got ")
1253              << initValue;
1254 
1255     // Check that the type of the initial value is compatible with the type of
1256     // the global variable.
1257     if (initValue.isa<ElementsAttr>()) {
1258       Type initType = initValue.getType();
1259       Type tensorType = getTensorTypeFromMemRefType(memrefType);
1260       if (initType != tensorType)
1261         return emitOpError("initial value expected to be of type ")
1262                << tensorType << ", but was of type " << initType;
1263     }
1264   }
1265 
1266   if (Optional<uint64_t> alignAttr = alignment()) {
1267     uint64_t alignment = alignAttr.getValue();
1268 
1269     if (!llvm::isPowerOf2_64(alignment))
1270       return emitError() << "alignment attribute value " << alignment
1271                          << " is not a power of 2";
1272   }
1273 
1274   // TODO: verify visibility for declarations.
1275   return success();
1276 }
1277 
1278 //===----------------------------------------------------------------------===//
1279 // GetGlobalOp
1280 //===----------------------------------------------------------------------===//
1281 
1282 LogicalResult
1283 GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
  // Verify that the result type is the same as the type of the referenced
1285   // memref.global op.
1286   auto global =
1287       symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr());
1288   if (!global)
1289     return emitOpError("'")
1290            << name() << "' does not reference a valid global memref";
1291 
1292   Type resultType = result().getType();
1293   if (global.type() != resultType)
1294     return emitOpError("result type ")
1295            << resultType << " does not match type " << global.type()
1296            << " of the global memref @" << name();
1297   return success();
1298 }
1299 
1300 //===----------------------------------------------------------------------===//
1301 // LoadOp
1302 //===----------------------------------------------------------------------===//
1303 
1304 LogicalResult LoadOp::verify() {
1305   if (getNumOperands() != 1 + getMemRefType().getRank())
1306     return emitOpError("incorrect number of indices for load");
1307   return success();
1308 }
1309 
1310 OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
1311   /// load(memrefcast) -> load
1312   if (succeeded(foldMemRefCast(*this)))
1313     return getResult();
1314   return OpFoldResult();
1315 }
1316 
1317 //===----------------------------------------------------------------------===//
1318 // PrefetchOp
1319 //===----------------------------------------------------------------------===//
1320 
1321 void PrefetchOp::print(OpAsmPrinter &p) {
1322   p << " " << memref() << '[';
1323   p.printOperands(indices());
1324   p << ']' << ", " << (isWrite() ? "write" : "read");
1325   p << ", locality<" << localityHint();
1326   p << ">, " << (isDataCache() ? "data" : "instr");
1327   p.printOptionalAttrDict(
1328       (*this)->getAttrs(),
1329       /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
1330   p << " : " << getMemRefType();
1331 }
1332 
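// Parse PrefetchOp.
// Ex (illustrative):
//   memref.prefetch %0[%i, %j], read, locality<3>, data : memref<400x400xf32>
//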
1333 ParseResult PrefetchOp::parse(OpAsmParser &parser, OperationState &result) {
1334   OpAsmParser::OperandType memrefInfo;
1335   SmallVector<OpAsmParser::OperandType, 4> indexInfo;
1336   IntegerAttr localityHint;
1337   MemRefType type;
1338   StringRef readOrWrite, cacheType;
1339 
1340   auto indexTy = parser.getBuilder().getIndexType();
1341   auto i32Type = parser.getBuilder().getIntegerType(32);
1342   if (parser.parseOperand(memrefInfo) ||
1343       parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
1344       parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
1345       parser.parseComma() || parser.parseKeyword("locality") ||
1346       parser.parseLess() ||
1347       parser.parseAttribute(localityHint, i32Type, "localityHint",
1348                             result.attributes) ||
1349       parser.parseGreater() || parser.parseComma() ||
1350       parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
1351       parser.resolveOperand(memrefInfo, type, result.operands) ||
1352       parser.resolveOperands(indexInfo, indexTy, result.operands))
1353     return failure();
1354 
1355   if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
1356     return parser.emitError(parser.getNameLoc(),
1357                             "rw specifier has to be 'read' or 'write'");
1358   result.addAttribute(
1359       PrefetchOp::getIsWriteAttrName(),
1360       parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));
1361 
1362   if (!cacheType.equals("data") && !cacheType.equals("instr"))
1363     return parser.emitError(parser.getNameLoc(),
1364                             "cache type has to be 'data' or 'instr'");
1365 
1366   result.addAttribute(
1367       PrefetchOp::getIsDataCacheAttrName(),
1368       parser.getBuilder().getBoolAttr(cacheType.equals("data")));
1369 
1370   return success();
1371 }
1372 
1373 LogicalResult PrefetchOp::verify() {
1374   if (getNumOperands() != 1 + getMemRefType().getRank())
    return emitOpError("incorrect number of indices");
1376 
1377   return success();
1378 }
1379 
1380 LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
1381                                SmallVectorImpl<OpFoldResult> &results) {
1382   // prefetch(memrefcast) -> prefetch
1383   return foldMemRefCast(*this);
1384 }
1385 
1386 //===----------------------------------------------------------------------===//
1387 // RankOp
1388 //===----------------------------------------------------------------------===//
1389 
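// For example (illustrative), `memref.rank %m : memref<4x?xf32>` folds to the
// index constant 2, since the operand type is ranked.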
1390 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
1391   // Constant fold rank when the rank of the operand is known.
1392   auto type = getOperand().getType();
1393   auto shapedType = type.dyn_cast<ShapedType>();
1394   if (shapedType && shapedType.hasRank())
1395     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
1396   return IntegerAttr();
1397 }
1398 
1399 //===----------------------------------------------------------------------===//
1400 // ReinterpretCastOp
1401 //===----------------------------------------------------------------------===//
1402 
1403 /// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`,
1404 /// `staticSizes` and `staticStrides` are automatically filled with
1405 /// source-memref-rank sentinel values that encode dynamic entries.
1406 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1407                               MemRefType resultType, Value source,
1408                               OpFoldResult offset, ArrayRef<OpFoldResult> sizes,
1409                               ArrayRef<OpFoldResult> strides,
1410                               ArrayRef<NamedAttribute> attrs) {
1411   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1412   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1413   dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
1414                              ShapedType::kDynamicStrideOrOffset);
1415   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1416                              ShapedType::kDynamicSize);
1417   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1418                              ShapedType::kDynamicStrideOrOffset);
1419   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1420         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1421         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1422   result.addAttributes(attrs);
1423 }
1424 
1425 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1426                               MemRefType resultType, Value source,
1427                               int64_t offset, ArrayRef<int64_t> sizes,
1428                               ArrayRef<int64_t> strides,
1429                               ArrayRef<NamedAttribute> attrs) {
1430   SmallVector<OpFoldResult> sizeValues =
1431       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1432         return b.getI64IntegerAttr(v);
1433       }));
1434   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1435       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1436         return b.getI64IntegerAttr(v);
1437       }));
1438   build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
1439         strideValues, attrs);
1440 }
1441 
1442 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1443                               MemRefType resultType, Value source, Value offset,
1444                               ValueRange sizes, ValueRange strides,
1445                               ArrayRef<NamedAttribute> attrs) {
1446   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1447       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1448   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1449       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1450   build(b, result, resultType, source, offset, sizeValues, strideValues, attrs);
1451 }
1452 
1453 // TODO: ponder whether we want to allow missing trailing sizes/strides that are
1454 // completed automatically, like we have for subview and extract_slice.
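// As an illustrative example (names are hypothetical), the following op
// verifies because the static offset/sizes/strides agree with the strided
// form of the result type:
//   memref.reinterpret_cast %src to offset: [0], sizes: [10, 2], strides: [2, 1]
//       : memref<?x?xf32> to memref<10x2xf32>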
1455 LogicalResult ReinterpretCastOp::verify() {
1456   // The source and result memrefs should be in the same memory space.
1457   auto srcType = source().getType().cast<BaseMemRefType>();
1458   auto resultType = getType().cast<MemRefType>();
1459   if (srcType.getMemorySpace() != resultType.getMemorySpace())
1460     return emitError("different memory spaces specified for source type ")
1461            << srcType << " and result memref type " << resultType;
1462   if (srcType.getElementType() != resultType.getElementType())
1463     return emitError("different element types specified for source type ")
1464            << srcType << " and result memref type " << resultType;
1465 
1466   // Match sizes in result memref type and in static_sizes attribute.
1467   for (auto &en : llvm::enumerate(llvm::zip(
1468            resultType.getShape(), extractFromI64ArrayAttr(static_sizes())))) {
1469     int64_t resultSize = std::get<0>(en.value());
1470     int64_t expectedSize = std::get<1>(en.value());
1471     if (!ShapedType::isDynamic(resultSize) &&
1472         !ShapedType::isDynamic(expectedSize) && resultSize != expectedSize)
1473       return emitError("expected result type with size = ")
1474              << expectedSize << " instead of " << resultSize
1475              << " in dim = " << en.index();
1476   }
1477 
  // Match the offset and strides against the static_offsets and static_strides
  // attributes. If the result memref type has no affine map specified, assume
  // an identity layout.
1481   int64_t resultOffset;
1482   SmallVector<int64_t, 4> resultStrides;
1483   if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
1484     return emitError("expected result type to have strided layout but found ")
1485            << resultType;
1486 
1487   // Match offset in result memref type and in static_offsets attribute.
1488   int64_t expectedOffset = extractFromI64ArrayAttr(static_offsets()).front();
1489   if (!ShapedType::isDynamicStrideOrOffset(resultOffset) &&
1490       !ShapedType::isDynamicStrideOrOffset(expectedOffset) &&
1491       resultOffset != expectedOffset)
1492     return emitError("expected result type with offset = ")
1493            << resultOffset << " instead of " << expectedOffset;
1494 
1495   // Match strides in result memref type and in static_strides attribute.
1496   for (auto &en : llvm::enumerate(llvm::zip(
1497            resultStrides, extractFromI64ArrayAttr(static_strides())))) {
1498     int64_t resultStride = std::get<0>(en.value());
1499     int64_t expectedStride = std::get<1>(en.value());
1500     if (!ShapedType::isDynamicStrideOrOffset(resultStride) &&
1501         !ShapedType::isDynamicStrideOrOffset(expectedStride) &&
1502         resultStride != expectedStride)
1503       return emitError("expected result type with stride = ")
1504              << expectedStride << " instead of " << resultStride
1505              << " in dim = " << en.index();
1506   }
1507 
1508   return success();
1509 }
1510 
1511 OpFoldResult ReinterpretCastOp::fold(ArrayRef<Attribute> /*operands*/) {
1512   Value src = source();
1513   auto getPrevSrc = [&]() -> Value {
1514     // reinterpret_cast(reinterpret_cast(x)) -> reinterpret_cast(x).
1515     if (auto prev = src.getDefiningOp<ReinterpretCastOp>())
1516       return prev.source();
1517 
1518     // reinterpret_cast(cast(x)) -> reinterpret_cast(x).
1519     if (auto prev = src.getDefiningOp<CastOp>())
1520       return prev.source();
1521 
1522     // reinterpret_cast(subview(x)) -> reinterpret_cast(x) if subview offsets
1523     // are 0.
1524     if (auto prev = src.getDefiningOp<SubViewOp>())
1525       if (llvm::all_of(prev.getMixedOffsets(), [](OpFoldResult val) {
1526             return isConstantIntValue(val, 0);
1527           }))
1528         return prev.source();
1529 
1530     return nullptr;
1531   };
1532 
1533   if (auto prevSrc = getPrevSrc()) {
1534     sourceMutable().assign(prevSrc);
1535     return getResult();
1536   }
1537 
1538   return nullptr;
1539 }
1540 
1541 //===----------------------------------------------------------------------===//
1542 // Reassociative reshape ops
1543 //===----------------------------------------------------------------------===//
1544 
1545 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
1546   return getSymbolLessAffineMaps(getReassociationExprs());
1547 }
1548 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
1549   return convertReassociationIndicesToExprs(getContext(),
1550                                             getReassociationIndices());
1551 }
1552 
1553 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
1554   return getSymbolLessAffineMaps(getReassociationExprs());
1555 }
1556 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
1557   return convertReassociationIndicesToExprs(getContext(),
1558                                             getReassociationIndices());
1559 }
1560 
1561 /// Detect whether memref dims [dim, dim + extent) can be reshaped without
1562 /// copies.
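/// For example (illustrative numbers): sizes = [4, 8, 2] with strides
/// [16, 2, 1] form a reshapable band starting at dim 0 with extent 3, since
/// 16 == 2 * 8 and 2 == 1 * 2.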
1563 static bool isReshapableDimBand(unsigned dim, unsigned extent,
1564                                 ArrayRef<int64_t> sizes,
1565                                 ArrayRef<AffineExpr> strides) {
  // Bands of extent one are trivially reshapable: nothing actually changes.
1567   if (extent == 1)
1568     return true;
1569   // Otherwise, the size of the first dimension needs to be known.
1570   if (ShapedType::isDynamic(sizes[dim]))
1571     return false;
1572   assert(sizes.size() == strides.size() && "mismatched ranks");
  // Iterate with `idx + 1 < e` so that the `idx + 1` accesses below stay in
  // bounds.
1575   for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) {
1576     // Only bands of static shapes are reshapable. This is due to the fact that
1577     // there is no relation between dynamic sizes and dynamic strides: we do not
1578     // have enough information to know whether a "-1" size corresponds to the
1579     // proper symbol in the AffineExpr of a stride.
1580     if (ShapedType::isDynamic(sizes[idx + 1]))
1581       return false;
1582     // TODO: Refine this by passing the proper nDims and nSymbols so we can
1583     // simplify on the fly and catch more reshapable cases.
1584     if (strides[idx] != strides[idx + 1] * sizes[idx + 1])
1585       return false;
1586   }
1587   return true;
1588 }
1589 
1590 /// Compute the MemRefType obtained by applying the `reassociation` (which is
1591 /// expected to be valid) to `type`.
/// If `type` is a contiguous MemRefType, this always produces a contiguous
/// MemRefType.
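/// For example (illustrative), collapsing memref<4x8xf32> with reassociation
/// [[0, 1]] yields memref<32xf32>.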
1594 static MemRefType
1595 computeReshapeCollapsedType(MemRefType type,
1596                             ArrayRef<AffineMap> reassociation) {
1597   auto sizes = type.getShape();
1598   AffineExpr offset;
1599   SmallVector<AffineExpr, 4> strides;
1600   auto status = getStridesAndOffset(type, strides, offset);
1601   auto isIdentityLayout = type.getLayout().isIdentity();
1602   (void)status;
1603   assert(succeeded(status) && "expected strided memref");
1604 
1605   SmallVector<int64_t, 4> newSizes;
1606   newSizes.reserve(reassociation.size());
1607   SmallVector<AffineExpr, 4> newStrides;
1608   newStrides.reserve(reassociation.size());
1609 
1610   // Use the fact that reassociation is valid to simplify the logic: only use
1611   // each map's rank.
1612   assert(isReassociationValid(reassociation) && "invalid reassociation");
1613   unsigned currentDim = 0;
1614   for (AffineMap m : reassociation) {
1615     unsigned dim = m.getNumResults();
1616     int64_t size = 1;
1617     AffineExpr stride = strides[currentDim + dim - 1];
1618     if (isIdentityLayout ||
1619         isReshapableDimBand(currentDim, dim, sizes, strides)) {
1620       for (unsigned d = 0; d < dim; ++d) {
1621         int64_t currentSize = sizes[currentDim + d];
1622         if (ShapedType::isDynamic(currentSize)) {
1623           size = ShapedType::kDynamicSize;
1624           break;
1625         }
1626         size *= currentSize;
1627       }
1628     } else {
1629       size = ShapedType::kDynamicSize;
1630       stride = AffineExpr();
1631     }
1632     newSizes.push_back(size);
1633     newStrides.push_back(stride);
1634     currentDim += dim;
1635   }
1636 
1637   // Early-exit: if `type` is contiguous, the result must be contiguous.
1638   if (canonicalizeStridedLayout(type).getLayout().isIdentity())
1639     return MemRefType::Builder(type).setShape(newSizes).setLayout({});
1640 
1641   // Convert back to int64_t because we don't have enough information to create
1642   // new strided layouts from AffineExpr only. This corresponds to a case where
1643   // copies may be necessary.
1644   int64_t intOffset = ShapedType::kDynamicStrideOrOffset;
1645   if (auto o = offset.dyn_cast<AffineConstantExpr>())
1646     intOffset = o.getValue();
1647   SmallVector<int64_t, 4> intStrides;
1648   intStrides.reserve(strides.size());
1649   for (auto stride : newStrides) {
1650     if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>())
1651       intStrides.push_back(cst.getValue());
1652     else
1653       intStrides.push_back(ShapedType::kDynamicStrideOrOffset);
1654   }
1655   auto layout =
1656       makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext());
1657   return canonicalizeStridedLayout(
1658       MemRefType::Builder(type).setShape(newSizes).setLayout(
1659           AffineMapAttr::get(layout)));
1660 }
1661 
1662 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1663                           ArrayRef<ReassociationIndices> reassociation,
1664                           ArrayRef<NamedAttribute> attrs) {
1665   auto memRefType = src.getType().cast<MemRefType>();
1666   auto resultType = computeReshapeCollapsedType(
1667       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1668                       b.getContext(), reassociation)));
1669   build(b, result, resultType, src, attrs);
1670   result.addAttribute(getReassociationAttrName(),
1671                       getReassociationIndicesAttribute(b, reassociation));
1672 }
1673 
1674 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1675                             ArrayRef<ReassociationIndices> reassociation,
1676                             ArrayRef<NamedAttribute> attrs) {
1677   auto memRefType = src.getType().cast<MemRefType>();
1678   auto resultType = computeReshapeCollapsedType(
1679       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1680                       b.getContext(), reassociation)));
1681   build(b, result, resultType, src, attrs);
1682   result.addAttribute(getReassociationAttrName(),
1683                       getReassociationIndicesAttribute(b, reassociation));
1684 }
1685 
1686 template <typename ReshapeOp,
1687           bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value>
1688 static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType,
1689                                      MemRefType collapsedType) {
1690   if (failed(
1691           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
1692     return failure();
1693   auto maps = op.getReassociationMaps();
1694   MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps);
1695   if (collapsedType != expectedType)
1696     return op.emitOpError("expected collapsed type to be ")
1697            << expectedType << ", but got " << collapsedType;
1698   return success();
1699 }
1700 
1701 LogicalResult ExpandShapeOp::verify() {
1702   return verifyReshapeOp(*this, getResultType(), getSrcType());
1703 }
1704 
1705 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1706                                                 MLIRContext *context) {
1707   results.add<CollapseReshapeOps<ExpandShapeOp>,
1708               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context);
1709 }
1710 
1711 LogicalResult CollapseShapeOp::verify() {
1712   return verifyReshapeOp(*this, getSrcType(), getResultType());
1713 }
1714 
1715 struct CollapseShapeOpMemRefCastFolder
1716     : public OpRewritePattern<CollapseShapeOp> {
1717 public:
1718   using OpRewritePattern<CollapseShapeOp>::OpRewritePattern;
1719 
1720   LogicalResult matchAndRewrite(CollapseShapeOp op,
1721                                 PatternRewriter &rewriter) const override {
1722     auto cast = op.getOperand().getDefiningOp<CastOp>();
1723     if (!cast)
1724       return failure();
1725 
1726     if (!CastOp::canFoldIntoConsumerOp(cast))
1727       return failure();
1728 
1729     Type newResultType = computeReshapeCollapsedType(
1730         cast.getOperand().getType().cast<MemRefType>(),
1731         op.getReassociationMaps());
1732 
1733     if (newResultType == op.getResultType()) {
1734       rewriter.updateRootInPlace(
1735           op, [&]() { op.srcMutable().assign(cast.source()); });
1736     } else {
1737       Value newOp = rewriter.create<CollapseShapeOp>(
1738           op->getLoc(), cast.source(), op.getReassociationIndices());
1739       rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
1740     }
1741     return success();
1742   }
1743 };
1744 
1745 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1746                                                   MLIRContext *context) {
1747   results.add<CollapseReshapeOps<CollapseShapeOp>,
1748               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
1749               CollapseShapeOpMemRefCastFolder>(context);
}

OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
}

OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
1755   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
1756 }
1757 
1758 //===----------------------------------------------------------------------===//
1759 // ReshapeOp
1760 //===----------------------------------------------------------------------===//
1761 
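// For example (illustrative), the following op verifies because the shape
// operand has static length 1, which matches the rank-1 result:
//   %dst = memref.reshape %src(%shape)
//       : (memref<4x1xf32>, memref<1xi32>) -> memref<4xf32>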
1762 LogicalResult ReshapeOp::verify() {
1763   Type operandType = source().getType();
1764   Type resultType = result().getType();
1765 
1766   Type operandElementType = operandType.cast<ShapedType>().getElementType();
1767   Type resultElementType = resultType.cast<ShapedType>().getElementType();
1768   if (operandElementType != resultElementType)
1769     return emitOpError("element types of source and destination memref "
1770                        "types should be the same");
1771 
1772   if (auto operandMemRefType = operandType.dyn_cast<MemRefType>())
1773     if (!operandMemRefType.getLayout().isIdentity())
1774       return emitOpError("source memref type should have identity affine map");
1775 
1776   int64_t shapeSize = shape().getType().cast<MemRefType>().getDimSize(0);
1777   auto resultMemRefType = resultType.dyn_cast<MemRefType>();
1778   if (resultMemRefType) {
1779     if (!resultMemRefType.getLayout().isIdentity())
1780       return emitOpError("result memref type should have identity affine map");
1781     if (shapeSize == ShapedType::kDynamicSize)
1782       return emitOpError("cannot use shape operand with dynamic length to "
1783                          "reshape to statically-ranked memref type");
1784     if (shapeSize != resultMemRefType.getRank())
1785       return emitOpError(
1786           "length of shape operand differs from the result's memref rank");
1787   }
1788   return success();
1789 }
1790 
1791 //===----------------------------------------------------------------------===//
1792 // StoreOp
1793 //===----------------------------------------------------------------------===//
1794 
1795 LogicalResult StoreOp::verify() {
1796   if (getNumOperands() != 2 + getMemRefType().getRank())
1797     return emitOpError("store index operand count not equal to memref rank");
1798 
1799   return success();
1800 }
1801 
1802 LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
1803                             SmallVectorImpl<OpFoldResult> &results) {
1804   /// store(memrefcast) -> store
1805   return foldMemRefCast(*this, getValueToStore());
1806 }
1807 
1808 //===----------------------------------------------------------------------===//
1809 // SubViewOp
1810 //===----------------------------------------------------------------------===//
1811 
1812 namespace {
/// Helpers to write more idiomatic operations: int64_t arithmetic that
/// saturates to the dynamic stride/offset sentinel whenever either operand is
/// dynamic.
1814 namespace saturated_arith {
1815 struct Wrapper {
1816   explicit Wrapper(int64_t v) : v(v) {}
1817   operator int64_t() { return v; }
1818   int64_t v;
1819 };
1820 Wrapper operator+(Wrapper a, int64_t b) {
1821   if (ShapedType::isDynamicStrideOrOffset(a) ||
1822       ShapedType::isDynamicStrideOrOffset(b))
1823     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1824   return Wrapper(a.v + b);
1825 }
1826 Wrapper operator*(Wrapper a, int64_t b) {
1827   if (ShapedType::isDynamicStrideOrOffset(a) ||
1828       ShapedType::isDynamicStrideOrOffset(b))
1829     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1830   return Wrapper(a.v * b);
1831 }
1832 } // namespace saturated_arith
1833 } // namespace
1834 
1835 /// A subview result type can be fully inferred from the source type and the
1836 /// static representation of offsets, sizes and strides. Special sentinels
1837 /// encode the dynamic case.
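/// For example (illustrative), a subview of memref<8x16xf32> with offsets
/// [2, 3], sizes [4, 4] and strides [1, 2] infers
/// memref<4x4xf32, offset: 35, strides: [16, 2]>: the source has strides
/// [16, 1] and offset 0, so the new offset is 0 + 2 * 16 + 3 * 1 = 35 and the
/// new strides are [16 * 1, 1 * 2].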
1838 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1839                                 ArrayRef<int64_t> staticOffsets,
1840                                 ArrayRef<int64_t> staticSizes,
1841                                 ArrayRef<int64_t> staticStrides) {
1842   unsigned rank = sourceMemRefType.getRank();
1843   (void)rank;
1844   assert(staticOffsets.size() == rank && "staticOffsets length mismatch");
1845   assert(staticSizes.size() == rank && "staticSizes length mismatch");
1846   assert(staticStrides.size() == rank && "staticStrides length mismatch");
1847 
1848   // Extract source offset and strides.
1849   int64_t sourceOffset;
1850   SmallVector<int64_t, 4> sourceStrides;
1851   auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
1852   assert(succeeded(res) && "SubViewOp expected strided memref type");
1853   (void)res;
1854 
1855   // Compute target offset whose value is:
1856   //   `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
1857   int64_t targetOffset = sourceOffset;
1858   for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
1859     auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it);
1860     using namespace saturated_arith;
1861     targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * targetStride;
1862   }
1863 
1864   // Compute target stride whose value is:
1865   //   `sourceStrides_i * staticStrides_i`.
1866   SmallVector<int64_t, 4> targetStrides;
1867   targetStrides.reserve(staticOffsets.size());
1868   for (auto it : llvm::zip(sourceStrides, staticStrides)) {
1869     auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
1870     using namespace saturated_arith;
1871     targetStrides.push_back(Wrapper(sourceStride) * staticStride);
1872   }
1873 
1874   // The type is now known.
1875   return MemRefType::get(
1876       staticSizes, sourceMemRefType.getElementType(),
1877       makeStridedLinearLayoutMap(targetStrides, targetOffset,
1878                                  sourceMemRefType.getContext()),
1879       sourceMemRefType.getMemorySpace());
1880 }
1881 
1882 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1883                                 ArrayRef<OpFoldResult> offsets,
1884                                 ArrayRef<OpFoldResult> sizes,
1885                                 ArrayRef<OpFoldResult> strides) {
1886   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1887   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1888   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1889                              ShapedType::kDynamicStrideOrOffset);
1890   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1891                              ShapedType::kDynamicSize);
1892   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1893                              ShapedType::kDynamicStrideOrOffset);
1894   return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1895                                     staticSizes, staticStrides);
1896 }
1897 
1898 Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
1899                                            MemRefType sourceRankedTensorType,
1900                                            ArrayRef<int64_t> offsets,
1901                                            ArrayRef<int64_t> sizes,
1902                                            ArrayRef<int64_t> strides) {
1903   auto inferredType =
1904       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
1905           .cast<MemRefType>();
  assert(inferredType.getRank() >= resultRank &&
         "expected the inferred rank to be no smaller than the result rank");
1907   int rankDiff = inferredType.getRank() - resultRank;
1908   if (rankDiff > 0) {
1909     auto shape = inferredType.getShape();
1910     llvm::SmallBitVector dimsToProject =
1911         getPositionsOfShapeOne(rankDiff, shape);
1912     SmallVector<int64_t> projectedShape;
1913     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
1914       if (!dimsToProject.test(pos))
1915         projectedShape.push_back(shape[pos]);
1916 
1917     AffineMap map = inferredType.getLayout().getAffineMap();
1918     if (!map.isIdentity())
1919       map = getProjectedMap(map, dimsToProject);
1920     inferredType =
1921         MemRefType::get(projectedShape, inferredType.getElementType(), map,
1922                         inferredType.getMemorySpace());
1923   }
1924   return inferredType;
1925 }
1926 
1927 Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
1928                                            MemRefType sourceRankedTensorType,
1929                                            ArrayRef<OpFoldResult> offsets,
1930                                            ArrayRef<OpFoldResult> sizes,
1931                                            ArrayRef<OpFoldResult> strides) {
1932   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1933   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1934   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1935                              ShapedType::kDynamicStrideOrOffset);
1936   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1937                              ShapedType::kDynamicSize);
1938   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1939                              ShapedType::kDynamicStrideOrOffset);
1940   return SubViewOp::inferRankReducedResultType(
1941       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
1942       staticStrides);
}

// Build a SubViewOp with mixed static and dynamic entries and custom result
1945 // type. If the type passed is nullptr, it is inferred.
1946 void SubViewOp::build(OpBuilder &b, OperationState &result,
1947                       MemRefType resultType, Value source,
1948                       ArrayRef<OpFoldResult> offsets,
1949                       ArrayRef<OpFoldResult> sizes,
1950                       ArrayRef<OpFoldResult> strides,
1951                       ArrayRef<NamedAttribute> attrs) {
1952   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1953   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1954   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1955                              ShapedType::kDynamicStrideOrOffset);
1956   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1957                              ShapedType::kDynamicSize);
1958   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1959                              ShapedType::kDynamicStrideOrOffset);
1960   auto sourceMemRefType = source.getType().cast<MemRefType>();
1961   // Structuring implementation this way avoids duplication between builders.
1962   if (!resultType) {
1963     resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1964                                             staticSizes, staticStrides)
1965                      .cast<MemRefType>();
1966   }
1967   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1968         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1969         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1970   result.addAttributes(attrs);
1971 }
1972 
1973 // Build a SubViewOp with mixed static and dynamic entries and inferred result
1974 // type.
1975 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1976                       ArrayRef<OpFoldResult> offsets,
1977                       ArrayRef<OpFoldResult> sizes,
1978                       ArrayRef<OpFoldResult> strides,
1979                       ArrayRef<NamedAttribute> attrs) {
1980   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1981 }
1982 
1983 // Build a SubViewOp with static entries and inferred result type.
1984 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1985                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1986                       ArrayRef<int64_t> strides,
1987                       ArrayRef<NamedAttribute> attrs) {
1988   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1989       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1990         return b.getI64IntegerAttr(v);
1991       }));
1992   SmallVector<OpFoldResult> sizeValues =
1993       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1994         return b.getI64IntegerAttr(v);
1995       }));
1996   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1997       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1998         return b.getI64IntegerAttr(v);
1999       }));
2000   build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
2001 }
2002 
2003 // Build a SubViewOp with dynamic entries and custom result type. If the
2004 // type passed is nullptr, it is inferred.
2005 void SubViewOp::build(OpBuilder &b, OperationState &result,
2006                       MemRefType resultType, Value source,
2007                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
2008                       ArrayRef<int64_t> strides,
2009                       ArrayRef<NamedAttribute> attrs) {
2010   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
2011       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
2012         return b.getI64IntegerAttr(v);
2013       }));
2014   SmallVector<OpFoldResult> sizeValues =
2015       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
2016         return b.getI64IntegerAttr(v);
2017       }));
2018   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
2019       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
2020         return b.getI64IntegerAttr(v);
2021       }));
2022   build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
2023         attrs);
2024 }
2025 
2026 // Build a SubViewOp with dynamic entries and custom result type. If the type
2027 // passed is nullptr, it is inferred.
2028 void SubViewOp::build(OpBuilder &b, OperationState &result,
2029                       MemRefType resultType, Value source, ValueRange offsets,
2030                       ValueRange sizes, ValueRange strides,
2031                       ArrayRef<NamedAttribute> attrs) {
2032   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
2033       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
2034   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
2035       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
2036   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
2037       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
2038   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
2039 }
2040 
2041 // Build a SubViewOp with dynamic entries and inferred result type.
2042 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
2043                       ValueRange offsets, ValueRange sizes, ValueRange strides,
2044                       ArrayRef<NamedAttribute> attrs) {
2045   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
2046 }
2047 
2048 /// For ViewLikeOpInterface.
2049 Value SubViewOp::getViewSource() { return source(); }
2050 
2051 /// Return true if t1 and t2 have equal offsets (both dynamic or of same static
2052 /// value).
2053 static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) {
2054   AffineExpr t1Offset, t2Offset;
2055   SmallVector<AffineExpr> t1Strides, t2Strides;
2056   auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset);
2057   auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset);
2058   return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset;
2059 }
2060 
/// Checks if `originalType` can be rank-reduced to `candidateRankReducedType`.
/// This function is a slight variant of the `is subsequence` algorithm, where
/// any non-matching dimension must be 1.
2064 static SliceVerificationResult
2065 isRankReducedMemRefType(MemRefType originalType,
2066                         MemRefType candidateRankReducedType,
2067                         ArrayRef<OpFoldResult> sizes) {
2068   auto partialRes = isRankReducedType(originalType, candidateRankReducedType);
2069   if (partialRes != SliceVerificationResult::Success)
2070     return partialRes;
2071 
2072   auto optionalUnusedDimsMask = computeMemRefRankReductionMask(
2073       originalType, candidateRankReducedType, sizes);
2074 
  // If no rank-reduction mask could be computed, the sizes cannot be matched.
2076   if (!optionalUnusedDimsMask.hasValue())
2077     return SliceVerificationResult::LayoutMismatch;
2078 
2079   if (originalType.getMemorySpace() !=
2080       candidateRankReducedType.getMemorySpace())
2081     return SliceVerificationResult::MemSpaceMismatch;
2082 
2083   // No amount of stride dropping can reconcile incompatible offsets.
2084   if (!haveCompatibleOffsets(originalType, candidateRankReducedType))
2085     return SliceVerificationResult::LayoutMismatch;
2086 
2087   return SliceVerificationResult::Success;
2088 }
2089 
2090 template <typename OpTy>
2091 static LogicalResult produceSubViewErrorMsg(SliceVerificationResult result,
2092                                             OpTy op, Type expectedType) {
2093   auto memrefType = expectedType.cast<ShapedType>();
2094   switch (result) {
2095   case SliceVerificationResult::Success:
2096     return success();
2097   case SliceVerificationResult::RankTooLarge:
2098     return op.emitError("expected result rank to be smaller or equal to ")
2099            << "the source rank. ";
2100   case SliceVerificationResult::SizeMismatch:
2101     return op.emitError("expected result type to be ")
2102            << expectedType
2103            << " or a rank-reduced version. (mismatch of result sizes) ";
2104   case SliceVerificationResult::ElemTypeMismatch:
2105     return op.emitError("expected result element type to be ")
2106            << memrefType.getElementType();
2107   case SliceVerificationResult::MemSpaceMismatch:
2108     return op.emitError("expected result and source memory spaces to match.");
2109   case SliceVerificationResult::LayoutMismatch:
2110     return op.emitError("expected result type to be ")
2111            << expectedType
2112            << " or a rank-reduced version. (mismatch of result layout) ";
2113   }
2114   llvm_unreachable("unexpected subview verification result");
2115 }
2116 
2117 /// Verifier for SubViewOp.
2118 LogicalResult SubViewOp::verify() {
2119   MemRefType baseType = getSourceType();
2120   MemRefType subViewType = getType();
2121 
2122   // The base memref and the view memref should be in the same memory space.
2123   if (baseType.getMemorySpace() != subViewType.getMemorySpace())
2124     return emitError("different memory spaces specified for base memref "
2125                      "type ")
2126            << baseType << " and subview memref type " << subViewType;
2127 
2128   // Verify that the base memref type has a strided layout map.
2129   if (!isStrided(baseType))
2130     return emitError("base type ") << baseType << " is not strided";
2131 
2132   // Verify result type against inferred type.
2133   auto expectedType = SubViewOp::inferResultType(
2134       baseType, extractFromI64ArrayAttr(static_offsets()),
2135       extractFromI64ArrayAttr(static_sizes()),
2136       extractFromI64ArrayAttr(static_strides()));
2137 
2138   auto result = isRankReducedMemRefType(expectedType.cast<MemRefType>(),
2139                                         subViewType, getMixedSizes());
2140   return produceSubViewErrorMsg(result, *this, expectedType);
2141 }
2142 
2143 raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) {
2144   return os << "range " << range.offset << ":" << range.size << ":"
2145             << range.stride;
2146 }
2147 
2148 /// Return the list of Range (i.e. offset, size, stride). Each Range
2149 /// entry contains either the dynamic value or a ConstantIndexOp constructed
2150 /// with `b` at location `loc`.
2151 SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
2152                                               OpBuilder &b, Location loc) {
2153   std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
2154   assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
2155   assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
2156   SmallVector<Range, 8> res;
2157   unsigned rank = ranks[0];
2158   res.reserve(rank);
2159   for (unsigned idx = 0; idx < rank; ++idx) {
2160     Value offset =
2161         op.isDynamicOffset(idx)
2162             ? op.getDynamicOffset(idx)
2163             : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
2164     Value size =
2165         op.isDynamicSize(idx)
2166             ? op.getDynamicSize(idx)
2167             : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
2168     Value stride =
2169         op.isDynamicStride(idx)
2170             ? op.getDynamicStride(idx)
2171             : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
2172     res.emplace_back(Range{offset, size, stride});
2173   }
2174   return res;
2175 }
2176 
2177 /// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
2178 /// deduce the result type for the given `sourceType`. Additionally, reduce the
2179 /// rank of the inferred result type if `currentResultType` is lower rank than
2180 /// `currentSourceType`. Use this signature if `sourceType` is updated together
2181 /// with the result type. In this case, it is important to compute the dropped
2182 /// dimensions using `currentSourceType` whose strides align with
2183 /// `currentResultType`.
2184 static MemRefType getCanonicalSubViewResultType(
2185     MemRefType currentResultType, MemRefType currentSourceType,
2186     MemRefType sourceType, ArrayRef<OpFoldResult> mixedOffsets,
2187     ArrayRef<OpFoldResult> mixedSizes, ArrayRef<OpFoldResult> mixedStrides) {
2188   auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
2189                                                        mixedSizes, mixedStrides)
2190                                 .cast<MemRefType>();
2191   llvm::Optional<llvm::SmallBitVector> unusedDims =
2192       computeMemRefRankReductionMask(currentSourceType, currentResultType,
2193                                      mixedSizes);
2194   // Return nullptr as failure mode.
2195   if (!unusedDims)
2196     return nullptr;
2197   SmallVector<int64_t> shape;
2198   for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
2199     if (unusedDims->test(sizes.index()))
2200       continue;
2201     shape.push_back(sizes.value());
2202   }
2203   AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
2204   if (!layoutMap.isIdentity())
2205     layoutMap = getProjectedMap(layoutMap, unusedDims.getValue());
2206   return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
2207                          nonRankReducedType.getMemorySpace());
2208 }
2209 
2210 /// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
2211 /// deduce the result type. Additionally, reduce the rank of the inferred result
2212 /// type if `currentResultType` is lower rank than `sourceType`.
2213 static MemRefType getCanonicalSubViewResultType(
2214     MemRefType currentResultType, MemRefType sourceType,
2215     ArrayRef<OpFoldResult> mixedOffsets, ArrayRef<OpFoldResult> mixedSizes,
2216     ArrayRef<OpFoldResult> mixedStrides) {
2217   return getCanonicalSubViewResultType(currentResultType, sourceType,
2218                                        sourceType, mixedOffsets, mixedSizes,
2219                                        mixedStrides);
2220 }
2221 
/// Helper method to check if a `subview` operation is trivially a no-op. This
/// is the case if all offsets are zero, all strides are one, and the source
/// shape is the same as the subview's result shape. In such cases, it can be
2225 /// folded into its source.
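/// For example (illustrative), `memref.subview %m[0, 0] [4, 4] [1, 1]
/// : memref<4x4xf32> to memref<4x4xf32>` is such a no-op and can be replaced
/// by `%m`.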
2226 static bool isTrivialSubViewOp(SubViewOp subViewOp) {
2227   if (subViewOp.getSourceType().getRank() != subViewOp.getType().getRank())
2228     return false;
2229 
2230   auto mixedOffsets = subViewOp.getMixedOffsets();
2231   auto mixedSizes = subViewOp.getMixedSizes();
2232   auto mixedStrides = subViewOp.getMixedStrides();
2233 
2234   // Check offsets are zero.
2235   if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
2236         Optional<int64_t> intValue = getConstantIntValue(ofr);
2237         return !intValue || intValue.getValue() != 0;
2238       }))
2239     return false;
2240 
2241   // Check strides are one.
2242   if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
2243         Optional<int64_t> intValue = getConstantIntValue(ofr);
2244         return !intValue || intValue.getValue() != 1;
2245       }))
2246     return false;
2247 
  // Check that all size values are static and match the (static) source shape.
2249   ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
2250   for (const auto &size : llvm::enumerate(mixedSizes)) {
2251     Optional<int64_t> intValue = getConstantIntValue(size.value());
2252     if (!intValue || intValue.getValue() != sourceShape[size.index()])
2253       return false;
2254   }
2255   // All conditions met. The `SubViewOp` is foldable as a no-op.
2256   return true;
2257 }
2258 
2259 namespace {
2260 /// Pattern to rewrite a subview op with MemRefCast arguments.
2261 /// This essentially pushes memref.cast past its consuming subview when
2262 /// `canFoldIntoConsumerOp` is true.
2263 ///
2264 /// Example:
2265 /// ```
2266 ///   %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
2267 ///   %1 = memref.subview %0[0, 0][3, 4][1, 1] :
2268 ///     memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
2269 /// ```
2270 /// is rewritten into:
2271 /// ```
2272 ///   %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
2273 ///   %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
2274 ///     memref<3x4xf32, offset:?, strides:[?, 1]>
2275 /// ```
2276 class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
2277 public:
2278   using OpRewritePattern<SubViewOp>::OpRewritePattern;
2279 
2280   LogicalResult matchAndRewrite(SubViewOp subViewOp,
2281                                 PatternRewriter &rewriter) const override {
    // If any operand is a constant, bail out and let the constant-argument
    // folder kick in first.
2283     if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
2284           return matchPattern(operand, matchConstantIndex());
2285         }))
2286       return failure();
2287 
2288     auto castOp = subViewOp.source().getDefiningOp<CastOp>();
2289     if (!castOp)
2290       return failure();
2291 
2292     if (!CastOp::canFoldIntoConsumerOp(castOp))
2293       return failure();
2294 
2295     // Compute the SubViewOp result type after folding the MemRefCastOp. Use the
2296     // MemRefCastOp source operand type to infer the result type and the current
2297     // SubViewOp source operand type to compute the dropped dimensions if the
2298     // operation is rank-reducing.
2299     auto resultType = getCanonicalSubViewResultType(
2300         subViewOp.getType(), subViewOp.getSourceType(),
2301         castOp.source().getType().cast<MemRefType>(),
2302         subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
2303         subViewOp.getMixedStrides());
2304     if (!resultType)
2305       return failure();
2306 
2307     Value newSubView = rewriter.create<SubViewOp>(
2308         subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
2309         subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
2310         subViewOp.static_sizes(), subViewOp.static_strides());
2311     rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
2312                                         newSubView);
2313     return success();
2314   }
2315 };
2316 
/// Canonicalize subview ops that are no-ops. If the source and result types
/// differ only in their layout (e.g. because of an `affine_map`), replace the
/// subview with a cast; otherwise replace it directly with its source.
2319 class TrivialSubViewOpFolder final : public OpRewritePattern<SubViewOp> {
2320 public:
2321   using OpRewritePattern<SubViewOp>::OpRewritePattern;
2322 
2323   LogicalResult matchAndRewrite(SubViewOp subViewOp,
2324                                 PatternRewriter &rewriter) const override {
2325     if (!isTrivialSubViewOp(subViewOp))
2326       return failure();
2327     if (subViewOp.getSourceType() == subViewOp.getType()) {
2328       rewriter.replaceOp(subViewOp, subViewOp.source());
2329       return success();
2330     }
2331     rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
2332                                         subViewOp.source());
2333     return success();
2334   }
2335 };
2336 } // namespace
2337 
2338 /// Return the canonical type of the result of a subview.
2339 struct SubViewReturnTypeCanonicalizer {
2340   MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
2341                         ArrayRef<OpFoldResult> mixedSizes,
2342                         ArrayRef<OpFoldResult> mixedStrides) {
2343     return getCanonicalSubViewResultType(op.getType(), op.getSourceType(),
2344                                          mixedOffsets, mixedSizes,
2345                                          mixedStrides);
2346   }
2347 };
2348 
2349 /// A canonicalizer wrapper to replace SubViewOps.
2350 struct SubViewCanonicalizer {
2351   void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
2352     rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
2353   }
2354 };
2355 
2356 void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2357                                             MLIRContext *context) {
2358   results
2359       .add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
2360                SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
2361            SubViewOpMemRefCastFolder, TrivialSubViewOpFolder>(context);
2362 }
2363 
2364 OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
2365   auto resultShapedType = getResult().getType().cast<ShapedType>();
2366   auto sourceShapedType = source().getType().cast<ShapedType>();
2367 
2368   if (resultShapedType.hasStaticShape() &&
2369       resultShapedType == sourceShapedType) {
2370     return getViewSource();
2371   }
2372 
2373   return {};
2374 }
2375 
2376 //===----------------------------------------------------------------------===//
2377 // TransposeOp
2378 //===----------------------------------------------------------------------===//
2379 
/// Build a strided memref type by applying `permutationMap` to `memRefType`.
2381 static MemRefType inferTransposeResultType(MemRefType memRefType,
2382                                            AffineMap permutationMap) {
2383   auto rank = memRefType.getRank();
2384   auto originalSizes = memRefType.getShape();
2385   // Compute permuted sizes.
2386   SmallVector<int64_t, 4> sizes(rank, 0);
2387   for (const auto &en : llvm::enumerate(permutationMap.getResults()))
2388     sizes[en.index()] =
2389         originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
2390 
2391   // Compute permuted strides.
2392   int64_t offset;
2393   SmallVector<int64_t, 4> strides;
2394   auto res = getStridesAndOffset(memRefType, strides, offset);
2395   assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
2396   (void)res;
2397   auto map =
2398       makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
2399   map = permutationMap ? map.compose(permutationMap) : map;
2400   return MemRefType::Builder(memRefType)
2401       .setShape(sizes)
2402       .setLayout(AffineMapAttr::get(map));
2403 }
2404 
2405 void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
2406                         AffineMapAttr permutation,
2407                         ArrayRef<NamedAttribute> attrs) {
2408   auto permutationMap = permutation.getValue();
2409   assert(permutationMap);
2410 
2411   auto memRefType = in.getType().cast<MemRefType>();
2412   // Compute result type.
2413   MemRefType resultType = inferTransposeResultType(memRefType, permutationMap);
2414 
2415   build(b, result, resultType, in, attrs);
2416   result.addAttribute(TransposeOp::getPermutationAttrName(), permutation);
2417 }
2418 
2419 // transpose $in $permutation attr-dict : type($in) `to` type(results)
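// For example (illustrative):
//   %1 = memref.transpose %0 (d0, d1) -> (d1, d0)
//       : memref<?x?xf32> to memref<?x?xf32, affine_map<(d0, d1)[s0] -> (d1 * s0 + d0)>>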
2420 void TransposeOp::print(OpAsmPrinter &p) {
2421   p << " " << in() << " " << permutation();
2422   p.printOptionalAttrDict((*this)->getAttrs(), {getPermutationAttrName()});
2423   p << " : " << in().getType() << " to " << getType();
2424 }
2425 
2426 ParseResult TransposeOp::parse(OpAsmParser &parser, OperationState &result) {
2427   OpAsmParser::OperandType in;
2428   AffineMap permutation;
2429   MemRefType srcType, dstType;
2430   if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
2431       parser.parseOptionalAttrDict(result.attributes) ||
2432       parser.parseColonType(srcType) ||
2433       parser.resolveOperand(in, srcType, result.operands) ||
2434       parser.parseKeywordType("to", dstType) ||
2435       parser.addTypeToList(dstType, result.types))
2436     return failure();
2437 
2438   result.addAttribute(TransposeOp::getPermutationAttrName(),
2439                       AffineMapAttr::get(permutation));
2440   return success();
2441 }
2442 
2443 LogicalResult TransposeOp::verify() {
2444   if (!permutation().isPermutation())
2445     return emitOpError("expected a permutation map");
2446   if (permutation().getNumDims() != getShapedType().getRank())
2447     return emitOpError("expected a permutation map of same rank as the input");
2448 
2449   auto srcType = in().getType().cast<MemRefType>();
2450   auto dstType = getType().cast<MemRefType>();
2451   auto transposedType = inferTransposeResultType(srcType, permutation());
2452   if (dstType != transposedType)
2453     return emitOpError("output type ")
2454            << dstType << " does not match transposed input type " << srcType
2455            << ", " << transposedType;
2456   return success();
2457 }
2458 
2459 OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
2460   if (succeeded(foldMemRefCast(*this)))
2461     return getResult();
2462   return {};
2463 }
2464 
2465 //===----------------------------------------------------------------------===//
2466 // ViewOp
2467 //===----------------------------------------------------------------------===//
2468 
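// For example (illustrative), the verifier below accepts
//   %v = memref.view %buf[%off][%sz] : memref<2048xi8> to memref<?x4xf32>
// since the result type has exactly one dynamic dimension and exactly one
// size operand is supplied.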
2469 LogicalResult ViewOp::verify() {
2470   auto baseType = getOperand(0).getType().cast<MemRefType>();
2471   auto viewType = getType();
2472 
2473   // The base memref should have identity layout map (or none).
2474   if (!baseType.getLayout().isIdentity())
2475     return emitError("unsupported map for base memref type ") << baseType;
2476 
2477   // The result memref should have identity layout map (or none).
2478   if (!viewType.getLayout().isIdentity())
2479     return emitError("unsupported map for result memref type ") << viewType;
2480 
2481   // The base memref and the view memref should be in the same memory space.
2482   if (baseType.getMemorySpace() != viewType.getMemorySpace())
2483     return emitError("different memory spaces specified for base memref "
2484                      "type ")
2485            << baseType << " and view memref type " << viewType;
2486 
2487   // Verify that we have the correct number of sizes for the result type.
2488   unsigned numDynamicDims = viewType.getNumDynamicDims();
2489   if (sizes().size() != numDynamicDims)
2490     return emitError("incorrect number of size operands for type ") << viewType;
2491 
2492   return success();
2493 }
2494 
2495 Value ViewOp::getViewSource() { return source(); }
2496 
2497 namespace {
2498 
2499 struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
2500   using OpRewritePattern<ViewOp>::OpRewritePattern;
2501 
2502   LogicalResult matchAndRewrite(ViewOp viewOp,
2503                                 PatternRewriter &rewriter) const override {
2504     // Return if none of the operands are constants.
2505     if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
2506           return matchPattern(operand, matchConstantIndex());
2507         }))
2508       return failure();
2509 
2510     // Get result memref type.
2511     auto memrefType = viewOp.getType();
2512 
    // Get the offset from the old memref view type `memrefType`.
2514     int64_t oldOffset;
2515     SmallVector<int64_t, 4> oldStrides;
2516     if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
2517       return failure();
2518     assert(oldOffset == 0 && "Expected 0 offset");
2519 
2520     SmallVector<Value, 4> newOperands;
2521 
2522     // Offset cannot be folded into result type.
2523 
2524     // Fold any dynamic dim operands which are produced by a constant.
2525     SmallVector<int64_t, 4> newShapeConstants;
2526     newShapeConstants.reserve(memrefType.getRank());
2527 
2528     unsigned dynamicDimPos = 0;
2529     unsigned rank = memrefType.getRank();
2530     for (unsigned dim = 0, e = rank; dim < e; ++dim) {
2531       int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
2533       if (!ShapedType::isDynamic(dimSize)) {
2534         newShapeConstants.push_back(dimSize);
2535         continue;
2536       }
2537       auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp();
2538       if (auto constantIndexOp =
2539               dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
2540         // Dynamic shape dimension will be folded.
2541         newShapeConstants.push_back(constantIndexOp.value());
2542       } else {
2543         // Dynamic shape dimension not folded; copy operand from old memref.
2544         newShapeConstants.push_back(dimSize);
2545         newOperands.push_back(viewOp.sizes()[dynamicDimPos]);
2546       }
2547       dynamicDimPos++;
2548     }
2549 
2550     // Create new memref type with constant folded dims.
2551     MemRefType newMemRefType =
2552         MemRefType::Builder(memrefType).setShape(newShapeConstants);
2553     // Nothing new, don't fold.
2554     if (newMemRefType == memrefType)
2555       return failure();
2556 
2557     // Create new ViewOp.
2558     auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
2559                                              viewOp.getOperand(0),
2560                                              viewOp.byte_shift(), newOperands);
    // Insert a cast so the replacement has the same type as the old view.
2562     rewriter.replaceOpWithNewOp<CastOp>(viewOp, viewOp.getType(), newViewOp);
2563     return success();
2564   }
2565 };
2566 
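/// Folds a memref.cast of a memref.alloc that feeds the source of a view, so
/// the view is taken directly on the alloc. Illustrative sketch only (names
/// and types below are made up, not taken from an actual test):
///
///   %a = memref.alloc() : memref<2048xi8>
///   %c = memref.cast %a : memref<2048xi8> to memref<?xi8>
///   %v = memref.view %c[%shift][%n] : memref<?xi8> to memref<?xf32>
///
/// becomes:
///
///   %v = memref.view %a[%shift][%n] : memref<2048xi8> to memref<?xf32>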
2567 struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
2568   using OpRewritePattern<ViewOp>::OpRewritePattern;
2569 
2570   LogicalResult matchAndRewrite(ViewOp viewOp,
2571                                 PatternRewriter &rewriter) const override {
2572     Value memrefOperand = viewOp.getOperand(0);
2573     CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>();
2574     if (!memrefCastOp)
2575       return failure();
2576     Value allocOperand = memrefCastOp.getOperand();
2577     AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>();
2578     if (!allocOp)
2579       return failure();
2580     rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
2581                                         viewOp.byte_shift(), viewOp.sizes());
2582     return success();
2583   }
2584 };
2585 
2586 } // namespace
2587 
2588 void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2589                                          MLIRContext *context) {
2590   results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
2591 }
2592 
2593 //===----------------------------------------------------------------------===//
2594 // AtomicRMWOp
2595 //===----------------------------------------------------------------------===//
2596 
2597 LogicalResult AtomicRMWOp::verify() {
2598   if (getMemRefType().getRank() != getNumOperands() - 2)
2599     return emitOpError(
2600         "expects the number of subscripts to be equal to memref rank");
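  // The value operand's type must match the update kind: floating-point kinds
  // require a float value, integer kinds an integer value.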
2601   switch (kind()) {
2602   case arith::AtomicRMWKind::addf:
2603   case arith::AtomicRMWKind::maxf:
2604   case arith::AtomicRMWKind::minf:
2605   case arith::AtomicRMWKind::mulf:
2606     if (!value().getType().isa<FloatType>())
2607       return emitOpError() << "with kind '"
2608                            << arith::stringifyAtomicRMWKind(kind())
2609                            << "' expects a floating-point type";
2610     break;
2611   case arith::AtomicRMWKind::addi:
2612   case arith::AtomicRMWKind::maxs:
2613   case arith::AtomicRMWKind::maxu:
2614   case arith::AtomicRMWKind::mins:
2615   case arith::AtomicRMWKind::minu:
2616   case arith::AtomicRMWKind::muli:
2617   case arith::AtomicRMWKind::ori:
2618   case arith::AtomicRMWKind::andi:
2619     if (!value().getType().isa<IntegerType>())
2620       return emitOpError() << "with kind '"
2621                            << arith::stringifyAtomicRMWKind(kind())
2622                            << "' expects an integer type";
2623     break;
2624   default:
2625     break;
2626   }
2627   return success();
2628 }
2629 
2630 OpFoldResult AtomicRMWOp::fold(ArrayRef<Attribute> operands) {
  // atomicrmw(memrefcast) -> atomicrmw
2632   if (succeeded(foldMemRefCast(*this, value())))
2633     return getResult();
2634   return OpFoldResult();
2635 }
2636 
2637 //===----------------------------------------------------------------------===//
2638 // TableGen'd op method definitions
2639 //===----------------------------------------------------------------------===//
2640 
2641 #define GET_OP_CLASSES
2642 #include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"
2643