1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/MemRef/IR/MemRef.h"
11 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
12 #include "mlir/Dialect/StandardOps/IR/Ops.h"
13 #include "mlir/Dialect/StandardOps/Utils/Utils.h"
14 #include "mlir/Dialect/Utils/StaticValueUtils.h"
15 #include "mlir/IR/AffineMap.h"
16 #include "mlir/IR/Builders.h"
17 #include "mlir/IR/BuiltinTypes.h"
18 #include "mlir/IR/Matchers.h"
19 #include "mlir/IR/PatternMatch.h"
20 #include "mlir/IR/TypeUtilities.h"
21 #include "mlir/Interfaces/InferTypeOpInterface.h"
22 #include "mlir/Interfaces/ViewLikeInterface.h"
23 #include "llvm/ADT/STLExtras.h"
24 
25 using namespace mlir;
26 using namespace mlir::memref;
27 
28 /// Materialize a single constant operation from a given attribute value with
29 /// the desired resultant type.
30 Operation *MemRefDialect::materializeConstant(OpBuilder &builder,
31                                               Attribute value, Type type,
32                                               Location loc) {
33   if (arith::ConstantOp::isBuildableWith(value, type))
34     return builder.create<arith::ConstantOp>(loc, value, type);
35   if (ConstantOp::isBuildableWith(value, type))
36     return builder.create<ConstantOp>(loc, value, type);
37   return nullptr;
38 }
39 
40 //===----------------------------------------------------------------------===//
41 // Common canonicalization pattern support logic
42 //===----------------------------------------------------------------------===//
43 
44 /// This is a common class used for patterns of the form
45 /// "someop(memrefcast) -> someop".  It folds the source of any memref.cast
46 /// into the root operation directly.
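///
/// For illustration (hypothetical IR), with such a pattern
///
/// ```mlir
///   %0 = memref.cast %arg0 : memref<8xf32> to memref<?xf32>
///   memref.dealloc %0 : memref<?xf32>
/// ```
///
/// may become:
///
/// ```mlir
///   memref.dealloc %arg0 : memref<8xf32>
/// ```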
47 LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) {
48   bool folded = false;
49   for (OpOperand &operand : op->getOpOperands()) {
50     auto cast = operand.get().getDefiningOp<CastOp>();
51     if (cast && operand.get() != inner &&
52         !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
53       operand.set(cast.getOperand());
54       folded = true;
55     }
56   }
57   return success(folded);
58 }
59 
60 /// Return an unranked/ranked tensor type for the given unranked/ranked memref
61 /// type.
62 Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
63   if (auto memref = type.dyn_cast<MemRefType>())
64     return RankedTensorType::get(memref.getShape(), memref.getElementType());
65   if (auto memref = type.dyn_cast<UnrankedMemRefType>())
66     return UnrankedTensorType::get(memref.getElementType());
67   return NoneType::get(type.getContext());
68 }
69 
70 //===----------------------------------------------------------------------===//
71 // AllocOp / AllocaOp
72 //===----------------------------------------------------------------------===//
73 
74 template <typename AllocLikeOp>
75 static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
76   static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
                "applies only to alloc or alloca");
78   auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
79   if (!memRefType)
80     return op.emitOpError("result must be a memref");
81 
82   if (static_cast<int64_t>(op.dynamicSizes().size()) !=
83       memRefType.getNumDynamicDims())
84     return op.emitOpError("dimension operand count does not equal memref "
85                           "dynamic dimension count");
86 
87   unsigned numSymbols = 0;
88   if (!memRefType.getLayout().isIdentity())
89     numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols();
90   if (op.symbolOperands().size() != numSymbols)
91     return op.emitOpError("symbol operand count does not equal memref symbol "
92                           "count: expected ")
93            << numSymbols << ", got " << op.symbolOperands().size();
94 
95   return success();
96 }
97 
98 static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); }
99 
100 static LogicalResult verify(AllocaOp op) {
101   // An alloca op needs to have an ancestor with an allocation scope trait.
102   if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
103     return op.emitOpError(
104         "requires an ancestor op with AutomaticAllocationScope trait");
105 
106   return verifyAllocLikeOp(op);
107 }
108 
109 namespace {
/// Fold constant dimensions into an alloc-like operation.
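///
/// For illustration (hypothetical IR), the rewrite turns
///
/// ```mlir
///   %c8 = arith.constant 8 : index
///   %0 = memref.alloc(%c8, %n) : memref<?x?xf32>
/// ```
///
/// into
///
/// ```mlir
///   %0 = memref.alloc(%n) : memref<8x?xf32>
///   %1 = memref.cast %0 : memref<8x?xf32> to memref<?x?xf32>
/// ```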
111 template <typename AllocLikeOp>
112 struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
113   using OpRewritePattern<AllocLikeOp>::OpRewritePattern;
114 
115   LogicalResult matchAndRewrite(AllocLikeOp alloc,
116                                 PatternRewriter &rewriter) const override {
    // Check to see if any dimension operands are constants. If so, we can
118     // substitute and drop them.
119     if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) {
120           return matchPattern(operand, matchConstantIndex());
121         }))
122       return failure();
123 
124     auto memrefType = alloc.getType();
125 
126     // Ok, we have one or more constant operands.  Collect the non-constant ones
127     // and keep track of the resultant memref type to build.
128     SmallVector<int64_t, 4> newShapeConstants;
129     newShapeConstants.reserve(memrefType.getRank());
130     SmallVector<Value, 4> dynamicSizes;
131 
132     unsigned dynamicDimPos = 0;
133     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
134       int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
136       if (dimSize != -1) {
137         newShapeConstants.push_back(dimSize);
138         continue;
139       }
140       auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos];
141       auto *defOp = dynamicSize.getDefiningOp();
142       if (auto constantIndexOp =
143               dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
144         // Dynamic shape dimension will be folded.
145         newShapeConstants.push_back(constantIndexOp.value());
146       } else {
147         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
148         newShapeConstants.push_back(-1);
149         dynamicSizes.push_back(dynamicSize);
150       }
151       dynamicDimPos++;
152     }
153 
154     // Create new memref type (which will have fewer dynamic dimensions).
155     MemRefType newMemRefType =
156         MemRefType::Builder(memrefType).setShape(newShapeConstants);
157     assert(static_cast<int64_t>(dynamicSizes.size()) ==
158            newMemRefType.getNumDynamicDims());
159 
160     // Create and insert the alloc op for the new memref.
161     auto newAlloc = rewriter.create<AllocLikeOp>(
162         alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(),
163         alloc.alignmentAttr());
164     // Insert a cast so we have the same type as the old alloc.
165     auto resultCast =
166         rewriter.create<CastOp>(alloc.getLoc(), newAlloc, alloc.getType());
167 
168     rewriter.replaceOp(alloc, {resultCast});
169     return success();
170   }
171 };
172 
173 /// Fold alloc operations with no users or only store and dealloc uses.
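///
/// For illustration (hypothetical IR), the following alloc and both of its
/// uses are erased, since the buffer is only written to and deallocated but
/// never read:
///
/// ```mlir
///   %0 = memref.alloc() : memref<4xf32>
///   memref.store %cst, %0[%i] : memref<4xf32>
///   memref.dealloc %0 : memref<4xf32>
/// ```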
174 template <typename T>
175 struct SimplifyDeadAlloc : public OpRewritePattern<T> {
176   using OpRewritePattern<T>::OpRewritePattern;
177 
178   LogicalResult matchAndRewrite(T alloc,
179                                 PatternRewriter &rewriter) const override {
180     if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
181           if (auto storeOp = dyn_cast<StoreOp>(op))
182             return storeOp.value() == alloc;
183           return !isa<DeallocOp>(op);
184         }))
185       return failure();
186 
187     for (Operation *user : llvm::make_early_inc_range(alloc->getUsers()))
188       rewriter.eraseOp(user);
189 
190     rewriter.eraseOp(alloc);
191     return success();
192   }
193 };
194 } // end anonymous namespace.
195 
196 void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
197                                           MLIRContext *context) {
198   results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context);
199 }
200 
201 void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
202                                            MLIRContext *context) {
203   results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>(
204       context);
205 }
206 
207 //===----------------------------------------------------------------------===//
208 // AllocaScopeOp
209 //===----------------------------------------------------------------------===//
210 
211 static void print(OpAsmPrinter &p, AllocaScopeOp &op) {
212   bool printBlockTerminators = false;
213 
214   p << " ";
215   if (!op.results().empty()) {
216     p << " -> (" << op.getResultTypes() << ")";
217     printBlockTerminators = true;
218   }
219   p.printRegion(op.bodyRegion(),
220                 /*printEntryBlockArgs=*/false,
221                 /*printBlockTerminators=*/printBlockTerminators);
222   p.printOptionalAttrDict(op->getAttrs());
223 }
224 
225 static ParseResult parseAllocaScopeOp(OpAsmParser &parser,
226                                       OperationState &result) {
227   // Create a region for the body.
228   result.regions.reserve(1);
229   Region *bodyRegion = result.addRegion();
230 
231   // Parse optional results type list.
232   if (parser.parseOptionalArrowTypeList(result.types))
233     return failure();
234 
235   // Parse the body region.
236   if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{}))
237     return failure();
238   AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(),
239                                   result.location);
240 
241   // Parse the optional attribute list.
242   if (parser.parseOptionalAttrDict(result.attributes))
243     return failure();
244 
245   return success();
246 }
247 
248 static LogicalResult verify(AllocaScopeOp op) {
249   if (failed(RegionBranchOpInterface::verifyTypes(op)))
250     return failure();
251 
252   return success();
253 }
254 
255 void AllocaScopeOp::getSuccessorRegions(
256     Optional<unsigned> index, ArrayRef<Attribute> operands,
257     SmallVectorImpl<RegionSuccessor> &regions) {
258   if (index.hasValue()) {
259     regions.push_back(RegionSuccessor(getResults()));
260     return;
261   }
262 
263   regions.push_back(RegionSuccessor(&bodyRegion()));
264 }
265 
266 //===----------------------------------------------------------------------===//
267 // AssumeAlignmentOp
268 //===----------------------------------------------------------------------===//
269 
270 static LogicalResult verify(AssumeAlignmentOp op) {
271   unsigned alignment = op.alignment();
272   if (!llvm::isPowerOf2_32(alignment))
273     return op.emitOpError("alignment must be power of 2");
274   return success();
275 }
276 
277 //===----------------------------------------------------------------------===//
278 // CastOp
279 //===----------------------------------------------------------------------===//
280 
281 /// Determines whether MemRef_CastOp casts to a more dynamic version of the
/// source memref. This is useful to fold a memref.cast into a consuming op
283 /// and implement canonicalization patterns for ops in different dialects that
284 /// may consume the results of memref.cast operations. Such foldable memref.cast
285 /// operations are typically inserted as `view` and `subview` ops are
286 /// canonicalized, to preserve the type compatibility of their uses.
287 ///
288 /// Returns true when all conditions are met:
289 /// 1. source and result are ranked memrefs with strided semantics and same
290 /// element type and rank.
291 /// 2. each of the source's size, offset or stride has more static information
292 /// than the corresponding result's size, offset or stride.
293 ///
294 /// Example 1:
295 /// ```mlir
296 ///   %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
297 ///   %2 = consumer %1 ... : memref<?x?xf32> ...
298 /// ```
299 ///
300 /// may fold into:
301 ///
302 /// ```mlir
303 ///   %2 = consumer %0 ... : memref<8x16xf32> ...
304 /// ```
305 ///
306 /// Example 2:
/// ```mlir
308 ///   %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
309 ///          to memref<?x?xf32>
310 ///   consumer %1 : memref<?x?xf32> ...
311 /// ```
312 ///
313 /// may fold into:
314 ///
/// ```mlir
316 ///   consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
317 /// ```
318 bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
319   MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
320   MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();
321 
322   // Requires ranked MemRefType.
323   if (!sourceType || !resultType)
324     return false;
325 
326   // Requires same elemental type.
327   if (sourceType.getElementType() != resultType.getElementType())
328     return false;
329 
330   // Requires same rank.
331   if (sourceType.getRank() != resultType.getRank())
332     return false;
333 
334   // Only fold casts between strided memref forms.
335   int64_t sourceOffset, resultOffset;
336   SmallVector<int64_t, 4> sourceStrides, resultStrides;
337   if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
338       failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
339     return false;
340 
341   // If cast is towards more static sizes along any dimension, don't fold.
342   for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
343     auto ss = std::get<0>(it), st = std::get<1>(it);
344     if (ss != st)
345       if (MemRefType::isDynamic(ss) && !MemRefType::isDynamic(st))
346         return false;
347   }
348 
  // If the cast is towards a more static offset, don't fold.
350   if (sourceOffset != resultOffset)
351     if (MemRefType::isDynamicStrideOrOffset(sourceOffset) &&
352         !MemRefType::isDynamicStrideOrOffset(resultOffset))
353       return false;
354 
355   // If cast is towards more static strides along any dimension, don't fold.
356   for (auto it : llvm::zip(sourceStrides, resultStrides)) {
357     auto ss = std::get<0>(it), st = std::get<1>(it);
358     if (ss != st)
359       if (MemRefType::isDynamicStrideOrOffset(ss) &&
360           !MemRefType::isDynamicStrideOrOffset(st))
361         return false;
362   }
363 
364   return true;
365 }
366 
367 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
368   if (inputs.size() != 1 || outputs.size() != 1)
369     return false;
370   Type a = inputs.front(), b = outputs.front();
371   auto aT = a.dyn_cast<MemRefType>();
372   auto bT = b.dyn_cast<MemRefType>();
373 
374   auto uaT = a.dyn_cast<UnrankedMemRefType>();
375   auto ubT = b.dyn_cast<UnrankedMemRefType>();
376 
377   if (aT && bT) {
378     if (aT.getElementType() != bT.getElementType())
379       return false;
380     if (aT.getLayout() != bT.getLayout()) {
381       int64_t aOffset, bOffset;
382       SmallVector<int64_t, 4> aStrides, bStrides;
383       if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
384           failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
385           aStrides.size() != bStrides.size())
386         return false;
387 
388       // Strides along a dimension/offset are compatible if the value in the
389       // source memref is static and the value in the target memref is the
390       // same. They are also compatible if either one is dynamic (see
391       // description of MemRefCastOp for details).
392       auto checkCompatible = [](int64_t a, int64_t b) {
393         return (a == MemRefType::getDynamicStrideOrOffset() ||
394                 b == MemRefType::getDynamicStrideOrOffset() || a == b);
395       };
396       if (!checkCompatible(aOffset, bOffset))
397         return false;
398       for (auto aStride : enumerate(aStrides))
399         if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
400           return false;
401     }
402     if (aT.getMemorySpace() != bT.getMemorySpace())
403       return false;
404 
405     // They must have the same rank, and any specified dimensions must match.
406     if (aT.getRank() != bT.getRank())
407       return false;
408 
409     for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
410       int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
411       if (aDim != -1 && bDim != -1 && aDim != bDim)
412         return false;
413     }
414     return true;
415   } else {
416     if (!aT && !uaT)
417       return false;
418     if (!bT && !ubT)
419       return false;
420     // Unranked to unranked casting is unsupported
421     if (uaT && ubT)
422       return false;
423 
424     auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
425     auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
426     if (aEltType != bEltType)
427       return false;
428 
429     auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
430     auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
431     if (aMemSpace != bMemSpace)
432       return false;
433 
434     return true;
435   }
436 
437   return false;
438 }
439 
440 OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
441   return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
442 }
443 
444 //===----------------------------------------------------------------------===//
445 // DeallocOp
446 //===----------------------------------------------------------------------===//
447 
448 LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
449                               SmallVectorImpl<OpFoldResult> &results) {
450   /// dealloc(memrefcast) -> dealloc
451   return foldMemRefCast(*this);
452 }
453 
454 //===----------------------------------------------------------------------===//
455 // DimOp
456 //===----------------------------------------------------------------------===//
457 
458 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
459                   int64_t index) {
460   auto loc = result.location;
461   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
462   build(builder, result, source, indexValue);
463 }
464 
465 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
466                   Value index) {
467   auto indexTy = builder.getIndexType();
468   build(builder, result, indexTy, source, index);
469 }
470 
471 Optional<int64_t> DimOp::getConstantIndex() {
472   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
473     return constantOp.getValue().cast<IntegerAttr>().getInt();
474   return {};
475 }
476 
477 static LogicalResult verify(DimOp op) {
478   // Assume unknown index to be in range.
479   Optional<int64_t> index = op.getConstantIndex();
480   if (!index.hasValue())
481     return success();
482 
  // Check that the constant index is not known to be out of range.
484   auto type = op.source().getType();
485   if (auto memrefType = type.dyn_cast<MemRefType>()) {
486     if (index.getValue() >= memrefType.getRank())
487       return op.emitOpError("index is out of range");
488   } else if (type.isa<UnrankedMemRefType>()) {
489     // Assume index to be in range.
490   } else {
491     llvm_unreachable("expected operand with memref type");
492   }
493   return success();
494 }
495 
/// Return a map whose keys are the elements of `vals` and whose values are the
/// number of occurrences of each element. Use std::map, since the `vals` here
/// are strides, and the dynamic stride value is the same as the tombstone value
/// for `DenseMap<int64_t>`.
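/// For illustration, the input strides {8, 1, 8} would map to {1: 1, 8: 2}.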
500 static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
501   std::map<int64_t, unsigned> numOccurences;
502   for (auto val : vals)
503     numOccurences[val]++;
504   return numOccurences;
505 }
506 
/// Given `originalType` and a `reducedType` whose shape is assumed to be a
/// subset of `originalType` with some `1` entries erased, return the set of
/// indices that specifies which entries of the original shape are dropped to
/// obtain the reduced shape.
/// This accounts for cases where there are multiple unit dims, but only a
/// subset of them is dropped. For MemRefTypes these can be disambiguated using
/// the strides: if a dimension is dropped, its stride must be dropped too.
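///
/// For illustration (hypothetical types), for a rank-reducing subview with
/// sizes [1, 4, 1, 8] going from memref<1x4x1x8xf32> (strides [32, 8, 8, 1])
/// to memref<4x8xf32> (strides [8, 1]), dims 0 and 2 are reported as dropped:
/// stride 32 does not appear in the result at all, and stride 8 appears one
/// time fewer than in the source.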
514 static llvm::Optional<llvm::SmallDenseSet<unsigned>>
515 computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
516                                ArrayRef<OpFoldResult> sizes) {
517   llvm::SmallDenseSet<unsigned> unusedDims;
518   if (originalType.getRank() == reducedType.getRank())
519     return unusedDims;
520 
521   for (auto dim : llvm::enumerate(sizes))
522     if (auto attr = dim.value().dyn_cast<Attribute>())
523       if (attr.cast<IntegerAttr>().getInt() == 1)
524         unusedDims.insert(dim.index());
525 
526   SmallVector<int64_t> originalStrides, candidateStrides;
527   int64_t originalOffset, candidateOffset;
528   if (failed(
529           getStridesAndOffset(originalType, originalStrides, originalOffset)) ||
530       failed(
531           getStridesAndOffset(reducedType, candidateStrides, candidateOffset)))
532     return llvm::None;
533 
  // For memrefs, a dimension is truly dropped if its corresponding stride is
  // also dropped. This is particularly important when more than one of the dims
  // is 1. Track the number of occurrences of the strides in the original type
  // and the candidate type. For each unused dim, that stride should not be
  // present in the candidate type. Note that there could be multiple dimensions
  // that have the same size. We don't need to figure out exactly which dim
  // corresponds to which stride; we just need to verify that the number of
  // repetitions of a stride in the original + the number of unused dims with
  // that stride == the number of repetitions of that stride in the candidate.
543   std::map<int64_t, unsigned> currUnaccountedStrides =
544       getNumOccurences(originalStrides);
545   std::map<int64_t, unsigned> candidateStridesNumOccurences =
546       getNumOccurences(candidateStrides);
547   llvm::SmallDenseSet<unsigned> prunedUnusedDims;
548   for (unsigned dim : unusedDims) {
549     int64_t originalStride = originalStrides[dim];
550     if (currUnaccountedStrides[originalStride] >
551         candidateStridesNumOccurences[originalStride]) {
552       // This dim can be treated as dropped.
553       currUnaccountedStrides[originalStride]--;
554       continue;
555     }
556     if (currUnaccountedStrides[originalStride] ==
557         candidateStridesNumOccurences[originalStride]) {
      // All occurrences of this stride are still present in the candidate
      // type, so this dim is not actually dropped.
559       prunedUnusedDims.insert(dim);
560       continue;
561     }
562     if (currUnaccountedStrides[originalStride] <
563         candidateStridesNumOccurences[originalStride]) {
      // This should never happen. We can't have a stride in the reduced-rank
      // type that wasn't in the original one.
566       return llvm::None;
567     }
568   }
569 
570   for (auto prunedDim : prunedUnusedDims)
571     unusedDims.erase(prunedDim);
572   if (unusedDims.size() + reducedType.getRank() != originalType.getRank())
573     return llvm::None;
574   return unusedDims;
575 }
576 
577 llvm::SmallDenseSet<unsigned> SubViewOp::getDroppedDims() {
578   MemRefType sourceType = getSourceType();
579   MemRefType resultType = getType();
580   llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
581       computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
582   assert(unusedDims && "unable to find unused dims of subview");
583   return *unusedDims;
584 }
585 
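// For illustration (hypothetical IR), `memref.dim %m, %c0 : memref<8x?xf32>`
// with `%c0 = arith.constant 0 : index` folds to the index constant 8, while
// a `dim` querying the dynamic extent of an alloc, alloca, or view folds to
// the corresponding dynamic size operand of that op.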
586 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
587   // All forms of folding require a known index.
588   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
589   if (!index)
590     return {};
591 
592   // Folding for unranked types (UnrankedMemRefType) is not supported.
593   auto memrefType = source().getType().dyn_cast<MemRefType>();
594   if (!memrefType)
595     return {};
596 
597   // Fold if the shape extent along the given index is known.
598   if (!memrefType.isDynamicDim(index.getInt())) {
599     Builder builder(getContext());
600     return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
601   }
602 
603   // The size at the given index is now known to be a dynamic size.
604   unsigned unsignedIndex = index.getValue().getZExtValue();
605 
606   // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
607   Operation *definingOp = source().getDefiningOp();
608 
609   if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
610     return *(alloc.getDynamicSizes().begin() +
611              memrefType.getDynamicDimIndex(unsignedIndex));
612 
613   if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp))
614     return *(alloca.getDynamicSizes().begin() +
615              memrefType.getDynamicDimIndex(unsignedIndex));
616 
617   if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
618     return *(view.getDynamicSizes().begin() +
619              memrefType.getDynamicDimIndex(unsignedIndex));
620 
621   if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) {
622     llvm::SmallDenseSet<unsigned> unusedDims = subview.getDroppedDims();
623     unsigned resultIndex = 0;
624     unsigned sourceRank = subview.getSourceType().getRank();
625     unsigned sourceIndex = 0;
626     for (auto i : llvm::seq<unsigned>(0, sourceRank)) {
627       if (unusedDims.count(i))
628         continue;
629       if (resultIndex == unsignedIndex) {
630         sourceIndex = i;
631         break;
632       }
633       resultIndex++;
634     }
635     assert(subview.isDynamicSize(sourceIndex) &&
636            "expected dynamic subview size");
637     return subview.getDynamicSize(sourceIndex);
638   }
639 
640   if (auto sizeInterface =
641           dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
642     assert(sizeInterface.isDynamicSize(unsignedIndex) &&
643            "Expected dynamic subview size");
644     return sizeInterface.getDynamicSize(unsignedIndex);
645   }
646 
647   // dim(memrefcast) -> dim
648   if (succeeded(foldMemRefCast(*this)))
649     return getResult();
650 
651   return {};
652 }
653 
654 namespace {
/// Fold a `dim` of a `memref.reshape` operation into a `load` from the
/// reshape's shape operand.
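///
/// For illustration (hypothetical IR),
///
/// ```mlir
///   %r = memref.reshape %src(%shape)
///       : (memref<4x4xf32>, memref<2xindex>) -> memref<?x?xf32>
///   %d = memref.dim %r, %c1 : memref<?x?xf32>
/// ```
///
/// may be rewritten so that %d is loaded directly from the shape operand:
/// `%d = memref.load %shape[%c1] : memref<2xindex>`.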
657 struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
658   using OpRewritePattern<DimOp>::OpRewritePattern;
659 
660   LogicalResult matchAndRewrite(DimOp dim,
661                                 PatternRewriter &rewriter) const override {
662     auto reshape = dim.source().getDefiningOp<ReshapeOp>();
663 
664     if (!reshape)
665       return failure();
666 
667     // Place the load directly after the reshape to ensure that the shape memref
668     // was not mutated.
669     rewriter.setInsertionPointAfter(reshape);
670     Location loc = dim.getLoc();
671     Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index());
672     if (load.getType() != dim.getType())
673       load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load);
674     rewriter.replaceOp(dim, load);
675     return success();
676   }
677 };
678 
679 } // end anonymous namespace.
680 
681 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
682                                         MLIRContext *context) {
683   results.add<DimOfMemRefReshape>(context);
684 }
685 
686 // ---------------------------------------------------------------------------
687 // DmaStartOp
688 // ---------------------------------------------------------------------------
689 
690 void DmaStartOp::build(OpBuilder &builder, OperationState &result,
691                        Value srcMemRef, ValueRange srcIndices, Value destMemRef,
692                        ValueRange destIndices, Value numElements,
693                        Value tagMemRef, ValueRange tagIndices, Value stride,
694                        Value elementsPerStride) {
695   result.addOperands(srcMemRef);
696   result.addOperands(srcIndices);
697   result.addOperands(destMemRef);
698   result.addOperands(destIndices);
699   result.addOperands({numElements, tagMemRef});
700   result.addOperands(tagIndices);
701   if (stride)
702     result.addOperands({stride, elementsPerStride});
703 }
704 
705 static void print(OpAsmPrinter &p, DmaStartOp op) {
706   p << " " << op.getSrcMemRef() << '[' << op.getSrcIndices() << "], "
707     << op.getDstMemRef() << '[' << op.getDstIndices() << "], "
708     << op.getNumElements() << ", " << op.getTagMemRef() << '['
709     << op.getTagIndices() << ']';
710   if (op.isStrided())
711     p << ", " << op.getStride() << ", " << op.getNumElementsPerStride();
712 
713   p.printOptionalAttrDict(op->getAttrs());
714   p << " : " << op.getSrcMemRef().getType() << ", "
715     << op.getDstMemRef().getType() << ", " << op.getTagMemRef().getType();
716 }
717 
718 // Parse DmaStartOp.
719 // Ex:
720 //   %dma_id = dma_start %src[%i, %j], %dst[%k, %l], %size,
721 //                       %tag[%index], %stride, %num_elt_per_stride :
722 //                     : memref<3076 x f32, 0>,
723 //                       memref<1024 x f32, 2>,
724 //                       memref<1 x i32>
725 //
726 static ParseResult parseDmaStartOp(OpAsmParser &parser,
727                                    OperationState &result) {
728   OpAsmParser::OperandType srcMemRefInfo;
729   SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
730   OpAsmParser::OperandType dstMemRefInfo;
731   SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
732   OpAsmParser::OperandType numElementsInfo;
733   OpAsmParser::OperandType tagMemrefInfo;
734   SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
735   SmallVector<OpAsmParser::OperandType, 2> strideInfo;
736 
737   SmallVector<Type, 3> types;
738   auto indexType = parser.getBuilder().getIndexType();
739 
740   // Parse and resolve the following list of operands:
741   // *) source memref followed by its indices (in square brackets).
742   // *) destination memref followed by its indices (in square brackets).
  // *) number of elements to transfer.
744   if (parser.parseOperand(srcMemRefInfo) ||
745       parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
746       parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
747       parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
748       parser.parseComma() || parser.parseOperand(numElementsInfo) ||
749       parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
750       parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
751     return failure();
752 
753   // Parse optional stride and elements per stride.
754   if (parser.parseTrailingOperandList(strideInfo))
755     return failure();
756 
757   bool isStrided = strideInfo.size() == 2;
758   if (!strideInfo.empty() && !isStrided) {
759     return parser.emitError(parser.getNameLoc(),
760                             "expected two stride related operands");
761   }
762 
763   if (parser.parseColonTypeList(types))
764     return failure();
765   if (types.size() != 3)
766     return parser.emitError(parser.getNameLoc(), "fewer/more types expected");
767 
768   if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
769       parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
770       parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
771       parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
772       // size should be an index.
773       parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
774       parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
775       // tag indices should be index.
776       parser.resolveOperands(tagIndexInfos, indexType, result.operands))
777     return failure();
778 
779   if (isStrided) {
780     if (parser.resolveOperands(strideInfo, indexType, result.operands))
781       return failure();
782   }
783 
784   return success();
785 }
786 
787 static LogicalResult verify(DmaStartOp op) {
788   unsigned numOperands = op.getNumOperands();
789 
790   // Mandatory non-variadic operands are: src memref, dst memref, tag memref and
791   // the number of elements.
792   if (numOperands < 4)
793     return op.emitOpError("expected at least 4 operands");
794 
795   // Check types of operands. The order of these calls is important: the later
796   // calls rely on some type properties to compute the operand position.
797   // 1. Source memref.
798   if (!op.getSrcMemRef().getType().isa<MemRefType>())
799     return op.emitOpError("expected source to be of memref type");
800   if (numOperands < op.getSrcMemRefRank() + 4)
801     return op.emitOpError()
802            << "expected at least " << op.getSrcMemRefRank() + 4 << " operands";
803   if (!op.getSrcIndices().empty() &&
804       !llvm::all_of(op.getSrcIndices().getTypes(),
805                     [](Type t) { return t.isIndex(); }))
806     return op.emitOpError("expected source indices to be of index type");
807 
808   // 2. Destination memref.
809   if (!op.getDstMemRef().getType().isa<MemRefType>())
810     return op.emitOpError("expected destination to be of memref type");
811   unsigned numExpectedOperands =
812       op.getSrcMemRefRank() + op.getDstMemRefRank() + 4;
813   if (numOperands < numExpectedOperands)
814     return op.emitOpError()
815            << "expected at least " << numExpectedOperands << " operands";
816   if (!op.getDstIndices().empty() &&
817       !llvm::all_of(op.getDstIndices().getTypes(),
818                     [](Type t) { return t.isIndex(); }))
819     return op.emitOpError("expected destination indices to be of index type");
820 
821   // 3. Number of elements.
822   if (!op.getNumElements().getType().isIndex())
823     return op.emitOpError("expected num elements to be of index type");
824 
825   // 4. Tag memref.
826   if (!op.getTagMemRef().getType().isa<MemRefType>())
827     return op.emitOpError("expected tag to be of memref type");
828   numExpectedOperands += op.getTagMemRefRank();
829   if (numOperands < numExpectedOperands)
830     return op.emitOpError()
831            << "expected at least " << numExpectedOperands << " operands";
832   if (!op.getTagIndices().empty() &&
833       !llvm::all_of(op.getTagIndices().getTypes(),
834                     [](Type t) { return t.isIndex(); }))
835     return op.emitOpError("expected tag indices to be of index type");
836 
837   // Optional stride-related operands must be either both present or both
838   // absent.
839   if (numOperands != numExpectedOperands &&
840       numOperands != numExpectedOperands + 2)
841     return op.emitOpError("incorrect number of operands");
842 
843   // 5. Strides.
844   if (op.isStrided()) {
845     if (!op.getStride().getType().isIndex() ||
846         !op.getNumElementsPerStride().getType().isIndex())
847       return op.emitOpError(
848           "expected stride and num elements per stride to be of type index");
849   }
850 
851   return success();
852 }
853 
854 LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
855                                SmallVectorImpl<OpFoldResult> &results) {
856   /// dma_start(memrefcast) -> dma_start
857   return foldMemRefCast(*this);
858 }
859 
860 // ---------------------------------------------------------------------------
861 // DmaWaitOp
862 // ---------------------------------------------------------------------------
863 
864 LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
865                               SmallVectorImpl<OpFoldResult> &results) {
866   /// dma_wait(memrefcast) -> dma_wait
867   return foldMemRefCast(*this);
868 }
869 
870 static LogicalResult verify(DmaWaitOp op) {
871   // Check that the number of tag indices matches the tagMemRef rank.
872   unsigned numTagIndices = op.tagIndices().size();
873   unsigned tagMemRefRank = op.getTagMemRefRank();
874   if (numTagIndices != tagMemRefRank)
875     return op.emitOpError() << "expected tagIndices to have the same number of "
876                                "elements as the tagMemRef rank, expected "
877                             << tagMemRefRank << ", but got " << numTagIndices;
878   return success();
879 }
880 
881 //===----------------------------------------------------------------------===//
882 // GlobalOp
883 //===----------------------------------------------------------------------===//
884 
885 static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
886                                                    TypeAttr type,
887                                                    Attribute initialValue) {
888   p << type;
889   if (!op.isExternal()) {
890     p << " = ";
891     if (op.isUninitialized())
892       p << "uninitialized";
893     else
894       p.printAttributeWithoutType(initialValue);
895   }
896 }
897 
898 static ParseResult
899 parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
900                                        Attribute &initialValue) {
901   Type type;
902   if (parser.parseType(type))
903     return failure();
904 
905   auto memrefType = type.dyn_cast<MemRefType>();
906   if (!memrefType || !memrefType.hasStaticShape())
907     return parser.emitError(parser.getNameLoc())
908            << "type should be static shaped memref, but got " << type;
909   typeAttr = TypeAttr::get(type);
910 
911   if (parser.parseOptionalEqual())
912     return success();
913 
914   if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
915     initialValue = UnitAttr::get(parser.getContext());
916     return success();
917   }
918 
919   Type tensorType = getTensorTypeFromMemRefType(memrefType);
920   if (parser.parseAttribute(initialValue, tensorType))
921     return failure();
922   if (!initialValue.isa<ElementsAttr>())
923     return parser.emitError(parser.getNameLoc())
924            << "initial value should be a unit or elements attribute";
925   return success();
926 }
927 
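// For illustration (hypothetical IR), globals that this verifier accepts:
//   memref.global "private" constant @c : memref<2xf32> = dense<[1.0, 2.0]>
//   memref.global @u : memref<4xi32> = uninitialized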
928 static LogicalResult verify(GlobalOp op) {
929   auto memrefType = op.type().dyn_cast<MemRefType>();
930   if (!memrefType || !memrefType.hasStaticShape())
931     return op.emitOpError("type should be static shaped memref, but got ")
932            << op.type();
933 
934   // Verify that the initial value, if present, is either a unit attribute or
935   // an elements attribute.
936   if (op.initial_value().hasValue()) {
937     Attribute initValue = op.initial_value().getValue();
938     if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
939       return op.emitOpError("initial value should be a unit or elements "
940                             "attribute, but got ")
941              << initValue;
942 
943     // Check that the type of the initial value is compatible with the type of
944     // the global variable.
945     if (initValue.isa<ElementsAttr>()) {
946       Type initType = initValue.getType();
947       Type tensorType = getTensorTypeFromMemRefType(memrefType);
948       if (initType != tensorType)
949         return op.emitOpError("initial value expected to be of type ")
950                << tensorType << ", but was of type " << initType;
951     }
952   }
953 
954   if (Optional<uint64_t> alignAttr = op.alignment()) {
955     uint64_t alignment = alignAttr.getValue();
956 
957     if (!llvm::isPowerOf2_64(alignment))
958       return op->emitError() << "alignment attribute value " << alignment
959                              << " is not a power of 2";
960   }
961 
962   // TODO: verify visibility for declarations.
963   return success();
964 }
965 
966 //===----------------------------------------------------------------------===//
967 // GetGlobalOp
968 //===----------------------------------------------------------------------===//
969 
970 LogicalResult
971 GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
  // Verify that the result type is the same as the type of the referenced
973   // memref.global op.
974   auto global =
975       symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr());
976   if (!global)
977     return emitOpError("'")
978            << name() << "' does not reference a valid global memref";
979 
980   Type resultType = result().getType();
981   if (global.type() != resultType)
982     return emitOpError("result type ")
983            << resultType << " does not match type " << global.type()
984            << " of the global memref @" << name();
985   return success();
986 }
987 
988 //===----------------------------------------------------------------------===//
989 // LoadOp
990 //===----------------------------------------------------------------------===//
991 
992 static LogicalResult verify(LoadOp op) {
993   if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
994     return op.emitOpError("incorrect number of indices for load");
995   return success();
996 }
997 
998 OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
999   /// load(memrefcast) -> load
1000   if (succeeded(foldMemRefCast(*this)))
1001     return getResult();
1002   return OpFoldResult();
1003 }
1004 
1005 //===----------------------------------------------------------------------===//
1006 // PrefetchOp
1007 //===----------------------------------------------------------------------===//
1008 
1009 static void print(OpAsmPrinter &p, PrefetchOp op) {
1010   p << " " << op.memref() << '[';
1011   p.printOperands(op.indices());
1012   p << ']' << ", " << (op.isWrite() ? "write" : "read");
1013   p << ", locality<" << op.localityHint();
1014   p << ">, " << (op.isDataCache() ? "data" : "instr");
1015   p.printOptionalAttrDict(
1016       op->getAttrs(),
1017       /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
1018   p << " : " << op.getMemRefType();
1019 }
1020 
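// Parse PrefetchOp.
// Ex (hypothetical IR):
//   memref.prefetch %0[%i, %j], read, locality<3>, data : memref<400x400xf32>
//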
1021 static ParseResult parsePrefetchOp(OpAsmParser &parser,
1022                                    OperationState &result) {
1023   OpAsmParser::OperandType memrefInfo;
1024   SmallVector<OpAsmParser::OperandType, 4> indexInfo;
1025   IntegerAttr localityHint;
1026   MemRefType type;
1027   StringRef readOrWrite, cacheType;
1028 
1029   auto indexTy = parser.getBuilder().getIndexType();
1030   auto i32Type = parser.getBuilder().getIntegerType(32);
1031   if (parser.parseOperand(memrefInfo) ||
1032       parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
1033       parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
1034       parser.parseComma() || parser.parseKeyword("locality") ||
1035       parser.parseLess() ||
1036       parser.parseAttribute(localityHint, i32Type, "localityHint",
1037                             result.attributes) ||
1038       parser.parseGreater() || parser.parseComma() ||
1039       parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
1040       parser.resolveOperand(memrefInfo, type, result.operands) ||
1041       parser.resolveOperands(indexInfo, indexTy, result.operands))
1042     return failure();
1043 
1044   if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
1045     return parser.emitError(parser.getNameLoc(),
1046                             "rw specifier has to be 'read' or 'write'");
1047   result.addAttribute(
1048       PrefetchOp::getIsWriteAttrName(),
1049       parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));
1050 
1051   if (!cacheType.equals("data") && !cacheType.equals("instr"))
1052     return parser.emitError(parser.getNameLoc(),
1053                             "cache type has to be 'data' or 'instr'");
1054 
1055   result.addAttribute(
1056       PrefetchOp::getIsDataCacheAttrName(),
1057       parser.getBuilder().getBoolAttr(cacheType.equals("data")));
1058 
1059   return success();
1060 }
1061 
1062 static LogicalResult verify(PrefetchOp op) {
1063   if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
1064     return op.emitOpError("too few indices");
1065 
1066   return success();
1067 }
1068 
1069 LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
1070                                SmallVectorImpl<OpFoldResult> &results) {
1071   // prefetch(memrefcast) -> prefetch
1072   return foldMemRefCast(*this);
1073 }
1074 
1075 //===----------------------------------------------------------------------===//
1076 // ReinterpretCastOp
1077 //===----------------------------------------------------------------------===//
1078 
1079 /// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`,
1080 /// `staticSizes` and `staticStrides` are automatically filled with
1081 /// source-memref-rank sentinel values that encode dynamic entries.
1082 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1083                               MemRefType resultType, Value source,
1084                               OpFoldResult offset, ArrayRef<OpFoldResult> sizes,
1085                               ArrayRef<OpFoldResult> strides,
1086                               ArrayRef<NamedAttribute> attrs) {
1087   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1088   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1089   dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
1090                              ShapedType::kDynamicStrideOrOffset);
1091   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1092                              ShapedType::kDynamicSize);
1093   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1094                              ShapedType::kDynamicStrideOrOffset);
1095   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1096         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1097         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1098   result.addAttributes(attrs);
1099 }
1100 
1101 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1102                               MemRefType resultType, Value source,
1103                               int64_t offset, ArrayRef<int64_t> sizes,
1104                               ArrayRef<int64_t> strides,
1105                               ArrayRef<NamedAttribute> attrs) {
1106   SmallVector<OpFoldResult> sizeValues =
1107       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1108         return b.getI64IntegerAttr(v);
1109       }));
1110   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1111       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1112         return b.getI64IntegerAttr(v);
1113       }));
1114   build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
1115         strideValues, attrs);
1116 }
1117 
1118 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1119                               MemRefType resultType, Value source, Value offset,
1120                               ValueRange sizes, ValueRange strides,
1121                               ArrayRef<NamedAttribute> attrs) {
1122   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1123       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1124   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1125       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1126   build(b, result, resultType, source, offset, sizeValues, strideValues, attrs);
1127 }
1128 
1129 // TODO: ponder whether we want to allow missing trailing sizes/strides that are
1130 // completed automatically, like we have for subview and extract_slice.
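//
// For illustration (hypothetical IR), a cast that the verifier below accepts:
//   %dst = memref.reinterpret_cast %src to
//            offset: [0], sizes: [4, 8], strides: [8, 1]
//          : memref<?xf32> to memref<4x8xf32>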
1131 static LogicalResult verify(ReinterpretCastOp op) {
1132   // The source and result memrefs should be in the same memory space.
1133   auto srcType = op.source().getType().cast<BaseMemRefType>();
1134   auto resultType = op.getType().cast<MemRefType>();
1135   if (srcType.getMemorySpace() != resultType.getMemorySpace())
1136     return op.emitError("different memory spaces specified for source type ")
1137            << srcType << " and result memref type " << resultType;
1138   if (srcType.getElementType() != resultType.getElementType())
1139     return op.emitError("different element types specified for source type ")
1140            << srcType << " and result memref type " << resultType;
1141 
1142   // Match sizes in result memref type and in static_sizes attribute.
1143   for (auto &en :
1144        llvm::enumerate(llvm::zip(resultType.getShape(),
1145                                  extractFromI64ArrayAttr(op.static_sizes())))) {
1146     int64_t resultSize = std::get<0>(en.value());
1147     int64_t expectedSize = std::get<1>(en.value());
1148     if (resultSize != expectedSize)
1149       return op.emitError("expected result type with size = ")
1150              << expectedSize << " instead of " << resultSize
1151              << " in dim = " << en.index();
1152   }
1153 
  // Match the offset and strides in the static_offsets and static_strides
  // attributes if the result memref type has an affine map specified.
1156   if (!resultType.getLayout().isIdentity()) {
1157     int64_t resultOffset;
1158     SmallVector<int64_t, 4> resultStrides;
1159     if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
1160       return failure();
1161 
1162     // Match offset in result memref type and in static_offsets attribute.
1163     int64_t expectedOffset =
1164         extractFromI64ArrayAttr(op.static_offsets()).front();
1165     if (resultOffset != expectedOffset)
1166       return op.emitError("expected result type with offset = ")
1167              << resultOffset << " instead of " << expectedOffset;
1168 
1169     // Match strides in result memref type and in static_strides attribute.
1170     for (auto &en : llvm::enumerate(llvm::zip(
1171              resultStrides, extractFromI64ArrayAttr(op.static_strides())))) {
1172       int64_t resultStride = std::get<0>(en.value());
1173       int64_t expectedStride = std::get<1>(en.value());
1174       if (resultStride != expectedStride)
1175         return op.emitError("expected result type with stride = ")
1176                << expectedStride << " instead of " << resultStride
1177                << " in dim = " << en.index();
1178     }
1179   }
1180   return success();
1181 }
1182 
1183 //===----------------------------------------------------------------------===//
1184 // Reassociative reshape ops
1185 //===----------------------------------------------------------------------===//
1186 
1187 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
1188   return getSymbolLessAffineMaps(getReassociationExprs());
1189 }
1190 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
1191   return convertReassociationIndicesToExprs(getContext(),
1192                                             getReassociationIndices());
1193 }
1194 
1195 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
1196   return getSymbolLessAffineMaps(getReassociationExprs());
1197 }
1198 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
1199   return convertReassociationIndicesToExprs(getContext(),
1200                                             getReassociationIndices());
1201 }
1202 
1203 static void print(OpAsmPrinter &p, ExpandShapeOp op) {
1204   ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
1205 }
1206 
1207 static void print(OpAsmPrinter &p, CollapseShapeOp op) {
1208   ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
1209 }
1210 
1211 /// Detect whether memref dims [dim, dim + extent) can be reshaped without
1212 /// copies.
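///
/// For illustration (hypothetical values), with sizes [4, 8, 2] and strides
/// [16, 2, 1], the band [0, 3) is reshapable since 16 == 2 * 8 and 2 == 1 * 2;
/// with strides [32, 2, 1] it is not, because of the gap between dims 0 and 1.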
1213 static bool isReshapableDimBand(unsigned dim, unsigned extent,
1214                                 ArrayRef<int64_t> sizes,
1215                                 ArrayRef<AffineExpr> strides) {
1216   // Bands of extent one can be reshaped, as they are not reshaped at all.
1217   if (extent == 1)
1218     return true;
1219   // Otherwise, the size of the first dimension needs to be known.
1220   if (ShapedType::isDynamic(sizes[dim]))
1221     return false;
1222   assert(sizes.size() == strides.size() && "mismatched ranks");
1223   // off by 1 indexing to avoid out of bounds
1224   //                       V
1225   for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) {
1226     // Only bands of static shapes are reshapable. This is due to the fact that
1227     // there is no relation between dynamic sizes and dynamic strides: we do not
1228     // have enough information to know whether a "-1" size corresponds to the
1229     // proper symbol in the AffineExpr of a stride.
1230     if (ShapedType::isDynamic(sizes[idx + 1]))
1231       return false;
1232     // TODO: Refine this by passing the proper nDims and nSymbols so we can
1233     // simplify on the fly and catch more reshapable cases.
1234     if (strides[idx] != strides[idx + 1] * sizes[idx + 1])
1235       return false;
1236   }
1237   return true;
1238 }
1239 
1240 /// Compute the MemRefType obtained by applying the `reassociation` (which is
1241 /// expected to be valid) to `type`.
/// If `type` is a contiguous MemRefType, this always produces a contiguous
/// MemRefType.
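///
/// For illustration (hypothetical types), collapsing memref<4x8xf32> with the
/// single reassociation group [0, 1] yields memref<32xf32>.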
1244 static MemRefType
1245 computeReshapeCollapsedType(MemRefType type,
1246                             ArrayRef<AffineMap> reassociation) {
1247   auto sizes = type.getShape();
1248   AffineExpr offset;
1249   SmallVector<AffineExpr, 4> strides;
1250   auto status = getStridesAndOffset(type, strides, offset);
1251   (void)status;
1252   assert(succeeded(status) && "expected strided memref");
1253 
1254   SmallVector<int64_t, 4> newSizes;
1255   newSizes.reserve(reassociation.size());
1256   SmallVector<AffineExpr, 4> newStrides;
1257   newStrides.reserve(reassociation.size());
1258 
1259   // Use the fact that reassociation is valid to simplify the logic: only use
1260   // each map's rank.
1261   assert(isReassociationValid(reassociation) && "invalid reassociation");
1262   unsigned currentDim = 0;
1263   for (AffineMap m : reassociation) {
1264     unsigned dim = m.getNumResults();
1265     int64_t size = 1;
1266     AffineExpr stride = strides[currentDim + dim - 1];
1267     if (!isReshapableDimBand(currentDim, dim, sizes, strides)) {
1268       size = ShapedType::kDynamicSize;
1269       stride = AffineExpr();
1270     } else {
1271       for (unsigned d = 0; d < dim; ++d)
1272         size *= sizes[currentDim + d];
1273     }
1274     newSizes.push_back(size);
1275     newStrides.push_back(stride);
1276     currentDim += dim;
1277   }
1278 
1279   // Early-exit: if `type` is contiguous, the result must be contiguous.
1280   if (canonicalizeStridedLayout(type).getLayout().isIdentity())
1281     return MemRefType::Builder(type).setShape(newSizes).setLayout({});
1282 
1283   // Convert back to int64_t because we don't have enough information to create
1284   // new strided layouts from AffineExpr only. This corresponds to a case where
1285   // copies may be necessary.
1286   int64_t intOffset = ShapedType::kDynamicStrideOrOffset;
1287   if (auto o = offset.dyn_cast<AffineConstantExpr>())
1288     intOffset = o.getValue();
1289   SmallVector<int64_t, 4> intStrides;
1290   intStrides.reserve(strides.size());
1291   for (auto stride : newStrides) {
1292     if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>())
1293       intStrides.push_back(cst.getValue());
1294     else
1295       intStrides.push_back(ShapedType::kDynamicStrideOrOffset);
1296   }
1297   auto layout =
1298       makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext());
1299   return canonicalizeStridedLayout(
1300       MemRefType::Builder(type).setShape(newSizes).setLayout(
1301           AffineMapAttr::get(layout)));
1302 }
1303 
1304 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1305                           ArrayRef<ReassociationIndices> reassociation,
1306                           ArrayRef<NamedAttribute> attrs) {
1307   auto memRefType = src.getType().cast<MemRefType>();
1308   auto resultType = computeReshapeCollapsedType(
1309       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1310                       b.getContext(), reassociation)));
1311   build(b, result, resultType, src, attrs);
1312   result.addAttribute(getReassociationAttrName(),
1313                       getReassociationIndicesAttribute(b, reassociation));
1314 }
1315 
1316 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1317                             ArrayRef<ReassociationIndices> reassociation,
1318                             ArrayRef<NamedAttribute> attrs) {
1319   auto memRefType = src.getType().cast<MemRefType>();
1320   auto resultType = computeReshapeCollapsedType(
1321       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1322                       b.getContext(), reassociation)));
1323   build(b, result, resultType, src, attrs);
1324   result.addAttribute(getReassociationAttrName(),
1325                       getReassociationIndicesAttribute(b, reassociation));
1326 }
1327 
1328 template <typename ReshapeOp,
1329           bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value>
1330 static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType,
1331                                      MemRefType collapsedType) {
1332   if (failed(
1333           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
1334     return failure();
1335   auto maps = op.getReassociationMaps();
1336   MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps);
1337   if (collapsedType != expectedType)
1338     return op.emitOpError("expected collapsed type to be ")
1339            << expectedType << ", but got " << collapsedType;
1340   return success();
1341 }
1342 
1343 static LogicalResult verify(ExpandShapeOp op) {
1344   return verifyReshapeOp(op, op.getResultType(), op.getSrcType());
1345 }
1346 
1347 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1348                                                 MLIRContext *context) {
1349   results.add<CollapseReshapeOps<ExpandShapeOp>,
1350               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context);
1351 }
1352 
1353 static LogicalResult verify(CollapseShapeOp op) {
1354   return verifyReshapeOp(op, op.getSrcType(), op.getResultType());
1355 }
1356 
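/// Pattern to fold a memref.cast into a consuming memref.collapse_shape when
/// `canFoldIntoConsumerOp` is true. Illustrative sketch:
/// ```
///   %0 = memref.cast %arg : memref<4x5x6xf32> to memref<?x?x?xf32>
///   %1 = memref.collapse_shape %0 [[0, 1], [2]] : ...
/// ```
/// is rewritten into a collapse_shape of %arg directly; a memref.cast back to
/// the original result type is inserted only when the result types differ.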
1357 struct CollapseShapeOpMemRefCastFolder
1358     : public OpRewritePattern<CollapseShapeOp> {
1359 public:
1360   using OpRewritePattern<CollapseShapeOp>::OpRewritePattern;
1361 
1362   LogicalResult matchAndRewrite(CollapseShapeOp op,
1363                                 PatternRewriter &rewriter) const override {
1364     auto cast = op.getOperand().getDefiningOp<CastOp>();
1365     if (!cast)
1366       return failure();
1367 
1368     if (!CastOp::canFoldIntoConsumerOp(cast))
1369       return failure();
1370 
1371     Type newResultType = computeReshapeCollapsedType(
1372         cast.getOperand().getType().cast<MemRefType>(),
1373         op.getReassociationMaps());
1374 
1375     if (newResultType == op.getResultType()) {
1376       rewriter.updateRootInPlace(
1377           op, [&]() { op.srcMutable().assign(cast.source()); });
1378     } else {
1379       Value newOp = rewriter.create<CollapseShapeOp>(
1380           op->getLoc(), cast.source(), op.getReassociationIndices());
1381       rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
1382     }
1383     return success();
1384   }
1385 };
1386 
1387 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1388                                                   MLIRContext *context) {
1389   results.add<CollapseReshapeOps<CollapseShapeOp>,
1390               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
1391               CollapseShapeOpMemRefCastFolder>(context);
1392 }
1393 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
1394   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
1395 }
1396 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
1397   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
1398 }
1399 
1400 //===----------------------------------------------------------------------===//
1401 // ReshapeOp
1402 //===----------------------------------------------------------------------===//
1403 
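// A valid instance of the op verified here looks like (illustrative):
//   %dst = memref.reshape %src(%shape)
//       : (memref<4x5xf32>, memref<1xi32>) -> memref<20xf32>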
1404 static LogicalResult verify(ReshapeOp op) {
1405   Type operandType = op.source().getType();
1406   Type resultType = op.result().getType();
1407 
1408   Type operandElementType = operandType.cast<ShapedType>().getElementType();
1409   Type resultElementType = resultType.cast<ShapedType>().getElementType();
1410   if (operandElementType != resultElementType)
1411     return op.emitOpError("element types of source and destination memref "
1412                           "types should be the same");
1413 
1414   if (auto operandMemRefType = operandType.dyn_cast<MemRefType>())
1415     if (!operandMemRefType.getLayout().isIdentity())
1416       return op.emitOpError(
1417           "source memref type should have identity affine map");
1418 
1419   int64_t shapeSize = op.shape().getType().cast<MemRefType>().getDimSize(0);
1420   auto resultMemRefType = resultType.dyn_cast<MemRefType>();
1421   if (resultMemRefType) {
1422     if (!resultMemRefType.getLayout().isIdentity())
1423       return op.emitOpError(
1424           "result memref type should have identity affine map");
1425     if (shapeSize == ShapedType::kDynamicSize)
1426       return op.emitOpError("cannot use shape operand with dynamic length to "
1427                             "reshape to statically-ranked memref type");
1428     if (shapeSize != resultMemRefType.getRank())
1429       return op.emitOpError(
1430           "length of shape operand differs from the result's memref rank");
1431   }
1432   return success();
1433 }
1434 
1435 //===----------------------------------------------------------------------===//
1436 // StoreOp
1437 //===----------------------------------------------------------------------===//
1438 
1439 static LogicalResult verify(StoreOp op) {
1440   if (op.getNumOperands() != 2 + op.getMemRefType().getRank())
1441     return op.emitOpError("store index operand count not equal to memref rank");
1442 
1443   return success();
1444 }
1445 
1446 LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
1447                             SmallVectorImpl<OpFoldResult> &results) {
1448   /// store(memrefcast) -> store
1449   return foldMemRefCast(*this, getValueToStore());
1450 }
1451 
1452 //===----------------------------------------------------------------------===//
1453 // SubViewOp
1454 //===----------------------------------------------------------------------===//
1455 
1456 namespace {
1457 /// Helpers to write more idiomatic operations.
1458 namespace saturated_arith {
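// The wrapper below saturates on the dynamic sentinel: if either operand of
// its + or * is ShapedType::kDynamicStrideOrOffset, the result is that
// sentinel as well, e.g. Wrapper(4) * ShapedType::kDynamicStrideOrOffset
// remains dynamic.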
1459 struct Wrapper {
1460   explicit Wrapper(int64_t v) : v(v) {}
1461   operator int64_t() { return v; }
1462   int64_t v;
1463 };
1464 Wrapper operator+(Wrapper a, int64_t b) {
1465   if (ShapedType::isDynamicStrideOrOffset(a) ||
1466       ShapedType::isDynamicStrideOrOffset(b))
1467     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1468   return Wrapper(a.v + b);
1469 }
1470 Wrapper operator*(Wrapper a, int64_t b) {
1471   if (ShapedType::isDynamicStrideOrOffset(a) ||
1472       ShapedType::isDynamicStrideOrOffset(b))
1473     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1474   return Wrapper(a.v * b);
1475 }
1476 } // end namespace saturated_arith
1477 } // end namespace
1478 
1479 /// A subview result type can be fully inferred from the source type and the
1480 /// static representation of offsets, sizes and strides. Special sentinels
1481 /// encode the dynamic case.
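/// Illustrative sketch: for a `memref<8x16x4xf32>` source (strides [64, 4, 1],
/// offset 0) with offsets [3, 4, 2], sizes [4, 4, 4] and strides [1, 1, 1],
/// the inferred result has sizes 4x4x4, strides [64, 4, 1] and offset
/// 3 * 64 + 4 * 4 + 2 * 1 = 210.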
1482 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1483                                 ArrayRef<int64_t> leadingStaticOffsets,
1484                                 ArrayRef<int64_t> leadingStaticSizes,
1485                                 ArrayRef<int64_t> leadingStaticStrides) {
1486   // A subview may specify only a leading subset of offset/sizes/strides in
1487   // which case we complete with offset=0, sizes from memref type and strides=1.
1488   unsigned rank = sourceMemRefType.getRank();
1489   assert(leadingStaticOffsets.size() <= rank &&
1490          "unexpected leadingStaticOffsets overflow");
1491   assert(leadingStaticSizes.size() <= rank &&
1492          "unexpected leadingStaticSizes overflow");
1493   assert(leadingStaticStrides.size() <= rank &&
1494          "unexpected leadingStaticStrides overflow");
1495   auto staticOffsets = llvm::to_vector<4>(leadingStaticOffsets);
1496   auto staticSizes = llvm::to_vector<4>(leadingStaticSizes);
1497   auto staticStrides = llvm::to_vector<4>(leadingStaticStrides);
1498   unsigned numTrailingOffsets = rank - staticOffsets.size();
1499   unsigned numTrailingSizes = rank - staticSizes.size();
1500   unsigned numTrailingStrides = rank - staticStrides.size();
1501   staticOffsets.append(numTrailingOffsets, 0);
1502   llvm::append_range(staticSizes,
1503                      sourceMemRefType.getShape().take_back(numTrailingSizes));
1504   staticStrides.append(numTrailingStrides, 1);
1505 
1506   // Extract source offset and strides.
1507   int64_t sourceOffset;
1508   SmallVector<int64_t, 4> sourceStrides;
1509   auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
1510   assert(succeeded(res) && "SubViewOp expected strided memref type");
1511   (void)res;
1512 
1513   // Compute target offset whose value is:
1514   //   `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
1515   int64_t targetOffset = sourceOffset;
1516   for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
1517     auto staticOffset = std::get<0>(it), sourceStride = std::get<1>(it);
1518     using namespace saturated_arith;
1519     targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * sourceStride;
1520   }
1521 
1522   // Compute target stride whose value is:
1523   //   `sourceStrides_i * staticStrides_i`.
1524   SmallVector<int64_t, 4> targetStrides;
1525   targetStrides.reserve(staticOffsets.size());
1526   for (auto it : llvm::zip(sourceStrides, staticStrides)) {
1527     auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
1528     using namespace saturated_arith;
1529     targetStrides.push_back(Wrapper(sourceStride) * staticStride);
1530   }
1531 
1532   // The type is now known.
1533   return MemRefType::get(
1534       staticSizes, sourceMemRefType.getElementType(),
1535       makeStridedLinearLayoutMap(targetStrides, targetOffset,
1536                                  sourceMemRefType.getContext()),
1537       sourceMemRefType.getMemorySpace());
1538 }
1539 
1540 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1541                                 ArrayRef<OpFoldResult> leadingStaticOffsets,
1542                                 ArrayRef<OpFoldResult> leadingStaticSizes,
1543                                 ArrayRef<OpFoldResult> leadingStaticStrides) {
1544   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1545   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1546   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
1547                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
1548   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
1549                              ShapedType::kDynamicSize);
1550   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
1551                              staticStrides, ShapedType::kDynamicStrideOrOffset);
1552   return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1553                                     staticSizes, staticStrides);
1554 }
1555 
1556 Type SubViewOp::inferRankReducedResultType(
1557     unsigned resultRank, MemRefType sourceMemRefType,
1558     ArrayRef<int64_t> leadingStaticOffsets,
1559     ArrayRef<int64_t> leadingStaticSizes,
1560     ArrayRef<int64_t> leadingStaticStrides) {
1561   auto inferredType =
1562       inferResultType(sourceMemRefType, leadingStaticOffsets,
1563                       leadingStaticSizes, leadingStaticStrides)
1564           .cast<MemRefType>();
1565   assert(inferredType.getRank() >= resultRank &&
         "resultRank cannot exceed the rank of the inferred type");
1566   int rankDiff = inferredType.getRank() - resultRank;
1567   if (rankDiff > 0) {
1568     auto shape = inferredType.getShape();
1569     llvm::SmallDenseSet<unsigned> dimsToProject;
1570     mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
1571     SmallVector<int64_t> projectedShape;
1572     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
1573       if (!dimsToProject.contains(pos))
1574         projectedShape.push_back(shape[pos]);
1575 
1576     AffineMap map = inferredType.getLayout().getAffineMap();
1577     if (!map.isIdentity())
1578       map = getProjectedMap(map, dimsToProject);
1579     inferredType =
1580         MemRefType::get(projectedShape, inferredType.getElementType(), map,
1581                         inferredType.getMemorySpace());
1582   }
1583   return inferredType;
1584 }
1585 
1586 Type SubViewOp::inferRankReducedResultType(
1587     unsigned resultRank, MemRefType sourceMemRefType,
1588     ArrayRef<OpFoldResult> leadingStaticOffsets,
1589     ArrayRef<OpFoldResult> leadingStaticSizes,
1590     ArrayRef<OpFoldResult> leadingStaticStrides) {
1591   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1592   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1593   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
1594                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
1595   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
1596                              ShapedType::kDynamicSize);
1597   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
1598                              staticStrides, ShapedType::kDynamicStrideOrOffset);
1599   return SubViewOp::inferRankReducedResultType(
1600       resultRank, sourceMemRefType, staticOffsets, staticSizes,
1601       staticStrides);
1602 }
1603 // Build a SubViewOp with mixed static and dynamic entries and custom result
1604 // type. If the type passed is nullptr, it is inferred.
1605 void SubViewOp::build(OpBuilder &b, OperationState &result,
1606                       MemRefType resultType, Value source,
1607                       ArrayRef<OpFoldResult> offsets,
1608                       ArrayRef<OpFoldResult> sizes,
1609                       ArrayRef<OpFoldResult> strides,
1610                       ArrayRef<NamedAttribute> attrs) {
1611   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1612   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1613   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1614                              ShapedType::kDynamicStrideOrOffset);
1615   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1616                              ShapedType::kDynamicSize);
1617   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1618                              ShapedType::kDynamicStrideOrOffset);
1619   auto sourceMemRefType = source.getType().cast<MemRefType>();
1620   // Structuring the implementation this way avoids duplicate builder logic.
1621   if (!resultType) {
1622     resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1623                                             staticSizes, staticStrides)
1624                      .cast<MemRefType>();
1625   }
1626   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1627         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1628         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1629   result.addAttributes(attrs);
1630 }
1631 
1632 // Build a SubViewOp with mixed static and dynamic entries and inferred result
1633 // type.
1634 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1635                       ArrayRef<OpFoldResult> offsets,
1636                       ArrayRef<OpFoldResult> sizes,
1637                       ArrayRef<OpFoldResult> strides,
1638                       ArrayRef<NamedAttribute> attrs) {
1639   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1640 }
1641 
1642 // Build a SubViewOp with static entries and inferred result type.
1643 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1644                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1645                       ArrayRef<int64_t> strides,
1646                       ArrayRef<NamedAttribute> attrs) {
1647   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1648       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1649         return b.getI64IntegerAttr(v);
1650       }));
1651   SmallVector<OpFoldResult> sizeValues =
1652       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1653         return b.getI64IntegerAttr(v);
1654       }));
1655   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1656       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1657         return b.getI64IntegerAttr(v);
1658       }));
1659   build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
1660 }
1661 
1662 // Build a SubViewOp with static entries and custom result type. If the
1663 // type passed is nullptr, it is inferred.
1664 void SubViewOp::build(OpBuilder &b, OperationState &result,
1665                       MemRefType resultType, Value source,
1666                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1667                       ArrayRef<int64_t> strides,
1668                       ArrayRef<NamedAttribute> attrs) {
1669   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1670       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1671         return b.getI64IntegerAttr(v);
1672       }));
1673   SmallVector<OpFoldResult> sizeValues =
1674       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1675         return b.getI64IntegerAttr(v);
1676       }));
1677   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1678       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1679         return b.getI64IntegerAttr(v);
1680       }));
1681   build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
1682         attrs);
1683 }
1684 
1685 // Build a SubViewOp with dynamic entries and custom result type. If the type
1686 // passed is nullptr, it is inferred.
1687 void SubViewOp::build(OpBuilder &b, OperationState &result,
1688                       MemRefType resultType, Value source, ValueRange offsets,
1689                       ValueRange sizes, ValueRange strides,
1690                       ArrayRef<NamedAttribute> attrs) {
1691   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1692       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1693   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1694       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1695   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1696       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1697   build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
        attrs);
1698 }
1699 
1700 // Build a SubViewOp with dynamic entries and inferred result type.
1701 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1702                       ValueRange offsets, ValueRange sizes, ValueRange strides,
1703                       ArrayRef<NamedAttribute> attrs) {
1704   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1705 }
1706 
1707 /// For ViewLikeOpInterface.
1708 Value SubViewOp::getViewSource() { return source(); }
1709 
1710 /// Checks if the `original` type can be rank-reduced to the `reduced` type.
1711 /// This is a slight variant of the "is subsequence" algorithm, where the
1712 /// non-matching dimensions must be statically equal to 1.
1713 static SliceVerificationResult
1714 isRankReducedMemRefType(MemRefType originalType,
1715                         MemRefType candidateReducedType,
1716                         ArrayRef<OpFoldResult> sizes) {
1717   auto partialRes =
1718       isRankReducedType(originalType, candidateReducedType);
1719   if (partialRes != SliceVerificationResult::Success)
1720     return partialRes;
1721 
1722   MemRefType original = originalType.cast<MemRefType>();
1723   MemRefType candidateReduced =
1724       candidateReducedType.cast<MemRefType>();
1725 
1726   auto optionalUnusedDimsMask =
1727       computeMemRefRankReductionMask(original, candidateReduced, sizes);
1728 
1729   // If no rank-reduction mask could be computed, the sizes do not match.
1730   if (!optionalUnusedDimsMask.hasValue())
1731     return SliceVerificationResult::LayoutMismatch;
1732 
1733   if (original.getMemorySpace() != candidateReduced.getMemorySpace())
1734     return SliceVerificationResult::MemSpaceMismatch;
1735 
1736   return SliceVerificationResult::Success;
1737 }
1738 
1739 template <typename OpTy>
1740 static LogicalResult produceSubViewErrorMsg(SliceVerificationResult result,
1741                                             OpTy op, Type expectedType) {
1742   auto memrefType = expectedType.cast<ShapedType>();
1743   switch (result) {
1744   case SliceVerificationResult::Success:
1745     return success();
1746   case SliceVerificationResult::RankTooLarge:
1747     return op.emitError("expected result rank to be smaller or equal to ")
1748            << "the source rank. ";
1749   case SliceVerificationResult::SizeMismatch:
1750     return op.emitError("expected result type to be ")
1751            << expectedType
1752            << " or a rank-reduced version. (mismatch of result sizes) ";
1753   case SliceVerificationResult::ElemTypeMismatch:
1754     return op.emitError("expected result element type to be ")
1755            << memrefType.getElementType();
1756   case SliceVerificationResult::MemSpaceMismatch:
1757     return op.emitError("expected result and source memory spaces to match.");
1758   case SliceVerificationResult::LayoutMismatch:
1759     return op.emitError("expected result type to be ")
1760            << expectedType
1761            << " or a rank-reduced version. (mismatch of result layout) ";
1762   }
1763   llvm_unreachable("unexpected subview verification result");
1764 }
1765 
1766 /// Verifier for SubViewOp.
1767 static LogicalResult verify(SubViewOp op) {
1768   MemRefType baseType = op.getSourceType();
1769   MemRefType subViewType = op.getType();
1770 
1771   // The base memref and the view memref should be in the same memory space.
1772   if (baseType.getMemorySpace() != subViewType.getMemorySpace())
1773     return op.emitError("different memory spaces specified for base memref "
1774                         "type ")
1775            << baseType << " and subview memref type " << subViewType;
1776 
1777   // Verify that the base memref type has a strided layout map.
1778   if (!isStrided(baseType))
1779     return op.emitError("base type ") << baseType << " is not strided";
1780 
1781   // Verify result type against inferred type.
1782   auto expectedType = SubViewOp::inferResultType(
1783       baseType, extractFromI64ArrayAttr(op.static_offsets()),
1784       extractFromI64ArrayAttr(op.static_sizes()),
1785       extractFromI64ArrayAttr(op.static_strides()));
1786 
1787   auto result = isRankReducedMemRefType(expectedType.cast<MemRefType>(),
1788                                         subViewType, op.getMixedSizes());
1789   return produceSubViewErrorMsg(result, op, expectedType);
1790 }
1791 
1792 raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) {
1793   return os << "range " << range.offset << ":" << range.size << ":"
1794             << range.stride;
1795 }
1796 
1797 /// Return the list of Range (i.e. offset, size, stride). Each Range
1798 /// entry contains either the dynamic value or a ConstantIndexOp constructed
1799 /// with `b` at location `loc`.
1800 SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
1801                                               OpBuilder &b, Location loc) {
1802   std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
1803   assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
1804   assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
1805   SmallVector<Range, 8> res;
1806   unsigned rank = ranks[0];
1807   res.reserve(rank);
1808   for (unsigned idx = 0; idx < rank; ++idx) {
1809     Value offset =
1810         op.isDynamicOffset(idx)
1811             ? op.getDynamicOffset(idx)
1812             : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
1813     Value size =
1814         op.isDynamicSize(idx)
1815             ? op.getDynamicSize(idx)
1816             : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
1817     Value stride =
1818         op.isDynamicStride(idx)
1819             ? op.getDynamicStride(idx)
1820             : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
1821     res.emplace_back(Range{offset, size, stride});
1822   }
1823   return res;
1824 }
1825 
1826 /// Infer the canonical type of the result of a subview operation: a type with
1827 /// the rank of `currentResultType`, obtained by dropping the rank-reduced
1828 /// dimensions from the inferred type. Returns a null type on failure.
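/// Illustrative sketch: if the inferred (non-rank-reduced) type is
/// `memref<1x16x4xf32, ...>` and `currentResultType` has rank 2, the unit
/// dimension is dropped and the layout map is projected onto the remaining
/// dimensions.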
1829 static MemRefType getCanonicalSubViewResultType(
1830     MemRefType currentResultType, MemRefType sourceType,
1831     ArrayRef<OpFoldResult> mixedOffsets, ArrayRef<OpFoldResult> mixedSizes,
1832     ArrayRef<OpFoldResult> mixedStrides) {
1833   auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
1834                                                        mixedSizes, mixedStrides)
1835                                 .cast<MemRefType>();
1836   llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
1837       computeMemRefRankReductionMask(sourceType, currentResultType, mixedSizes);
1838   // Return nullptr as failure mode.
1839   if (!unusedDims)
1840     return nullptr;
1841   SmallVector<int64_t> shape;
1842   for (auto size : llvm::enumerate(nonRankReducedType.getShape())) {
1843     if (unusedDims->count(size.index()))
1844       continue;
1845     shape.push_back(size.value());
1846   }
1847   AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
1848   if (!layoutMap.isIdentity())
1849     layoutMap = getProjectedMap(layoutMap, unusedDims.getValue());
1850   return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
1851                          nonRankReducedType.getMemorySpace());
1852 }
1853 
1854 namespace {
1855 /// Pattern to rewrite a subview op with MemRefCast arguments.
1856 /// This essentially pushes memref.cast past its consuming subview when
1857 /// `canFoldIntoConsumerOp` is true.
1858 ///
1859 /// Example:
1860 /// ```
1861 ///   %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
1862 ///   %1 = memref.subview %0[0, 0][3, 4][1, 1] :
1863 ///     memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
1864 /// ```
1865 /// is rewritten into:
1866 /// ```
1867 ///   %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
1868 ///   %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
1869 ///     memref<3x4xf32, offset:?, strides:[?, 1]>
1870 /// ```
1871 class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
1872 public:
1873   using OpRewritePattern<SubViewOp>::OpRewritePattern;
1874 
1875   LogicalResult matchAndRewrite(SubViewOp subViewOp,
1876                                 PatternRewriter &rewriter) const override {
1877     // Any constant operand, just return to let SubViewOpConstantFolder kick in.
1878     if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
1879           return matchPattern(operand, matchConstantIndex());
1880         }))
1881       return failure();
1882 
1883     auto castOp = subViewOp.source().getDefiningOp<CastOp>();
1884     if (!castOp)
1885       return failure();
1886 
1887     if (!CastOp::canFoldIntoConsumerOp(castOp))
1888       return failure();
1889 
1890     /// Deduce the resultType of the SubViewOp using `inferSubViewResultType` on
1891     /// the cast source operand type and the SubViewOp static information. This
1892     /// is the resulting type if the MemRefCastOp were folded.
1893     auto resultType = getCanonicalSubViewResultType(
1894         subViewOp.getType(), castOp.source().getType().cast<MemRefType>(),
1895         subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
1896         subViewOp.getMixedStrides());
1897     Value newSubView = rewriter.create<SubViewOp>(
1898         subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
1899         subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
1900         subViewOp.static_sizes(), subViewOp.static_strides());
1901     rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
1902                                         newSubView);
1903     return success();
1904   }
1905 };
1906 } // namespace
1907 
1908 /// Return the canonical type of the result of a subview.
1909 struct SubViewReturnTypeCanonicalizer {
1910   MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
1911                         ArrayRef<OpFoldResult> mixedSizes,
1912                         ArrayRef<OpFoldResult> mixedStrides) {
1913     return getCanonicalSubViewResultType(op.getType(), op.getSourceType(),
1914                                          mixedOffsets, mixedSizes,
1915                                          mixedStrides);
1916   }
1917 };
1918 
1919 /// A canonicalizer wrapper to replace SubViewOps.
1920 struct SubViewCanonicalizer {
1921   void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
1922     rewriter.replaceOpWithNewOp<CastOp>(op, newOp, op.getType());
1923   }
1924 };
1925 
1926 void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
1927                                             MLIRContext *context) {
1928   results
1929       .add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
1930                SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
1931            SubViewOpMemRefCastFolder>(context);
1932 }
1933 
1934 OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
1935   auto resultShapedType = getResult().getType().cast<ShapedType>();
1936   auto sourceShapedType = source().getType().cast<ShapedType>();
1937 
1938   if (resultShapedType.hasStaticShape() &&
1939       resultShapedType == sourceShapedType) {
1940     return getViewSource();
1941   }
1942 
1943   return {};
1944 }
1945 
1946 //===----------------------------------------------------------------------===//
1947 // TransposeOp
1948 //===----------------------------------------------------------------------===//
1949 
1950 /// Build a strided memref type by applying `permutationMap` to `memRefType`.
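/// Illustrative sketch: permuting `memref<3x4xf32>` (strides [4, 1]) with
/// (d0, d1) -> (d1, d0) yields a 4x3 result whose strides are [1, 4].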
1951 static MemRefType inferTransposeResultType(MemRefType memRefType,
1952                                            AffineMap permutationMap) {
1953   auto rank = memRefType.getRank();
1954   auto originalSizes = memRefType.getShape();
1955   // Compute permuted sizes.
1956   SmallVector<int64_t, 4> sizes(rank, 0);
1957   for (auto en : llvm::enumerate(permutationMap.getResults()))
1958     sizes[en.index()] =
1959         originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
1960 
1961   // Compute permuted strides.
1962   int64_t offset;
1963   SmallVector<int64_t, 4> strides;
1964   auto res = getStridesAndOffset(memRefType, strides, offset);
1965   assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
1966   (void)res;
1967   auto map =
1968       makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
1969   map = permutationMap ? map.compose(permutationMap) : map;
1970   return MemRefType::Builder(memRefType)
1971       .setShape(sizes)
1972       .setLayout(AffineMapAttr::get(map));
1973 }
1974 
1975 void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
1976                         AffineMapAttr permutation,
1977                         ArrayRef<NamedAttribute> attrs) {
1978   auto permutationMap = permutation.getValue();
1979   assert(permutationMap);
1980 
1981   auto memRefType = in.getType().cast<MemRefType>();
1982   // Compute result type.
1983   MemRefType resultType = inferTransposeResultType(memRefType, permutationMap);
1984 
1985   build(b, result, resultType, in, attrs);
1986   result.addAttribute(TransposeOp::getPermutationAttrName(), permutation);
1987 }
1988 
1989 // transpose $in $permutation attr-dict : type($in) `to` type(results)
1990 static void print(OpAsmPrinter &p, TransposeOp op) {
1991   p << " " << op.in() << " " << op.permutation();
1992   p.printOptionalAttrDict(op->getAttrs(),
1993                           {TransposeOp::getPermutationAttrName()});
1994   p << " : " << op.in().getType() << " to " << op.getType();
1995 }
1996 
1997 static ParseResult parseTransposeOp(OpAsmParser &parser,
1998                                     OperationState &result) {
1999   OpAsmParser::OperandType in;
2000   AffineMap permutation;
2001   MemRefType srcType, dstType;
2002   if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
2003       parser.parseOptionalAttrDict(result.attributes) ||
2004       parser.parseColonType(srcType) ||
2005       parser.resolveOperand(in, srcType, result.operands) ||
2006       parser.parseKeywordType("to", dstType) ||
2007       parser.addTypeToList(dstType, result.types))
2008     return failure();
2009 
2010   result.addAttribute(TransposeOp::getPermutationAttrName(),
2011                       AffineMapAttr::get(permutation));
2012   return success();
2013 }
2014 
2015 static LogicalResult verify(TransposeOp op) {
2016   if (!op.permutation().isPermutation())
2017     return op.emitOpError("expected a permutation map");
2018   if (op.permutation().getNumDims() != op.getShapedType().getRank())
2019     return op.emitOpError(
2020         "expected a permutation map of same rank as the input");
2021 
2022   auto srcType = op.in().getType().cast<MemRefType>();
2023   auto dstType = op.getType().cast<MemRefType>();
2024   auto transposedType = inferTransposeResultType(srcType, op.permutation());
2025   if (dstType != transposedType)
2026     return op.emitOpError("output type ")
2027            << dstType << " does not match transposed input type " << srcType
2028            << " (expected " << transposedType << ")";
2029   return success();
2030 }
2031 
2032 OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
2033   if (succeeded(foldMemRefCast(*this)))
2034     return getResult();
2035   return {};
2036 }
2037 
2038 //===----------------------------------------------------------------------===//
2039 // ViewOp
2040 //===----------------------------------------------------------------------===//
2041 
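// The custom assembly form handled below looks like (illustrative):
//   %1 = memref.view %0[%byte_shift][%size0, %size1]
//       : memref<2048xi8> to memref<?x?xf32>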
2042 static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
2043   OpAsmParser::OperandType srcInfo;
2044   SmallVector<OpAsmParser::OperandType, 1> offsetInfo;
2045   SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
2046   auto indexType = parser.getBuilder().getIndexType();
2047   Type srcType, dstType;
2048   llvm::SMLoc offsetLoc;
2049   if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) ||
2050       parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square))
2051     return failure();
2052 
2053   if (offsetInfo.size() != 1)
2054     return parser.emitError(offsetLoc) << "expects 1 offset operand";
2055 
2056   return failure(
2057       parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
2058       parser.parseOptionalAttrDict(result.attributes) ||
2059       parser.parseColonType(srcType) ||
2060       parser.resolveOperand(srcInfo, srcType, result.operands) ||
2061       parser.resolveOperands(offsetInfo, indexType, result.operands) ||
2062       parser.resolveOperands(sizesInfo, indexType, result.operands) ||
2063       parser.parseKeywordType("to", dstType) ||
2064       parser.addTypeToList(dstType, result.types));
2065 }
2066 
2067 static void print(OpAsmPrinter &p, ViewOp op) {
2068   p << ' ' << op.getOperand(0) << '[';
2069   p.printOperand(op.byte_shift());
2070   p << "][" << op.sizes() << ']';
2071   p.printOptionalAttrDict(op->getAttrs());
2072   p << " : " << op.getOperand(0).getType() << " to " << op.getType();
2073 }
2074 
2075 static LogicalResult verify(ViewOp op) {
2076   auto baseType = op.getOperand(0).getType().cast<MemRefType>();
2077   auto viewType = op.getType();
2078 
2079   // The base memref should have identity layout map (or none).
2080   if (!baseType.getLayout().isIdentity())
2081     return op.emitError("unsupported map for base memref type ") << baseType;
2082 
2083   // The result memref should have identity layout map (or none).
2084   if (!viewType.getLayout().isIdentity())
2085     return op.emitError("unsupported map for result memref type ") << viewType;
2086 
2087   // The base memref and the view memref should be in the same memory space.
2088   if (baseType.getMemorySpace() != viewType.getMemorySpace())
2089     return op.emitError("different memory spaces specified for base memref "
2090                         "type ")
2091            << baseType << " and view memref type " << viewType;
2092 
2093   // Verify that we have the correct number of sizes for the result type.
2094   unsigned numDynamicDims = viewType.getNumDynamicDims();
2095   if (op.sizes().size() != numDynamicDims)
2096     return op.emitError("incorrect number of size operands for type ")
2097            << viewType;
2098 
2099   return success();
2100 }
2101 
2102 Value ViewOp::getViewSource() { return source(); }
2103 
2104 namespace {
2105 
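/// Folds constant size operands into the ViewOp result type. Illustrative
/// sketch: a view producing memref<?x?xf32> whose size operands are the
/// constants 16 and 4 is rewritten to produce memref<16x4xf32>, followed by a
/// memref.cast back to the original result type.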
2106 struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
2107   using OpRewritePattern<ViewOp>::OpRewritePattern;
2108 
2109   LogicalResult matchAndRewrite(ViewOp viewOp,
2110                                 PatternRewriter &rewriter) const override {
2111     // Return if none of the operands are constants.
2112     if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
2113           return matchPattern(operand, matchConstantIndex());
2114         }))
2115       return failure();
2116 
2117     // Get result memref type.
2118     auto memrefType = viewOp.getType();
2119 
2120     // Get the offset from the old memref view type `memrefType`.
2121     int64_t oldOffset;
2122     SmallVector<int64_t, 4> oldStrides;
2123     if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
2124       return failure();
2125     assert(oldOffset == 0 && "Expected 0 offset");
2126 
2127     SmallVector<Value, 4> newOperands;
2128 
2129     // Offset cannot be folded into result type.
2130 
2131     // Fold any dynamic dim operands which are produced by a constant.
2132     SmallVector<int64_t, 4> newShapeConstants;
2133     newShapeConstants.reserve(memrefType.getRank());
2134 
2135     unsigned dynamicDimPos = 0;
2136     unsigned rank = memrefType.getRank();
2137     for (unsigned dim = 0, e = rank; dim < e; ++dim) {
2138       int64_t dimSize = memrefType.getDimSize(dim);
2139       // If this is already a static dimension, keep it.
2140       if (!ShapedType::isDynamic(dimSize)) {
2141         newShapeConstants.push_back(dimSize);
2142         continue;
2143       }
2144       auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp();
2145       if (auto constantIndexOp =
2146               dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
2147         // Dynamic shape dimension will be folded.
2148         newShapeConstants.push_back(constantIndexOp.value());
2149       } else {
2150         // Dynamic shape dimension not folded; copy operand from old memref.
2151         newShapeConstants.push_back(dimSize);
2152         newOperands.push_back(viewOp.sizes()[dynamicDimPos]);
2153       }
2154       dynamicDimPos++;
2155     }
2156 
2157     // Create new memref type with constant folded dims.
2158     MemRefType newMemRefType =
2159         MemRefType::Builder(memrefType).setShape(newShapeConstants);
2160     // Nothing new, don't fold.
2161     if (newMemRefType == memrefType)
2162       return failure();
2163 
2164     // Create new ViewOp.
2165     auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
2166                                              viewOp.getOperand(0),
2167                                              viewOp.byte_shift(), newOperands);
2168     // Insert a cast so we have the same type as the old memref type.
2169     rewriter.replaceOpWithNewOp<CastOp>(viewOp, newViewOp, viewOp.getType());
2170     return success();
2171   }
2172 };
2173 
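/// If the source of a ViewOp is a memref.cast of an AllocOp result, re-create
/// the view directly on the cast's source so the cast can be removed.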
2174 struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
2175   using OpRewritePattern<ViewOp>::OpRewritePattern;
2176 
2177   LogicalResult matchAndRewrite(ViewOp viewOp,
2178                                 PatternRewriter &rewriter) const override {
2179     Value memrefOperand = viewOp.getOperand(0);
2180     CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>();
2181     if (!memrefCastOp)
2182       return failure();
2183     Value allocOperand = memrefCastOp.getOperand();
2184     AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>();
2185     if (!allocOp)
2186       return failure();
2187     rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
2188                                         viewOp.byte_shift(), viewOp.sizes());
2189     return success();
2190   }
2191 };
2192 
2193 } // end anonymous namespace
2194 
2195 void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2196                                          MLIRContext *context) {
2197   results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
2198 }
2199 
2200 //===----------------------------------------------------------------------===//
2201 // TableGen'd op method definitions
2202 //===----------------------------------------------------------------------===//
2203 
2204 #define GET_OP_CLASSES
2205 #include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"
2206