1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/MemRef/IR/MemRef.h"
11 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
12 #include "mlir/Dialect/StandardOps/IR/Ops.h"
13 #include "mlir/Dialect/StandardOps/Utils/Utils.h"
14 #include "mlir/Dialect/Utils/StaticValueUtils.h"
15 #include "mlir/IR/AffineMap.h"
16 #include "mlir/IR/Builders.h"
17 #include "mlir/IR/BuiltinTypes.h"
18 #include "mlir/IR/Matchers.h"
19 #include "mlir/IR/PatternMatch.h"
20 #include "mlir/IR/TypeUtilities.h"
21 #include "mlir/Interfaces/InferTypeOpInterface.h"
22 #include "mlir/Interfaces/ViewLikeInterface.h"
23 #include "llvm/ADT/STLExtras.h"
24 
25 using namespace mlir;
26 using namespace mlir::memref;
27 
28 /// Materialize a single constant operation from a given attribute value with
29 /// the desired resultant type.
30 Operation *MemRefDialect::materializeConstant(OpBuilder &builder,
31                                               Attribute value, Type type,
32                                               Location loc) {
33   if (arith::ConstantOp::isBuildableWith(value, type))
34     return builder.create<arith::ConstantOp>(loc, value, type);
35   if (ConstantOp::isBuildableWith(value, type))
36     return builder.create<ConstantOp>(loc, value, type);
37   return nullptr;
38 }
39 
40 //===----------------------------------------------------------------------===//
41 // Common canonicalization pattern support logic
42 //===----------------------------------------------------------------------===//
43 
/// This is a common utility used for patterns of the form
/// "someop(memrefcast) -> someop".  It folds the source of any memref.cast
/// into the root operation directly.
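///
/// For illustration only (a minimal sketch, not taken from a test):
///
/// ```mlir
///   %0 = memref.cast %arg : memref<8xf32> to memref<?xf32>
///   %v = memref.load %0[%i] : memref<?xf32>
/// ```
///
/// folds the load to read directly from %arg : memref<8xf32>.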
47 LogicalResult mlir::memref::foldMemRefCast(Operation *op, Value inner) {
48   bool folded = false;
49   for (OpOperand &operand : op->getOpOperands()) {
50     auto cast = operand.get().getDefiningOp<CastOp>();
51     if (cast && operand.get() != inner &&
52         !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
53       operand.set(cast.getOperand());
54       folded = true;
55     }
56   }
57   return success(folded);
58 }
59 
60 /// Return an unranked/ranked tensor type for the given unranked/ranked memref
61 /// type.
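/// E.g. (illustrative): memref<4x?xf32> maps to tensor<4x?xf32>, an unranked
/// memref<*xf32> maps to tensor<*xf32>, and any non-memref type yields
/// NoneType.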
62 Type mlir::memref::getTensorTypeFromMemRefType(Type type) {
63   if (auto memref = type.dyn_cast<MemRefType>())
64     return RankedTensorType::get(memref.getShape(), memref.getElementType());
65   if (auto memref = type.dyn_cast<UnrankedMemRefType>())
66     return UnrankedTensorType::get(memref.getElementType());
67   return NoneType::get(type.getContext());
68 }
69 
70 //===----------------------------------------------------------------------===//
71 // AllocOp / AllocaOp
72 //===----------------------------------------------------------------------===//
73 
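/// Verify an alloc-like op. As an illustrative sketch (not from the source),
/// an allocation such as
///
/// ```mlir
///   %0 = memref.alloc(%n)[%s]
///       : memref<?x8xf32, affine_map<(d0, d1)[s0] -> (d0 * 8 + d1 + s0)>>
/// ```
///
/// must provide exactly one dynamic size operand (%n, one per dynamic dim)
/// and one symbol operand (%s, one per layout-map symbol).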
74 template <typename AllocLikeOp>
75 static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
76   static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
77                 "applies to only alloc or alloca");
78   auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
79   if (!memRefType)
80     return op.emitOpError("result must be a memref");
81 
82   if (static_cast<int64_t>(op.dynamicSizes().size()) !=
83       memRefType.getNumDynamicDims())
84     return op.emitOpError("dimension operand count does not equal memref "
85                           "dynamic dimension count");
86 
87   unsigned numSymbols = 0;
88   if (!memRefType.getLayout().isIdentity())
89     numSymbols = memRefType.getLayout().getAffineMap().getNumSymbols();
90   if (op.symbolOperands().size() != numSymbols)
91     return op.emitOpError("symbol operand count does not equal memref symbol "
92                           "count: expected ")
93            << numSymbols << ", got " << op.symbolOperands().size();
94 
95   return success();
96 }
97 
98 static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); }
99 
100 static LogicalResult verify(AllocaOp op) {
101   // An alloca op needs to have an ancestor with an allocation scope trait.
102   if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
103     return op.emitOpError(
104         "requires an ancestor op with AutomaticAllocationScope trait");
105 
106   return verifyAllocLikeOp(op);
107 }
108 
109 namespace {
110 /// Fold constant dimensions into an alloc like operation.
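///
/// For instance (illustrative sketch):
///
/// ```mlir
///   %c4 = arith.constant 4 : index
///   %0 = memref.alloc(%c4, %n) : memref<?x?xf32>
/// ```
///
/// is rewritten to allocate memref<4x?xf32> (taking only %n), followed by a
/// memref.cast back to memref<?x?xf32> so that existing uses keep their type.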
111 template <typename AllocLikeOp>
112 struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
113   using OpRewritePattern<AllocLikeOp>::OpRewritePattern;
114 
115   LogicalResult matchAndRewrite(AllocLikeOp alloc,
116                                 PatternRewriter &rewriter) const override {
    // Check to see if any dimension operands are constants.  If so, we can
    // substitute and drop them.
119     if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) {
120           return matchPattern(operand, matchConstantIndex());
121         }))
122       return failure();
123 
124     auto memrefType = alloc.getType();
125 
126     // Ok, we have one or more constant operands.  Collect the non-constant ones
127     // and keep track of the resultant memref type to build.
128     SmallVector<int64_t, 4> newShapeConstants;
129     newShapeConstants.reserve(memrefType.getRank());
130     SmallVector<Value, 4> dynamicSizes;
131 
132     unsigned dynamicDimPos = 0;
133     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
134       int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
136       if (dimSize != -1) {
137         newShapeConstants.push_back(dimSize);
138         continue;
139       }
140       auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos];
141       auto *defOp = dynamicSize.getDefiningOp();
142       if (auto constantIndexOp =
143               dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
144         // Dynamic shape dimension will be folded.
145         newShapeConstants.push_back(constantIndexOp.value());
146       } else {
147         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
148         newShapeConstants.push_back(-1);
149         dynamicSizes.push_back(dynamicSize);
150       }
151       dynamicDimPos++;
152     }
153 
154     // Create new memref type (which will have fewer dynamic dimensions).
155     MemRefType newMemRefType =
156         MemRefType::Builder(memrefType).setShape(newShapeConstants);
157     assert(static_cast<int64_t>(dynamicSizes.size()) ==
158            newMemRefType.getNumDynamicDims());
159 
160     // Create and insert the alloc op for the new memref.
161     auto newAlloc = rewriter.create<AllocLikeOp>(
162         alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(),
163         alloc.alignmentAttr());
164     // Insert a cast so we have the same type as the old alloc.
165     auto resultCast =
166         rewriter.create<CastOp>(alloc.getLoc(), newAlloc, alloc.getType());
167 
168     rewriter.replaceOp(alloc, {resultCast});
169     return success();
170   }
171 };
172 
173 /// Fold alloc operations with no users or only store and dealloc uses.
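///
/// For example (illustrative): the following three ops can all be erased,
/// since the buffer is only written to and deallocated:
///
/// ```mlir
///   %0 = memref.alloc() : memref<4xf32>
///   memref.store %v, %0[%i] : memref<4xf32>
///   memref.dealloc %0 : memref<4xf32>
/// ```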
174 template <typename T>
175 struct SimplifyDeadAlloc : public OpRewritePattern<T> {
176   using OpRewritePattern<T>::OpRewritePattern;
177 
178   LogicalResult matchAndRewrite(T alloc,
179                                 PatternRewriter &rewriter) const override {
180     if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
181           if (auto storeOp = dyn_cast<StoreOp>(op))
182             return storeOp.value() == alloc;
183           return !isa<DeallocOp>(op);
184         }))
185       return failure();
186 
187     for (Operation *user : llvm::make_early_inc_range(alloc->getUsers()))
188       rewriter.eraseOp(user);
189 
190     rewriter.eraseOp(alloc);
191     return success();
192   }
193 };
194 } // namespace
195 
196 void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
197                                           MLIRContext *context) {
198   results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context);
199 }
200 
201 void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
202                                            MLIRContext *context) {
203   results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>(
204       context);
205 }
206 
207 //===----------------------------------------------------------------------===//
208 // AllocaScopeOp
209 //===----------------------------------------------------------------------===//
210 
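// The custom assembly handled by the printer and parser below looks roughly
// like the following (illustrative sketch):
//
//   memref.alloca_scope {
//     %buf = memref.alloca() : memref<16xf32>
//     ...
//   }
//
// with an optional `-> (result-types)` list when the scope yields values
// through memref.alloca_scope.return.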
211 static void print(OpAsmPrinter &p, AllocaScopeOp &op) {
212   bool printBlockTerminators = false;
213 
214   p << " ";
215   if (!op.results().empty()) {
216     p << " -> (" << op.getResultTypes() << ")";
217     printBlockTerminators = true;
218   }
219   p.printRegion(op.bodyRegion(),
220                 /*printEntryBlockArgs=*/false,
221                 /*printBlockTerminators=*/printBlockTerminators);
222   p.printOptionalAttrDict(op->getAttrs());
223 }
224 
225 static ParseResult parseAllocaScopeOp(OpAsmParser &parser,
226                                       OperationState &result) {
227   // Create a region for the body.
228   result.regions.reserve(1);
229   Region *bodyRegion = result.addRegion();
230 
231   // Parse optional results type list.
232   if (parser.parseOptionalArrowTypeList(result.types))
233     return failure();
234 
235   // Parse the body region.
236   if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{}))
237     return failure();
238   AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(),
239                                   result.location);
240 
241   // Parse the optional attribute list.
242   if (parser.parseOptionalAttrDict(result.attributes))
243     return failure();
244 
245   return success();
246 }
247 
248 static LogicalResult verify(AllocaScopeOp op) {
249   if (failed(RegionBranchOpInterface::verifyTypes(op)))
250     return failure();
251 
252   return success();
253 }
254 
255 void AllocaScopeOp::getSuccessorRegions(
256     Optional<unsigned> index, ArrayRef<Attribute> operands,
257     SmallVectorImpl<RegionSuccessor> &regions) {
258   if (index.hasValue()) {
259     regions.push_back(RegionSuccessor(getResults()));
260     return;
261   }
262 
263   regions.push_back(RegionSuccessor(&bodyRegion()));
264 }
265 
266 //===----------------------------------------------------------------------===//
267 // AssumeAlignmentOp
268 //===----------------------------------------------------------------------===//
269 
270 static LogicalResult verify(AssumeAlignmentOp op) {
271   unsigned alignment = op.alignment();
272   if (!llvm::isPowerOf2_32(alignment))
273     return op.emitOpError("alignment must be power of 2");
274   return success();
275 }
276 
277 //===----------------------------------------------------------------------===//
278 // CastOp
279 //===----------------------------------------------------------------------===//
280 
281 /// Determines whether MemRef_CastOp casts to a more dynamic version of the
/// source memref. This is useful to fold a memref.cast into a consuming op
283 /// and implement canonicalization patterns for ops in different dialects that
284 /// may consume the results of memref.cast operations. Such foldable memref.cast
285 /// operations are typically inserted as `view` and `subview` ops are
286 /// canonicalized, to preserve the type compatibility of their uses.
287 ///
288 /// Returns true when all conditions are met:
289 /// 1. source and result are ranked memrefs with strided semantics and same
290 /// element type and rank.
291 /// 2. each of the source's size, offset or stride has more static information
292 /// than the corresponding result's size, offset or stride.
293 ///
294 /// Example 1:
295 /// ```mlir
296 ///   %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
297 ///   %2 = consumer %1 ... : memref<?x?xf32> ...
298 /// ```
299 ///
300 /// may fold into:
301 ///
302 /// ```mlir
303 ///   %2 = consumer %0 ... : memref<8x16xf32> ...
304 /// ```
305 ///
306 /// Example 2:
307 /// ```
308 ///   %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
309 ///          to memref<?x?xf32>
310 ///   consumer %1 : memref<?x?xf32> ...
311 /// ```
312 ///
313 /// may fold into:
314 ///
315 /// ```
316 ///   consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
317 /// ```
318 bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
319   MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
320   MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();
321 
322   // Requires ranked MemRefType.
323   if (!sourceType || !resultType)
324     return false;
325 
326   // Requires same elemental type.
327   if (sourceType.getElementType() != resultType.getElementType())
328     return false;
329 
330   // Requires same rank.
331   if (sourceType.getRank() != resultType.getRank())
332     return false;
333 
334   // Only fold casts between strided memref forms.
335   int64_t sourceOffset, resultOffset;
336   SmallVector<int64_t, 4> sourceStrides, resultStrides;
337   if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
338       failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
339     return false;
340 
341   // If cast is towards more static sizes along any dimension, don't fold.
342   for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
343     auto ss = std::get<0>(it), st = std::get<1>(it);
344     if (ss != st)
345       if (MemRefType::isDynamic(ss) && !MemRefType::isDynamic(st))
346         return false;
347   }
348 
349   // If cast is towards more static offset along any dimension, don't fold.
350   if (sourceOffset != resultOffset)
351     if (MemRefType::isDynamicStrideOrOffset(sourceOffset) &&
352         !MemRefType::isDynamicStrideOrOffset(resultOffset))
353       return false;
354 
355   // If cast is towards more static strides along any dimension, don't fold.
356   for (auto it : llvm::zip(sourceStrides, resultStrides)) {
357     auto ss = std::get<0>(it), st = std::get<1>(it);
358     if (ss != st)
359       if (MemRefType::isDynamicStrideOrOffset(ss) &&
360           !MemRefType::isDynamicStrideOrOffset(st))
361         return false;
362   }
363 
364   return true;
365 }
366 
367 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
368   if (inputs.size() != 1 || outputs.size() != 1)
369     return false;
370   Type a = inputs.front(), b = outputs.front();
371   auto aT = a.dyn_cast<MemRefType>();
372   auto bT = b.dyn_cast<MemRefType>();
373 
374   auto uaT = a.dyn_cast<UnrankedMemRefType>();
375   auto ubT = b.dyn_cast<UnrankedMemRefType>();
376 
377   if (aT && bT) {
378     if (aT.getElementType() != bT.getElementType())
379       return false;
380     if (aT.getLayout() != bT.getLayout()) {
381       int64_t aOffset, bOffset;
382       SmallVector<int64_t, 4> aStrides, bStrides;
383       if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
384           failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
385           aStrides.size() != bStrides.size())
386         return false;
387 
      // Strides along a dimension/offset are compatible if the value in the
      // source memref is static and the value in the target memref is the
      // same. They are also compatible if either one is dynamic (see the
      // description of memref.cast for details).
392       auto checkCompatible = [](int64_t a, int64_t b) {
393         return (a == MemRefType::getDynamicStrideOrOffset() ||
394                 b == MemRefType::getDynamicStrideOrOffset() || a == b);
395       };
396       if (!checkCompatible(aOffset, bOffset))
397         return false;
398       for (const auto &aStride : enumerate(aStrides))
399         if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
400           return false;
401     }
402     if (aT.getMemorySpace() != bT.getMemorySpace())
403       return false;
404 
405     // They must have the same rank, and any specified dimensions must match.
406     if (aT.getRank() != bT.getRank())
407       return false;
408 
409     for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
410       int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
411       if (aDim != -1 && bDim != -1 && aDim != bDim)
412         return false;
413     }
414     return true;
415   } else {
416     if (!aT && !uaT)
417       return false;
418     if (!bT && !ubT)
419       return false;
420     // Unranked to unranked casting is unsupported
421     if (uaT && ubT)
422       return false;
423 
424     auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
425     auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
426     if (aEltType != bEltType)
427       return false;
428 
429     auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
430     auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
431     return aMemSpace == bMemSpace;
432   }
433 
434   return false;
435 }
436 
437 OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
438   return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
439 }
440 
441 //===----------------------------------------------------------------------===//
442 // DeallocOp
443 //===----------------------------------------------------------------------===//
444 
445 LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
446                               SmallVectorImpl<OpFoldResult> &results) {
447   /// dealloc(memrefcast) -> dealloc
448   return foldMemRefCast(*this);
449 }
450 
451 //===----------------------------------------------------------------------===//
452 // DimOp
453 //===----------------------------------------------------------------------===//
454 
455 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
456                   int64_t index) {
457   auto loc = result.location;
458   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
459   build(builder, result, source, indexValue);
460 }
461 
462 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
463                   Value index) {
464   auto indexTy = builder.getIndexType();
465   build(builder, result, indexTy, source, index);
466 }
467 
468 Optional<int64_t> DimOp::getConstantIndex() {
469   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
470     return constantOp.getValue().cast<IntegerAttr>().getInt();
471   return {};
472 }
473 
474 static LogicalResult verify(DimOp op) {
475   // Assume unknown index to be in range.
476   Optional<int64_t> index = op.getConstantIndex();
477   if (!index.hasValue())
478     return success();
479 
  // Check that the constant index is not known to be out of range.
481   auto type = op.source().getType();
482   if (auto memrefType = type.dyn_cast<MemRefType>()) {
483     if (index.getValue() >= memrefType.getRank())
484       return op.emitOpError("index is out of range");
485   } else if (type.isa<UnrankedMemRefType>()) {
486     // Assume index to be in range.
487   } else {
488     llvm_unreachable("expected operand with memref type");
489   }
490   return success();
491 }
492 
/// Return a map from each element in `vals` to its number of occurrences.
/// Use std::map, since the `vals` here are strides and the dynamic stride
/// value is the same as the tombstone value for `DenseMap<int64_t>`.
497 static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
498   std::map<int64_t, unsigned> numOccurences;
499   for (auto val : vals)
500     numOccurences[val]++;
501   return numOccurences;
502 }
503 
/// Given `originalType` and a `reducedType` whose shape is assumed to be a
/// subset of `originalType` with some `1` entries erased, return the set of
/// indices that specifies which entries of the original shape are dropped to
/// obtain the reduced shape.
/// This accounts for cases where there are multiple unit dims, but only a
/// subset of them is dropped. For MemRefTypes these can be disambiguated
/// using the strides: if a dimension is dropped, its stride must be dropped
/// too.
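///
/// For example (illustrative): rank-reducing a source of type
/// memref<1x4x1xf32> (strides [4, 1, 1]) to memref<4x1xf32> (strides [1, 1])
/// must report dim 0 as dropped rather than dim 2, because the stride 4 no
/// longer appears in the reduced type while one unit dim of stride 1 remains.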
511 static llvm::Optional<llvm::SmallDenseSet<unsigned>>
512 computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
513                                ArrayRef<OpFoldResult> sizes) {
514   llvm::SmallDenseSet<unsigned> unusedDims;
515   if (originalType.getRank() == reducedType.getRank())
516     return unusedDims;
517 
518   for (const auto &dim : llvm::enumerate(sizes))
519     if (auto attr = dim.value().dyn_cast<Attribute>())
520       if (attr.cast<IntegerAttr>().getInt() == 1)
521         unusedDims.insert(dim.index());
522 
523   SmallVector<int64_t> originalStrides, candidateStrides;
524   int64_t originalOffset, candidateOffset;
525   if (failed(
526           getStridesAndOffset(originalType, originalStrides, originalOffset)) ||
527       failed(
528           getStridesAndOffset(reducedType, candidateStrides, candidateOffset)))
529     return llvm::None;
530 
  // For memrefs, a dimension is truly dropped if its corresponding stride is
  // also dropped. This is particularly important when more than one of the
  // dims is 1. Track the number of occurrences of each stride in the original
  // type and in the candidate type: for each dropped dim, its stride should
  // not be present in the candidate type. Note that multiple dimensions may
  // have the same stride, so we do not need to figure out exactly which dim
  // corresponds to which stride; we just need to verify that the number of
  // repetitions of a stride in the original type plus the number of unused
  // dims with that stride equals the number of repetitions of that stride in
  // the candidate type.
540   std::map<int64_t, unsigned> currUnaccountedStrides =
541       getNumOccurences(originalStrides);
542   std::map<int64_t, unsigned> candidateStridesNumOccurences =
543       getNumOccurences(candidateStrides);
544   llvm::SmallDenseSet<unsigned> prunedUnusedDims;
545   for (unsigned dim : unusedDims) {
546     int64_t originalStride = originalStrides[dim];
547     if (currUnaccountedStrides[originalStride] >
548         candidateStridesNumOccurences[originalStride]) {
549       // This dim can be treated as dropped.
550       currUnaccountedStrides[originalStride]--;
551       continue;
552     }
553     if (currUnaccountedStrides[originalStride] ==
554         candidateStridesNumOccurences[originalStride]) {
555       // The stride for this is not dropped. Keep as is.
556       prunedUnusedDims.insert(dim);
557       continue;
558     }
559     if (currUnaccountedStrides[originalStride] <
560         candidateStridesNumOccurences[originalStride]) {
      // This should never happen: the reduced-rank type cannot contain a
      // stride that was not present in the original type.
563       return llvm::None;
564     }
565   }
566 
567   for (auto prunedDim : prunedUnusedDims)
568     unusedDims.erase(prunedDim);
569   if (unusedDims.size() + reducedType.getRank() != originalType.getRank())
570     return llvm::None;
571   return unusedDims;
572 }
573 
574 llvm::SmallDenseSet<unsigned> SubViewOp::getDroppedDims() {
575   MemRefType sourceType = getSourceType();
576   MemRefType resultType = getType();
577   llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
578       computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
579   assert(unusedDims && "unable to find unused dims of subview");
580   return *unusedDims;
581 }
582 
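/// Fold memref.dim when the queried extent is statically known or can be
/// traced to a size operand of the defining op. Illustrative sketch:
///
/// ```mlir
///   %0 = memref.alloc(%n) : memref<4x?xf32>
///   %d0 = memref.dim %0, %c0 : memref<4x?xf32>  // folds to constant 4
///   %d1 = memref.dim %0, %c1 : memref<4x?xf32>  // folds to %n
/// ```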
583 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
584   // All forms of folding require a known index.
585   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
586   if (!index)
587     return {};
588 
589   // Folding for unranked types (UnrankedMemRefType) is not supported.
590   auto memrefType = source().getType().dyn_cast<MemRefType>();
591   if (!memrefType)
592     return {};
593 
594   // Fold if the shape extent along the given index is known.
595   if (!memrefType.isDynamicDim(index.getInt())) {
596     Builder builder(getContext());
597     return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
598   }
599 
600   // The size at the given index is now known to be a dynamic size.
601   unsigned unsignedIndex = index.getValue().getZExtValue();
602 
603   // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
604   Operation *definingOp = source().getDefiningOp();
605 
606   if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
607     return *(alloc.getDynamicSizes().begin() +
608              memrefType.getDynamicDimIndex(unsignedIndex));
609 
610   if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp))
611     return *(alloca.getDynamicSizes().begin() +
612              memrefType.getDynamicDimIndex(unsignedIndex));
613 
614   if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
615     return *(view.getDynamicSizes().begin() +
616              memrefType.getDynamicDimIndex(unsignedIndex));
617 
618   if (auto subview = dyn_cast_or_null<SubViewOp>(definingOp)) {
619     llvm::SmallDenseSet<unsigned> unusedDims = subview.getDroppedDims();
620     unsigned resultIndex = 0;
621     unsigned sourceRank = subview.getSourceType().getRank();
622     unsigned sourceIndex = 0;
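    // Walk the source dims, skipping the dropped unit dims, to find the
    // source dim that corresponds to the queried result dim.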
623     for (auto i : llvm::seq<unsigned>(0, sourceRank)) {
624       if (unusedDims.count(i))
625         continue;
626       if (resultIndex == unsignedIndex) {
627         sourceIndex = i;
628         break;
629       }
630       resultIndex++;
631     }
632     assert(subview.isDynamicSize(sourceIndex) &&
633            "expected dynamic subview size");
634     return subview.getDynamicSize(sourceIndex);
635   }
636 
637   if (auto sizeInterface =
638           dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
639     assert(sizeInterface.isDynamicSize(unsignedIndex) &&
640            "Expected dynamic subview size");
641     return sizeInterface.getDynamicSize(unsignedIndex);
642   }
643 
644   // dim(memrefcast) -> dim
645   if (succeeded(foldMemRefCast(*this)))
646     return getResult();
647 
648   return {};
649 }
650 
651 namespace {
652 /// Fold dim of a memref reshape operation to a load into the reshape's shape
653 /// operand.
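///
/// E.g. (illustrative sketch):
///
/// ```mlir
///   %r = memref.reshape %src(%shape)
///       : (memref<4x4xf32>, memref<2xindex>) -> memref<?x?xf32>
///   %d = memref.dim %r, %idx : memref<?x?xf32>
/// ```
///
/// is rewritten to load the extent directly from the shape operand:
/// %d = memref.load %shape[%idx] : memref<2xindex>.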
654 struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
655   using OpRewritePattern<DimOp>::OpRewritePattern;
656 
657   LogicalResult matchAndRewrite(DimOp dim,
658                                 PatternRewriter &rewriter) const override {
659     auto reshape = dim.source().getDefiningOp<ReshapeOp>();
660 
661     if (!reshape)
662       return failure();
663 
664     // Place the load directly after the reshape to ensure that the shape memref
665     // was not mutated.
666     rewriter.setInsertionPointAfter(reshape);
667     Location loc = dim.getLoc();
668     Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index());
669     if (load.getType() != dim.getType())
670       load = rewriter.create<arith::IndexCastOp>(loc, dim.getType(), load);
671     rewriter.replaceOp(dim, load);
672     return success();
673   }
674 };
675 
676 } // namespace
677 
678 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
679                                         MLIRContext *context) {
680   results.add<DimOfMemRefReshape>(context);
681 }
682 
683 // ---------------------------------------------------------------------------
684 // DmaStartOp
685 // ---------------------------------------------------------------------------
686 
687 void DmaStartOp::build(OpBuilder &builder, OperationState &result,
688                        Value srcMemRef, ValueRange srcIndices, Value destMemRef,
689                        ValueRange destIndices, Value numElements,
690                        Value tagMemRef, ValueRange tagIndices, Value stride,
691                        Value elementsPerStride) {
692   result.addOperands(srcMemRef);
693   result.addOperands(srcIndices);
694   result.addOperands(destMemRef);
695   result.addOperands(destIndices);
696   result.addOperands({numElements, tagMemRef});
697   result.addOperands(tagIndices);
698   if (stride)
699     result.addOperands({stride, elementsPerStride});
700 }
701 
702 static void print(OpAsmPrinter &p, DmaStartOp op) {
703   p << " " << op.getSrcMemRef() << '[' << op.getSrcIndices() << "], "
704     << op.getDstMemRef() << '[' << op.getDstIndices() << "], "
705     << op.getNumElements() << ", " << op.getTagMemRef() << '['
706     << op.getTagIndices() << ']';
707   if (op.isStrided())
708     p << ", " << op.getStride() << ", " << op.getNumElementsPerStride();
709 
710   p.printOptionalAttrDict(op->getAttrs());
711   p << " : " << op.getSrcMemRef().getType() << ", "
712     << op.getDstMemRef().getType() << ", " << op.getTagMemRef().getType();
713 }
714 
// Parse DmaStartOp.
// Ex:
//   %dma_id = memref.dma_start %src[%i, %j], %dst[%k, %l], %size,
//                              %tag[%index], %stride, %num_elt_per_stride
//                            : memref<3076 x f32, 0>,
//                              memref<1024 x f32, 2>,
//                              memref<1 x i32>
//
723 static ParseResult parseDmaStartOp(OpAsmParser &parser,
724                                    OperationState &result) {
725   OpAsmParser::OperandType srcMemRefInfo;
726   SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
727   OpAsmParser::OperandType dstMemRefInfo;
728   SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
729   OpAsmParser::OperandType numElementsInfo;
730   OpAsmParser::OperandType tagMemrefInfo;
731   SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
732   SmallVector<OpAsmParser::OperandType, 2> strideInfo;
733 
734   SmallVector<Type, 3> types;
735   auto indexType = parser.getBuilder().getIndexType();
736 
737   // Parse and resolve the following list of operands:
738   // *) source memref followed by its indices (in square brackets).
739   // *) destination memref followed by its indices (in square brackets).
  // *) number of elements to transfer.
741   if (parser.parseOperand(srcMemRefInfo) ||
742       parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
743       parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
744       parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
745       parser.parseComma() || parser.parseOperand(numElementsInfo) ||
746       parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
747       parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
748     return failure();
749 
750   // Parse optional stride and elements per stride.
751   if (parser.parseTrailingOperandList(strideInfo))
752     return failure();
753 
754   bool isStrided = strideInfo.size() == 2;
755   if (!strideInfo.empty() && !isStrided) {
756     return parser.emitError(parser.getNameLoc(),
757                             "expected two stride related operands");
758   }
759 
760   if (parser.parseColonTypeList(types))
761     return failure();
762   if (types.size() != 3)
763     return parser.emitError(parser.getNameLoc(), "fewer/more types expected");
764 
765   if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
766       parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
767       parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
768       parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
769       // size should be an index.
770       parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
771       parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
772       // tag indices should be index.
773       parser.resolveOperands(tagIndexInfos, indexType, result.operands))
774     return failure();
775 
776   if (isStrided) {
777     if (parser.resolveOperands(strideInfo, indexType, result.operands))
778       return failure();
779   }
780 
781   return success();
782 }
783 
784 static LogicalResult verify(DmaStartOp op) {
785   unsigned numOperands = op.getNumOperands();
786 
787   // Mandatory non-variadic operands are: src memref, dst memref, tag memref and
788   // the number of elements.
789   if (numOperands < 4)
790     return op.emitOpError("expected at least 4 operands");
791 
792   // Check types of operands. The order of these calls is important: the later
793   // calls rely on some type properties to compute the operand position.
794   // 1. Source memref.
795   if (!op.getSrcMemRef().getType().isa<MemRefType>())
796     return op.emitOpError("expected source to be of memref type");
797   if (numOperands < op.getSrcMemRefRank() + 4)
798     return op.emitOpError()
799            << "expected at least " << op.getSrcMemRefRank() + 4 << " operands";
800   if (!op.getSrcIndices().empty() &&
801       !llvm::all_of(op.getSrcIndices().getTypes(),
802                     [](Type t) { return t.isIndex(); }))
803     return op.emitOpError("expected source indices to be of index type");
804 
805   // 2. Destination memref.
806   if (!op.getDstMemRef().getType().isa<MemRefType>())
807     return op.emitOpError("expected destination to be of memref type");
808   unsigned numExpectedOperands =
809       op.getSrcMemRefRank() + op.getDstMemRefRank() + 4;
810   if (numOperands < numExpectedOperands)
811     return op.emitOpError()
812            << "expected at least " << numExpectedOperands << " operands";
813   if (!op.getDstIndices().empty() &&
814       !llvm::all_of(op.getDstIndices().getTypes(),
815                     [](Type t) { return t.isIndex(); }))
816     return op.emitOpError("expected destination indices to be of index type");
817 
818   // 3. Number of elements.
819   if (!op.getNumElements().getType().isIndex())
820     return op.emitOpError("expected num elements to be of index type");
821 
822   // 4. Tag memref.
823   if (!op.getTagMemRef().getType().isa<MemRefType>())
824     return op.emitOpError("expected tag to be of memref type");
825   numExpectedOperands += op.getTagMemRefRank();
826   if (numOperands < numExpectedOperands)
827     return op.emitOpError()
828            << "expected at least " << numExpectedOperands << " operands";
829   if (!op.getTagIndices().empty() &&
830       !llvm::all_of(op.getTagIndices().getTypes(),
831                     [](Type t) { return t.isIndex(); }))
832     return op.emitOpError("expected tag indices to be of index type");
833 
834   // Optional stride-related operands must be either both present or both
835   // absent.
836   if (numOperands != numExpectedOperands &&
837       numOperands != numExpectedOperands + 2)
838     return op.emitOpError("incorrect number of operands");
839 
840   // 5. Strides.
841   if (op.isStrided()) {
842     if (!op.getStride().getType().isIndex() ||
843         !op.getNumElementsPerStride().getType().isIndex())
844       return op.emitOpError(
845           "expected stride and num elements per stride to be of type index");
846   }
847 
848   return success();
849 }
850 
851 LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
852                                SmallVectorImpl<OpFoldResult> &results) {
853   /// dma_start(memrefcast) -> dma_start
854   return foldMemRefCast(*this);
855 }
856 
857 // ---------------------------------------------------------------------------
858 // DmaWaitOp
859 // ---------------------------------------------------------------------------
860 
861 LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
862                               SmallVectorImpl<OpFoldResult> &results) {
863   /// dma_wait(memrefcast) -> dma_wait
864   return foldMemRefCast(*this);
865 }
866 
867 static LogicalResult verify(DmaWaitOp op) {
868   // Check that the number of tag indices matches the tagMemRef rank.
869   unsigned numTagIndices = op.tagIndices().size();
870   unsigned tagMemRefRank = op.getTagMemRefRank();
871   if (numTagIndices != tagMemRefRank)
872     return op.emitOpError() << "expected tagIndices to have the same number of "
873                                "elements as the tagMemRef rank, expected "
874                             << tagMemRefRank << ", but got " << numTagIndices;
875   return success();
876 }
877 
878 //===----------------------------------------------------------------------===//
879 // GlobalOp
880 //===----------------------------------------------------------------------===//
881 
882 static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
883                                                    TypeAttr type,
884                                                    Attribute initialValue) {
885   p << type;
886   if (!op.isExternal()) {
887     p << " = ";
888     if (op.isUninitialized())
889       p << "uninitialized";
890     else
891       p.printAttributeWithoutType(initialValue);
892   }
893 }
894 
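// For reference, the trailing `type = initial-value` portion printed and
// parsed here looks roughly like (illustrative sketch):
//
//   memref.global "private" constant @c : memref<2xf32> = dense<[0.0, 2.0]>
//   memref.global @uninit : memref<2xf32> = uninitialized
//   memref.global @ext : memref<2xf32>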
895 static ParseResult
896 parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
897                                        Attribute &initialValue) {
898   Type type;
899   if (parser.parseType(type))
900     return failure();
901 
902   auto memrefType = type.dyn_cast<MemRefType>();
903   if (!memrefType || !memrefType.hasStaticShape())
904     return parser.emitError(parser.getNameLoc())
905            << "type should be static shaped memref, but got " << type;
906   typeAttr = TypeAttr::get(type);
907 
908   if (parser.parseOptionalEqual())
909     return success();
910 
911   if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
912     initialValue = UnitAttr::get(parser.getContext());
913     return success();
914   }
915 
916   Type tensorType = getTensorTypeFromMemRefType(memrefType);
917   if (parser.parseAttribute(initialValue, tensorType))
918     return failure();
919   if (!initialValue.isa<ElementsAttr>())
920     return parser.emitError(parser.getNameLoc())
921            << "initial value should be a unit or elements attribute";
922   return success();
923 }
924 
925 static LogicalResult verify(GlobalOp op) {
926   auto memrefType = op.type().dyn_cast<MemRefType>();
927   if (!memrefType || !memrefType.hasStaticShape())
928     return op.emitOpError("type should be static shaped memref, but got ")
929            << op.type();
930 
931   // Verify that the initial value, if present, is either a unit attribute or
932   // an elements attribute.
933   if (op.initial_value().hasValue()) {
934     Attribute initValue = op.initial_value().getValue();
935     if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
936       return op.emitOpError("initial value should be a unit or elements "
937                             "attribute, but got ")
938              << initValue;
939 
940     // Check that the type of the initial value is compatible with the type of
941     // the global variable.
942     if (initValue.isa<ElementsAttr>()) {
943       Type initType = initValue.getType();
944       Type tensorType = getTensorTypeFromMemRefType(memrefType);
945       if (initType != tensorType)
946         return op.emitOpError("initial value expected to be of type ")
947                << tensorType << ", but was of type " << initType;
948     }
949   }
950 
951   if (Optional<uint64_t> alignAttr = op.alignment()) {
952     uint64_t alignment = alignAttr.getValue();
953 
954     if (!llvm::isPowerOf2_64(alignment))
955       return op->emitError() << "alignment attribute value " << alignment
956                              << " is not a power of 2";
957   }
958 
959   // TODO: verify visibility for declarations.
960   return success();
961 }
962 
963 //===----------------------------------------------------------------------===//
964 // GetGlobalOp
965 //===----------------------------------------------------------------------===//
966 
967 LogicalResult
968 GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
969   // Verify that the result type is same as the type of the referenced
970   // memref.global op.
971   auto global =
972       symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr());
973   if (!global)
974     return emitOpError("'")
975            << name() << "' does not reference a valid global memref";
976 
977   Type resultType = result().getType();
978   if (global.type() != resultType)
979     return emitOpError("result type ")
980            << resultType << " does not match type " << global.type()
981            << " of the global memref @" << name();
982   return success();
983 }
984 
985 //===----------------------------------------------------------------------===//
986 // LoadOp
987 //===----------------------------------------------------------------------===//
988 
989 static LogicalResult verify(LoadOp op) {
990   if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
991     return op.emitOpError("incorrect number of indices for load");
992   return success();
993 }
994 
995 OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
996   /// load(memrefcast) -> load
997   if (succeeded(foldMemRefCast(*this)))
998     return getResult();
999   return OpFoldResult();
1000 }
1001 
1002 //===----------------------------------------------------------------------===//
1003 // PrefetchOp
1004 //===----------------------------------------------------------------------===//
1005 
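// The custom form handled by the printer and parser below looks like
// (illustrative sketch):
//
//   memref.prefetch %0[%i, %j], read, locality<3>, data : memref<400x400xf32>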
1006 static void print(OpAsmPrinter &p, PrefetchOp op) {
1007   p << " " << op.memref() << '[';
1008   p.printOperands(op.indices());
1009   p << ']' << ", " << (op.isWrite() ? "write" : "read");
1010   p << ", locality<" << op.localityHint();
1011   p << ">, " << (op.isDataCache() ? "data" : "instr");
1012   p.printOptionalAttrDict(
1013       op->getAttrs(),
1014       /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
1015   p << " : " << op.getMemRefType();
1016 }
1017 
1018 static ParseResult parsePrefetchOp(OpAsmParser &parser,
1019                                    OperationState &result) {
1020   OpAsmParser::OperandType memrefInfo;
1021   SmallVector<OpAsmParser::OperandType, 4> indexInfo;
1022   IntegerAttr localityHint;
1023   MemRefType type;
1024   StringRef readOrWrite, cacheType;
1025 
1026   auto indexTy = parser.getBuilder().getIndexType();
1027   auto i32Type = parser.getBuilder().getIntegerType(32);
1028   if (parser.parseOperand(memrefInfo) ||
1029       parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
1030       parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
1031       parser.parseComma() || parser.parseKeyword("locality") ||
1032       parser.parseLess() ||
1033       parser.parseAttribute(localityHint, i32Type, "localityHint",
1034                             result.attributes) ||
1035       parser.parseGreater() || parser.parseComma() ||
1036       parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
1037       parser.resolveOperand(memrefInfo, type, result.operands) ||
1038       parser.resolveOperands(indexInfo, indexTy, result.operands))
1039     return failure();
1040 
1041   if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
1042     return parser.emitError(parser.getNameLoc(),
1043                             "rw specifier has to be 'read' or 'write'");
1044   result.addAttribute(
1045       PrefetchOp::getIsWriteAttrName(),
1046       parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));
1047 
1048   if (!cacheType.equals("data") && !cacheType.equals("instr"))
1049     return parser.emitError(parser.getNameLoc(),
1050                             "cache type has to be 'data' or 'instr'");
1051 
1052   result.addAttribute(
1053       PrefetchOp::getIsDataCacheAttrName(),
1054       parser.getBuilder().getBoolAttr(cacheType.equals("data")));
1055 
1056   return success();
1057 }
1058 
1059 static LogicalResult verify(PrefetchOp op) {
1060   if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
1061     return op.emitOpError("too few indices");
1062 
1063   return success();
1064 }
1065 
1066 LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
1067                                SmallVectorImpl<OpFoldResult> &results) {
1068   // prefetch(memrefcast) -> prefetch
1069   return foldMemRefCast(*this);
1070 }
1071 
1072 //===----------------------------------------------------------------------===//
1073 // RankOp
1074 //===----------------------------------------------------------------------===//
1075 
1076 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
1077   // Constant fold rank when the rank of the operand is known.
1078   auto type = getOperand().getType();
1079   auto shapedType = type.dyn_cast<ShapedType>();
1080   if (shapedType && shapedType.hasRank())
1081     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
1082   return IntegerAttr();
1083 }
1084 
1085 //===----------------------------------------------------------------------===//
1086 // ReinterpretCastOp
1087 //===----------------------------------------------------------------------===//
1088 
1089 /// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`,
1090 /// `staticSizes` and `staticStrides` are automatically filled with
1091 /// source-memref-rank sentinel values that encode dynamic entries.
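///
/// The op built here takes a form roughly like (illustrative sketch, not
/// from the source):
///
/// ```mlir
///   memref.reinterpret_cast %src to offset: [0], sizes: [%n], strides: [1]
///       : memref<?xf32> to memref<?xf32>
/// ```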
1092 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1093                               MemRefType resultType, Value source,
1094                               OpFoldResult offset, ArrayRef<OpFoldResult> sizes,
1095                               ArrayRef<OpFoldResult> strides,
1096                               ArrayRef<NamedAttribute> attrs) {
1097   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1098   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1099   dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
1100                              ShapedType::kDynamicStrideOrOffset);
1101   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1102                              ShapedType::kDynamicSize);
1103   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1104                              ShapedType::kDynamicStrideOrOffset);
1105   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1106         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1107         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1108   result.addAttributes(attrs);
1109 }
1110 
1111 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1112                               MemRefType resultType, Value source,
1113                               int64_t offset, ArrayRef<int64_t> sizes,
1114                               ArrayRef<int64_t> strides,
1115                               ArrayRef<NamedAttribute> attrs) {
1116   SmallVector<OpFoldResult> sizeValues =
1117       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1118         return b.getI64IntegerAttr(v);
1119       }));
1120   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1121       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1122         return b.getI64IntegerAttr(v);
1123       }));
1124   build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
1125         strideValues, attrs);
1126 }
1127 
1128 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1129                               MemRefType resultType, Value source, Value offset,
1130                               ValueRange sizes, ValueRange strides,
1131                               ArrayRef<NamedAttribute> attrs) {
1132   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1133       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1134   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1135       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1136   build(b, result, resultType, source, offset, sizeValues, strideValues, attrs);
1137 }
1138 
1139 // TODO: ponder whether we want to allow missing trailing sizes/strides that are
1140 // completed automatically, like we have for subview and extract_slice.
1141 static LogicalResult verify(ReinterpretCastOp op) {
1142   // The source and result memrefs should be in the same memory space.
1143   auto srcType = op.source().getType().cast<BaseMemRefType>();
1144   auto resultType = op.getType().cast<MemRefType>();
1145   if (srcType.getMemorySpace() != resultType.getMemorySpace())
1146     return op.emitError("different memory spaces specified for source type ")
1147            << srcType << " and result memref type " << resultType;
1148   if (srcType.getElementType() != resultType.getElementType())
1149     return op.emitError("different element types specified for source type ")
1150            << srcType << " and result memref type " << resultType;
1151 
1152   // Match sizes in result memref type and in static_sizes attribute.
1153   for (auto &en :
1154        llvm::enumerate(llvm::zip(resultType.getShape(),
1155                                  extractFromI64ArrayAttr(op.static_sizes())))) {
1156     int64_t resultSize = std::get<0>(en.value());
1157     int64_t expectedSize = std::get<1>(en.value());
1158     if (!ShapedType::isDynamic(resultSize) && resultSize != expectedSize)
1159       return op.emitError("expected result type with size = ")
1160              << expectedSize << " instead of " << resultSize
1161              << " in dim = " << en.index();
1162   }
1163 
  // Match offset and strides in static_offsets and static_strides attributes
  // if the result memref type has a non-identity layout specified.
1166   if (!resultType.getLayout().isIdentity()) {
1167     int64_t resultOffset;
1168     SmallVector<int64_t, 4> resultStrides;
1169     if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
1170       return failure();
1171 
1172     // Match offset in result memref type and in static_offsets attribute.
1173     int64_t expectedOffset =
1174         extractFromI64ArrayAttr(op.static_offsets()).front();
1175     if (!ShapedType::isDynamicStrideOrOffset(resultOffset) &&
1176         resultOffset != expectedOffset)
1177       return op.emitError("expected result type with offset = ")
1178              << resultOffset << " instead of " << expectedOffset;
1179 
1180     // Match strides in result memref type and in static_strides attribute.
1181     for (auto &en : llvm::enumerate(llvm::zip(
1182              resultStrides, extractFromI64ArrayAttr(op.static_strides())))) {
1183       int64_t resultStride = std::get<0>(en.value());
1184       int64_t expectedStride = std::get<1>(en.value());
1185       if (!ShapedType::isDynamicStrideOrOffset(resultStride) &&
1186           resultStride != expectedStride)
1187         return op.emitError("expected result type with stride = ")
1188                << expectedStride << " instead of " << resultStride
1189                << " in dim = " << en.index();
1190     }
1191   }
1192   return success();
1193 }
1194 
1195 //===----------------------------------------------------------------------===//
1196 // Reassociative reshape ops
1197 //===----------------------------------------------------------------------===//
1198 
1199 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
1200   return getSymbolLessAffineMaps(getReassociationExprs());
1201 }
1202 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
1203   return convertReassociationIndicesToExprs(getContext(),
1204                                             getReassociationIndices());
1205 }
1206 
1207 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
1208   return getSymbolLessAffineMaps(getReassociationExprs());
1209 }
1210 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
1211   return convertReassociationIndicesToExprs(getContext(),
1212                                             getReassociationIndices());
1213 }
1214 
1215 static void print(OpAsmPrinter &p, ExpandShapeOp op) {
1216   ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
1217 }
1218 
1219 static void print(OpAsmPrinter &p, CollapseShapeOp op) {
1220   ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
1221 }
1222 
1223 /// Detect whether memref dims [dim, dim + extent) can be reshaped without
1224 /// copies.
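/// E.g. (illustrative): for sizes [2, 3, 4] with strides [12, 4, 1], the band
/// covering all three dims is reshapable since 12 == 4 * 3 and 4 == 1 * 4;
/// with strides [24, 4, 1] (a non-contiguous outer dim) it is not.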
1225 static bool isReshapableDimBand(unsigned dim, unsigned extent,
1226                                 ArrayRef<int64_t> sizes,
1227                                 ArrayRef<AffineExpr> strides) {
1228   // Bands of extent one can be reshaped, as they are not reshaped at all.
1229   if (extent == 1)
1230     return true;
1231   // Otherwise, the size of the first dimension needs to be known.
1232   if (ShapedType::isDynamic(sizes[dim]))
1233     return false;
1234   assert(sizes.size() == strides.size() && "mismatched ranks");
  // Iterate with `idx + 1 < e` so that the accesses to `idx + 1` below stay
  // in bounds.
1237   for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) {
1238     // Only bands of static shapes are reshapable. This is due to the fact that
1239     // there is no relation between dynamic sizes and dynamic strides: we do not
1240     // have enough information to know whether a "-1" size corresponds to the
1241     // proper symbol in the AffineExpr of a stride.
1242     if (ShapedType::isDynamic(sizes[idx + 1]))
1243       return false;
1244     // TODO: Refine this by passing the proper nDims and nSymbols so we can
1245     // simplify on the fly and catch more reshapable cases.
1246     if (strides[idx] != strides[idx + 1] * sizes[idx + 1])
1247       return false;
1248   }
1249   return true;
1250 }
1251 
1252 /// Compute the MemRefType obtained by applying the `reassociation` (which is
1253 /// expected to be valid) to `type`.
/// If `type` is a contiguous MemRefType, this always produces a contiguous
/// MemRefType.
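/// E.g. (illustrative): collapsing a contiguous memref<2x3x4xf32> with
/// reassociation [[0, 1], [2]] yields memref<6x4xf32>.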
1256 static MemRefType
1257 computeReshapeCollapsedType(MemRefType type,
1258                             ArrayRef<AffineMap> reassociation) {
1259   auto sizes = type.getShape();
1260   AffineExpr offset;
1261   SmallVector<AffineExpr, 4> strides;
1262   auto status = getStridesAndOffset(type, strides, offset);
1263   (void)status;
1264   assert(succeeded(status) && "expected strided memref");
1265 
1266   SmallVector<int64_t, 4> newSizes;
1267   newSizes.reserve(reassociation.size());
1268   SmallVector<AffineExpr, 4> newStrides;
1269   newStrides.reserve(reassociation.size());
1270 
1271   // Use the fact that reassociation is valid to simplify the logic: only use
1272   // each map's rank.
1273   assert(isReassociationValid(reassociation) && "invalid reassociation");
1274   unsigned currentDim = 0;
1275   for (AffineMap m : reassociation) {
1276     unsigned dim = m.getNumResults();
1277     int64_t size = 1;
1278     AffineExpr stride = strides[currentDim + dim - 1];
1279     if (!isReshapableDimBand(currentDim, dim, sizes, strides)) {
1280       size = ShapedType::kDynamicSize;
1281       stride = AffineExpr();
1282     } else {
1283       for (unsigned d = 0; d < dim; ++d)
1284         size *= sizes[currentDim + d];
1285     }
1286     newSizes.push_back(size);
1287     newStrides.push_back(stride);
1288     currentDim += dim;
1289   }
1290 
1291   // Early-exit: if `type` is contiguous, the result must be contiguous.
1292   if (canonicalizeStridedLayout(type).getLayout().isIdentity())
1293     return MemRefType::Builder(type).setShape(newSizes).setLayout({});
1294 
1295   // Convert back to int64_t because we don't have enough information to create
1296   // new strided layouts from AffineExpr only. This corresponds to a case where
1297   // copies may be necessary.
1298   int64_t intOffset = ShapedType::kDynamicStrideOrOffset;
1299   if (auto o = offset.dyn_cast<AffineConstantExpr>())
1300     intOffset = o.getValue();
1301   SmallVector<int64_t, 4> intStrides;
1302   intStrides.reserve(strides.size());
1303   for (auto stride : newStrides) {
1304     if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>())
1305       intStrides.push_back(cst.getValue());
1306     else
1307       intStrides.push_back(ShapedType::kDynamicStrideOrOffset);
1308   }
1309   auto layout =
1310       makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext());
1311   return canonicalizeStridedLayout(
1312       MemRefType::Builder(type).setShape(newSizes).setLayout(
1313           AffineMapAttr::get(layout)));
1314 }
1315 
1316 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1317                           ArrayRef<ReassociationIndices> reassociation,
1318                           ArrayRef<NamedAttribute> attrs) {
1319   auto memRefType = src.getType().cast<MemRefType>();
1320   auto resultType = computeReshapeCollapsedType(
1321       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1322                       b.getContext(), reassociation)));
1323   build(b, result, resultType, src, attrs);
1324   result.addAttribute(getReassociationAttrName(),
1325                       getReassociationIndicesAttribute(b, reassociation));
1326 }
1327 
1328 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1329                             ArrayRef<ReassociationIndices> reassociation,
1330                             ArrayRef<NamedAttribute> attrs) {
1331   auto memRefType = src.getType().cast<MemRefType>();
1332   auto resultType = computeReshapeCollapsedType(
1333       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1334                       b.getContext(), reassociation)));
1335   build(b, result, resultType, src, attrs);
1336   result.addAttribute(getReassociationAttrName(),
1337                       getReassociationIndicesAttribute(b, reassociation));
1338 }
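// For reference, these builders correspond roughly to IR of the form below
// (assembly sketched here for illustration; see the MemRef dialect
// documentation for the authoritative syntax):
//   %c = memref.collapse_shape %src [[0, 1], [2]]
//       : memref<3x4x5xf32> into memref<12x5xf32>
//   %e = memref.expand_shape %c [[0, 1], [2]]
//       : memref<12x5xf32> into memref<3x4x5xf32>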
1339 
1340 template <typename ReshapeOp,
1341           bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value>
1342 static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType,
1343                                      MemRefType collapsedType) {
1344   if (failed(
1345           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
1346     return failure();
1347   auto maps = op.getReassociationMaps();
1348   MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps);
1349   if (collapsedType != expectedType)
1350     return op.emitOpError("expected collapsed type to be ")
1351            << expectedType << ", but got " << collapsedType;
1352   return success();
1353 }
1354 
1355 static LogicalResult verify(ExpandShapeOp op) {
1356   return verifyReshapeOp(op, op.getResultType(), op.getSrcType());
1357 }
1358 
1359 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1360                                                 MLIRContext *context) {
1361   results.add<CollapseReshapeOps<ExpandShapeOp>,
1362               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context);
1363 }
1364 
1365 static LogicalResult verify(CollapseShapeOp op) {
1366   return verifyReshapeOp(op, op.getSrcType(), op.getResultType());
1367 }
1368 
1369 struct CollapseShapeOpMemRefCastFolder
1370     : public OpRewritePattern<CollapseShapeOp> {
1371 public:
1372   using OpRewritePattern<CollapseShapeOp>::OpRewritePattern;
1373 
1374   LogicalResult matchAndRewrite(CollapseShapeOp op,
1375                                 PatternRewriter &rewriter) const override {
1376     auto cast = op.getOperand().getDefiningOp<CastOp>();
1377     if (!cast)
1378       return failure();
1379 
1380     if (!CastOp::canFoldIntoConsumerOp(cast))
1381       return failure();
1382 
1383     Type newResultType = computeReshapeCollapsedType(
1384         cast.getOperand().getType().cast<MemRefType>(),
1385         op.getReassociationMaps());
1386 
1387     if (newResultType == op.getResultType()) {
1388       rewriter.updateRootInPlace(
1389           op, [&]() { op.srcMutable().assign(cast.source()); });
1390     } else {
1391       Value newOp = rewriter.create<CollapseShapeOp>(
1392           op->getLoc(), cast.source(), op.getReassociationIndices());
1393       rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
1394     }
1395     return success();
1396   }
1397 };
1398 
1399 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1400                                                   MLIRContext *context) {
1401   results.add<CollapseReshapeOps<CollapseShapeOp>,
1402               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
1403               CollapseShapeOpMemRefCastFolder>(context);
}

OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
}

OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
}
1411 
1412 //===----------------------------------------------------------------------===//
1413 // ReshapeOp
1414 //===----------------------------------------------------------------------===//
1415 
1416 static LogicalResult verify(ReshapeOp op) {
1417   Type operandType = op.source().getType();
1418   Type resultType = op.result().getType();
1419 
1420   Type operandElementType = operandType.cast<ShapedType>().getElementType();
1421   Type resultElementType = resultType.cast<ShapedType>().getElementType();
1422   if (operandElementType != resultElementType)
1423     return op.emitOpError("element types of source and destination memref "
1424                           "types should be the same");
1425 
1426   if (auto operandMemRefType = operandType.dyn_cast<MemRefType>())
1427     if (!operandMemRefType.getLayout().isIdentity())
1428       return op.emitOpError(
1429           "source memref type should have identity affine map");
1430 
1431   int64_t shapeSize = op.shape().getType().cast<MemRefType>().getDimSize(0);
1432   auto resultMemRefType = resultType.dyn_cast<MemRefType>();
1433   if (resultMemRefType) {
1434     if (!resultMemRefType.getLayout().isIdentity())
1435       return op.emitOpError(
1436           "result memref type should have identity affine map");
1437     if (shapeSize == ShapedType::kDynamicSize)
1438       return op.emitOpError("cannot use shape operand with dynamic length to "
1439                             "reshape to statically-ranked memref type");
1440     if (shapeSize != resultMemRefType.getRank())
1441       return op.emitOpError(
1442           "length of shape operand differs from the result's memref rank");
1443   }
1444   return success();
1445 }
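// As an illustration of the constraints checked above (assembly sketched; see
// the op documentation for the authoritative syntax), reshaping to a ranked
// result requires a statically sized shape operand whose length equals the
// result rank:
//   %dst = memref.reshape %src(%shape)
//       : (memref<4x5xf32>, memref<2xindex>) -> memref<?x?xf32>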
1446 
1447 //===----------------------------------------------------------------------===//
1448 // StoreOp
1449 //===----------------------------------------------------------------------===//
1450 
1451 static LogicalResult verify(StoreOp op) {
1452   if (op.getNumOperands() != 2 + op.getMemRefType().getRank())
1453     return op.emitOpError("store index operand count not equal to memref rank");
1454 
1455   return success();
1456 }
1457 
1458 LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
1459                             SmallVectorImpl<OpFoldResult> &results) {
1460   /// store(memrefcast) -> store
1461   return foldMemRefCast(*this, getValueToStore());
1462 }
1463 
1464 //===----------------------------------------------------------------------===//
1465 // SubViewOp
1466 //===----------------------------------------------------------------------===//
1467 
1468 namespace {
1469 /// Helpers to write more idiomatic operations.
1470 namespace saturated_arith {
1471 struct Wrapper {
1472   explicit Wrapper(int64_t v) : v(v) {}
1473   operator int64_t() { return v; }
1474   int64_t v;
1475 };
1476 Wrapper operator+(Wrapper a, int64_t b) {
1477   if (ShapedType::isDynamicStrideOrOffset(a) ||
1478       ShapedType::isDynamicStrideOrOffset(b))
1479     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1480   return Wrapper(a.v + b);
1481 }
1482 Wrapper operator*(Wrapper a, int64_t b) {
1483   if (ShapedType::isDynamicStrideOrOffset(a) ||
1484       ShapedType::isDynamicStrideOrOffset(b))
1485     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1486   return Wrapper(a.v * b);
1487 }
1488 } // namespace saturated_arith
1489 } // namespace
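// For instance, Wrapper(4) * 3 yields 12, whereas any operation involving the
// dynamic sentinel saturates to ShapedType::kDynamicStrideOrOffset.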
1490 
1491 /// A subview result type can be fully inferred from the source type and the
1492 /// static representation of offsets, sizes and strides. Special sentinels
1493 /// encode the dynamic case.
1494 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1495                                 ArrayRef<int64_t> staticOffsets,
1496                                 ArrayRef<int64_t> staticSizes,
1497                                 ArrayRef<int64_t> staticStrides) {
1498   unsigned rank = sourceMemRefType.getRank();
1499   (void)rank;
1500   assert(staticOffsets.size() == rank && "unexpected staticOffsets overflow");
1501   assert(staticSizes.size() == rank && "unexpected staticSizes overflow");
1502   assert(staticStrides.size() == rank && "unexpected staticStrides overflow");
1503 
1504   // Extract source offset and strides.
1505   int64_t sourceOffset;
1506   SmallVector<int64_t, 4> sourceStrides;
1507   auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
1508   assert(succeeded(res) && "SubViewOp expected strided memref type");
1509   (void)res;
1510 
1511   // Compute target offset whose value is:
1512   //   `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
1513   int64_t targetOffset = sourceOffset;
1514   for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
    auto staticOffset = std::get<0>(it), sourceStride = std::get<1>(it);
    using namespace saturated_arith;
    targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * sourceStride;
1518   }
1519 
1520   // Compute target stride whose value is:
1521   //   `sourceStrides_i * staticStrides_i`.
1522   SmallVector<int64_t, 4> targetStrides;
1523   targetStrides.reserve(staticOffsets.size());
1524   for (auto it : llvm::zip(sourceStrides, staticStrides)) {
1525     auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
1526     using namespace saturated_arith;
1527     targetStrides.push_back(Wrapper(sourceStride) * staticStride);
1528   }
1529 
1530   // The type is now known.
1531   return MemRefType::get(
1532       staticSizes, sourceMemRefType.getElementType(),
1533       makeStridedLinearLayoutMap(targetStrides, targetOffset,
1534                                  sourceMemRefType.getContext()),
1535       sourceMemRefType.getMemorySpace());
1536 }
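// Worked example for the inference above (illustrative): for a source
// memref<8x16xf32> with strides [16, 1] and offset 0, offsets [2, 4], sizes
// [4, 4] and strides [1, 2] give offset 0 + 2 * 16 + 4 * 1 = 36 and strides
// [16 * 1, 1 * 2] = [16, 2], i.e. memref<4x4xf32, offset: 36, strides: [16, 2]>.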
1537 
1538 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1539                                 ArrayRef<OpFoldResult> offsets,
1540                                 ArrayRef<OpFoldResult> sizes,
1541                                 ArrayRef<OpFoldResult> strides) {
1542   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1543   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1544   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1545                              ShapedType::kDynamicStrideOrOffset);
1546   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1547                              ShapedType::kDynamicSize);
1548   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1549                              ShapedType::kDynamicStrideOrOffset);
1550   return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1551                                     staticSizes, staticStrides);
1552 }
1553 
1554 Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
1555                                            MemRefType sourceRankedTensorType,
1556                                            ArrayRef<int64_t> offsets,
1557                                            ArrayRef<int64_t> sizes,
1558                                            ArrayRef<int64_t> strides) {
1559   auto inferredType =
1560       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
1561           .cast<MemRefType>();
  assert(inferredType.getRank() >= resultRank &&
         "expected the inferred rank to be greater than or equal to the "
         "result rank");
1563   int rankDiff = inferredType.getRank() - resultRank;
1564   if (rankDiff > 0) {
1565     auto shape = inferredType.getShape();
1566     llvm::SmallDenseSet<unsigned> dimsToProject;
1567     mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
1568     SmallVector<int64_t> projectedShape;
1569     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
1570       if (!dimsToProject.contains(pos))
1571         projectedShape.push_back(shape[pos]);
1572 
1573     AffineMap map = inferredType.getLayout().getAffineMap();
1574     if (!map.isIdentity())
1575       map = getProjectedMap(map, dimsToProject);
1576     inferredType =
1577         MemRefType::get(projectedShape, inferredType.getElementType(), map,
1578                         inferredType.getMemorySpace());
1579   }
1580   return inferredType;
1581 }
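// Illustration: if the inferred type is memref<1x4x1x8xf32, ...> and
// `resultRank` is 2, the two unit dimensions are projected away, yielding
// memref<4x8xf32, ...> with a correspondingly projected layout map.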
1582 
1583 Type SubViewOp::inferRankReducedResultType(unsigned resultRank,
1584                                            MemRefType sourceRankedTensorType,
1585                                            ArrayRef<OpFoldResult> offsets,
1586                                            ArrayRef<OpFoldResult> sizes,
1587                                            ArrayRef<OpFoldResult> strides) {
1588   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1589   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1590   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1591                              ShapedType::kDynamicStrideOrOffset);
1592   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1593                              ShapedType::kDynamicSize);
1594   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1595                              ShapedType::kDynamicStrideOrOffset);
1596   return SubViewOp::inferRankReducedResultType(
1597       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
1598       staticStrides);
}

// Build a SubViewOp with mixed static and dynamic entries and custom result
1601 // type. If the type passed is nullptr, it is inferred.
1602 void SubViewOp::build(OpBuilder &b, OperationState &result,
1603                       MemRefType resultType, Value source,
1604                       ArrayRef<OpFoldResult> offsets,
1605                       ArrayRef<OpFoldResult> sizes,
1606                       ArrayRef<OpFoldResult> strides,
1607                       ArrayRef<NamedAttribute> attrs) {
1608   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1609   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1610   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1611                              ShapedType::kDynamicStrideOrOffset);
1612   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1613                              ShapedType::kDynamicSize);
1614   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1615                              ShapedType::kDynamicStrideOrOffset);
1616   auto sourceMemRefType = source.getType().cast<MemRefType>();
1617   // Structuring implementation this way avoids duplication between builders.
1618   if (!resultType) {
1619     resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1620                                             staticSizes, staticStrides)
1621                      .cast<MemRefType>();
1622   }
1623   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1624         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1625         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1626   result.addAttributes(attrs);
1627 }
1628 
1629 // Build a SubViewOp with mixed static and dynamic entries and inferred result
1630 // type.
1631 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1632                       ArrayRef<OpFoldResult> offsets,
1633                       ArrayRef<OpFoldResult> sizes,
1634                       ArrayRef<OpFoldResult> strides,
1635                       ArrayRef<NamedAttribute> attrs) {
1636   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1637 }
1638 
1639 // Build a SubViewOp with static entries and inferred result type.
1640 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1641                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1642                       ArrayRef<int64_t> strides,
1643                       ArrayRef<NamedAttribute> attrs) {
1644   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1645       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1646         return b.getI64IntegerAttr(v);
1647       }));
1648   SmallVector<OpFoldResult> sizeValues =
1649       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1650         return b.getI64IntegerAttr(v);
1651       }));
1652   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1653       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1654         return b.getI64IntegerAttr(v);
1655       }));
1656   build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
1657 }
1658 
// Build a SubViewOp with static entries and custom result type. If the
// type passed is nullptr, it is inferred.
1661 void SubViewOp::build(OpBuilder &b, OperationState &result,
1662                       MemRefType resultType, Value source,
1663                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1664                       ArrayRef<int64_t> strides,
1665                       ArrayRef<NamedAttribute> attrs) {
1666   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1667       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1668         return b.getI64IntegerAttr(v);
1669       }));
1670   SmallVector<OpFoldResult> sizeValues =
1671       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1672         return b.getI64IntegerAttr(v);
1673       }));
1674   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1675       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1676         return b.getI64IntegerAttr(v);
1677       }));
1678   build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
1679         attrs);
1680 }
1681 
1682 // Build a SubViewOp with dynamic entries and custom result type. If the type
1683 // passed is nullptr, it is inferred.
1684 void SubViewOp::build(OpBuilder &b, OperationState &result,
1685                       MemRefType resultType, Value source, ValueRange offsets,
1686                       ValueRange sizes, ValueRange strides,
1687                       ArrayRef<NamedAttribute> attrs) {
1688   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1689       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1690   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1691       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1692   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1693       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1694   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1695 }
1696 
1697 // Build a SubViewOp with dynamic entries and inferred result type.
1698 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1699                       ValueRange offsets, ValueRange sizes, ValueRange strides,
1700                       ArrayRef<NamedAttribute> attrs) {
1701   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1702 }
1703 
1704 /// For ViewLikeOpInterface.
1705 Value SubViewOp::getViewSource() { return source(); }
1706 
1707 /// Return true if t1 and t2 have equal offsets (both dynamic or of same static
1708 /// value).
1709 static bool haveCompatibleOffsets(MemRefType t1, MemRefType t2) {
1710   AffineExpr t1Offset, t2Offset;
1711   SmallVector<AffineExpr> t1Strides, t2Strides;
1712   auto res1 = getStridesAndOffset(t1, t1Strides, t1Offset);
1713   auto res2 = getStridesAndOffset(t2, t2Strides, t2Offset);
1714   return succeeded(res1) && succeeded(res2) && t1Offset == t2Offset;
1715 }
1716 
/// Checks if `originalType` can be rank-reduced to `candidateRankReducedType`.
/// This function is a slight variant of the `is subsequence` algorithm, where
/// each non-matching dimension must have a static size of 1.
1720 static SliceVerificationResult
1721 isRankReducedMemRefType(MemRefType originalType,
1722                         MemRefType candidateRankReducedType,
1723                         ArrayRef<OpFoldResult> sizes) {
1724   auto partialRes = isRankReducedType(originalType, candidateRankReducedType);
1725   if (partialRes != SliceVerificationResult::Success)
1726     return partialRes;
1727 
1728   auto optionalUnusedDimsMask = computeMemRefRankReductionMask(
1729       originalType, candidateRankReducedType, sizes);
1730 
  // If no mask could be computed, the sizes cannot be matched.
1732   if (!optionalUnusedDimsMask.hasValue())
1733     return SliceVerificationResult::LayoutMismatch;
1734 
1735   if (originalType.getMemorySpace() !=
1736       candidateRankReducedType.getMemorySpace())
1737     return SliceVerificationResult::MemSpaceMismatch;
1738 
1739   // No amount of stride dropping can reconcile incompatible offsets.
1740   if (!haveCompatibleOffsets(originalType, candidateRankReducedType))
1741     return SliceVerificationResult::LayoutMismatch;
1742 
1743   return SliceVerificationResult::Success;
1744 }
1745 
1746 template <typename OpTy>
1747 static LogicalResult produceSubViewErrorMsg(SliceVerificationResult result,
1748                                             OpTy op, Type expectedType) {
1749   auto memrefType = expectedType.cast<ShapedType>();
1750   switch (result) {
1751   case SliceVerificationResult::Success:
1752     return success();
1753   case SliceVerificationResult::RankTooLarge:
1754     return op.emitError("expected result rank to be smaller or equal to ")
1755            << "the source rank. ";
1756   case SliceVerificationResult::SizeMismatch:
1757     return op.emitError("expected result type to be ")
1758            << expectedType
1759            << " or a rank-reduced version. (mismatch of result sizes) ";
1760   case SliceVerificationResult::ElemTypeMismatch:
1761     return op.emitError("expected result element type to be ")
1762            << memrefType.getElementType();
1763   case SliceVerificationResult::MemSpaceMismatch:
1764     return op.emitError("expected result and source memory spaces to match.");
1765   case SliceVerificationResult::LayoutMismatch:
1766     return op.emitError("expected result type to be ")
1767            << expectedType
1768            << " or a rank-reduced version. (mismatch of result layout) ";
1769   }
1770   llvm_unreachable("unexpected subview verification result");
1771 }
1772 
1773 /// Verifier for SubViewOp.
1774 static LogicalResult verify(SubViewOp op) {
1775   MemRefType baseType = op.getSourceType();
1776   MemRefType subViewType = op.getType();
1777 
1778   // The base memref and the view memref should be in the same memory space.
1779   if (baseType.getMemorySpace() != subViewType.getMemorySpace())
1780     return op.emitError("different memory spaces specified for base memref "
1781                         "type ")
1782            << baseType << " and subview memref type " << subViewType;
1783 
1784   // Verify that the base memref type has a strided layout map.
1785   if (!isStrided(baseType))
1786     return op.emitError("base type ") << baseType << " is not strided";
1787 
1788   // Verify result type against inferred type.
1789   auto expectedType = SubViewOp::inferResultType(
1790       baseType, extractFromI64ArrayAttr(op.static_offsets()),
1791       extractFromI64ArrayAttr(op.static_sizes()),
1792       extractFromI64ArrayAttr(op.static_strides()));
1793 
1794   auto result = isRankReducedMemRefType(expectedType.cast<MemRefType>(),
1795                                         subViewType, op.getMixedSizes());
1796   return produceSubViewErrorMsg(result, op, expectedType);
1797 }
1798 
1799 raw_ostream &mlir::operator<<(raw_ostream &os, const Range &range) {
1800   return os << "range " << range.offset << ":" << range.size << ":"
1801             << range.stride;
1802 }
1803 
1804 /// Return the list of Range (i.e. offset, size, stride). Each Range
1805 /// entry contains either the dynamic value or a ConstantIndexOp constructed
1806 /// with `b` at location `loc`.
1807 SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
1808                                               OpBuilder &b, Location loc) {
1809   std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
1810   assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
1811   assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
1812   SmallVector<Range, 8> res;
1813   unsigned rank = ranks[0];
1814   res.reserve(rank);
1815   for (unsigned idx = 0; idx < rank; ++idx) {
1816     Value offset =
1817         op.isDynamicOffset(idx)
1818             ? op.getDynamicOffset(idx)
1819             : b.create<arith::ConstantIndexOp>(loc, op.getStaticOffset(idx));
1820     Value size =
1821         op.isDynamicSize(idx)
1822             ? op.getDynamicSize(idx)
1823             : b.create<arith::ConstantIndexOp>(loc, op.getStaticSize(idx));
1824     Value stride =
1825         op.isDynamicStride(idx)
1826             ? op.getDynamicStride(idx)
1827             : b.create<arith::ConstantIndexOp>(loc, op.getStaticStride(idx));
1828     res.emplace_back(Range{offset, size, stride});
1829   }
1830   return res;
1831 }
1832 
1833 /// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
1834 /// deduce the result type for the given `sourceType`. Additionally, reduce the
1835 /// rank of the inferred result type if `currentResultType` is lower rank than
1836 /// `currentSourceType`. Use this signature if `sourceType` is updated together
1837 /// with the result type. In this case, it is important to compute the dropped
1838 /// dimensions using `currentSourceType` whose strides align with
1839 /// `currentResultType`.
1840 static MemRefType getCanonicalSubViewResultType(
1841     MemRefType currentResultType, MemRefType currentSourceType,
1842     MemRefType sourceType, ArrayRef<OpFoldResult> mixedOffsets,
1843     ArrayRef<OpFoldResult> mixedSizes, ArrayRef<OpFoldResult> mixedStrides) {
1844   auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
1845                                                        mixedSizes, mixedStrides)
1846                                 .cast<MemRefType>();
1847   llvm::Optional<llvm::SmallDenseSet<unsigned>> unusedDims =
1848       computeMemRefRankReductionMask(currentSourceType, currentResultType,
1849                                      mixedSizes);
1850   // Return nullptr as failure mode.
1851   if (!unusedDims)
1852     return nullptr;
1853   SmallVector<int64_t> shape;
1854   for (const auto &sizes : llvm::enumerate(nonRankReducedType.getShape())) {
1855     if (unusedDims->count(sizes.index()))
1856       continue;
1857     shape.push_back(sizes.value());
1858   }
1859   AffineMap layoutMap = nonRankReducedType.getLayout().getAffineMap();
1860   if (!layoutMap.isIdentity())
1861     layoutMap = getProjectedMap(layoutMap, unusedDims.getValue());
1862   return MemRefType::get(shape, nonRankReducedType.getElementType(), layoutMap,
1863                          nonRankReducedType.getMemorySpace());
1864 }
1865 
1866 /// Compute the canonical result type of a SubViewOp. Call `inferResultType` to
1867 /// deduce the result type. Additionally, reduce the rank of the inferred result
1868 /// type if `currentResultType` is lower rank than `sourceType`.
1869 static MemRefType getCanonicalSubViewResultType(
1870     MemRefType currentResultType, MemRefType sourceType,
1871     ArrayRef<OpFoldResult> mixedOffsets, ArrayRef<OpFoldResult> mixedSizes,
1872     ArrayRef<OpFoldResult> mixedStrides) {
1873   return getCanonicalSubViewResultType(currentResultType, sourceType,
1874                                        sourceType, mixedOffsets, mixedSizes,
1875                                        mixedStrides);
1876 }
1877 
/// Helper method to check if a `subview` operation is trivially a no-op. This
/// is the case if all offsets are zero, all strides are one, and the source
/// shape matches the sizes of the subview. In such cases, the subview can be
/// folded into its source.
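/// For example, a subview of a memref<4x8xf32> taken with offsets [0, 0],
/// sizes [4, 8] and strides [1, 1] is such a no-op.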
1882 static bool isTrivialSubViewOp(SubViewOp subViewOp) {
1883   if (subViewOp.getSourceType().getRank() != subViewOp.getType().getRank())
1884     return false;
1885 
1886   auto mixedOffsets = subViewOp.getMixedOffsets();
1887   auto mixedSizes = subViewOp.getMixedSizes();
1888   auto mixedStrides = subViewOp.getMixedStrides();
1889 
1890   // Check offsets are zero.
1891   if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
1892         Optional<int64_t> intValue = getConstantIntValue(ofr);
1893         return !intValue || intValue.getValue() != 0;
1894       }))
1895     return false;
1896 
1897   // Check strides are one.
1898   if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
1899         Optional<int64_t> intValue = getConstantIntValue(ofr);
1900         return !intValue || intValue.getValue() != 1;
1901       }))
1902     return false;
1903 
  // Check that all size values are static and match the (static) source shape.
1905   ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
1906   for (const auto &size : llvm::enumerate(mixedSizes)) {
1907     Optional<int64_t> intValue = getConstantIntValue(size.value());
1908     if (!intValue || intValue.getValue() != sourceShape[size.index()])
1909       return false;
1910   }
1911   // All conditions met. The `SubViewOp` is foldable as a no-op.
1912   return true;
1913 }
1914 
1915 namespace {
1916 /// Pattern to rewrite a subview op with MemRefCast arguments.
1917 /// This essentially pushes memref.cast past its consuming subview when
1918 /// `canFoldIntoConsumerOp` is true.
1919 ///
1920 /// Example:
1921 /// ```
1922 ///   %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
1923 ///   %1 = memref.subview %0[0, 0][3, 4][1, 1] :
1924 ///     memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
1925 /// ```
1926 /// is rewritten into:
1927 /// ```
1928 ///   %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
1929 ///   %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
1930 ///     memref<3x4xf32, offset:?, strides:[?, 1]>
1931 /// ```
1932 class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
1933 public:
1934   using OpRewritePattern<SubViewOp>::OpRewritePattern;
1935 
1936   LogicalResult matchAndRewrite(SubViewOp subViewOp,
1937                                 PatternRewriter &rewriter) const override {
    // If any operand is constant, bail out and let SubViewOpConstantFolder
    // kick in.
1939     if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
1940           return matchPattern(operand, matchConstantIndex());
1941         }))
1942       return failure();
1943 
1944     auto castOp = subViewOp.source().getDefiningOp<CastOp>();
1945     if (!castOp)
1946       return failure();
1947 
1948     if (!CastOp::canFoldIntoConsumerOp(castOp))
1949       return failure();
1950 
1951     // Compute the SubViewOp result type after folding the MemRefCastOp. Use the
1952     // MemRefCastOp source operand type to infer the result type and the current
1953     // SubViewOp source operand type to compute the dropped dimensions if the
1954     // operation is rank-reducing.
1955     auto resultType = getCanonicalSubViewResultType(
1956         subViewOp.getType(), subViewOp.getSourceType(),
1957         castOp.source().getType().cast<MemRefType>(),
1958         subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
1959         subViewOp.getMixedStrides());
1960     if (!resultType)
1961       return failure();
1962 
1963     Value newSubView = rewriter.create<SubViewOp>(
1964         subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
1965         subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
1966         subViewOp.static_sizes(), subViewOp.static_strides());
1967     rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
1968                                         newSubView);
1969     return success();
1970   }
1971 };
1972 
/// Canonicalize subview ops that are no-ops: if the source type and the result
/// type match, the subview is folded into its source; if they differ only in
/// layout (e.g. due to an `affine_map`), a cast is inserted instead.
1975 class TrivialSubViewOpFolder final : public OpRewritePattern<SubViewOp> {
1976 public:
1977   using OpRewritePattern<SubViewOp>::OpRewritePattern;
1978 
1979   LogicalResult matchAndRewrite(SubViewOp subViewOp,
1980                                 PatternRewriter &rewriter) const override {
1981     if (!isTrivialSubViewOp(subViewOp))
1982       return failure();
1983     if (subViewOp.getSourceType() == subViewOp.getType()) {
1984       rewriter.replaceOp(subViewOp, subViewOp.source());
1985       return success();
1986     }
1987     rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.source(),
1988                                         subViewOp.getType());
1989     return success();
1990   }
1991 };
1992 } // namespace
1993 
1994 /// Return the canonical type of the result of a subview.
1995 struct SubViewReturnTypeCanonicalizer {
1996   MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
1997                         ArrayRef<OpFoldResult> mixedSizes,
1998                         ArrayRef<OpFoldResult> mixedStrides) {
1999     return getCanonicalSubViewResultType(op.getType(), op.getSourceType(),
2000                                          mixedOffsets, mixedSizes,
2001                                          mixedStrides);
2002   }
2003 };
2004 
2005 /// A canonicalizer wrapper to replace SubViewOps.
2006 struct SubViewCanonicalizer {
2007   void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
2008     rewriter.replaceOpWithNewOp<CastOp>(op, newOp, op.getType());
2009   }
2010 };
2011 
2012 void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2013                                             MLIRContext *context) {
2014   results
2015       .add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
2016                SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
2017            SubViewOpMemRefCastFolder, TrivialSubViewOpFolder>(context);
2018 }
2019 
2020 OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
2021   auto resultShapedType = getResult().getType().cast<ShapedType>();
2022   auto sourceShapedType = source().getType().cast<ShapedType>();
2023 
2024   if (resultShapedType.hasStaticShape() &&
2025       resultShapedType == sourceShapedType) {
2026     return getViewSource();
2027   }
2028 
2029   return {};
2030 }
2031 
2032 //===----------------------------------------------------------------------===//
2033 // TransposeOp
2034 //===----------------------------------------------------------------------===//
2035 
/// Build a strided memref type by applying `permutationMap` to `memRefType`.
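/// For example, applying (d0, d1) -> (d1, d0) to a contiguous memref<3x4xf32>
/// (strides [4, 1]) produces memref<4x3xf32> with strides [1, 4], i.e. a
/// layout map of the form (d0, d1) -> (d0 + d1 * 4).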
2037 static MemRefType inferTransposeResultType(MemRefType memRefType,
2038                                            AffineMap permutationMap) {
2039   auto rank = memRefType.getRank();
2040   auto originalSizes = memRefType.getShape();
2041   // Compute permuted sizes.
2042   SmallVector<int64_t, 4> sizes(rank, 0);
2043   for (const auto &en : llvm::enumerate(permutationMap.getResults()))
2044     sizes[en.index()] =
2045         originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
2046 
2047   // Compute permuted strides.
2048   int64_t offset;
2049   SmallVector<int64_t, 4> strides;
2050   auto res = getStridesAndOffset(memRefType, strides, offset);
2051   assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
2052   (void)res;
2053   auto map =
2054       makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
2055   map = permutationMap ? map.compose(permutationMap) : map;
2056   return MemRefType::Builder(memRefType)
2057       .setShape(sizes)
2058       .setLayout(AffineMapAttr::get(map));
2059 }
2060 
2061 void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
2062                         AffineMapAttr permutation,
2063                         ArrayRef<NamedAttribute> attrs) {
2064   auto permutationMap = permutation.getValue();
2065   assert(permutationMap);
2066 
2067   auto memRefType = in.getType().cast<MemRefType>();
2068   // Compute result type.
2069   MemRefType resultType = inferTransposeResultType(memRefType, permutationMap);
2070 
2071   build(b, result, resultType, in, attrs);
2072   result.addAttribute(TransposeOp::getPermutationAttrName(), permutation);
2073 }
2074 
2075 // transpose $in $permutation attr-dict : type($in) `to` type(results)
2076 static void print(OpAsmPrinter &p, TransposeOp op) {
2077   p << " " << op.in() << " " << op.permutation();
2078   p.printOptionalAttrDict(op->getAttrs(),
2079                           {TransposeOp::getPermutationAttrName()});
2080   p << " : " << op.in().getType() << " to " << op.getType();
2081 }
2082 
2083 static ParseResult parseTransposeOp(OpAsmParser &parser,
2084                                     OperationState &result) {
2085   OpAsmParser::OperandType in;
2086   AffineMap permutation;
2087   MemRefType srcType, dstType;
2088   if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
2089       parser.parseOptionalAttrDict(result.attributes) ||
2090       parser.parseColonType(srcType) ||
2091       parser.resolveOperand(in, srcType, result.operands) ||
2092       parser.parseKeywordType("to", dstType) ||
2093       parser.addTypeToList(dstType, result.types))
2094     return failure();
2095 
2096   result.addAttribute(TransposeOp::getPermutationAttrName(),
2097                       AffineMapAttr::get(permutation));
2098   return success();
2099 }
2100 
2101 static LogicalResult verify(TransposeOp op) {
2102   if (!op.permutation().isPermutation())
2103     return op.emitOpError("expected a permutation map");
2104   if (op.permutation().getNumDims() != op.getShapedType().getRank())
2105     return op.emitOpError(
2106         "expected a permutation map of same rank as the input");
2107 
2108   auto srcType = op.in().getType().cast<MemRefType>();
2109   auto dstType = op.getType().cast<MemRefType>();
2110   auto transposedType = inferTransposeResultType(srcType, op.permutation());
2111   if (dstType != transposedType)
2112     return op.emitOpError("output type ")
2113            << dstType << " does not match transposed input type " << srcType
2114            << ", " << transposedType;
2115   return success();
2116 }
2117 
2118 OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
2119   if (succeeded(foldMemRefCast(*this)))
2120     return getResult();
2121   return {};
2122 }
2123 
2124 //===----------------------------------------------------------------------===//
2125 // ViewOp
2126 //===----------------------------------------------------------------------===//
2127 
2128 static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
2129   OpAsmParser::OperandType srcInfo;
2130   SmallVector<OpAsmParser::OperandType, 1> offsetInfo;
2131   SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
2132   auto indexType = parser.getBuilder().getIndexType();
2133   Type srcType, dstType;
2134   llvm::SMLoc offsetLoc;
2135   if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) ||
2136       parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square))
2137     return failure();
2138 
2139   if (offsetInfo.size() != 1)
2140     return parser.emitError(offsetLoc) << "expects 1 offset operand";
2141 
2142   return failure(
2143       parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
2144       parser.parseOptionalAttrDict(result.attributes) ||
2145       parser.parseColonType(srcType) ||
2146       parser.resolveOperand(srcInfo, srcType, result.operands) ||
2147       parser.resolveOperands(offsetInfo, indexType, result.operands) ||
2148       parser.resolveOperands(sizesInfo, indexType, result.operands) ||
2149       parser.parseKeywordType("to", dstType) ||
2150       parser.addTypeToList(dstType, result.types));
2151 }
2152 
2153 static void print(OpAsmPrinter &p, ViewOp op) {
2154   p << ' ' << op.getOperand(0) << '[';
2155   p.printOperand(op.byte_shift());
2156   p << "][" << op.sizes() << ']';
2157   p.printOptionalAttrDict(op->getAttrs());
2158   p << " : " << op.getOperand(0).getType() << " to " << op.getType();
2159 }
2160 
2161 static LogicalResult verify(ViewOp op) {
2162   auto baseType = op.getOperand(0).getType().cast<MemRefType>();
2163   auto viewType = op.getType();
2164 
2165   // The base memref should have identity layout map (or none).
2166   if (!baseType.getLayout().isIdentity())
2167     return op.emitError("unsupported map for base memref type ") << baseType;
2168 
2169   // The result memref should have identity layout map (or none).
2170   if (!viewType.getLayout().isIdentity())
2171     return op.emitError("unsupported map for result memref type ") << viewType;
2172 
2173   // The base memref and the view memref should be in the same memory space.
2174   if (baseType.getMemorySpace() != viewType.getMemorySpace())
2175     return op.emitError("different memory spaces specified for base memref "
2176                         "type ")
2177            << baseType << " and view memref type " << viewType;
2178 
2179   // Verify that we have the correct number of sizes for the result type.
2180   unsigned numDynamicDims = viewType.getNumDynamicDims();
2181   if (op.sizes().size() != numDynamicDims)
2182     return op.emitError("incorrect number of size operands for type ")
2183            << viewType;
2184 
2185   return success();
2186 }
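// The checks above admit IR along these lines (assembly sketched for
// illustration; see the op documentation for the authoritative syntax), with
// one size operand per dynamic dimension of the identity-layout result:
//   %view = memref.view %buffer[%offset][%size]
//       : memref<2048xi8> to memref<?x4xf32>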
2187 
2188 Value ViewOp::getViewSource() { return source(); }
2189 
2190 namespace {
2191 
2192 struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
2193   using OpRewritePattern<ViewOp>::OpRewritePattern;
2194 
2195   LogicalResult matchAndRewrite(ViewOp viewOp,
2196                                 PatternRewriter &rewriter) const override {
2197     // Return if none of the operands are constants.
2198     if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
2199           return matchPattern(operand, matchConstantIndex());
2200         }))
2201       return failure();
2202 
2203     // Get result memref type.
2204     auto memrefType = viewOp.getType();
2205 
    // Get the offset and strides from the result memref type `memrefType`.
2207     int64_t oldOffset;
2208     SmallVector<int64_t, 4> oldStrides;
2209     if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
2210       return failure();
2211     assert(oldOffset == 0 && "Expected 0 offset");
2212 
2213     SmallVector<Value, 4> newOperands;
2214 
2215     // Offset cannot be folded into result type.
2216 
2217     // Fold any dynamic dim operands which are produced by a constant.
2218     SmallVector<int64_t, 4> newShapeConstants;
2219     newShapeConstants.reserve(memrefType.getRank());
2220 
2221     unsigned dynamicDimPos = 0;
2222     unsigned rank = memrefType.getRank();
2223     for (unsigned dim = 0, e = rank; dim < e; ++dim) {
2224       int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
2226       if (!ShapedType::isDynamic(dimSize)) {
2227         newShapeConstants.push_back(dimSize);
2228         continue;
2229       }
2230       auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp();
2231       if (auto constantIndexOp =
2232               dyn_cast_or_null<arith::ConstantIndexOp>(defOp)) {
2233         // Dynamic shape dimension will be folded.
2234         newShapeConstants.push_back(constantIndexOp.value());
2235       } else {
2236         // Dynamic shape dimension not folded; copy operand from old memref.
2237         newShapeConstants.push_back(dimSize);
2238         newOperands.push_back(viewOp.sizes()[dynamicDimPos]);
2239       }
2240       dynamicDimPos++;
2241     }
2242 
2243     // Create new memref type with constant folded dims.
2244     MemRefType newMemRefType =
2245         MemRefType::Builder(memrefType).setShape(newShapeConstants);
2246     // Nothing new, don't fold.
2247     if (newMemRefType == memrefType)
2248       return failure();
2249 
2250     // Create new ViewOp.
2251     auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
2252                                              viewOp.getOperand(0),
2253                                              viewOp.byte_shift(), newOperands);
2254     // Insert a cast so we have the same type as the old memref type.
2255     rewriter.replaceOpWithNewOp<CastOp>(viewOp, newViewOp, viewOp.getType());
2256     return success();
2257   }
2258 };
2259 
2260 struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
2261   using OpRewritePattern<ViewOp>::OpRewritePattern;
2262 
2263   LogicalResult matchAndRewrite(ViewOp viewOp,
2264                                 PatternRewriter &rewriter) const override {
2265     Value memrefOperand = viewOp.getOperand(0);
2266     CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>();
2267     if (!memrefCastOp)
2268       return failure();
2269     Value allocOperand = memrefCastOp.getOperand();
2270     AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>();
2271     if (!allocOp)
2272       return failure();
2273     rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
2274                                         viewOp.byte_shift(), viewOp.sizes());
2275     return success();
2276   }
2277 };
2278 
2279 } // namespace
2280 
2281 void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2282                                          MLIRContext *context) {
2283   results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
2284 }
2285 
2286 //===----------------------------------------------------------------------===//
2287 // AtomicRMWOp
2288 //===----------------------------------------------------------------------===//
2289 
2290 static LogicalResult verify(AtomicRMWOp op) {
2291   if (op.getMemRefType().getRank() != op.getNumOperands() - 2)
2292     return op.emitOpError(
2293         "expects the number of subscripts to be equal to memref rank");
2294   switch (op.kind()) {
2295   case arith::AtomicRMWKind::addf:
2296   case arith::AtomicRMWKind::maxf:
2297   case arith::AtomicRMWKind::minf:
2298   case arith::AtomicRMWKind::mulf:
2299     if (!op.value().getType().isa<FloatType>())
2300       return op.emitOpError()
2301              << "with kind '" << arith::stringifyAtomicRMWKind(op.kind())
2302              << "' expects a floating-point type";
2303     break;
2304   case arith::AtomicRMWKind::addi:
2305   case arith::AtomicRMWKind::maxs:
2306   case arith::AtomicRMWKind::maxu:
2307   case arith::AtomicRMWKind::mins:
2308   case arith::AtomicRMWKind::minu:
2309   case arith::AtomicRMWKind::muli:
2310   case arith::AtomicRMWKind::ori:
2311   case arith::AtomicRMWKind::andi:
2312     if (!op.value().getType().isa<IntegerType>())
2313       return op.emitOpError()
2314              << "with kind '" << arith::stringifyAtomicRMWKind(op.kind())
2315              << "' expects an integer type";
2316     break;
2317   default:
2318     break;
2319   }
2320   return success();
2321 }
2322 
2323 OpFoldResult AtomicRMWOp::fold(ArrayRef<Attribute> operands) {
2324   /// atomicrmw(memrefcast) -> atomicrmw
2325   if (succeeded(foldMemRefCast(*this, value())))
2326     return getResult();
2327   return OpFoldResult();
2328 }
2329 
2330 //===----------------------------------------------------------------------===//
2331 // TableGen'd op method definitions
2332 //===----------------------------------------------------------------------===//
2333 
2334 #define GET_OP_CLASSES
2335 #include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"
2336