1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/MemRef/IR/MemRef.h"
10 #include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
11 #include "mlir/Dialect/StandardOps/IR/Ops.h"
12 #include "mlir/Dialect/StandardOps/Utils/Utils.h"
13 #include "mlir/Dialect/Tensor/IR/Tensor.h"
14 #include "mlir/Dialect/Utils/StaticValueUtils.h"
15 #include "mlir/IR/AffineMap.h"
16 #include "mlir/IR/Builders.h"
17 #include "mlir/IR/BuiltinTypes.h"
18 #include "mlir/IR/Matchers.h"
19 #include "mlir/IR/PatternMatch.h"
20 #include "mlir/IR/TypeUtilities.h"
21 #include "mlir/Interfaces/InferTypeOpInterface.h"
22 #include "mlir/Interfaces/ViewLikeInterface.h"
23 #include "llvm/ADT/STLExtras.h"
24 
25 using namespace mlir;
26 using namespace mlir::memref;
27 
28 /// Materialize a single constant operation from a given attribute value with
29 /// the desired resultant type.
30 Operation *MemRefDialect::materializeConstant(OpBuilder &builder,
31                                               Attribute value, Type type,
32                                               Location loc) {
33   return builder.create<mlir::ConstantOp>(loc, type, value);
34 }
35 
36 //===----------------------------------------------------------------------===//
37 // Common canonicalization pattern support logic
38 //===----------------------------------------------------------------------===//
39 
/// This is a common helper used for patterns of the form
/// "someop(memrefcast) -> someop".  It folds the source of any memref.cast
/// into the root operation directly, as long as the cast source is a ranked
/// memref.
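/// For example (illustrative IR; value names are made up), given
/// ```mlir
///   %0 = memref.cast %arg : memref<8xf32> to memref<?xf32>
///   memref.dealloc %0 : memref<?xf32>
/// ```
/// the dealloc is folded to operate on %arg directly and the cast becomes
/// dead.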
43 static LogicalResult foldMemRefCast(Operation *op, Value inner = nullptr) {
44   bool folded = false;
45   for (OpOperand &operand : op->getOpOperands()) {
46     auto cast = operand.get().getDefiningOp<CastOp>();
47     if (cast && operand.get() != inner &&
48         !cast.getOperand().getType().isa<UnrankedMemRefType>()) {
49       operand.set(cast.getOperand());
50       folded = true;
51     }
52   }
53   return success(folded);
54 }
55 
56 //===----------------------------------------------------------------------===//
57 // Helpers for GlobalOp
58 //===----------------------------------------------------------------------===//
59 
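/// Returns the tensor type whose shape and element type match the given
/// memref type, e.g. memref<4x?xf32> maps to tensor<4x?xf32> and memref<*xf32>
/// maps to tensor<*xf32>; non-memref types map to NoneType. This is used to
/// type and check the initial value of a memref.global.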
60 static Type getTensorTypeFromMemRefType(Type type) {
61   if (auto memref = type.dyn_cast<MemRefType>())
62     return RankedTensorType::get(memref.getShape(), memref.getElementType());
63   if (auto memref = type.dyn_cast<UnrankedMemRefType>())
64     return UnrankedTensorType::get(memref.getElementType());
65   return NoneType::get(type.getContext());
66 }
67 
68 //===----------------------------------------------------------------------===//
69 // AllocOp / AllocaOp
70 //===----------------------------------------------------------------------===//
71 
72 template <typename AllocLikeOp>
73 static LogicalResult verifyAllocLikeOp(AllocLikeOp op) {
74   static_assert(llvm::is_one_of<AllocLikeOp, AllocOp, AllocaOp>::value,
75                 "applies to only alloc or alloca");
76   auto memRefType = op.getResult().getType().template dyn_cast<MemRefType>();
77   if (!memRefType)
78     return op.emitOpError("result must be a memref");
79 
80   if (static_cast<int64_t>(op.dynamicSizes().size()) !=
81       memRefType.getNumDynamicDims())
82     return op.emitOpError("dimension operand count does not equal memref "
83                           "dynamic dimension count");
84 
85   unsigned numSymbols = 0;
86   if (!memRefType.getAffineMaps().empty())
87     numSymbols = memRefType.getAffineMaps().front().getNumSymbols();
88   if (op.symbolOperands().size() != numSymbols)
89     return op.emitOpError("symbol operand count does not equal memref symbol "
90                           "count: expected ")
91            << numSymbols << ", got " << op.symbolOperands().size();
92 
93   return success();
94 }
95 
96 static LogicalResult verify(AllocOp op) { return verifyAllocLikeOp(op); }
97 
98 static LogicalResult verify(AllocaOp op) {
99   // An alloca op needs to have an ancestor with an allocation scope trait.
100   if (!op->getParentWithTrait<OpTrait::AutomaticAllocationScope>())
101     return op.emitOpError(
102         "requires an ancestor op with AutomaticAllocationScope trait");
103 
104   return verifyAllocLikeOp(op);
105 }
106 
107 namespace {
/// Fold constant dimension operands into an alloc-like operation.
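/// For example (illustrative IR; names are made up), with %c8 a constant
/// index,
/// ```mlir
///   %a = memref.alloc(%c8, %n) : memref<?x?xf32>
/// ```
/// is rewritten to
/// ```mlir
///   %0 = memref.alloc(%n) : memref<8x?xf32>
///   %a = memref.cast %0 : memref<8x?xf32> to memref<?x?xf32>
/// ```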
109 template <typename AllocLikeOp>
110 struct SimplifyAllocConst : public OpRewritePattern<AllocLikeOp> {
111   using OpRewritePattern<AllocLikeOp>::OpRewritePattern;
112 
113   LogicalResult matchAndRewrite(AllocLikeOp alloc,
114                                 PatternRewriter &rewriter) const override {
    // Check to see if any dimension operands are constants.  If so, we can
    // substitute and drop them.
117     if (llvm::none_of(alloc.dynamicSizes(), [](Value operand) {
118           return matchPattern(operand, matchConstantIndex());
119         }))
120       return failure();
121 
122     auto memrefType = alloc.getType();
123 
124     // Ok, we have one or more constant operands.  Collect the non-constant ones
125     // and keep track of the resultant memref type to build.
126     SmallVector<int64_t, 4> newShapeConstants;
127     newShapeConstants.reserve(memrefType.getRank());
128     SmallVector<Value, 4> dynamicSizes;
129 
130     unsigned dynamicDimPos = 0;
131     for (unsigned dim = 0, e = memrefType.getRank(); dim < e; ++dim) {
132       int64_t dimSize = memrefType.getDimSize(dim);
      // If this is already a static dimension, keep it.
134       if (dimSize != -1) {
135         newShapeConstants.push_back(dimSize);
136         continue;
137       }
138       auto dynamicSize = alloc.dynamicSizes()[dynamicDimPos];
139       auto *defOp = dynamicSize.getDefiningOp();
140       if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
141         // Dynamic shape dimension will be folded.
142         newShapeConstants.push_back(constantIndexOp.getValue());
143       } else {
144         // Dynamic shape dimension not folded; copy dynamicSize from old memref.
145         newShapeConstants.push_back(-1);
146         dynamicSizes.push_back(dynamicSize);
147       }
148       dynamicDimPos++;
149     }
150 
151     // Create new memref type (which will have fewer dynamic dimensions).
152     MemRefType newMemRefType =
153         MemRefType::Builder(memrefType).setShape(newShapeConstants);
154     assert(static_cast<int64_t>(dynamicSizes.size()) ==
155            newMemRefType.getNumDynamicDims());
156 
157     // Create and insert the alloc op for the new memref.
158     auto newAlloc = rewriter.create<AllocLikeOp>(
159         alloc.getLoc(), newMemRefType, dynamicSizes, alloc.symbolOperands(),
160         alloc.alignmentAttr());
161     // Insert a cast so we have the same type as the old alloc.
162     auto resultCast =
163         rewriter.create<CastOp>(alloc.getLoc(), newAlloc, alloc.getType());
164 
165     rewriter.replaceOp(alloc, {resultCast});
166     return success();
167   }
168 };
169 
170 /// Fold alloc operations with no users or only store and dealloc uses.
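/// For example (illustrative IR), an allocation that is only written to and
/// deallocated
/// ```mlir
///   %a = memref.alloc() : memref<4xf32>
///   memref.store %v, %a[%i] : memref<4xf32>
///   memref.dealloc %a : memref<4xf32>
/// ```
/// is erased entirely, together with its store and dealloc uses.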
171 template <typename T>
172 struct SimplifyDeadAlloc : public OpRewritePattern<T> {
173   using OpRewritePattern<T>::OpRewritePattern;
174 
175   LogicalResult matchAndRewrite(T alloc,
176                                 PatternRewriter &rewriter) const override {
177     if (llvm::any_of(alloc->getUsers(), [&](Operation *op) {
178           if (auto storeOp = dyn_cast<StoreOp>(op))
179             return storeOp.value() == alloc;
180           return !isa<DeallocOp>(op);
181         }))
182       return failure();
183 
184     for (Operation *user : llvm::make_early_inc_range(alloc->getUsers()))
185       rewriter.eraseOp(user);
186 
187     rewriter.eraseOp(alloc);
188     return success();
189   }
190 };
191 } // end anonymous namespace.
192 
193 void AllocOp::getCanonicalizationPatterns(RewritePatternSet &results,
194                                           MLIRContext *context) {
195   results.add<SimplifyAllocConst<AllocOp>, SimplifyDeadAlloc<AllocOp>>(context);
196 }
197 
198 void AllocaOp::getCanonicalizationPatterns(RewritePatternSet &results,
199                                            MLIRContext *context) {
200   results.add<SimplifyAllocConst<AllocaOp>, SimplifyDeadAlloc<AllocaOp>>(
201       context);
202 }
203 
204 //===----------------------------------------------------------------------===//
205 // AllocaScopeOp
206 //===----------------------------------------------------------------------===//
207 
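/// The custom printer and parser below handle IR of the following shape
/// (illustrative example; the terminator is implicit when there are no
/// results):
/// ```mlir
///   memref.alloca_scope {
///     %tmp = memref.alloca() : memref<16xf32>
///     // ... uses of %tmp; it is freed when the scope is exited ...
///   }
/// ```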
208 static void print(OpAsmPrinter &p, AllocaScopeOp &op) {
209   bool printBlockTerminators = false;
210 
211   p << " ";
212   if (!op.results().empty()) {
213     p << " -> (" << op.getResultTypes() << ")";
214     printBlockTerminators = true;
215   }
216   p.printRegion(op.bodyRegion(),
217                 /*printEntryBlockArgs=*/false,
218                 /*printBlockTerminators=*/printBlockTerminators);
219   p.printOptionalAttrDict(op->getAttrs());
220 }
221 
222 static ParseResult parseAllocaScopeOp(OpAsmParser &parser,
223                                       OperationState &result) {
224   // Create a region for the body.
225   result.regions.reserve(1);
226   Region *bodyRegion = result.addRegion();
227 
228   // Parse optional results type list.
229   if (parser.parseOptionalArrowTypeList(result.types))
230     return failure();
231 
232   // Parse the body region.
233   if (parser.parseRegion(*bodyRegion, /*arguments=*/{}, /*argTypes=*/{}))
234     return failure();
235   AllocaScopeOp::ensureTerminator(*bodyRegion, parser.getBuilder(),
236                                   result.location);
237 
238   // Parse the optional attribute list.
239   if (parser.parseOptionalAttrDict(result.attributes))
240     return failure();
241 
242   return success();
243 }
244 
245 static LogicalResult verify(AllocaScopeOp op) {
246   if (failed(RegionBranchOpInterface::verifyTypes(op)))
247     return failure();
248 
249   return success();
250 }
251 
252 void AllocaScopeOp::getSuccessorRegions(
253     Optional<unsigned> index, ArrayRef<Attribute> operands,
254     SmallVectorImpl<RegionSuccessor> &regions) {
255   if (index.hasValue()) {
256     regions.push_back(RegionSuccessor(getResults()));
257     return;
258   }
259 
260   regions.push_back(RegionSuccessor(&bodyRegion()));
261 }
262 
263 //===----------------------------------------------------------------------===//
264 // AssumeAlignmentOp
265 //===----------------------------------------------------------------------===//
266 
267 static LogicalResult verify(AssumeAlignmentOp op) {
268   unsigned alignment = op.alignment();
269   if (!llvm::isPowerOf2_32(alignment))
270     return op.emitOpError("alignment must be power of 2");
271   return success();
272 }
273 
274 //===----------------------------------------------------------------------===//
275 // BufferCastOp
276 //===----------------------------------------------------------------------===//
277 
278 OpFoldResult BufferCastOp::fold(ArrayRef<Attribute>) {
279   if (auto tensorLoad = tensor().getDefiningOp<TensorLoadOp>())
280     if (tensorLoad.memref().getType() == getType())
281       return tensorLoad.memref();
282   return {};
283 }
284 
285 namespace {
/// Rewrite tensor.cast + memref.buffer_cast into memref.buffer_cast +
/// memref.cast.
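/// For example (illustrative IR; names are made up),
/// ```mlir
///   %0 = tensor.cast %t : tensor<4xf32> to tensor<?xf32>
///   %m = memref.buffer_cast %0 : memref<?xf32>
/// ```
/// becomes
/// ```mlir
///   %0 = memref.buffer_cast %t : memref<4xf32>
///   %m = memref.cast %0 : memref<4xf32> to memref<?xf32>
/// ```
/// so that the more static memref type is materialized as early as possible.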
287 struct BufferCast : public OpRewritePattern<BufferCastOp> {
288   using OpRewritePattern<BufferCastOp>::OpRewritePattern;
289 
290   LogicalResult matchAndRewrite(BufferCastOp bufferCast,
291                                 PatternRewriter &rewriter) const final {
292     auto tensorCastOperand =
293         bufferCast.getOperand().getDefiningOp<tensor::CastOp>();
294     if (!tensorCastOperand)
295       return failure();
296     auto srcTensorType =
297         tensorCastOperand.getOperand().getType().dyn_cast<RankedTensorType>();
298     if (!srcTensorType)
299       return failure();
300     auto memrefType = MemRefType::get(srcTensorType.getShape(),
301                                       srcTensorType.getElementType());
302     Value memref = rewriter.create<BufferCastOp>(
303         bufferCast.getLoc(), memrefType, tensorCastOperand.getOperand());
304     rewriter.replaceOpWithNewOp<CastOp>(bufferCast, bufferCast.getType(),
305                                         memref);
306     return success();
307   }
308 };
309 
/// Canonicalize memref.tensor_load + memref.buffer_cast to memref.cast when
/// a type mismatch prevents `BufferCastOp::fold` from kicking in.
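/// For example (illustrative IR),
/// ```mlir
///   %t = memref.tensor_load %m : memref<4xf32>
///   %r = memref.buffer_cast %t : memref<?xf32>
/// ```
/// becomes
/// ```mlir
///   %r = memref.cast %m : memref<4xf32> to memref<?xf32>
/// ```
/// unless the cast would introduce static offsets or strides that cannot be
/// guaranteed, in which case an alloc + copy is emitted instead.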
312 struct TensorLoadToMemRef : public OpRewritePattern<BufferCastOp> {
313   using OpRewritePattern<BufferCastOp>::OpRewritePattern;
314 
315   LogicalResult matchAndRewrite(BufferCastOp bufferCast,
316                                 PatternRewriter &rewriter) const final {
317     auto tensorLoad = bufferCast.tensor().getDefiningOp<TensorLoadOp>();
318     // Bail unless we have a tensor_load + memref.buffer_cast with different
319     // types. `BufferCastOp::fold` handles the same type case.
320     if (!tensorLoad || tensorLoad.memref().getType() == bufferCast.getType())
321       return failure();
322     // If types are definitely not cast-compatible, bail.
323     if (!CastOp::areCastCompatible(tensorLoad.memref().getType(),
324                                    bufferCast.getType()))
325       return failure();
326 
    // We already know that the types are potentially cast-compatible. However,
    // if the affine maps differ, we may need to use a copy when going from a
    // dynamic to a static offset or stride (at this point the canonicalization
    // cannot prove that the cast is actually valid).
331     auto isGuaranteedCastCompatible = [](MemRefType source, MemRefType target) {
332       int64_t sourceOffset, targetOffset;
333       SmallVector<int64_t, 4> sourceStrides, targetStrides;
334       if (failed(getStridesAndOffset(source, sourceStrides, sourceOffset)) ||
335           failed(getStridesAndOffset(target, targetStrides, targetOffset)))
336         return false;
337       auto dynamicToStatic = [](int64_t a, int64_t b) {
338         return a == MemRefType::getDynamicStrideOrOffset() &&
339                b != MemRefType::getDynamicStrideOrOffset();
340       };
341       if (dynamicToStatic(sourceOffset, targetOffset))
342         return false;
343       for (auto it : zip(sourceStrides, targetStrides))
344         if (dynamicToStatic(std::get<0>(it), std::get<1>(it)))
345           return false;
346       return true;
347     };
348 
349     auto tensorLoadType = tensorLoad.memref().getType().dyn_cast<MemRefType>();
350     auto bufferCastType = bufferCast.getType().dyn_cast<MemRefType>();
351     if (tensorLoadType && bufferCastType &&
352         !isGuaranteedCastCompatible(tensorLoadType, bufferCastType)) {
353       MemRefType resultType = bufferCastType;
354       auto loc = bufferCast.getLoc();
355       SmallVector<Value, 4> dynamicOperands;
356       for (int i = 0; i < resultType.getRank(); ++i) {
357         if (resultType.getShape()[i] != ShapedType::kDynamicSize)
358           continue;
359         auto index = rewriter.createOrFold<ConstantIndexOp>(loc, i);
360         Value size = rewriter.create<tensor::DimOp>(loc, tensorLoad, index);
361         dynamicOperands.push_back(size);
362       }
363       auto copy =
364           rewriter.create<memref::AllocOp>(loc, resultType, dynamicOperands);
365       rewriter.create<CopyOp>(loc, tensorLoad.memref(), copy);
366       rewriter.replaceOp(bufferCast, {copy});
367     } else
368       rewriter.replaceOpWithNewOp<CastOp>(bufferCast, bufferCast.getType(),
369                                           tensorLoad.memref());
370     return success();
371   }
372 };
373 
374 } // namespace
375 
376 void BufferCastOp::getCanonicalizationPatterns(RewritePatternSet &results,
377                                                MLIRContext *context) {
378   results.add<BufferCast, TensorLoadToMemRef>(context);
379 }
380 
381 //===----------------------------------------------------------------------===//
382 // CastOp
383 //===----------------------------------------------------------------------===//
384 
385 /// Determines whether MemRef_CastOp casts to a more dynamic version of the
/// source memref. This is useful to fold a memref.cast into a consuming op
387 /// and implement canonicalization patterns for ops in different dialects that
388 /// may consume the results of memref.cast operations. Such foldable memref.cast
389 /// operations are typically inserted as `view` and `subview` ops are
390 /// canonicalized, to preserve the type compatibility of their uses.
391 ///
392 /// Returns true when all conditions are met:
393 /// 1. source and result are ranked memrefs with strided semantics and same
394 /// element type and rank.
/// 2. each of the source's sizes, offset and strides has at least as much
/// static information as the corresponding size, offset or stride of the
/// result.
397 ///
398 /// Example 1:
399 /// ```mlir
400 ///   %1 = memref.cast %0 : memref<8x16xf32> to memref<?x?xf32>
401 ///   %2 = consumer %1 ... : memref<?x?xf32> ...
402 /// ```
403 ///
404 /// may fold into:
405 ///
406 /// ```mlir
407 ///   %2 = consumer %0 ... : memref<8x16xf32> ...
408 /// ```
409 ///
410 /// Example 2:
411 /// ```
412 ///   %1 = memref.cast %0 : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
413 ///          to memref<?x?xf32>
414 ///   consumer %1 : memref<?x?xf32> ...
415 /// ```
416 ///
417 /// may fold into:
418 ///
419 /// ```
420 ///   consumer %0 ... : memref<?x16xf32, affine_map<(i, j)->(16 * i + j)>>
421 /// ```
422 bool CastOp::canFoldIntoConsumerOp(CastOp castOp) {
423   MemRefType sourceType = castOp.source().getType().dyn_cast<MemRefType>();
424   MemRefType resultType = castOp.getType().dyn_cast<MemRefType>();
425 
426   // Requires ranked MemRefType.
427   if (!sourceType || !resultType)
428     return false;
429 
430   // Requires same elemental type.
431   if (sourceType.getElementType() != resultType.getElementType())
432     return false;
433 
434   // Requires same rank.
435   if (sourceType.getRank() != resultType.getRank())
436     return false;
437 
438   // Only fold casts between strided memref forms.
439   int64_t sourceOffset, resultOffset;
440   SmallVector<int64_t, 4> sourceStrides, resultStrides;
441   if (failed(getStridesAndOffset(sourceType, sourceStrides, sourceOffset)) ||
442       failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
443     return false;
444 
445   // If cast is towards more static sizes along any dimension, don't fold.
446   for (auto it : llvm::zip(sourceType.getShape(), resultType.getShape())) {
447     auto ss = std::get<0>(it), st = std::get<1>(it);
448     if (ss != st)
449       if (MemRefType::isDynamic(ss) && !MemRefType::isDynamic(st))
450         return false;
451   }
452 
453   // If cast is towards more static offset along any dimension, don't fold.
454   if (sourceOffset != resultOffset)
455     if (MemRefType::isDynamicStrideOrOffset(sourceOffset) &&
456         !MemRefType::isDynamicStrideOrOffset(resultOffset))
457       return false;
458 
459   // If cast is towards more static strides along any dimension, don't fold.
460   for (auto it : llvm::zip(sourceStrides, resultStrides)) {
461     auto ss = std::get<0>(it), st = std::get<1>(it);
462     if (ss != st)
463       if (MemRefType::isDynamicStrideOrOffset(ss) &&
464           !MemRefType::isDynamicStrideOrOffset(st))
465         return false;
466   }
467 
468   return true;
469 }
470 
471 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
472   if (inputs.size() != 1 || outputs.size() != 1)
473     return false;
474   Type a = inputs.front(), b = outputs.front();
475   auto aT = a.dyn_cast<MemRefType>();
476   auto bT = b.dyn_cast<MemRefType>();
477 
478   auto uaT = a.dyn_cast<UnrankedMemRefType>();
479   auto ubT = b.dyn_cast<UnrankedMemRefType>();
480 
481   if (aT && bT) {
482     if (aT.getElementType() != bT.getElementType())
483       return false;
484     if (aT.getAffineMaps() != bT.getAffineMaps()) {
485       int64_t aOffset, bOffset;
486       SmallVector<int64_t, 4> aStrides, bStrides;
487       if (failed(getStridesAndOffset(aT, aStrides, aOffset)) ||
488           failed(getStridesAndOffset(bT, bStrides, bOffset)) ||
489           aStrides.size() != bStrides.size())
490         return false;
491 
492       // Strides along a dimension/offset are compatible if the value in the
493       // source memref is static and the value in the target memref is the
494       // same. They are also compatible if either one is dynamic (see
495       // description of MemRefCastOp for details).
496       auto checkCompatible = [](int64_t a, int64_t b) {
497         return (a == MemRefType::getDynamicStrideOrOffset() ||
498                 b == MemRefType::getDynamicStrideOrOffset() || a == b);
499       };
500       if (!checkCompatible(aOffset, bOffset))
501         return false;
502       for (auto aStride : enumerate(aStrides))
503         if (!checkCompatible(aStride.value(), bStrides[aStride.index()]))
504           return false;
505     }
506     if (aT.getMemorySpace() != bT.getMemorySpace())
507       return false;
508 
509     // They must have the same rank, and any specified dimensions must match.
510     if (aT.getRank() != bT.getRank())
511       return false;
512 
513     for (unsigned i = 0, e = aT.getRank(); i != e; ++i) {
514       int64_t aDim = aT.getDimSize(i), bDim = bT.getDimSize(i);
515       if (aDim != -1 && bDim != -1 && aDim != bDim)
516         return false;
517     }
518     return true;
519   } else {
520     if (!aT && !uaT)
521       return false;
522     if (!bT && !ubT)
523       return false;
524     // Unranked to unranked casting is unsupported
525     if (uaT && ubT)
526       return false;
527 
528     auto aEltType = (aT) ? aT.getElementType() : uaT.getElementType();
529     auto bEltType = (bT) ? bT.getElementType() : ubT.getElementType();
530     if (aEltType != bEltType)
531       return false;
532 
533     auto aMemSpace = (aT) ? aT.getMemorySpace() : uaT.getMemorySpace();
534     auto bMemSpace = (bT) ? bT.getMemorySpace() : ubT.getMemorySpace();
535     if (aMemSpace != bMemSpace)
536       return false;
537 
538     return true;
539   }
540 
541   return false;
542 }
543 
544 OpFoldResult CastOp::fold(ArrayRef<Attribute> operands) {
545   return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
546 }
547 
548 //===----------------------------------------------------------------------===//
549 // CloneOp
550 //===----------------------------------------------------------------------===//
551 
552 void CloneOp::getEffects(
553     SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
554         &effects) {
555   effects.emplace_back(MemoryEffects::Read::get(), input(),
556                        SideEffects::DefaultResource::get());
557   effects.emplace_back(MemoryEffects::Write::get(), output(),
558                        SideEffects::DefaultResource::get());
559   effects.emplace_back(MemoryEffects::Allocate::get(), output(),
560                        SideEffects::DefaultResource::get());
561 }
562 
563 namespace {
564 /// Merge the clone and its source (by converting the clone to a cast) when
565 /// possible.
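/// For example (illustrative IR; assumes %0 is not freed in this block),
/// ```mlir
///   %1 = memref.clone %0 : memref<?xf32> to memref<?xf32>
///   // ... uses of %1 that do not free memory ...
///   memref.dealloc %1 : memref<?xf32>
/// ```
/// becomes
/// ```mlir
///   %1 = memref.cast %0 : memref<?xf32> to memref<?xf32>
///   // ... uses of %1 ...
/// ```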
566 struct SimplifyClones : public OpRewritePattern<CloneOp> {
567   using OpRewritePattern<CloneOp>::OpRewritePattern;
568 
569   LogicalResult matchAndRewrite(CloneOp cloneOp,
570                                 PatternRewriter &rewriter) const override {
571     if (cloneOp.use_empty()) {
572       rewriter.eraseOp(cloneOp);
573       return success();
574     }
575 
576     Value source = cloneOp.input();
577 
578     // This only finds dealloc operations for the immediate value. It should
579     // also consider aliases. That would also make the safety check below
580     // redundant.
581     llvm::Optional<Operation *> maybeCloneDeallocOp =
582         findDealloc(cloneOp.output());
    // Skip if either of them has more than one dealloc.
584     if (!maybeCloneDeallocOp.hasValue())
585       return failure();
586     llvm::Optional<Operation *> maybeSourceDeallocOp = findDealloc(source);
587     if (!maybeSourceDeallocOp.hasValue())
588       return failure();
589     Operation *cloneDeallocOp = *maybeCloneDeallocOp;
590     Operation *sourceDeallocOp = *maybeSourceDeallocOp;
591 
592     // If both are deallocated in the same block, their in-block lifetimes
593     // might not fully overlap, so we cannot decide which one to drop.
594     if (cloneDeallocOp && sourceDeallocOp &&
595         cloneDeallocOp->getBlock() == sourceDeallocOp->getBlock())
596       return failure();
597 
598     Block *currentBlock = cloneOp->getBlock();
599     Operation *redundantDealloc = nullptr;
600     if (cloneDeallocOp && cloneDeallocOp->getBlock() == currentBlock) {
601       redundantDealloc = cloneDeallocOp;
602     } else if (sourceDeallocOp && sourceDeallocOp->getBlock() == currentBlock) {
603       redundantDealloc = sourceDeallocOp;
604     }
605 
606     if (!redundantDealloc)
607       return failure();
608 
    // Safety check that there are no other deallocations in between
    // cloneOp and redundantDealloc, as otherwise we might deallocate an alias
    // of source before the uses of the clone. With alias information, we could
    // restrict this to only fail if the dealloc's operand is an alias
    // of the source.
614     for (Operation *pos = cloneOp->getNextNode(); pos != redundantDealloc;
615          pos = pos->getNextNode()) {
616       auto effectInterface = dyn_cast<MemoryEffectOpInterface>(pos);
617       if (!effectInterface)
618         continue;
619       if (effectInterface.hasEffect<MemoryEffects::Free>())
620         return failure();
621     }
622 
623     rewriter.replaceOpWithNewOp<memref::CastOp>(cloneOp, cloneOp.getType(),
624                                                 source);
625     rewriter.eraseOp(redundantDealloc);
626     return success();
627   }
628 };
629 
630 } // end anonymous namespace.
631 
void CloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<SimplifyClones>(context);
}
636 
637 OpFoldResult CloneOp::fold(ArrayRef<Attribute> operands) {
638   return succeeded(foldMemRefCast(*this)) ? getResult() : Value();
639 }
640 
641 //===----------------------------------------------------------------------===//
642 // DeallocOp
643 //===----------------------------------------------------------------------===//
644 
645 LogicalResult DeallocOp::fold(ArrayRef<Attribute> cstOperands,
646                               SmallVectorImpl<OpFoldResult> &results) {
647   /// dealloc(memrefcast) -> dealloc
648   return foldMemRefCast(*this);
649 }
650 
651 //===----------------------------------------------------------------------===//
652 // DimOp
653 //===----------------------------------------------------------------------===//
654 
655 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
656                   int64_t index) {
657   auto loc = result.location;
658   Value indexValue = builder.create<ConstantIndexOp>(loc, index);
659   build(builder, result, source, indexValue);
660 }
661 
662 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
663                   Value index) {
664   auto indexTy = builder.getIndexType();
665   build(builder, result, indexTy, source, index);
666 }
667 
668 Optional<int64_t> DimOp::getConstantIndex() {
669   if (auto constantOp = index().getDefiningOp<ConstantOp>())
670     return constantOp.getValue().cast<IntegerAttr>().getInt();
671   return {};
672 }
673 
674 static LogicalResult verify(DimOp op) {
675   // Assume unknown index to be in range.
676   Optional<int64_t> index = op.getConstantIndex();
677   if (!index.hasValue())
678     return success();
679 
680   // Check that constant index is not knowingly out of range.
681   auto type = op.source().getType();
682   if (auto memrefType = type.dyn_cast<MemRefType>()) {
683     if (index.getValue() >= memrefType.getRank())
684       return op.emitOpError("index is out of range");
685   } else if (type.isa<UnrankedMemRefType>()) {
686     // Assume index to be in range.
687   } else {
688     llvm_unreachable("expected operand with memref type");
689   }
690   return success();
691 }
692 
693 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
694   // All forms of folding require a known index.
695   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
696   if (!index)
697     return {};
698 
699   // Folding for unranked types (UnrankedMemRefType) is not supported.
700   auto memrefType = source().getType().dyn_cast<MemRefType>();
701   if (!memrefType)
702     return {};
703 
704   // Fold if the shape extent along the given index is known.
705   if (!memrefType.isDynamicDim(index.getInt())) {
706     Builder builder(getContext());
707     return builder.getIndexAttr(memrefType.getShape()[index.getInt()]);
708   }
709 
710   // The size at the given index is now known to be a dynamic size.
711   unsigned unsignedIndex = index.getValue().getZExtValue();
712 
713   // Fold dim to the size argument for an `AllocOp`, `ViewOp`, or `SubViewOp`.
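  // For instance (illustrative IR), with
  //   %a = memref.alloc(%n, %m) : memref<?x?xf32>
  // a `memref.dim %a, %c1 : memref<?x?xf32>` folds to %m, the size operand
  // that was provided for dimension 1.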
714   Operation *definingOp = source().getDefiningOp();
715 
716   if (auto alloc = dyn_cast_or_null<AllocOp>(definingOp))
717     return *(alloc.getDynamicSizes().begin() +
718              memrefType.getDynamicDimIndex(unsignedIndex));
719 
720   if (auto alloca = dyn_cast_or_null<AllocaOp>(definingOp))
721     return *(alloca.getDynamicSizes().begin() +
722              memrefType.getDynamicDimIndex(unsignedIndex));
723 
724   if (auto view = dyn_cast_or_null<ViewOp>(definingOp))
725     return *(view.getDynamicSizes().begin() +
726              memrefType.getDynamicDimIndex(unsignedIndex));
727 
728   if (auto sizeInterface =
729           dyn_cast_or_null<OffsetSizeAndStrideOpInterface>(definingOp)) {
730     assert(sizeInterface.isDynamicSize(unsignedIndex) &&
731            "Expected dynamic subview size");
732     return sizeInterface.getDynamicSize(unsignedIndex);
733   }
734 
735   // dim(memrefcast) -> dim
736   if (succeeded(foldMemRefCast(*this)))
737     return getResult();
738 
739   return {};
740 }
741 
742 namespace {
/// Fold a dim of a memref.reshape operation into a load from the reshape's
/// shape operand.
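/// For example (illustrative IR),
/// ```mlir
///   %r = memref.reshape %src(%shape)
///       : (memref<*xf32>, memref<2xindex>) -> memref<?x?xf32>
///   %d = memref.dim %r, %c0 : memref<?x?xf32>
/// ```
/// becomes a load from the shape operand:
/// ```mlir
///   %d = memref.load %shape[%c0] : memref<2xindex>
/// ```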
745 struct DimOfMemRefReshape : public OpRewritePattern<DimOp> {
746   using OpRewritePattern<DimOp>::OpRewritePattern;
747 
748   LogicalResult matchAndRewrite(DimOp dim,
749                                 PatternRewriter &rewriter) const override {
750     auto reshape = dim.source().getDefiningOp<ReshapeOp>();
751 
752     if (!reshape)
753       return failure();
754 
755     // Place the load directly after the reshape to ensure that the shape memref
756     // was not mutated.
757     rewriter.setInsertionPointAfter(reshape);
758     Location loc = dim.getLoc();
759     Value load = rewriter.create<LoadOp>(loc, reshape.shape(), dim.index());
760     if (load.getType() != dim.getType())
761       load = rewriter.create<IndexCastOp>(loc, dim.getType(), load);
762     rewriter.replaceOp(dim, load);
763     return success();
764   }
765 };
766 
/// Fold a dim of a memref.buffer_cast into a tensor.dim of the source
/// tensor.
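/// For example (illustrative IR),
/// ```mlir
///   %m = memref.buffer_cast %t : memref<?xf32>
///   %d = memref.dim %m, %c0 : memref<?xf32>
/// ```
/// becomes
/// ```mlir
///   %d = tensor.dim %t, %c0 : tensor<?xf32>
/// ```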
768 struct DimOfCastOp : public OpRewritePattern<DimOp> {
769   using OpRewritePattern<DimOp>::OpRewritePattern;
770 
771   LogicalResult matchAndRewrite(DimOp dimOp,
772                                 PatternRewriter &rewriter) const override {
773     auto castOp = dimOp.source().getDefiningOp<BufferCastOp>();
774     if (!castOp)
775       return failure();
776     Value newSource = castOp.getOperand();
777     rewriter.replaceOpWithNewOp<tensor::DimOp>(dimOp, newSource, dimOp.index());
778     return success();
779   }
780 };
781 } // end anonymous namespace.
782 
783 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
784                                         MLIRContext *context) {
785   results.add<DimOfMemRefReshape, DimOfCastOp>(context);
786 }
787 
788 // ---------------------------------------------------------------------------
789 // DmaStartOp
790 // ---------------------------------------------------------------------------
791 
792 void DmaStartOp::build(OpBuilder &builder, OperationState &result,
793                        Value srcMemRef, ValueRange srcIndices, Value destMemRef,
794                        ValueRange destIndices, Value numElements,
795                        Value tagMemRef, ValueRange tagIndices, Value stride,
796                        Value elementsPerStride) {
797   result.addOperands(srcMemRef);
798   result.addOperands(srcIndices);
799   result.addOperands(destMemRef);
800   result.addOperands(destIndices);
801   result.addOperands({numElements, tagMemRef});
802   result.addOperands(tagIndices);
803   if (stride)
804     result.addOperands({stride, elementsPerStride});
805 }
806 
807 void DmaStartOp::print(OpAsmPrinter &p) {
808   p << " " << getSrcMemRef() << '[' << getSrcIndices() << "], "
809     << getDstMemRef() << '[' << getDstIndices() << "], " << getNumElements()
810     << ", " << getTagMemRef() << '[' << getTagIndices() << ']';
811   if (isStrided())
812     p << ", " << getStride() << ", " << getNumElementsPerStride();
813 
814   p.printOptionalAttrDict((*this)->getAttrs());
815   p << " : " << getSrcMemRef().getType() << ", " << getDstMemRef().getType()
816     << ", " << getTagMemRef().getType();
817 }
818 
// Parse DmaStartOp.
// Ex:
//   memref.dma_start %src[%i, %j], %dst[%k, %l], %size,
//                    %tag[%index], %stride, %num_elt_per_stride
//                      : memref<3076 x f32, 0>,
//                        memref<1024 x f32, 2>,
//                        memref<1 x i32>
//
827 ParseResult DmaStartOp::parse(OpAsmParser &parser, OperationState &result) {
828   OpAsmParser::OperandType srcMemRefInfo;
829   SmallVector<OpAsmParser::OperandType, 4> srcIndexInfos;
830   OpAsmParser::OperandType dstMemRefInfo;
831   SmallVector<OpAsmParser::OperandType, 4> dstIndexInfos;
832   OpAsmParser::OperandType numElementsInfo;
833   OpAsmParser::OperandType tagMemrefInfo;
834   SmallVector<OpAsmParser::OperandType, 4> tagIndexInfos;
835   SmallVector<OpAsmParser::OperandType, 2> strideInfo;
836 
837   SmallVector<Type, 3> types;
838   auto indexType = parser.getBuilder().getIndexType();
839 
840   // Parse and resolve the following list of operands:
841   // *) source memref followed by its indices (in square brackets).
842   // *) destination memref followed by its indices (in square brackets).
  // *) number of elements to transfer.
844   if (parser.parseOperand(srcMemRefInfo) ||
845       parser.parseOperandList(srcIndexInfos, OpAsmParser::Delimiter::Square) ||
846       parser.parseComma() || parser.parseOperand(dstMemRefInfo) ||
847       parser.parseOperandList(dstIndexInfos, OpAsmParser::Delimiter::Square) ||
848       parser.parseComma() || parser.parseOperand(numElementsInfo) ||
849       parser.parseComma() || parser.parseOperand(tagMemrefInfo) ||
850       parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square))
851     return failure();
852 
853   // Parse optional stride and elements per stride.
854   if (parser.parseTrailingOperandList(strideInfo))
855     return failure();
856 
857   bool isStrided = strideInfo.size() == 2;
858   if (!strideInfo.empty() && !isStrided) {
859     return parser.emitError(parser.getNameLoc(),
860                             "expected two stride related operands");
861   }
862 
863   if (parser.parseColonTypeList(types))
864     return failure();
865   if (types.size() != 3)
    return parser.emitError(parser.getNameLoc(), "expected 3 types");
867 
868   if (parser.resolveOperand(srcMemRefInfo, types[0], result.operands) ||
869       parser.resolveOperands(srcIndexInfos, indexType, result.operands) ||
870       parser.resolveOperand(dstMemRefInfo, types[1], result.operands) ||
871       parser.resolveOperands(dstIndexInfos, indexType, result.operands) ||
872       // size should be an index.
873       parser.resolveOperand(numElementsInfo, indexType, result.operands) ||
874       parser.resolveOperand(tagMemrefInfo, types[2], result.operands) ||
875       // tag indices should be index.
876       parser.resolveOperands(tagIndexInfos, indexType, result.operands))
877     return failure();
878 
879   if (isStrided) {
880     if (parser.resolveOperands(strideInfo, indexType, result.operands))
881       return failure();
882   }
883 
884   return success();
885 }
886 
887 LogicalResult DmaStartOp::verify() {
888   unsigned numOperands = getNumOperands();
889 
890   // Mandatory non-variadic operands are: src memref, dst memref, tag memref and
891   // the number of elements.
892   if (numOperands < 4)
893     return emitOpError("expected at least 4 operands");
894 
895   // Check types of operands. The order of these calls is important: the later
896   // calls rely on some type properties to compute the operand position.
897   // 1. Source memref.
898   if (!getSrcMemRef().getType().isa<MemRefType>())
899     return emitOpError("expected source to be of memref type");
900   if (numOperands < getSrcMemRefRank() + 4)
901     return emitOpError() << "expected at least " << getSrcMemRefRank() + 4
902                          << " operands";
903   if (!getSrcIndices().empty() &&
904       !llvm::all_of(getSrcIndices().getTypes(),
905                     [](Type t) { return t.isIndex(); }))
906     return emitOpError("expected source indices to be of index type");
907 
908   // 2. Destination memref.
909   if (!getDstMemRef().getType().isa<MemRefType>())
910     return emitOpError("expected destination to be of memref type");
911   unsigned numExpectedOperands = getSrcMemRefRank() + getDstMemRefRank() + 4;
912   if (numOperands < numExpectedOperands)
913     return emitOpError() << "expected at least " << numExpectedOperands
914                          << " operands";
915   if (!getDstIndices().empty() &&
916       !llvm::all_of(getDstIndices().getTypes(),
917                     [](Type t) { return t.isIndex(); }))
918     return emitOpError("expected destination indices to be of index type");
919 
920   // 3. Number of elements.
921   if (!getNumElements().getType().isIndex())
922     return emitOpError("expected num elements to be of index type");
923 
924   // 4. Tag memref.
925   if (!getTagMemRef().getType().isa<MemRefType>())
926     return emitOpError("expected tag to be of memref type");
927   numExpectedOperands += getTagMemRefRank();
928   if (numOperands < numExpectedOperands)
929     return emitOpError() << "expected at least " << numExpectedOperands
930                          << " operands";
931   if (!getTagIndices().empty() &&
932       !llvm::all_of(getTagIndices().getTypes(),
933                     [](Type t) { return t.isIndex(); }))
934     return emitOpError("expected tag indices to be of index type");
935 
936   // Optional stride-related operands must be either both present or both
937   // absent.
938   if (numOperands != numExpectedOperands &&
939       numOperands != numExpectedOperands + 2)
940     return emitOpError("incorrect number of operands");
941 
942   // 5. Strides.
943   if (isStrided()) {
944     if (!getStride().getType().isIndex() ||
945         !getNumElementsPerStride().getType().isIndex())
946       return emitOpError(
947           "expected stride and num elements per stride to be of type index");
948   }
949 
950   return success();
951 }
952 
953 LogicalResult DmaStartOp::fold(ArrayRef<Attribute> cstOperands,
954                                SmallVectorImpl<OpFoldResult> &results) {
955   /// dma_start(memrefcast) -> dma_start
956   return foldMemRefCast(*this);
957 }
958 
959 // ---------------------------------------------------------------------------
960 // DmaWaitOp
961 // ---------------------------------------------------------------------------
962 
963 void DmaWaitOp::build(OpBuilder &builder, OperationState &result,
964                       Value tagMemRef, ValueRange tagIndices,
965                       Value numElements) {
966   result.addOperands(tagMemRef);
967   result.addOperands(tagIndices);
968   result.addOperands(numElements);
969 }
970 
971 void DmaWaitOp::print(OpAsmPrinter &p) {
972   p << " " << getTagMemRef() << '[' << getTagIndices() << "], "
973     << getNumElements();
974   p.printOptionalAttrDict((*this)->getAttrs());
975   p << " : " << getTagMemRef().getType();
976 }
977 
978 // Parse DmaWaitOp.
979 // Eg:
//   memref.dma_wait %tag[%index], %num_elements : memref<1 x i32>
981 //
982 ParseResult DmaWaitOp::parse(OpAsmParser &parser, OperationState &result) {
983   OpAsmParser::OperandType tagMemrefInfo;
984   SmallVector<OpAsmParser::OperandType, 2> tagIndexInfos;
985   Type type;
986   auto indexType = parser.getBuilder().getIndexType();
987   OpAsmParser::OperandType numElementsInfo;
988 
989   // Parse tag memref, its indices, and dma size.
990   if (parser.parseOperand(tagMemrefInfo) ||
991       parser.parseOperandList(tagIndexInfos, OpAsmParser::Delimiter::Square) ||
992       parser.parseComma() || parser.parseOperand(numElementsInfo) ||
993       parser.parseColonType(type) ||
994       parser.resolveOperand(tagMemrefInfo, type, result.operands) ||
995       parser.resolveOperands(tagIndexInfos, indexType, result.operands) ||
996       parser.resolveOperand(numElementsInfo, indexType, result.operands))
997     return failure();
998 
999   return success();
1000 }
1001 
1002 LogicalResult DmaWaitOp::fold(ArrayRef<Attribute> cstOperands,
1003                               SmallVectorImpl<OpFoldResult> &results) {
1004   /// dma_wait(memrefcast) -> dma_wait
1005   return foldMemRefCast(*this);
1006 }
1007 
1008 LogicalResult DmaWaitOp::verify() {
1009   // Mandatory non-variadic operands are tag and the number of elements.
1010   if (getNumOperands() < 2)
1011     return emitOpError() << "expected at least 2 operands";
1012 
1013   // Check types of operands. The order of these calls is important: the later
1014   // calls rely on some type properties to compute the operand position.
1015   if (!getTagMemRef().getType().isa<MemRefType>())
1016     return emitOpError() << "expected tag to be of memref type";
1017 
1018   if (getNumOperands() != 2 + getTagMemRefRank())
1019     return emitOpError() << "expected " << 2 + getTagMemRefRank()
1020                          << " operands";
1021 
1022   if (!getTagIndices().empty() &&
1023       !llvm::all_of(getTagIndices().getTypes(),
1024                     [](Type t) { return t.isIndex(); }))
1025     return emitOpError() << "expected tag indices to be of index type";
1026 
1027   if (!getNumElements().getType().isIndex())
1028     return emitOpError()
1029            << "expected the number of elements to be of index type";
1030 
1031   return success();
1032 }
1033 
1034 //===----------------------------------------------------------------------===//
1035 // GlobalOp
1036 //===----------------------------------------------------------------------===//
1037 
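/// The hooks below print and parse the trailing type and initial value of a
/// memref.global, e.g. (illustrative examples):
/// ```mlir
///   memref.global "private" @x : memref<2xf32> = dense<[0.0, 2.0]>
///   memref.global "private" @y : memref<4xi32> = uninitialized
/// ```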
1038 static void printGlobalMemrefOpTypeAndInitialValue(OpAsmPrinter &p, GlobalOp op,
1039                                                    TypeAttr type,
1040                                                    Attribute initialValue) {
1041   p << type;
1042   if (!op.isExternal()) {
1043     p << " = ";
1044     if (op.isUninitialized())
1045       p << "uninitialized";
1046     else
1047       p.printAttributeWithoutType(initialValue);
1048   }
1049 }
1050 
1051 static ParseResult
1052 parseGlobalMemrefOpTypeAndInitialValue(OpAsmParser &parser, TypeAttr &typeAttr,
1053                                        Attribute &initialValue) {
1054   Type type;
1055   if (parser.parseType(type))
1056     return failure();
1057 
1058   auto memrefType = type.dyn_cast<MemRefType>();
1059   if (!memrefType || !memrefType.hasStaticShape())
1060     return parser.emitError(parser.getNameLoc())
1061            << "type should be static shaped memref, but got " << type;
1062   typeAttr = TypeAttr::get(type);
1063 
1064   if (parser.parseOptionalEqual())
1065     return success();
1066 
1067   if (succeeded(parser.parseOptionalKeyword("uninitialized"))) {
1068     initialValue = UnitAttr::get(parser.getBuilder().getContext());
1069     return success();
1070   }
1071 
1072   Type tensorType = getTensorTypeFromMemRefType(memrefType);
1073   if (parser.parseAttribute(initialValue, tensorType))
1074     return failure();
1075   if (!initialValue.isa<ElementsAttr>())
1076     return parser.emitError(parser.getNameLoc())
1077            << "initial value should be a unit or elements attribute";
1078   return success();
1079 }
1080 
1081 static LogicalResult verify(GlobalOp op) {
1082   auto memrefType = op.type().dyn_cast<MemRefType>();
1083   if (!memrefType || !memrefType.hasStaticShape())
1084     return op.emitOpError("type should be static shaped memref, but got ")
1085            << op.type();
1086 
1087   // Verify that the initial value, if present, is either a unit attribute or
1088   // an elements attribute.
1089   if (op.initial_value().hasValue()) {
1090     Attribute initValue = op.initial_value().getValue();
1091     if (!initValue.isa<UnitAttr>() && !initValue.isa<ElementsAttr>())
1092       return op.emitOpError("initial value should be a unit or elements "
1093                             "attribute, but got ")
1094              << initValue;
1095 
1096     // Check that the type of the initial value is compatible with the type of
1097     // the global variable.
1098     if (initValue.isa<ElementsAttr>()) {
1099       Type initType = initValue.getType();
1100       Type tensorType = getTensorTypeFromMemRefType(memrefType);
1101       if (initType != tensorType)
1102         return op.emitOpError("initial value expected to be of type ")
1103                << tensorType << ", but was of type " << initType;
1104     }
1105   }
1106 
1107   // TODO: verify visibility for declarations.
1108   return success();
1109 }
1110 
1111 //===----------------------------------------------------------------------===//
1112 // GetGlobalOp
1113 //===----------------------------------------------------------------------===//
1114 
1115 LogicalResult
1116 GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
1117   // Verify that the result type is same as the type of the referenced
1118   // memref.global op.
1119   auto global =
1120       symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, nameAttr());
1121   if (!global)
1122     return emitOpError("'")
1123            << name() << "' does not reference a valid global memref";
1124 
1125   Type resultType = result().getType();
1126   if (global.type() != resultType)
1127     return emitOpError("result type ")
1128            << resultType << " does not match type " << global.type()
1129            << " of the global memref @" << name();
1130   return success();
1131 }
1132 
1133 //===----------------------------------------------------------------------===//
1134 // LoadOp
1135 //===----------------------------------------------------------------------===//
1136 
1137 static LogicalResult verify(LoadOp op) {
1138   if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
1139     return op.emitOpError("incorrect number of indices for load");
1140   return success();
1141 }
1142 
1143 OpFoldResult LoadOp::fold(ArrayRef<Attribute> cstOperands) {
1144   /// load(memrefcast) -> load
1145   if (succeeded(foldMemRefCast(*this)))
1146     return getResult();
1147   return OpFoldResult();
1148 }
1149 
1150 namespace {
/// Fold a load on a buffer_cast operation into a tensor.extract on the
/// corresponding tensor.
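/// For example (illustrative IR),
/// ```mlir
///   %m = memref.buffer_cast %t : memref<?xf32>
///   %v = memref.load %m[%i] : memref<?xf32>
/// ```
/// becomes
/// ```mlir
///   %v = tensor.extract %t[%i] : tensor<?xf32>
/// ```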
1153 struct LoadOfBufferCast : public OpRewritePattern<LoadOp> {
1154   using OpRewritePattern<LoadOp>::OpRewritePattern;
1155 
1156   LogicalResult matchAndRewrite(LoadOp load,
1157                                 PatternRewriter &rewriter) const override {
1158     auto buffercast = load.memref().getDefiningOp<BufferCastOp>();
1159     if (!buffercast)
1160       return failure();
1161 
1162     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, buffercast.tensor(),
1163                                                    load.indices());
1164     return success();
1165   }
1166 };
1167 } // end anonymous namespace.
1168 
1169 void LoadOp::getCanonicalizationPatterns(RewritePatternSet &results,
1170                                          MLIRContext *context) {
1171   results.add<LoadOfBufferCast>(context);
1172 }
1173 
1174 //===----------------------------------------------------------------------===//
1175 // PrefetchOp
1176 //===----------------------------------------------------------------------===//
1177 
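/// The custom printer and parser below handle the following form
/// (illustrative example):
/// ```mlir
///   memref.prefetch %0[%i, %j], read, locality<3>, data : memref<400x400xf32>
/// ```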
1178 static void print(OpAsmPrinter &p, PrefetchOp op) {
1179   p << " " << op.memref() << '[';
1180   p.printOperands(op.indices());
1181   p << ']' << ", " << (op.isWrite() ? "write" : "read");
1182   p << ", locality<" << op.localityHint();
1183   p << ">, " << (op.isDataCache() ? "data" : "instr");
1184   p.printOptionalAttrDict(
1185       op->getAttrs(),
1186       /*elidedAttrs=*/{"localityHint", "isWrite", "isDataCache"});
1187   p << " : " << op.getMemRefType();
1188 }
1189 
1190 static ParseResult parsePrefetchOp(OpAsmParser &parser,
1191                                    OperationState &result) {
1192   OpAsmParser::OperandType memrefInfo;
1193   SmallVector<OpAsmParser::OperandType, 4> indexInfo;
1194   IntegerAttr localityHint;
1195   MemRefType type;
1196   StringRef readOrWrite, cacheType;
1197 
1198   auto indexTy = parser.getBuilder().getIndexType();
1199   auto i32Type = parser.getBuilder().getIntegerType(32);
1200   if (parser.parseOperand(memrefInfo) ||
1201       parser.parseOperandList(indexInfo, OpAsmParser::Delimiter::Square) ||
1202       parser.parseComma() || parser.parseKeyword(&readOrWrite) ||
1203       parser.parseComma() || parser.parseKeyword("locality") ||
1204       parser.parseLess() ||
1205       parser.parseAttribute(localityHint, i32Type, "localityHint",
1206                             result.attributes) ||
1207       parser.parseGreater() || parser.parseComma() ||
1208       parser.parseKeyword(&cacheType) || parser.parseColonType(type) ||
1209       parser.resolveOperand(memrefInfo, type, result.operands) ||
1210       parser.resolveOperands(indexInfo, indexTy, result.operands))
1211     return failure();
1212 
1213   if (!readOrWrite.equals("read") && !readOrWrite.equals("write"))
1214     return parser.emitError(parser.getNameLoc(),
1215                             "rw specifier has to be 'read' or 'write'");
1216   result.addAttribute(
1217       PrefetchOp::getIsWriteAttrName(),
1218       parser.getBuilder().getBoolAttr(readOrWrite.equals("write")));
1219 
1220   if (!cacheType.equals("data") && !cacheType.equals("instr"))
1221     return parser.emitError(parser.getNameLoc(),
1222                             "cache type has to be 'data' or 'instr'");
1223 
1224   result.addAttribute(
1225       PrefetchOp::getIsDataCacheAttrName(),
1226       parser.getBuilder().getBoolAttr(cacheType.equals("data")));
1227 
1228   return success();
1229 }
1230 
1231 static LogicalResult verify(PrefetchOp op) {
1232   if (op.getNumOperands() != 1 + op.getMemRefType().getRank())
1233     return op.emitOpError("too few indices");
1234 
1235   return success();
1236 }
1237 
1238 LogicalResult PrefetchOp::fold(ArrayRef<Attribute> cstOperands,
1239                                SmallVectorImpl<OpFoldResult> &results) {
1240   // prefetch(memrefcast) -> prefetch
1241   return foldMemRefCast(*this);
1242 }
1243 
1244 //===----------------------------------------------------------------------===//
1245 // ReinterpretCastOp
1246 //===----------------------------------------------------------------------===//
1247 
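/// An illustrative example (names made up) of the op these builders create:
/// ```mlir
///   %dst = memref.reinterpret_cast %src to
///       offset: [0], sizes: [10, 2], strides: [2, 1]
///       : memref<?xf32> to memref<10x2xf32>
/// ```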
1248 /// Build a ReinterpretCastOp with all dynamic entries: `staticOffsets`,
1249 /// `staticSizes` and `staticStrides` are automatically filled with
1250 /// source-memref-rank sentinel values that encode dynamic entries.
1251 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1252                               MemRefType resultType, Value source,
1253                               OpFoldResult offset, ArrayRef<OpFoldResult> sizes,
1254                               ArrayRef<OpFoldResult> strides,
1255                               ArrayRef<NamedAttribute> attrs) {
1256   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1257   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1258   dispatchIndexOpFoldResults(offset, dynamicOffsets, staticOffsets,
1259                              ShapedType::kDynamicStrideOrOffset);
1260   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1261                              ShapedType::kDynamicSize);
1262   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1263                              ShapedType::kDynamicStrideOrOffset);
1264   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1265         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1266         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1267   result.addAttributes(attrs);
1268 }
1269 
1270 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1271                               MemRefType resultType, Value source,
1272                               int64_t offset, ArrayRef<int64_t> sizes,
1273                               ArrayRef<int64_t> strides,
1274                               ArrayRef<NamedAttribute> attrs) {
1275   SmallVector<OpFoldResult> sizeValues =
1276       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1277         return b.getI64IntegerAttr(v);
1278       }));
1279   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1280       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1281         return b.getI64IntegerAttr(v);
1282       }));
1283   build(b, result, resultType, source, b.getI64IntegerAttr(offset), sizeValues,
1284         strideValues, attrs);
1285 }
1286 
1287 void ReinterpretCastOp::build(OpBuilder &b, OperationState &result,
1288                               MemRefType resultType, Value source, Value offset,
1289                               ValueRange sizes, ValueRange strides,
1290                               ArrayRef<NamedAttribute> attrs) {
1291   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1292       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1293   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1294       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1295   build(b, result, resultType, source, offset, sizeValues, strideValues, attrs);
1296 }
1297 
1298 // TODO: ponder whether we want to allow missing trailing sizes/strides that are
1299 // completed automatically, like we have for subview and extract_slice.
1300 static LogicalResult verify(ReinterpretCastOp op) {
1301   // The source and result memrefs should be in the same memory space.
1302   auto srcType = op.source().getType().cast<BaseMemRefType>();
1303   auto resultType = op.getType().cast<MemRefType>();
1304   if (srcType.getMemorySpace() != resultType.getMemorySpace())
1305     return op.emitError("different memory spaces specified for source type ")
1306            << srcType << " and result memref type " << resultType;
1307   if (srcType.getElementType() != resultType.getElementType())
1308     return op.emitError("different element types specified for source type ")
1309            << srcType << " and result memref type " << resultType;
1310 
1311   // Match sizes in result memref type and in static_sizes attribute.
1312   for (auto &en :
1313        llvm::enumerate(llvm::zip(resultType.getShape(),
1314                                  extractFromI64ArrayAttr(op.static_sizes())))) {
1315     int64_t resultSize = std::get<0>(en.value());
1316     int64_t expectedSize = std::get<1>(en.value());
1317     if (resultSize != expectedSize)
1318       return op.emitError("expected result type with size = ")
1319              << expectedSize << " instead of " << resultSize
1320              << " in dim = " << en.index();
1321   }
1322 
1323   // Match offset and strides in static_offset and static_strides attributes if
1324   // result memref type has an affine map specified.
1325   if (!resultType.getAffineMaps().empty()) {
1326     int64_t resultOffset;
1327     SmallVector<int64_t, 4> resultStrides;
1328     if (failed(getStridesAndOffset(resultType, resultStrides, resultOffset)))
1329       return failure();
1330 
1331     // Match offset in result memref type and in static_offsets attribute.
1332     int64_t expectedOffset =
1333         extractFromI64ArrayAttr(op.static_offsets()).front();
1334     if (resultOffset != expectedOffset)
1335       return op.emitError("expected result type with offset = ")
1336              << resultOffset << " instead of " << expectedOffset;
1337 
1338     // Match strides in result memref type and in static_strides attribute.
1339     for (auto &en : llvm::enumerate(llvm::zip(
1340              resultStrides, extractFromI64ArrayAttr(op.static_strides())))) {
1341       int64_t resultStride = std::get<0>(en.value());
1342       int64_t expectedStride = std::get<1>(en.value());
1343       if (resultStride != expectedStride)
1344         return op.emitError("expected result type with stride = ")
1345                << expectedStride << " instead of " << resultStride
1346                << " in dim = " << en.index();
1347     }
1348   }
1349   return success();
1350 }
1351 
1352 //===----------------------------------------------------------------------===//
1353 // Reassociative reshape ops
1354 //===----------------------------------------------------------------------===//
1355 
1356 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
1357   return getSymbolLessAffineMaps(getReassociationExprs());
1358 }
1359 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
1360   return convertReassociationIndicesToExprs(getContext(),
1361                                             getReassociationIndices());
1362 }
1363 
1364 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
1365   return getSymbolLessAffineMaps(getReassociationExprs());
1366 }
1367 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
1368   return convertReassociationIndicesToExprs(getContext(),
1369                                             getReassociationIndices());
1370 }
1371 
1372 static void print(OpAsmPrinter &p, ExpandShapeOp op) {
1373   ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
1374 }
1375 
1376 static void print(OpAsmPrinter &p, CollapseShapeOp op) {
1377   ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
1378 }
1379 
1380 /// Detect whether memref dims [dim, dim + extent) can be reshaped without
1381 /// copies.
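///
/// A small worked example (illustrative only): for sizes [4, 8, 2] with
/// strides [16, 2, 1], the band [0, 3) is reshapable since
/// strides[0] == strides[1] * sizes[1] (16 == 2 * 8) and
/// strides[1] == strides[2] * sizes[2] (2 == 1 * 2); with strides [32, 2, 1]
/// the first equality fails and a copy would be required.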
1382 static bool isReshapableDimBand(unsigned dim, unsigned extent,
1383                                 ArrayRef<int64_t> sizes,
1384                                 ArrayRef<AffineExpr> strides) {
1385   assert(sizes.size() == strides.size() && "mismatched ranks");
  // Loop until `e - 1` so that the `idx + 1` accesses below stay in bounds.
1388   for (auto idx = dim, e = dim + extent; idx + 1 < e; ++idx) {
1389     // Only bands of static shapes are reshapable. This is due to the fact that
1390     // there is no relation between dynamic sizes and dynamic strides: we do not
1391     // have enough information to know whether a "-1" size corresponds to the
1392     // proper symbol in the AffineExpr of a stride.
1393     if (ShapedType::isDynamic(sizes[dim + 1]))
1394       return false;
1395     // TODO: Refine this by passing the proper nDims and nSymbols so we can
1396     // simplify on the fly and catch more reshapable cases.
1397     if (strides[idx] != strides[idx + 1] * sizes[idx + 1])
1398       return false;
1399   }
1400   return true;
1401 }
1402 
1403 /// Compute the MemRefType obtained by applying the `reassociation` (which is
1404 /// expected to be valid) to `type`.
/// If `type` is a contiguous MemRefType, this always produces a contiguous
/// MemRefType.
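///
/// For example (a sketch, not exhaustive): collapsing a contiguous
/// memref<4x8x2xf32> with the reassociation [[0, 1], [2]] yields
/// memref<32x2xf32>, whereas a band whose strides do not compose contiguously
/// collapses into a dynamic size paired with a dynamic stride.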
1407 static MemRefType
1408 computeReshapeCollapsedType(MemRefType type,
1409                             ArrayRef<AffineMap> reassociation) {
1410   auto sizes = type.getShape();
1411   AffineExpr offset;
1412   SmallVector<AffineExpr, 4> strides;
1413   auto status = getStridesAndOffset(type, strides, offset);
1414   (void)status;
1415   assert(succeeded(status) && "expected strided memref");
1416 
1417   SmallVector<int64_t, 4> newSizes;
1418   newSizes.reserve(reassociation.size());
1419   SmallVector<AffineExpr, 4> newStrides;
1420   newStrides.reserve(reassociation.size());
1421 
1422   // Use the fact that reassociation is valid to simplify the logic: only use
1423   // each map's rank.
1424   assert(isReassociationValid(reassociation) && "invalid reassociation");
1425   unsigned currentDim = 0;
1426   for (AffineMap m : reassociation) {
1427     unsigned dim = m.getNumResults();
1428     int64_t size = 1;
1429     AffineExpr stride = strides[currentDim + dim - 1];
1430     if (!isReshapableDimBand(currentDim, dim, sizes, strides)) {
1431       size = ShapedType::kDynamicSize;
1432       stride = AffineExpr();
1433     } else {
1434       for (unsigned d = 0; d < dim; ++d)
1435         size *= sizes[currentDim + d];
1436     }
1437     newSizes.push_back(size);
1438     newStrides.push_back(stride);
1439     currentDim += dim;
1440   }
1441 
1442   // Early-exit: if `type` is contiguous, the result must be contiguous.
1443   if (canonicalizeStridedLayout(type).getAffineMaps().empty())
1444     return MemRefType::Builder(type).setShape(newSizes).setAffineMaps({});
1445 
1446   // Convert back to int64_t because we don't have enough information to create
1447   // new strided layouts from AffineExpr only. This corresponds to a case where
1448   // copies may be necessary.
1449   int64_t intOffset = ShapedType::kDynamicStrideOrOffset;
1450   if (auto o = offset.dyn_cast<AffineConstantExpr>())
1451     intOffset = o.getValue();
1452   SmallVector<int64_t, 4> intStrides;
1453   intStrides.reserve(strides.size());
1454   for (auto stride : newStrides) {
1455     if (auto cst = stride.dyn_cast_or_null<AffineConstantExpr>())
1456       intStrides.push_back(cst.getValue());
1457     else
1458       intStrides.push_back(ShapedType::kDynamicStrideOrOffset);
1459   }
1460   auto layout =
1461       makeStridedLinearLayoutMap(intStrides, intOffset, type.getContext());
1462   return canonicalizeStridedLayout(
1463       MemRefType::Builder(type).setShape(newSizes).setAffineMaps({layout}));
1464 }
1465 
1466 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1467                           ArrayRef<ReassociationIndices> reassociation,
1468                           ArrayRef<NamedAttribute> attrs) {
1469   auto memRefType = src.getType().cast<MemRefType>();
1470   auto resultType = computeReshapeCollapsedType(
1471       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1472                       b.getContext(), reassociation)));
1473   build(b, result, resultType, src, attrs);
1474   result.addAttribute(getReassociationAttrName(),
1475                       getReassociationIndicesAttribute(b, reassociation));
1476 }
1477 
1478 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
1479                             ArrayRef<ReassociationIndices> reassociation,
1480                             ArrayRef<NamedAttribute> attrs) {
1481   auto memRefType = src.getType().cast<MemRefType>();
1482   auto resultType = computeReshapeCollapsedType(
1483       memRefType, getSymbolLessAffineMaps(convertReassociationIndicesToExprs(
1484                       b.getContext(), reassociation)));
1485   build(b, result, resultType, src, attrs);
1486   result.addAttribute(getReassociationAttrName(),
1487                       getReassociationIndicesAttribute(b, reassociation));
1488 }
1489 
1490 template <typename ReshapeOp,
1491           bool isExpansion = std::is_same<ReshapeOp, ExpandShapeOp>::value>
1492 static LogicalResult verifyReshapeOp(ReshapeOp op, MemRefType expandedType,
1493                                      MemRefType collapsedType) {
1494   if (failed(
1495           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
1496     return failure();
1497   auto maps = op.getReassociationMaps();
1498   MemRefType expectedType = computeReshapeCollapsedType(expandedType, maps);
1499   if (collapsedType != expectedType)
1500     return op.emitOpError("expected collapsed type to be ")
1501            << expectedType << ", but got " << collapsedType;
1502   return success();
1503 }
1504 
1505 static LogicalResult verify(ExpandShapeOp op) {
1506   return verifyReshapeOp(op, op.getResultType(), op.getSrcType());
1507 }
1508 
1509 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1510                                                 MLIRContext *context) {
1511   results.add<CollapseReshapeOps<ExpandShapeOp>,
1512               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>>(context);
1513 }
1514 
1515 static LogicalResult verify(CollapseShapeOp op) {
1516   return verifyReshapeOp(op, op.getSrcType(), op.getResultType());
1517 }
1518 
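/// Pattern to fold a memref.cast into a consuming memref.collapse_shape when
/// `canFoldIntoConsumerOp` allows it. As a rough, illustrative example:
///
/// ```
///   %0 = memref.cast %arg : memref<4x8xf32> to memref<?x8xf32>
///   %1 = memref.collapse_shape %0 [[0, 1]]
///       : memref<?x8xf32> into memref<?xf32>
/// ```
/// may be rewritten to a collapse_shape directly on %arg (of type
/// memref<32xf32>), followed by a memref.cast back to the original result
/// type when the two differ.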
1519 struct CollapseShapeOpMemRefCastFolder
1520     : public OpRewritePattern<CollapseShapeOp> {
1521 public:
1522   using OpRewritePattern<CollapseShapeOp>::OpRewritePattern;
1523 
1524   LogicalResult matchAndRewrite(CollapseShapeOp op,
1525                                 PatternRewriter &rewriter) const override {
1526     auto cast = op.getOperand().getDefiningOp<CastOp>();
1527     if (!cast)
1528       return failure();
1529 
1530     if (!CastOp::canFoldIntoConsumerOp(cast))
1531       return failure();
1532 
1533     Type newResultType = computeReshapeCollapsedType(
1534         cast.getOperand().getType().cast<MemRefType>(),
1535         op.getReassociationMaps());
1536 
1537     if (newResultType == op.getResultType()) {
1538       rewriter.updateRootInPlace(
1539           op, [&]() { op.srcMutable().assign(cast.source()); });
1540     } else {
1541       Value newOp = rewriter.create<CollapseShapeOp>(
1542           op->getLoc(), cast.source(), op.getReassociationIndices());
1543       rewriter.replaceOpWithNewOp<CastOp>(op, op.getType(), newOp);
1544     }
1545     return success();
1546   }
1547 };
1548 
1549 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
1550                                                   MLIRContext *context) {
1551   results.add<CollapseReshapeOps<CollapseShapeOp>,
1552               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
1553               CollapseShapeOpMemRefCastFolder>(context);
1554 }
1555 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
1556   if (succeeded(foldMemRefCast(*this)))
1557     return getResult();
1558   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
1559 }
1560 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
1561   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
1562 }
1563 
1564 //===----------------------------------------------------------------------===//
1565 // ReshapeOp
1566 //===----------------------------------------------------------------------===//
1567 
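// A well-formed example of the op this verifier checks (illustrative, with
// hypothetical types):
//
//   %dst = memref.reshape %src(%shape)
//       : (memref<4x5xf32>, memref<2xindex>) -> memref<?x?xf32>
//
// Both memref types must use identity layouts, the element types must match,
// and reshaping to a ranked memref requires a statically shaped shape operand
// whose length equals the result rank.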
1568 static LogicalResult verify(ReshapeOp op) {
1569   Type operandType = op.source().getType();
1570   Type resultType = op.result().getType();
1571 
1572   Type operandElementType = operandType.cast<ShapedType>().getElementType();
1573   Type resultElementType = resultType.cast<ShapedType>().getElementType();
1574   if (operandElementType != resultElementType)
1575     return op.emitOpError("element types of source and destination memref "
1576                           "types should be the same");
1577 
1578   if (auto operandMemRefType = operandType.dyn_cast<MemRefType>())
1579     if (!operandMemRefType.getAffineMaps().empty())
1580       return op.emitOpError(
1581           "source memref type should have identity affine map");
1582 
1583   int64_t shapeSize = op.shape().getType().cast<MemRefType>().getDimSize(0);
1584   auto resultMemRefType = resultType.dyn_cast<MemRefType>();
1585   if (resultMemRefType) {
1586     if (!resultMemRefType.getAffineMaps().empty())
1587       return op.emitOpError(
1588           "result memref type should have identity affine map");
1589     if (shapeSize == ShapedType::kDynamicSize)
1590       return op.emitOpError("cannot use shape operand with dynamic length to "
1591                             "reshape to statically-ranked memref type");
1592     if (shapeSize != resultMemRefType.getRank())
1593       return op.emitOpError(
1594           "length of shape operand differs from the result's memref rank");
1595   }
1596   return success();
1597 }
1598 
1599 //===----------------------------------------------------------------------===//
1600 // StoreOp
1601 //===----------------------------------------------------------------------===//
1602 
1603 static LogicalResult verify(StoreOp op) {
1604   if (op.getNumOperands() != 2 + op.getMemRefType().getRank())
1605     return op.emitOpError("store index operand count not equal to memref rank");
1606 
1607   return success();
1608 }
1609 
1610 LogicalResult StoreOp::fold(ArrayRef<Attribute> cstOperands,
1611                             SmallVectorImpl<OpFoldResult> &results) {
1612   /// store(memrefcast) -> store
1613   return foldMemRefCast(*this, getValueToStore());
1614 }
1615 
1616 //===----------------------------------------------------------------------===//
1617 // SubViewOp
1618 //===----------------------------------------------------------------------===//
1619 
1620 namespace {
1621 /// Helpers to write more idiomatic operations.
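/// The wrappers below make arithmetic saturate on the dynamic sentinel; e.g.,
/// illustratively, `Wrapper(4) * ShapedType::kDynamicStrideOrOffset` yields
/// `ShapedType::kDynamicStrideOrOffset` rather than a meaningless product.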
1622 namespace saturated_arith {
1623 struct Wrapper {
1624   explicit Wrapper(int64_t v) : v(v) {}
1625   operator int64_t() { return v; }
1626   int64_t v;
1627 };
1628 Wrapper operator+(Wrapper a, int64_t b) {
1629   if (ShapedType::isDynamicStrideOrOffset(a) ||
1630       ShapedType::isDynamicStrideOrOffset(b))
1631     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1632   return Wrapper(a.v + b);
1633 }
1634 Wrapper operator*(Wrapper a, int64_t b) {
1635   if (ShapedType::isDynamicStrideOrOffset(a) ||
1636       ShapedType::isDynamicStrideOrOffset(b))
1637     return Wrapper(ShapedType::kDynamicStrideOrOffset);
1638   return Wrapper(a.v * b);
1639 }
1640 } // end namespace saturated_arith
1641 } // end namespace
1642 
1643 /// A subview result type can be fully inferred from the source type and the
1644 /// static representation of offsets, sizes and strides. Special sentinels
1645 /// encode the dynamic case.
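///
/// As a worked, illustrative example: a subview of memref<8x16x4xf32>
/// (strides [64, 4, 1], offset 0) with offsets [3, 4, 2], sizes [1, 6, 3] and
/// strides [1, 1, 1] has
///   offset  = 0 + 3 * 64 + 4 * 4 + 2 * 1 = 210
///   strides = [64 * 1, 4 * 1, 1 * 1] = [64, 4, 1]
/// and thus a 1x6x3 result shape with that strided layout.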
1646 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1647                                 ArrayRef<int64_t> leadingStaticOffsets,
1648                                 ArrayRef<int64_t> leadingStaticSizes,
1649                                 ArrayRef<int64_t> leadingStaticStrides) {
1650   // A subview may specify only a leading subset of offset/sizes/strides in
1651   // which case we complete with offset=0, sizes from memref type and strides=1.
1652   unsigned rank = sourceMemRefType.getRank();
1653   assert(leadingStaticOffsets.size() <= rank &&
1654          "unexpected leadingStaticOffsets overflow");
1655   assert(leadingStaticSizes.size() <= rank &&
1656          "unexpected leadingStaticSizes overflow");
1657   assert(leadingStaticStrides.size() <= rank &&
1658          "unexpected leadingStaticStrides overflow");
1659   auto staticOffsets = llvm::to_vector<4>(leadingStaticOffsets);
1660   auto staticSizes = llvm::to_vector<4>(leadingStaticSizes);
1661   auto staticStrides = llvm::to_vector<4>(leadingStaticStrides);
1662   unsigned numTrailingOffsets = rank - staticOffsets.size();
1663   unsigned numTrailingSizes = rank - staticSizes.size();
1664   unsigned numTrailingStrides = rank - staticStrides.size();
1665   staticOffsets.append(numTrailingOffsets, 0);
1666   llvm::append_range(staticSizes,
1667                      sourceMemRefType.getShape().take_back(numTrailingSizes));
1668   staticStrides.append(numTrailingStrides, 1);
1669 
1670   // Extract source offset and strides.
1671   int64_t sourceOffset;
1672   SmallVector<int64_t, 4> sourceStrides;
1673   auto res = getStridesAndOffset(sourceMemRefType, sourceStrides, sourceOffset);
1674   assert(succeeded(res) && "SubViewOp expected strided memref type");
1675   (void)res;
1676 
1677   // Compute target offset whose value is:
1678   //   `sourceOffset + sum_i(staticOffset_i * sourceStrides_i)`.
1679   int64_t targetOffset = sourceOffset;
1680   for (auto it : llvm::zip(staticOffsets, sourceStrides)) {
1681     auto staticOffset = std::get<0>(it), targetStride = std::get<1>(it);
1682     using namespace saturated_arith;
1683     targetOffset = Wrapper(targetOffset) + Wrapper(staticOffset) * targetStride;
1684   }
1685 
1686   // Compute target stride whose value is:
1687   //   `sourceStrides_i * staticStrides_i`.
1688   SmallVector<int64_t, 4> targetStrides;
1689   targetStrides.reserve(staticOffsets.size());
1690   for (auto it : llvm::zip(sourceStrides, staticStrides)) {
1691     auto sourceStride = std::get<0>(it), staticStride = std::get<1>(it);
1692     using namespace saturated_arith;
1693     targetStrides.push_back(Wrapper(sourceStride) * staticStride);
1694   }
1695 
1696   // The type is now known.
1697   return MemRefType::get(
1698       staticSizes, sourceMemRefType.getElementType(),
1699       makeStridedLinearLayoutMap(targetStrides, targetOffset,
1700                                  sourceMemRefType.getContext()),
1701       sourceMemRefType.getMemorySpace());
1702 }
1703 
1704 Type SubViewOp::inferResultType(MemRefType sourceMemRefType,
1705                                 ArrayRef<OpFoldResult> leadingStaticOffsets,
1706                                 ArrayRef<OpFoldResult> leadingStaticSizes,
1707                                 ArrayRef<OpFoldResult> leadingStaticStrides) {
1708   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1709   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1710   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
1711                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
1712   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
1713                              ShapedType::kDynamicSize);
1714   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
1715                              staticStrides, ShapedType::kDynamicStrideOrOffset);
1716   return SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1717                                     staticSizes, staticStrides)
1718       .cast<MemRefType>();
1719 }
1720 
1721 Type SubViewOp::inferRankReducedResultType(
1722     unsigned resultRank, MemRefType sourceRankedTensorType,
1723     ArrayRef<int64_t> leadingStaticOffsets,
1724     ArrayRef<int64_t> leadingStaticSizes,
1725     ArrayRef<int64_t> leadingStaticStrides) {
1726   auto inferredType =
1727       inferResultType(sourceRankedTensorType, leadingStaticOffsets,
1728                       leadingStaticSizes, leadingStaticStrides)
1729           .cast<MemRefType>();
  assert(inferredType.getRank() >= resultRank &&
         "expected the inferred rank to be no smaller than the result rank");
1731   int rankDiff = inferredType.getRank() - resultRank;
1732   if (rankDiff > 0) {
1733     auto shape = inferredType.getShape();
1734     llvm::SmallDenseSet<unsigned> dimsToProject;
1735     mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
1736     SmallVector<int64_t> projectedShape;
1737     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
1738       if (!dimsToProject.contains(pos))
1739         projectedShape.push_back(shape[pos]);
1740 
1741     AffineMap map;
1742     auto maps = inferredType.getAffineMaps();
1743     if (!maps.empty() && maps.front())
1744       map = getProjectedMap(maps.front(), dimsToProject);
1745     inferredType =
1746         MemRefType::get(projectedShape, inferredType.getElementType(), map,
1747                         inferredType.getMemorySpace());
1748   }
1749   return inferredType;
1750 }
1751 
1752 Type SubViewOp::inferRankReducedResultType(
1753     unsigned resultRank, MemRefType sourceRankedTensorType,
1754     ArrayRef<OpFoldResult> leadingStaticOffsets,
1755     ArrayRef<OpFoldResult> leadingStaticSizes,
1756     ArrayRef<OpFoldResult> leadingStaticStrides) {
1757   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1758   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1759   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
1760                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
1761   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
1762                              ShapedType::kDynamicSize);
1763   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
1764                              staticStrides, ShapedType::kDynamicStrideOrOffset);
1765   return SubViewOp::inferRankReducedResultType(
1766       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
1767       staticStrides);
1768 }

// Build a SubViewOp with mixed static and dynamic entries and custom result
1770 // type. If the type passed is nullptr, it is inferred.
1771 void SubViewOp::build(OpBuilder &b, OperationState &result,
1772                       MemRefType resultType, Value source,
1773                       ArrayRef<OpFoldResult> offsets,
1774                       ArrayRef<OpFoldResult> sizes,
1775                       ArrayRef<OpFoldResult> strides,
1776                       ArrayRef<NamedAttribute> attrs) {
1777   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1778   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1779   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1780                              ShapedType::kDynamicStrideOrOffset);
1781   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1782                              ShapedType::kDynamicSize);
1783   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1784                              ShapedType::kDynamicStrideOrOffset);
1785   auto sourceMemRefType = source.getType().cast<MemRefType>();
1786   // Structuring implementation this way avoids duplication between builders.
1787   if (!resultType) {
1788     resultType = SubViewOp::inferResultType(sourceMemRefType, staticOffsets,
1789                                             staticSizes, staticStrides)
1790                      .cast<MemRefType>();
1791   }
1792   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1793         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1794         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1795   result.addAttributes(attrs);
1796 }
1797 
1798 // Build a SubViewOp with mixed static and dynamic entries and inferred result
1799 // type.
1800 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1801                       ArrayRef<OpFoldResult> offsets,
1802                       ArrayRef<OpFoldResult> sizes,
1803                       ArrayRef<OpFoldResult> strides,
1804                       ArrayRef<NamedAttribute> attrs) {
1805   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1806 }
1807 
1808 // Build a SubViewOp with static entries and inferred result type.
1809 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1810                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1811                       ArrayRef<int64_t> strides,
1812                       ArrayRef<NamedAttribute> attrs) {
1813   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1814       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1815         return b.getI64IntegerAttr(v);
1816       }));
1817   SmallVector<OpFoldResult> sizeValues =
1818       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1819         return b.getI64IntegerAttr(v);
1820       }));
1821   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1822       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1823         return b.getI64IntegerAttr(v);
1824       }));
1825   build(b, result, source, offsetValues, sizeValues, strideValues, attrs);
1826 }
1827 
// Build a SubViewOp with static entries and custom result type. If the
// type passed is nullptr, it is inferred.
1830 void SubViewOp::build(OpBuilder &b, OperationState &result,
1831                       MemRefType resultType, Value source,
1832                       ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1833                       ArrayRef<int64_t> strides,
1834                       ArrayRef<NamedAttribute> attrs) {
1835   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1836       llvm::map_range(offsets, [&](int64_t v) -> OpFoldResult {
1837         return b.getI64IntegerAttr(v);
1838       }));
1839   SmallVector<OpFoldResult> sizeValues =
1840       llvm::to_vector<4>(llvm::map_range(sizes, [&](int64_t v) -> OpFoldResult {
1841         return b.getI64IntegerAttr(v);
1842       }));
1843   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1844       llvm::map_range(strides, [&](int64_t v) -> OpFoldResult {
1845         return b.getI64IntegerAttr(v);
1846       }));
1847   build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
1848         attrs);
1849 }
1850 
1851 // Build a SubViewOp with dynamic entries and custom result type. If the type
1852 // passed is nullptr, it is inferred.
1853 void SubViewOp::build(OpBuilder &b, OperationState &result,
1854                       MemRefType resultType, Value source, ValueRange offsets,
1855                       ValueRange sizes, ValueRange strides,
1856                       ArrayRef<NamedAttribute> attrs) {
1857   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1858       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1859   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1860       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1861   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1862       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, resultType, source, offsetValues, sizeValues, strideValues,
        attrs);
1864 }
1865 
1866 // Build a SubViewOp with dynamic entries and inferred result type.
1867 void SubViewOp::build(OpBuilder &b, OperationState &result, Value source,
1868                       ValueRange offsets, ValueRange sizes, ValueRange strides,
1869                       ArrayRef<NamedAttribute> attrs) {
1870   build(b, result, MemRefType(), source, offsets, sizes, strides, attrs);
1871 }
1872 
1873 /// For ViewLikeOpInterface.
1874 Value SubViewOp::getViewSource() { return source(); }
1875 
1876 enum SubViewVerificationResult {
1877   Success,
1878   RankTooLarge,
1879   SizeMismatch,
1880   ElemTypeMismatch,
1881   MemSpaceMismatch,
1882   AffineMapMismatch
1883 };
1884 
/// Checks if `originalType` can be rank-reduced to `candidateReducedType`.
/// This function is a slight variant of the `is subsequence` algorithm, where
/// any dimension that does not match must be 1.
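///
/// For example (illustrative): memref<1x3x1x4xf32> can be rank-reduced to
/// memref<3x4xf32> because both dropped dimensions have size 1, but it cannot
/// be rank-reduced to memref<3x3xf32>.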
1888 static SubViewVerificationResult
1889 isRankReducedType(Type originalType, Type candidateReducedType,
1890                   std::string *errMsg = nullptr) {
1891   if (originalType == candidateReducedType)
1892     return SubViewVerificationResult::Success;
1893   if (!originalType.isa<MemRefType>())
1894     return SubViewVerificationResult::Success;
1895   if (originalType.isa<MemRefType>() && !candidateReducedType.isa<MemRefType>())
1896     return SubViewVerificationResult::Success;
1897 
1898   ShapedType originalShapedType = originalType.cast<ShapedType>();
1899   ShapedType candidateReducedShapedType =
1900       candidateReducedType.cast<ShapedType>();
1901 
1902   // Rank and size logic is valid for all ShapedTypes.
1903   ArrayRef<int64_t> originalShape = originalShapedType.getShape();
1904   ArrayRef<int64_t> candidateReducedShape =
1905       candidateReducedShapedType.getShape();
1906   unsigned originalRank = originalShape.size(),
1907            candidateReducedRank = candidateReducedShape.size();
1908   if (candidateReducedRank > originalRank)
1909     return SubViewVerificationResult::RankTooLarge;
1910 
1911   auto optionalUnusedDimsMask =
1912       computeRankReductionMask(originalShape, candidateReducedShape);
1913 
  // Sizes cannot be matched if no rank-reduction mask could be computed.
1915   if (!optionalUnusedDimsMask.hasValue())
1916     return SubViewVerificationResult::SizeMismatch;
1917 
1918   if (originalShapedType.getElementType() !=
1919       candidateReducedShapedType.getElementType())
1920     return SubViewVerificationResult::ElemTypeMismatch;
1921 
1922   // Strided layout logic is relevant for MemRefType only.
1923   MemRefType original = originalType.cast<MemRefType>();
1924   MemRefType candidateReduced = candidateReducedType.cast<MemRefType>();
1925   if (original.getMemorySpace() != candidateReduced.getMemorySpace())
1926     return SubViewVerificationResult::MemSpaceMismatch;
1927 
1928   llvm::SmallDenseSet<unsigned> unusedDims = optionalUnusedDimsMask.getValue();
1929   auto inferredType =
1930       getProjectedMap(getStridedLinearLayoutMap(original), unusedDims);
1931   AffineMap candidateLayout;
1932   if (candidateReduced.getAffineMaps().empty())
1933     candidateLayout = getStridedLinearLayoutMap(candidateReduced);
1934   else
1935     candidateLayout = candidateReduced.getAffineMaps().front();
1936   assert(inferredType.getNumResults() == 1 &&
1937          candidateLayout.getNumResults() == 1);
1938   if (inferredType.getNumSymbols() != candidateLayout.getNumSymbols() ||
1939       inferredType.getNumDims() != candidateLayout.getNumDims()) {
1940     if (errMsg) {
1941       llvm::raw_string_ostream os(*errMsg);
1942       os << "inferred type: " << inferredType;
1943     }
1944     return SubViewVerificationResult::AffineMapMismatch;
1945   }
1946   // Check that the difference of the affine maps simplifies to 0.
1947   AffineExpr diffExpr =
1948       inferredType.getResult(0) - candidateLayout.getResult(0);
1949   diffExpr = simplifyAffineExpr(diffExpr, inferredType.getNumDims(),
1950                                 inferredType.getNumSymbols());
1951   auto cst = diffExpr.dyn_cast<AffineConstantExpr>();
1952   if (!(cst && cst.getValue() == 0)) {
1953     if (errMsg) {
1954       llvm::raw_string_ostream os(*errMsg);
1955       os << "inferred type: " << inferredType;
1956     }
1957     return SubViewVerificationResult::AffineMapMismatch;
1958   }
1959   return SubViewVerificationResult::Success;
1960 }
1961 
1962 template <typename OpTy>
1963 static LogicalResult produceSubViewErrorMsg(SubViewVerificationResult result,
1964                                             OpTy op, Type expectedType,
1965                                             StringRef errMsg = "") {
1966   auto memrefType = expectedType.cast<ShapedType>();
1967   switch (result) {
1968   case SubViewVerificationResult::Success:
1969     return success();
1970   case SubViewVerificationResult::RankTooLarge:
1971     return op.emitError("expected result rank to be smaller or equal to ")
1972            << "the source rank. " << errMsg;
1973   case SubViewVerificationResult::SizeMismatch:
1974     return op.emitError("expected result type to be ")
1975            << expectedType
1976            << " or a rank-reduced version. (mismatch of result sizes) "
1977            << errMsg;
1978   case SubViewVerificationResult::ElemTypeMismatch:
1979     return op.emitError("expected result element type to be ")
1980            << memrefType.getElementType() << errMsg;
1981   case SubViewVerificationResult::MemSpaceMismatch:
1982     return op.emitError("expected result and source memory spaces to match.")
1983            << errMsg;
1984   case SubViewVerificationResult::AffineMapMismatch:
1985     return op.emitError("expected result type to be ")
1986            << expectedType
1987            << " or a rank-reduced version. (mismatch of result affine map) "
1988            << errMsg;
1989   }
1990   llvm_unreachable("unexpected subview verification result");
1991 }
1992 
1993 /// Verifier for SubViewOp.
1994 static LogicalResult verify(SubViewOp op) {
1995   MemRefType baseType = op.getSourceType();
1996   MemRefType subViewType = op.getType();
1997 
1998   // The base memref and the view memref should be in the same memory space.
1999   if (baseType.getMemorySpace() != subViewType.getMemorySpace())
2000     return op.emitError("different memory spaces specified for base memref "
2001                         "type ")
2002            << baseType << " and subview memref type " << subViewType;
2003 
2004   // Verify that the base memref type has a strided layout map.
2005   if (!isStrided(baseType))
2006     return op.emitError("base type ") << baseType << " is not strided";
2007 
2008   // Verify result type against inferred type.
2009   auto expectedType = SubViewOp::inferResultType(
2010       baseType, extractFromI64ArrayAttr(op.static_offsets()),
2011       extractFromI64ArrayAttr(op.static_sizes()),
2012       extractFromI64ArrayAttr(op.static_strides()));
2013 
2014   std::string errMsg;
2015   auto result = isRankReducedType(expectedType, subViewType, &errMsg);
2016   return produceSubViewErrorMsg(result, op, expectedType, errMsg);
2017 }
2018 
2019 raw_ostream &mlir::operator<<(raw_ostream &os, Range &range) {
2020   return os << "range " << range.offset << ":" << range.size << ":"
2021             << range.stride;
2022 }
2023 
2024 /// Return the list of Range (i.e. offset, size, stride). Each Range
2025 /// entry contains either the dynamic value or a ConstantIndexOp constructed
2026 /// with `b` at location `loc`.
2027 SmallVector<Range, 8> mlir::getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
2028                                               OpBuilder &b, Location loc) {
2029   std::array<unsigned, 3> ranks = op.getArrayAttrMaxRanks();
2030   assert(ranks[0] == ranks[1] && "expected offset and sizes of equal ranks");
2031   assert(ranks[1] == ranks[2] && "expected sizes and strides of equal ranks");
2032   SmallVector<Range, 8> res;
2033   unsigned rank = ranks[0];
2034   res.reserve(rank);
2035   for (unsigned idx = 0; idx < rank; ++idx) {
2036     Value offset =
2037         op.isDynamicOffset(idx)
2038             ? op.getDynamicOffset(idx)
2039             : b.create<ConstantIndexOp>(loc, op.getStaticOffset(idx));
2040     Value size = op.isDynamicSize(idx)
2041                      ? op.getDynamicSize(idx)
2042                      : b.create<ConstantIndexOp>(loc, op.getStaticSize(idx));
2043     Value stride =
2044         op.isDynamicStride(idx)
2045             ? op.getDynamicStride(idx)
2046             : b.create<ConstantIndexOp>(loc, op.getStaticStride(idx));
2047     res.emplace_back(Range{offset, size, stride});
2048   }
2049   return res;
2050 }
2051 
/// Infer the canonical type of the result of a subview operation. Returns the
/// rank-reduced inferred type if it has rank `resultRank`, and the
/// non-rank-reduced inferred type otherwise.
2055 static MemRefType
2056 getCanonicalSubViewResultType(unsigned resultRank, MemRefType sourceType,
2057                               ArrayRef<OpFoldResult> mixedOffsets,
2058                               ArrayRef<OpFoldResult> mixedSizes,
2059                               ArrayRef<OpFoldResult> mixedStrides) {
2060   auto resultType =
2061       SubViewOp::inferRankReducedResultType(
2062           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
2063           .cast<MemRefType>();
2064   if (resultType.getRank() != resultRank) {
2065     resultType = SubViewOp::inferResultType(sourceType, mixedOffsets,
2066                                             mixedSizes, mixedStrides)
2067                      .cast<MemRefType>();
2068   }
2069   return resultType;
2070 }
2071 
2072 namespace {
2073 /// Pattern to rewrite a subview op with MemRefCast arguments.
2074 /// This essentially pushes memref.cast past its consuming subview when
2075 /// `canFoldIntoConsumerOp` is true.
2076 ///
2077 /// Example:
2078 /// ```
2079 ///   %0 = memref.cast %V : memref<16x16xf32> to memref<?x?xf32>
2080 ///   %1 = memref.subview %0[0, 0][3, 4][1, 1] :
2081 ///     memref<?x?xf32> to memref<3x4xf32, offset:?, strides:[?, 1]>
2082 /// ```
2083 /// is rewritten into:
2084 /// ```
2085 ///   %0 = memref.subview %V: memref<16x16xf32> to memref<3x4xf32, #[[map0]]>
2086 ///   %1 = memref.cast %0: memref<3x4xf32, offset:0, strides:[16, 1]> to
2087 ///     memref<3x4xf32, offset:?, strides:[?, 1]>
2088 /// ```
2089 class SubViewOpMemRefCastFolder final : public OpRewritePattern<SubViewOp> {
2090 public:
2091   using OpRewritePattern<SubViewOp>::OpRewritePattern;
2092 
2093   LogicalResult matchAndRewrite(SubViewOp subViewOp,
2094                                 PatternRewriter &rewriter) const override {
    // If any operand is a constant, bail out and let the constant-argument
    // folder pattern kick in first.
2096     if (llvm::any_of(subViewOp.getOperands(), [](Value operand) {
2097           return matchPattern(operand, matchConstantIndex());
2098         }))
2099       return failure();
2100 
2101     auto castOp = subViewOp.source().getDefiningOp<CastOp>();
2102     if (!castOp)
2103       return failure();
2104 
2105     if (!CastOp::canFoldIntoConsumerOp(castOp))
2106       return failure();
2107 
    /// Deduce the result type of the SubViewOp by applying
    /// `getCanonicalSubViewResultType` to the cast source operand type and
    /// the SubViewOp static information. This is the type the result would
    /// have if the memref.cast were folded.
2111     auto resultType = getCanonicalSubViewResultType(
2112         subViewOp.getType().getRank(),
2113         castOp.source().getType().cast<MemRefType>(),
2114         subViewOp.getMixedOffsets(), subViewOp.getMixedSizes(),
2115         subViewOp.getMixedStrides());
2116     Value newSubView = rewriter.create<SubViewOp>(
2117         subViewOp.getLoc(), resultType, castOp.source(), subViewOp.offsets(),
2118         subViewOp.sizes(), subViewOp.strides(), subViewOp.static_offsets(),
2119         subViewOp.static_sizes(), subViewOp.static_strides());
2120     rewriter.replaceOpWithNewOp<CastOp>(subViewOp, subViewOp.getType(),
2121                                         newSubView);
2122     return success();
2123   }
2124 };
2125 } // namespace
2126 
2127 /// Return the canonical type of the result of a subview.
2128 struct SubViewReturnTypeCanonicalizer {
2129   MemRefType operator()(SubViewOp op, ArrayRef<OpFoldResult> mixedOffsets,
2130                         ArrayRef<OpFoldResult> mixedSizes,
2131                         ArrayRef<OpFoldResult> mixedStrides) {
2132     return getCanonicalSubViewResultType(op.getType().getRank(),
2133                                          op.getSourceType(), mixedOffsets,
2134                                          mixedSizes, mixedStrides);
2135   }
2136 };
2137 
2138 /// A canonicalizer wrapper to replace SubViewOps.
2139 struct SubViewCanonicalizer {
2140   void operator()(PatternRewriter &rewriter, SubViewOp op, SubViewOp newOp) {
2141     rewriter.replaceOpWithNewOp<CastOp>(op, newOp, op.getType());
2142   }
2143 };
2144 
2145 void SubViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2146                                             MLIRContext *context) {
2147   results
2148       .add<OpWithOffsetSizesAndStridesConstantArgumentFolder<
2149                SubViewOp, SubViewReturnTypeCanonicalizer, SubViewCanonicalizer>,
2150            SubViewOpMemRefCastFolder>(context);
2151 }
2152 
2153 OpFoldResult SubViewOp::fold(ArrayRef<Attribute> operands) {
2154   auto resultShapedType = getResult().getType().cast<ShapedType>();
2155   auto sourceShapedType = source().getType().cast<ShapedType>();
2156 
2157   if (resultShapedType.hasStaticShape() &&
2158       resultShapedType == sourceShapedType) {
2159     return getViewSource();
2160   }
2161 
2162   return {};
2163 }
2164 
2165 //===----------------------------------------------------------------------===//
2166 // TensorLoadOp
2167 //===----------------------------------------------------------------------===//
2168 
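// As a small illustrative example of the fold below, in
//
//   %m = memref.buffer_cast %t : memref<4xf32>
//   %r = memref.tensor_load %m : memref<4xf32>
//
// `%r` folds to `%t`, but only when the buffer_cast immediately precedes the
// tensor_load in the same block, as a conservative stand-in for alias
// analysis.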
2169 OpFoldResult TensorLoadOp::fold(ArrayRef<Attribute>) {
2170   if (auto bufferCast = memref().getDefiningOp<BufferCastOp>())
    // Approximate alias analysis by conservatively folding only when there is
    // no interleaved operation.
2173     if (bufferCast->getBlock() == this->getOperation()->getBlock() &&
2174         bufferCast->getNextNode() == this->getOperation())
2175       return bufferCast.tensor();
2176   return {};
2177 }
2178 
2179 namespace {
2180 struct DimOfTensorLoadFolder : public OpRewritePattern<tensor::DimOp> {
2181   using OpRewritePattern<tensor::DimOp>::OpRewritePattern;
2182 
2183   LogicalResult matchAndRewrite(tensor::DimOp dimOp,
2184                                 PatternRewriter &rewriter) const override {
2185     auto tensorLoadOp = dimOp.source().getDefiningOp<TensorLoadOp>();
2186     if (!tensorLoadOp)
2187       return failure();
2188 
2189     rewriter.replaceOpWithNewOp<DimOp>(dimOp, tensorLoadOp.memref(),
2190                                        dimOp.index());
2191     return success();
2192   }
2193 };
2194 } // namespace
2195 
2196 void TensorLoadOp::getCanonicalizationPatterns(RewritePatternSet &results,
2197                                                MLIRContext *context) {
2198   results.add<DimOfTensorLoadFolder>(context);
2199 }
2200 
2201 //===----------------------------------------------------------------------===//
2202 // TransposeOp
2203 //===----------------------------------------------------------------------===//
2204 
/// Build a strided memref type by applying `permutationMap` to `memRefType`.
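///
/// For example (an illustrative sketch): transposing memref<3x4xf32>
/// (strides [4, 1]) with the permutation (d0, d1) -> (d1, d0) produces a
/// memref with shape 4x3 and a strided layout of strides [1, 4], offset 0.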
2206 static MemRefType inferTransposeResultType(MemRefType memRefType,
2207                                            AffineMap permutationMap) {
2208   auto rank = memRefType.getRank();
2209   auto originalSizes = memRefType.getShape();
2210   // Compute permuted sizes.
2211   SmallVector<int64_t, 4> sizes(rank, 0);
2212   for (auto en : llvm::enumerate(permutationMap.getResults()))
2213     sizes[en.index()] =
2214         originalSizes[en.value().cast<AffineDimExpr>().getPosition()];
2215 
2216   // Compute permuted strides.
2217   int64_t offset;
2218   SmallVector<int64_t, 4> strides;
2219   auto res = getStridesAndOffset(memRefType, strides, offset);
2220   assert(succeeded(res) && strides.size() == static_cast<unsigned>(rank));
2221   (void)res;
2222   auto map =
2223       makeStridedLinearLayoutMap(strides, offset, memRefType.getContext());
2224   map = permutationMap ? map.compose(permutationMap) : map;
2225   return MemRefType::Builder(memRefType).setShape(sizes).setAffineMaps(map);
2226 }
2227 
2228 void TransposeOp::build(OpBuilder &b, OperationState &result, Value in,
2229                         AffineMapAttr permutation,
2230                         ArrayRef<NamedAttribute> attrs) {
2231   auto permutationMap = permutation.getValue();
2232   assert(permutationMap);
2233 
2234   auto memRefType = in.getType().cast<MemRefType>();
2235   // Compute result type.
2236   MemRefType resultType = inferTransposeResultType(memRefType, permutationMap);
2237 
2238   build(b, result, resultType, in, attrs);
2239   result.addAttribute(TransposeOp::getPermutationAttrName(), permutation);
2240 }
2241 
2242 // transpose $in $permutation attr-dict : type($in) `to` type(results)
2243 static void print(OpAsmPrinter &p, TransposeOp op) {
2244   p << " " << op.in() << " " << op.permutation();
2245   p.printOptionalAttrDict(op->getAttrs(),
2246                           {TransposeOp::getPermutationAttrName()});
2247   p << " : " << op.in().getType() << " to " << op.getType();
2248 }
2249 
2250 static ParseResult parseTransposeOp(OpAsmParser &parser,
2251                                     OperationState &result) {
2252   OpAsmParser::OperandType in;
2253   AffineMap permutation;
2254   MemRefType srcType, dstType;
2255   if (parser.parseOperand(in) || parser.parseAffineMap(permutation) ||
2256       parser.parseOptionalAttrDict(result.attributes) ||
2257       parser.parseColonType(srcType) ||
2258       parser.resolveOperand(in, srcType, result.operands) ||
2259       parser.parseKeywordType("to", dstType) ||
2260       parser.addTypeToList(dstType, result.types))
2261     return failure();
2262 
2263   result.addAttribute(TransposeOp::getPermutationAttrName(),
2264                       AffineMapAttr::get(permutation));
2265   return success();
2266 }
2267 
2268 static LogicalResult verify(TransposeOp op) {
2269   if (!op.permutation().isPermutation())
2270     return op.emitOpError("expected a permutation map");
2271   if (op.permutation().getNumDims() != op.getShapedType().getRank())
2272     return op.emitOpError(
2273         "expected a permutation map of same rank as the input");
2274 
2275   auto srcType = op.in().getType().cast<MemRefType>();
2276   auto dstType = op.getType().cast<MemRefType>();
2277   auto transposedType = inferTransposeResultType(srcType, op.permutation());
2278   if (dstType != transposedType)
2279     return op.emitOpError("output type ")
2280            << dstType << " does not match transposed input type " << srcType
2281            << ", " << transposedType;
2282   return success();
2283 }
2284 
2285 OpFoldResult TransposeOp::fold(ArrayRef<Attribute>) {
2286   if (succeeded(foldMemRefCast(*this)))
2287     return getResult();
2288   return {};
2289 }
2290 
2291 //===----------------------------------------------------------------------===//
2292 // ViewOp
2293 //===----------------------------------------------------------------------===//
2294 
2295 static ParseResult parseViewOp(OpAsmParser &parser, OperationState &result) {
2296   OpAsmParser::OperandType srcInfo;
2297   SmallVector<OpAsmParser::OperandType, 1> offsetInfo;
2298   SmallVector<OpAsmParser::OperandType, 4> sizesInfo;
2299   auto indexType = parser.getBuilder().getIndexType();
2300   Type srcType, dstType;
2301   llvm::SMLoc offsetLoc;
2302   if (parser.parseOperand(srcInfo) || parser.getCurrentLocation(&offsetLoc) ||
2303       parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square))
2304     return failure();
2305 
2306   if (offsetInfo.size() != 1)
2307     return parser.emitError(offsetLoc) << "expects 1 offset operand";
2308 
2309   return failure(
2310       parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) ||
2311       parser.parseOptionalAttrDict(result.attributes) ||
2312       parser.parseColonType(srcType) ||
2313       parser.resolveOperand(srcInfo, srcType, result.operands) ||
2314       parser.resolveOperands(offsetInfo, indexType, result.operands) ||
2315       parser.resolveOperands(sizesInfo, indexType, result.operands) ||
2316       parser.parseKeywordType("to", dstType) ||
2317       parser.addTypeToList(dstType, result.types));
2318 }
2319 
2320 static void print(OpAsmPrinter &p, ViewOp op) {
2321   p << ' ' << op.getOperand(0) << '[';
2322   p.printOperand(op.byte_shift());
2323   p << "][" << op.sizes() << ']';
2324   p.printOptionalAttrDict(op->getAttrs());
2325   p << " : " << op.getOperand(0).getType() << " to " << op.getType();
2326 }
2327 
2328 static LogicalResult verify(ViewOp op) {
2329   auto baseType = op.getOperand(0).getType().cast<MemRefType>();
2330   auto viewType = op.getType();
2331 
2332   // The base memref should have identity layout map (or none).
2333   if (baseType.getAffineMaps().size() > 1 ||
2334       (baseType.getAffineMaps().size() == 1 &&
2335        !baseType.getAffineMaps()[0].isIdentity()))
2336     return op.emitError("unsupported map for base memref type ") << baseType;
2337 
2338   // The result memref should have identity layout map (or none).
2339   if (viewType.getAffineMaps().size() > 1 ||
2340       (viewType.getAffineMaps().size() == 1 &&
2341        !viewType.getAffineMaps()[0].isIdentity()))
2342     return op.emitError("unsupported map for result memref type ") << viewType;
2343 
2344   // The base memref and the view memref should be in the same memory space.
2345   if (baseType.getMemorySpace() != viewType.getMemorySpace())
2346     return op.emitError("different memory spaces specified for base memref "
2347                         "type ")
2348            << baseType << " and view memref type " << viewType;
2349 
2350   // Verify that we have the correct number of sizes for the result type.
2351   unsigned numDynamicDims = viewType.getNumDynamicDims();
2352   if (op.sizes().size() != numDynamicDims)
2353     return op.emitError("incorrect number of size operands for type ")
2354            << viewType;
2355 
2356   return success();
2357 }
2358 
2359 Value ViewOp::getViewSource() { return source(); }
2360 
2361 namespace {
2362 
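/// Folds constant dynamic sizes of a ViewOp into its result type. As a rough,
/// illustrative example (SSA names are hypothetical):
///
/// ```
///   %c7 = constant 7 : index
///   %0 = memref.view %buf[%off][%c7, %n] : memref<?xi8> to memref<?x?xf32>
/// ```
/// may be rewritten into a view with result type memref<7x?xf32> that keeps
/// only %n as a size operand, followed by a memref.cast back to
/// memref<?x?xf32>.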
2363 struct ViewOpShapeFolder : public OpRewritePattern<ViewOp> {
2364   using OpRewritePattern<ViewOp>::OpRewritePattern;
2365 
2366   LogicalResult matchAndRewrite(ViewOp viewOp,
2367                                 PatternRewriter &rewriter) const override {
2368     // Return if none of the operands are constants.
2369     if (llvm::none_of(viewOp.getOperands(), [](Value operand) {
2370           return matchPattern(operand, matchConstantIndex());
2371         }))
2372       return failure();
2373 
2374     // Get result memref type.
2375     auto memrefType = viewOp.getType();
2376 
2377     // Get offset from old memref view type 'memRefType'.
2378     int64_t oldOffset;
2379     SmallVector<int64_t, 4> oldStrides;
2380     if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset)))
2381       return failure();
2382     assert(oldOffset == 0 && "Expected 0 offset");
2383 
2384     SmallVector<Value, 4> newOperands;
2385 
2386     // Offset cannot be folded into result type.
2387 
2388     // Fold any dynamic dim operands which are produced by a constant.
2389     SmallVector<int64_t, 4> newShapeConstants;
2390     newShapeConstants.reserve(memrefType.getRank());
2391 
2392     unsigned dynamicDimPos = 0;
2393     unsigned rank = memrefType.getRank();
2394     for (unsigned dim = 0, e = rank; dim < e; ++dim) {
2395       int64_t dimSize = memrefType.getDimSize(dim);
2396       // If this is already static dimension, keep it.
2397       if (!ShapedType::isDynamic(dimSize)) {
2398         newShapeConstants.push_back(dimSize);
2399         continue;
2400       }
2401       auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp();
2402       if (auto constantIndexOp = dyn_cast_or_null<ConstantIndexOp>(defOp)) {
2403         // Dynamic shape dimension will be folded.
2404         newShapeConstants.push_back(constantIndexOp.getValue());
2405       } else {
2406         // Dynamic shape dimension not folded; copy operand from old memref.
2407         newShapeConstants.push_back(dimSize);
2408         newOperands.push_back(viewOp.sizes()[dynamicDimPos]);
2409       }
2410       dynamicDimPos++;
2411     }
2412 
2413     // Create new memref type with constant folded dims.
2414     MemRefType newMemRefType =
2415         MemRefType::Builder(memrefType).setShape(newShapeConstants);
2416     // Nothing new, don't fold.
2417     if (newMemRefType == memrefType)
2418       return failure();
2419 
2420     // Create new ViewOp.
2421     auto newViewOp = rewriter.create<ViewOp>(viewOp.getLoc(), newMemRefType,
2422                                              viewOp.getOperand(0),
2423                                              viewOp.byte_shift(), newOperands);
2424     // Insert a cast so we have the same type as the old memref type.
2425     rewriter.replaceOpWithNewOp<CastOp>(viewOp, newViewOp, viewOp.getType());
2426     return success();
2427   }
2428 };
2429 
2430 struct ViewOpMemrefCastFolder : public OpRewritePattern<ViewOp> {
2431   using OpRewritePattern<ViewOp>::OpRewritePattern;
2432 
2433   LogicalResult matchAndRewrite(ViewOp viewOp,
2434                                 PatternRewriter &rewriter) const override {
2435     Value memrefOperand = viewOp.getOperand(0);
2436     CastOp memrefCastOp = memrefOperand.getDefiningOp<CastOp>();
2437     if (!memrefCastOp)
2438       return failure();
2439     Value allocOperand = memrefCastOp.getOperand();
2440     AllocOp allocOp = allocOperand.getDefiningOp<AllocOp>();
2441     if (!allocOp)
2442       return failure();
2443     rewriter.replaceOpWithNewOp<ViewOp>(viewOp, viewOp.getType(), allocOperand,
2444                                         viewOp.byte_shift(), viewOp.sizes());
2445     return success();
2446   }
2447 };
2448 
2449 } // end anonymous namespace
2450 
2451 void ViewOp::getCanonicalizationPatterns(RewritePatternSet &results,
2452                                          MLIRContext *context) {
2453   results.add<ViewOpShapeFolder, ViewOpMemrefCastFolder>(context);
2454 }
2455 
2456 //===----------------------------------------------------------------------===//
2457 // TableGen'd op method definitions
2458 //===----------------------------------------------------------------------===//
2459 
2460 #define GET_OP_CLASSES
2461 #include "mlir/Dialect/MemRef/IR/MemRefOps.cpp.inc"
2462