//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Matchers.h"

using namespace mlir;
using namespace mlir::bufferization;

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

FailureOr<Value>
mlir::bufferization::castOrReallocMemRefValue(OpBuilder &b, Value value,
                                              MemRefType destType) {
  auto srcType = value.getType().cast<MemRefType>();

  // Element type, rank and memory space must match.
  if (srcType.getElementType() != destType.getElementType())
    return failure();
  if (srcType.getMemorySpaceAsInt() != destType.getMemorySpaceAsInt())
    return failure();
  if (srcType.getRank() != destType.getRank())
    return failure();

  // In case the affine maps are different, we may need to use a copy if we go
  // from dynamic to static offset or stride (the canonicalization cannot know
  // at this point that it is really cast compatible).
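  // For example (an illustrative sketch): casting
  //   memref<4xf32, affine_map<(d0)[s0] -> (d0 + s0)>>   (dynamic offset)
  // to
  //   memref<4xf32>                                       (static offset 0)
  // is `areCastCompatible`, but the cast is valid only if the offset happens
  // to be 0 at runtime, so the realloc + copy path below is taken instead.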
  auto isGuaranteedCastCompatible = [](MemRefType source, MemRefType target) {
    int64_t sourceOffset, targetOffset;
    SmallVector<int64_t, 4> sourceStrides, targetStrides;
    if (failed(getStridesAndOffset(source, sourceStrides, sourceOffset)) ||
        failed(getStridesAndOffset(target, targetStrides, targetOffset)))
      return false;
    auto dynamicToStatic = [](int64_t a, int64_t b) {
      return a == MemRefType::getDynamicStrideOrOffset() &&
             b != MemRefType::getDynamicStrideOrOffset();
    };
    if (dynamicToStatic(sourceOffset, targetOffset))
      return false;
    for (auto it : zip(sourceStrides, targetStrides))
      if (dynamicToStatic(std::get<0>(it), std::get<1>(it)))
        return false;
    return true;
  };

  // Note: If the types are `areCastCompatible`, a cast is valid, but it may
  // still fail at runtime. To ensure that we only generate casts that always
  // succeed at runtime, we check a few extra conditions in
  // `isGuaranteedCastCompatible`.
  if (memref::CastOp::areCastCompatible(srcType, destType) &&
      isGuaranteedCastCompatible(srcType, destType)) {
    Value casted = b.create<memref::CastOp>(value.getLoc(), destType, value);
    return casted;
  }

  auto loc = value.getLoc();
  SmallVector<Value, 4> dynamicOperands;
  for (int i = 0; i < destType.getRank(); ++i) {
    if (destType.getShape()[i] != ShapedType::kDynamicSize)
      continue;
    auto index = b.createOrFold<arith::ConstantIndexOp>(loc, i);
    Value size = b.create<memref::DimOp>(loc, value, index);
    dynamicOperands.push_back(size);
  }
  // TODO: Use alloc/memcpy callback from BufferizationOptions if called via
  // BufferizableOpInterface impl of ToMemrefOp.
  Value copy = b.create<memref::AllocOp>(loc, destType, dynamicOperands);
  b.create<memref::CopyOp>(loc, value, copy);
  return copy;
}

/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the
/// to_memref op are different, a memref.cast is needed.
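///
/// For example (an illustrative sketch; %m is some memref value):
///
///   %t = bufferization.to_tensor %m : memref<4xf32>
///   %r = bufferization.to_memref %t : memref<?xf32>
///
/// is rewritten to:
///
///   %r = memref.cast %m : memref<4xf32> to memref<?xf32>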
LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
    RewriterBase &rewriter, ToMemrefOp toMemref, bool allowSameType) {
  auto memrefToTensor = toMemref.tensor().getDefiningOp<ToTensorOp>();
  if (!memrefToTensor)
    return failure();

  Type srcType = memrefToTensor.memref().getType();
  Type destType = toMemref.getType();

  // Directly rewrite if the type did not change.
  if (srcType == destType) {
    // Function can be configured to only handle cases where a cast is needed.
    if (!allowSameType)
      return failure();
    rewriter.replaceOp(toMemref, memrefToTensor.memref());
    return success();
  }

  auto rankedSrcType = srcType.dyn_cast<MemRefType>();
  auto rankedDestType = destType.dyn_cast<MemRefType>();
  auto unrankedSrcType = srcType.dyn_cast<UnrankedMemRefType>();

  // Ranked memref -> Ranked memref cast.
  if (rankedSrcType && rankedDestType) {
    FailureOr<Value> replacement = castOrReallocMemRefValue(
        rewriter, memrefToTensor.memref(), rankedDestType);
    if (failed(replacement))
      return failure();

    rewriter.replaceOp(toMemref, *replacement);
    return success();
  }

  // Unranked memref -> Ranked memref cast: May require a copy.
  // TODO: Not implemented at the moment.
  if (unrankedSrcType && rankedDestType)
    return failure();

  // Unranked memref -> unranked memref cast.
  // Ranked memref -> unranked memref cast: No copy needed.
  assert(memref::CastOp::areCastCompatible(srcType, destType) &&
         "expected that types are cast compatible");
  rewriter.replaceOpWithNewOp<memref::CastOp>(toMemref, destType,
                                              memrefToTensor.memref());
  return success();
}

//===----------------------------------------------------------------------===//
// AllocTensorOp
//===----------------------------------------------------------------------===//

LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
                                       BufferizationState &state) {
  // Nothing to do for dead AllocTensorOps.
  if (getOperation()->getUses().empty())
    return success();

  Optional<bool> dealloc = llvm::None;
  if (escape().hasValue())
    dealloc = !*escape();
  FailureOr<Value> alloc =
      state.createAlloc(rewriter, getLoc(), getResult(), dealloc);
  if (failed(alloc))
    return failure();
  if (copy()) {
    FailureOr<Value> copyValueBuffer = state.getBuffer(
        rewriter, getOperation()->getOpOperand(getNumOperands() - 1));
    if (failed(copyValueBuffer))
      return failure();
    if (failed(state.getOptions().createMemCpy(rewriter, getLoc(),
                                               *copyValueBuffer, *alloc)))
      return failure();
  }
  replaceOpWithBufferizedValues(rewriter, getOperation(), *alloc);
  return success();
}

bool AllocTensorOp::isMemoryWrite(OpResult opResult,
                                  const AnalysisState &state) {
  // AllocTensorOps do not write unless they have a `copy` value.
  return static_cast<bool>(copy());
}

bool AllocTensorOp::bufferizesToMemoryRead(OpOperand &opOperand,
                                           const AnalysisState &state) {
  assert(opOperand.getOperandNumber() == getNumOperands() - 1 &&
         "expected copy operand");
  return true;
}

bool AllocTensorOp::bufferizesToMemoryWrite(OpOperand &opOperand,
                                            const AnalysisState &state) {
  assert(opOperand.getOperandNumber() == getNumOperands() - 1 &&
         "expected copy operand");
  return false;
}

SmallVector<OpResult>
AllocTensorOp::getAliasingOpResult(OpOperand &opOperand,
                                   const AnalysisState &state) {
  // This is a new allocation. It does not alias with any other buffer.
  return {};
}

LogicalResult AllocTensorOp::verify() {
  if (copy() && !dynamicSizes().empty())
    return emitError("dynamic sizes not needed when copying a tensor");
  if (!copy() && getType().getNumDynamicDims() !=
                     static_cast<int64_t>(dynamicSizes().size()))
    return emitError("expected ")
           << getType().getNumDynamicDims() << " dynamic sizes";
  if (copy() && copy().getType() != getType())
    return emitError("expected that `copy` and return type match");
  return success();
}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes) {
  build(builder, result, type, dynamicSizes, /*copy=*/Value(),
        /*escape=*/BoolAttr());
}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes,
                          Value copy) {
  build(builder, result, type, dynamicSizes, copy, /*escape=*/BoolAttr());
}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes,
                          Value copy, bool escape) {
  build(builder, result, type, dynamicSizes, copy, builder.getBoolAttr(escape));
}

namespace {
/// Change the type of the result of a `bufferization.alloc_tensor` by making
/// the result type statically sized along dimensions that were dynamic in the
/// original operation but whose size was defined by a `constant` op. For
/// example:
///
///  %c5 = arith.constant 5: index
///  %0 = bufferization.alloc_tensor(%arg0, %c5) : tensor<?x?xf32>
///
///  to
///
///  %0 = bufferization.alloc_tensor(%arg0) : tensor<?x5xf32>
struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
  using OpRewritePattern<AllocTensorOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocTensorOp op,
                                PatternRewriter &rewriter) const override {
    if (op.copy())
      return failure();
    SmallVector<int64_t> newShape = llvm::to_vector(op.getType().getShape());
    SmallVector<Value> newDynamicSizes;
    unsigned int dynValCounter = 0;
    for (int64_t i = 0; i < op.getType().getRank(); ++i) {
      if (!op.isDynamicDim(i))
        continue;
      Value value = op.dynamicSizes()[dynValCounter++];
      APInt intVal;
      if (matchPattern(value, m_ConstantInt(&intVal))) {
        newShape[i] = intVal.getSExtValue();
      } else {
        newDynamicSizes.push_back(value);
      }
    }
    RankedTensorType newType = RankedTensorType::get(
        newShape, op.getType().getElementType(), op.getType().getEncoding());
    if (newType == op.getType())
      return failure();
    auto newOp = rewriter.create<AllocTensorOp>(
        op.getLoc(), newType, newDynamicSizes, /*copy=*/Value(),
        /*escape=*/op.escapeAttr());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
    return success();
  }
};

struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {
  using OpRewritePattern<tensor::DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
    auto allocTensorOp = dimOp.source().getDefiningOp<AllocTensorOp>();
    if (!allocTensorOp || !maybeConstantIndex)
      return failure();
    if (!allocTensorOp.getType().isDynamicDim(*maybeConstantIndex))
      return failure();
    rewriter.replaceOp(
        dimOp, allocTensorOp.getDynamicSize(rewriter, *maybeConstantIndex));
    return success();
  }
};
} // namespace

void AllocTensorOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *ctx) {
  results.add<FoldDimOfAllocTensorOp, ReplaceStaticShapeDims>(ctx);
}

LogicalResult AllocTensorOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  auto shapes = llvm::to_vector<4>(llvm::map_range(
      llvm::seq<int64_t>(0, getType().getRank()), [&](int64_t dim) -> Value {
        if (isDynamicDim(dim))
          return getDynamicSize(builder, dim);
        return builder.create<arith::ConstantIndexOp>(getLoc(),
                                                      getStaticSize(dim));
      }));
  reifiedReturnShapes.emplace_back(std::move(shapes));
  return success();
}

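// The parser and printer below implement a custom assembly format that looks
// roughly like the following (an illustrative sketch; %d0, %d1 and %t are
// placeholder SSA values):
//
//   %0 = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf32>
//   %1 = bufferization.alloc_tensor() copy(%t) : tensor<8xf32>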
ParseResult AllocTensorOp::parse(OpAsmParser &parser, OperationState &result) {
  SmallVector<OpAsmParser::UnresolvedOperand> dynamicSizesOperands;
  if (parser.parseLParen() || parser.parseOperandList(dynamicSizesOperands) ||
      parser.parseRParen())
    return failure();
  ParseResult copyKeyword = parser.parseOptionalKeyword("copy");
  OpAsmParser::UnresolvedOperand copyOperand;
  if (copyKeyword.succeeded())
    if (parser.parseLParen() || parser.parseOperand(copyOperand) ||
        parser.parseRParen())
      return failure();
  if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon())
    return failure();

  TensorType type;
  if (parser.parseCustomTypeWithFallback(type))
    return failure();
  result.addTypes(type);

  Type indexType = parser.getBuilder().getIndexType();
  if (parser.resolveOperands(dynamicSizesOperands, indexType, result.operands))
    return failure();
  if (copyKeyword.succeeded())
    if (parser.resolveOperand(copyOperand, type, result.operands))
      return failure();
  result.addAttribute(AllocTensorOp::getOperandSegmentSizeAttr(),
                      parser.getBuilder().getI32VectorAttr(
                          {static_cast<int32_t>(dynamicSizesOperands.size()),
                           static_cast<int32_t>(copyKeyword.succeeded())}));
  return success();
}

void AllocTensorOp::print(OpAsmPrinter &p) {
  p << "(" << dynamicSizes() << ")";
  if (copy())
    p << " copy(" << copy() << ")";
  p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
                              AllocTensorOp::getOperandSegmentSizeAttr()});
  p << " : ";
  auto type = result().getType();
  if (auto validType = type.dyn_cast<::mlir::TensorType>())
    p.printStrippedAttrOrType(validType);
  else
    p << type;
}

Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
  assert(isDynamicDim(idx) && "expected dynamic dim");
  if (copy())
    return b.create<tensor::DimOp>(getLoc(), copy(), idx);
  return getOperand(getIndexOfDynamicSize(idx));
}

//===----------------------------------------------------------------------===//
// CloneOp
//===----------------------------------------------------------------------===//

void CloneOp::getEffects(
    SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
  effects.emplace_back(MemoryEffects::Read::get(), input(),
                       SideEffects::DefaultResource::get());
  effects.emplace_back(MemoryEffects::Write::get(), output(),
                       SideEffects::DefaultResource::get());
  effects.emplace_back(MemoryEffects::Allocate::get(), output(),
                       SideEffects::DefaultResource::get());
}

OpFoldResult CloneOp::fold(ArrayRef<Attribute> operands) {
  return succeeded(memref::foldMemRefCast(*this)) ? getResult() : Value();
}

namespace {

/// Merge the clone and its source (by converting the clone to a cast) when
/// possible.
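///
/// For example (an illustrative sketch, assuming the source has no dealloc in
/// this block and nothing frees memory between the clone and its dealloc):
///
///   %1 = bufferization.clone %0 : memref<?xf32> to memref<?xf32>
///   ... uses of %1 ...
///   memref.dealloc %1 : memref<?xf32>
///
/// is rewritten to:
///
///   %1 = memref.cast %0 : memref<?xf32> to memref<?xf32>
///   ... uses of %1 ...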
struct SimplifyClones : public OpRewritePattern<CloneOp> {
  using OpRewritePattern<CloneOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CloneOp cloneOp,
                                PatternRewriter &rewriter) const override {
    if (cloneOp.use_empty()) {
      rewriter.eraseOp(cloneOp);
      return success();
    }

    Value source = cloneOp.input();

    // This only finds dealloc operations for the immediate value. It should
    // also consider aliases. That would also make the safety check below
    // redundant.
    llvm::Optional<Operation *> maybeCloneDeallocOp =
        memref::findDealloc(cloneOp.output());
    // Skip if either of them has more than one deallocate operation.
    if (!maybeCloneDeallocOp.hasValue())
      return failure();
    llvm::Optional<Operation *> maybeSourceDeallocOp =
        memref::findDealloc(source);
    if (!maybeSourceDeallocOp.hasValue())
      return failure();
    Operation *cloneDeallocOp = *maybeCloneDeallocOp;
    Operation *sourceDeallocOp = *maybeSourceDeallocOp;

    // If both are deallocated in the same block, their in-block lifetimes
    // might not fully overlap, so we cannot decide which one to drop.
    if (cloneDeallocOp && sourceDeallocOp &&
        cloneDeallocOp->getBlock() == sourceDeallocOp->getBlock())
      return failure();

    Block *currentBlock = cloneOp->getBlock();
    Operation *redundantDealloc = nullptr;
    if (cloneDeallocOp && cloneDeallocOp->getBlock() == currentBlock) {
      redundantDealloc = cloneDeallocOp;
    } else if (sourceDeallocOp && sourceDeallocOp->getBlock() == currentBlock) {
      redundantDealloc = sourceDeallocOp;
    }

    if (!redundantDealloc)
      return failure();

    // Safety check that there are no other deallocations in between cloneOp
    // and redundantDealloc, as otherwise we might deallocate an alias of
    // source before the uses of the clone. With alias information, we could
    // restrict this to only fail if the dealloc's operand is an alias of the
    // source.
    for (Operation *pos = cloneOp->getNextNode(); pos != redundantDealloc;
         pos = pos->getNextNode()) {
      auto effectInterface = dyn_cast<MemoryEffectOpInterface>(pos);
      if (!effectInterface)
        continue;
      if (effectInterface.hasEffect<MemoryEffects::Free>())
        return failure();
    }

    rewriter.replaceOpWithNewOp<memref::CastOp>(cloneOp, cloneOp.getType(),
                                                source);
    rewriter.eraseOp(redundantDealloc);
    return success();
  }
};

} // namespace

void CloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<SimplifyClones>(context);
}

//===----------------------------------------------------------------------===//
// ToTensorOp
//===----------------------------------------------------------------------===//

OpFoldResult ToTensorOp::fold(ArrayRef<Attribute>) {
  if (auto toMemref = memref().getDefiningOp<ToMemrefOp>())
    // Approximate alias analysis by conservatively folding only when there is
    // no interleaved operation (which might write to the memref).
    if (toMemref->getBlock() == this->getOperation()->getBlock() &&
        toMemref->getNextNode() == this->getOperation())
      return toMemref.tensor();
  return {};
}

namespace {

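/// Rewrite `tensor.dim` of a `to_tensor` op into `memref.dim` of the
/// underlying memref. For example (an illustrative sketch):
///
///   %t = bufferization.to_tensor %m : memref<?xf32>
///   %d = tensor.dim %t, %c0 : tensor<?xf32>
///
/// becomes:
///
///   %d = memref.dim %m, %c0 : memref<?xf32>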
struct DimOfToTensorFolder : public OpRewritePattern<tensor::DimOp> {
  using OpRewritePattern<tensor::DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    auto memrefToTensorOp = dimOp.source().getDefiningOp<ToTensorOp>();
    if (!memrefToTensorOp)
      return failure();

    rewriter.replaceOpWithNewOp<memref::DimOp>(dimOp, memrefToTensorOp.memref(),
                                               dimOp.index());
    return success();
  }
};

} // namespace

void ToTensorOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
  results.add<DimOfToTensorFolder>(context);
}

//===----------------------------------------------------------------------===//
// ToMemrefOp
//===----------------------------------------------------------------------===//

OpFoldResult ToMemrefOp::fold(ArrayRef<Attribute>) {
  if (auto memrefToTensor = tensor().getDefiningOp<ToTensorOp>())
    if (memrefToTensor.memref().getType() == getType())
      return memrefToTensor.memref();
  return {};
}

namespace {

/// Replace tensor.cast + to_memref by to_memref + memref.cast.
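///
/// For example (an illustrative sketch):
///
///   %1 = tensor.cast %0 : tensor<4xf32> to tensor<?xf32>
///   %2 = bufferization.to_memref %1 : memref<?xf32>
///
/// becomes:
///
///   %m = bufferization.to_memref %0 : memref<4xf32>
///   %2 = memref.cast %m : memref<4xf32> to memref<?xf32>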
struct ToMemrefOfCast : public OpRewritePattern<ToMemrefOp> {
  using OpRewritePattern<ToMemrefOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ToMemrefOp toMemref,
                                PatternRewriter &rewriter) const final {
    auto tensorCastOperand =
        toMemref.getOperand().getDefiningOp<tensor::CastOp>();
    if (!tensorCastOperand)
      return failure();
    auto srcTensorType =
        tensorCastOperand.getOperand().getType().dyn_cast<RankedTensorType>();
    if (!srcTensorType)
      return failure();
    auto memrefType = MemRefType::get(srcTensorType.getShape(),
                                      srcTensorType.getElementType());
    Value memref = rewriter.create<ToMemrefOp>(toMemref.getLoc(), memrefType,
                                               tensorCastOperand.getOperand());
    rewriter.replaceOpWithNewOp<memref::CastOp>(toMemref, toMemref.getType(),
                                                memref);
    return success();
  }
};

/// Canonicalize bufferization.to_tensor + bufferization.to_memref to
/// memref.cast when type mismatches prevent `ToMemrefOp::fold` from kicking
/// in.
struct TensorLoadToMemref : public OpRewritePattern<ToMemrefOp> {
  using OpRewritePattern<ToMemrefOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ToMemrefOp toMemref,
                                PatternRewriter &rewriter) const final {
    // Only handle cases where a cast is needed. The other case is handled by
    // the folder.
    return foldToMemrefToTensorPair(rewriter, toMemref,
                                    /*allowSameType=*/false);
  }
};

/// Fold a load on a to_memref operation into a tensor.extract on the
/// corresponding tensor.
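///
/// For example (an illustrative sketch):
///
///   %m = bufferization.to_memref %t : memref<?xf32>
///   %v = memref.load %m[%i] : memref<?xf32>
///
/// becomes:
///
///   %v = tensor.extract %t[%i] : tensor<?xf32>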
struct LoadOfToMemref : public OpRewritePattern<memref::LoadOp> {
  using OpRewritePattern<memref::LoadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(memref::LoadOp load,
                                PatternRewriter &rewriter) const override {
    auto toMemref = load.memref().getDefiningOp<ToMemrefOp>();
    if (!toMemref)
      return failure();

    rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toMemref.tensor(),
                                                   load.indices());
    return success();
  }
};

/// Fold dim of a to_memref into the dim of the tensor.
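///
/// For example (an illustrative sketch):
///
///   %m = bufferization.to_memref %t : memref<?xf32>
///   %d = memref.dim %m, %c0 : memref<?xf32>
///
/// becomes:
///
///   %d = tensor.dim %t, %c0 : tensor<?xf32>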
struct DimOfCastOp : public OpRewritePattern<memref::DimOp> {
  using OpRewritePattern<memref::DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(memref::DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = dimOp.source().getDefiningOp<ToMemrefOp>();
    if (!castOp)
      return failure();
    Value newSource = castOp.getOperand();
    rewriter.replaceOpWithNewOp<tensor::DimOp>(dimOp, newSource, dimOp.index());
    return success();
  }
};

} // namespace

void ToMemrefOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
  results.add<DimOfCastOp, LoadOfToMemref, ToMemrefOfCast, TensorLoadToMemref>(
      context);
}

LogicalResult ToMemrefOp::bufferize(RewriterBase &rewriter,
                                    BufferizationState &state) {
  // Fold to_memref(to_tensor(x)) to x. Insert a cast if necessary.
  (void)foldToMemrefToTensorPair(rewriter, *this);
  // Note: The return value of `bufferize` indicates whether there was an error
  // or not. (And not whether the pattern matched or not.)
  return success();
}

Optional<Operation *> CloneOp::buildDealloc(OpBuilder &builder, Value alloc) {
  return builder.create<memref::DeallocOp>(alloc.getLoc(), alloc)
      .getOperation();
}

Optional<Value> CloneOp::buildClone(OpBuilder &builder, Value alloc) {
  return builder.create<CloneOp>(alloc.getLoc(), alloc).getResult();
}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Bufferization/IR/BufferizationOps.cpp.inc"