1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/StandardOps/Utils/Utils.h"
11 #include "mlir/Dialect/Tensor/IR/Tensor.h"
12 #include "mlir/Dialect/Utils/StaticValueUtils.h"
13 #include "mlir/IR/BlockAndValueMapping.h"
14 #include "mlir/IR/Builders.h"
15 #include "mlir/IR/BuiltinAttributeInterfaces.h"
16 #include "mlir/IR/Matchers.h"
17 #include "mlir/IR/PatternMatch.h"
18 #include "mlir/IR/TypeUtilities.h"
19 #include "llvm/ADT/STLExtras.h"
20 
21 using namespace mlir;
22 using namespace mlir::tensor;
23 
24 /// Materialize a single constant operation from a given attribute value with
25 /// the desired resultant type.
26 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
27                                               Attribute value, Type type,
28                                               Location loc) {
29   if (arith::ConstantOp::isBuildableWith(value, type))
30     return builder.create<arith::ConstantOp>(loc, value, type);
31   if (ConstantOp::isBuildableWith(value, type))
32     return builder.create<ConstantOp>(loc, value, type);
33   return nullptr;
34 }
35 
36 //===----------------------------------------------------------------------===//
37 // CastOp
38 //===----------------------------------------------------------------------===//
39 
40 /// Returns true if `target` is a ranked tensor type that preserves static
41 /// information available in the `source` ranked tensor type.
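/// For example (illustrative only), a target of tensor<8x?xf32> preserves the
/// static information of a source tensor<?x?xf32> (the target refines the
/// source), whereas a target of tensor<?x?xf32> does not preserve the static
/// information of a source tensor<8x?xf32> (the static extent 8 is dropped).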
42 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
43   auto sourceType = source.dyn_cast<RankedTensorType>();
44   auto targetType = target.dyn_cast<RankedTensorType>();
45 
46   // Requires RankedTensorType.
47   if (!sourceType || !targetType)
48     return false;
49 
50   // Requires same elemental type.
51   if (sourceType.getElementType() != targetType.getElementType())
52     return false;
53 
54   // Requires same rank.
55   if (sourceType.getRank() != targetType.getRank())
56     return false;
57 
  // If a dimension is statically known in `source` but dynamic in `target`,
  // the static information is lost and therefore not preserved.
59   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
60     if (!ShapedType::isDynamic(std::get<0>(t)) &&
61         ShapedType::isDynamic(std::get<1>(t)))
62       return false;
63   }
64 
65   return true;
66 }
67 
68 /// Determines whether tensor::CastOp casts to a more dynamic version of the
69 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
70 /// implement canonicalization patterns for ops in different dialects that may
71 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted as `slice` ops and are canonicalized
/// to preserve the type compatibility of their uses.
///
/// Returns true when all conditions are met:
/// 1. source and result are ranked tensors with the same element type and
///    rank.
/// 2. the source tensor type has more static information than the result.
78 ///
79 /// Example:
80 /// ```mlir
81 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
82 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
83 /// ```
84 ///
85 /// folds into:
86 ///
87 /// ```mlir
88 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
89 /// ```
90 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
91   if (!castOp)
92     return false;
93 
  // Can fold if the source of the cast has at least as much static information
  // as its result.
96   return preservesStaticInformation(castOp.getType(),
97                                     castOp.source().getType());
98 }
99 
100 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
101 /// that can be folded.
102 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
103   bool folded = false;
104   for (OpOperand &operand : op->getOpOperands()) {
105     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
106     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
107       operand.set(castOp.getOperand());
108       folded = true;
109     }
110   }
111   return success(folded);
112 }
113 
114 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
115   if (inputs.size() != 1 || outputs.size() != 1)
116     return false;
117   Type a = inputs.front(), b = outputs.front();
118   auto aT = a.dyn_cast<TensorType>();
119   auto bT = b.dyn_cast<TensorType>();
120   if (!aT || !bT)
121     return false;
122 
123   if (aT.getElementType() != bT.getElementType())
124     return false;
125 
126   return succeeded(verifyCompatibleShape(aT, bT));
127 }
128 
129 /// Compute a TensorType that has the joined shape knowledge of the two
130 /// given TensorTypes. The element types need to match.
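/// For example (illustrative only), joining tensor<?x8xf32> with
/// tensor<4x?xf32> yields tensor<4x8xf32>, while joining tensor<4xf32> with
/// tensor<8xf32> has no defined result and returns a null type.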
131 static TensorType joinShapes(TensorType one, TensorType two) {
132   assert(one.getElementType() == two.getElementType());
133 
134   if (!one.hasRank())
135     return two;
136   if (!two.hasRank())
137     return one;
138 
139   int64_t rank = one.getRank();
140   if (rank != two.getRank())
141     return {};
142 
143   SmallVector<int64_t, 4> join;
144   join.reserve(rank);
145   for (int64_t i = 0; i < rank; ++i) {
146     if (one.isDynamicDim(i)) {
147       join.push_back(two.getDimSize(i));
148       continue;
149     }
150     if (two.isDynamicDim(i)) {
151       join.push_back(one.getDimSize(i));
152       continue;
153     }
154     if (one.getDimSize(i) != two.getDimSize(i))
155       return {};
156     join.push_back(one.getDimSize(i));
157   }
158   return RankedTensorType::get(join, one.getElementType());
159 }
160 
161 namespace {
162 
163 /// Replaces chains of two tensor.cast operations by a single tensor.cast
164 /// operation if doing so does not remove runtime constraints.
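///
/// A representative (hypothetical) rewrite:
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4x?xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<?x8xf32>
/// ```
///
/// becomes
///
/// ```mlir
///   %2 = tensor.cast %0 : tensor<4x?xf32> to tensor<?x8xf32>
/// ```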
165 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
166   using OpRewritePattern<CastOp>::OpRewritePattern;
167 
168   LogicalResult matchAndRewrite(CastOp tensorCast,
169                                 PatternRewriter &rewriter) const final {
170     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
171 
172     if (!tensorCastOperand)
173       return failure();
174 
175     auto sourceType =
176         tensorCastOperand.getOperand().getType().cast<TensorType>();
177     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
178     auto resultType = tensorCast.getType().cast<TensorType>();
179 
180     // We can remove the intermediate cast if joining all three produces the
181     // same result as just joining the source and result shapes.
182     auto firstJoin =
183         joinShapes(joinShapes(sourceType, intermediateType), resultType);
184 
185     // The join might not exist if the cast sequence would fail at runtime.
186     if (!firstJoin)
187       return failure();
188 
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
192     auto newJoin = joinShapes(sourceType, resultType);
193     if (firstJoin != newJoin)
194       return failure();
195 
196     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
197                                         tensorCastOperand.getOperand());
198     return success();
199   }
200 };
201 
202 } // namespace
203 
204 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
205                                          MLIRContext *context) {
206   results.add<ChainedTensorCast>(context);
207 }
208 
209 //===----------------------------------------------------------------------===//
210 // DimOp
211 //===----------------------------------------------------------------------===//
212 
213 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
214                   int64_t index) {
215   auto loc = result.location;
216   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
217   build(builder, result, source, indexValue);
218 }
219 
220 Optional<int64_t> DimOp::getConstantIndex() {
221   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
222     return constantOp.getValue().cast<IntegerAttr>().getInt();
223   return {};
224 }
225 
226 static LogicalResult verify(DimOp op) {
227   // Assume unknown index to be in range.
228   Optional<int64_t> index = op.getConstantIndex();
229   if (!index.hasValue())
230     return success();
231 
232   // Check that constant index is not knowingly out of range.
233   auto type = op.source().getType();
234   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
235     if (index.getValue() >= tensorType.getRank())
236       return op.emitOpError("index is out of range");
237   } else if (type.isa<UnrankedTensorType>()) {
238     // Assume index to be in range.
239   } else {
240     llvm_unreachable("expected operand with tensor type");
241   }
242   return success();
243 }
244 
245 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
246   // All forms of folding require a known index.
247   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
248   if (!index)
249     return {};
250 
251   // Folding for unranked types (UnrankedTensorType) is not supported.
252   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
253   if (!tensorType)
254     return {};
255 
256   // Fold if the shape extent along the given index is known.
257   if (!tensorType.isDynamicDim(index.getInt())) {
258     Builder builder(getContext());
259     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
260   }
261 
262   Operation *definingOp = source().getDefiningOp();
263 
264   // Fold dim to the operand of tensor.generate.
265   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
266     auto resultType =
267         fromElements.getResult().getType().cast<RankedTensorType>();
268     // The case where the type encodes the size of the dimension is handled
269     // above.
270     assert(resultType.getShape()[index.getInt()] ==
271            RankedTensorType::kDynamicSize);
272 
273     // Find the operand of the fromElements that corresponds to this index.
274     auto dynExtents = fromElements.dynamicExtents().begin();
275     for (auto dim : resultType.getShape().take_front(index.getInt()))
276       if (dim == RankedTensorType::kDynamicSize)
277         dynExtents++;
278 
279     return Value{*dynExtents};
280   }
281 
282   // The size at the given index is now known to be a dynamic size.
283   unsigned unsignedIndex = index.getValue().getZExtValue();
284 
285   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
286     // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
287     // `resolve-shaped-type-result-dims` pass.
288     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
289         sliceOp.isDynamicSize(unsignedIndex)) {
290       return {sliceOp.getDynamicSize(unsignedIndex)};
291     }
292   }
293 
294   // dim(cast) -> dim
295   if (succeeded(foldTensorCast(*this)))
296     return getResult();
297 
298   return {};
299 }
300 
301 namespace {
302 /// Fold dim of a cast into the dim of the source of the tensor cast.
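///
/// A small (illustrative) example:
///
/// ```mlir
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %c1 : tensor<?x?xf32>
/// ```
///
/// is rewritten into:
///
/// ```mlir
///   %d = tensor.dim %t, %c1 : tensor<4x?xf32>
/// ```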
303 struct DimOfCastOp : public OpRewritePattern<DimOp> {
304   using OpRewritePattern<DimOp>::OpRewritePattern;
305 
306   LogicalResult matchAndRewrite(DimOp dimOp,
307                                 PatternRewriter &rewriter) const override {
308     auto castOp = dimOp.source().getDefiningOp<CastOp>();
309     if (!castOp)
310       return failure();
311     Value newSource = castOp.getOperand();
312     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
313     return success();
314   }
315 };
} // namespace
317 
318 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
319                                         MLIRContext *context) {
320   results.add<DimOfCastOp>(context);
321 }
322 
323 //===----------------------------------------------------------------------===//
324 // ExtractOp
325 //===----------------------------------------------------------------------===//
326 
327 static LogicalResult verify(ExtractOp op) {
328   // Verify the # indices match if we have a ranked type.
329   if (auto tensorType = op.tensor().getType().dyn_cast<RankedTensorType>())
330     if (tensorType.getRank() != static_cast<int64_t>(op.indices().size()))
      return op.emitOpError("incorrect number of indices for extract");
332 
333   return success();
334 }
335 
336 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
337   // The tensor operand must be a known constant.
338   Attribute tensor = operands.front();
339   if (!tensor)
340     return {};
341   // If this is a splat elements attribute, simply return the value. All of the
342   // elements of a splat attribute are the same.
343   if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
344     return splatTensor.getSplatValue<Attribute>();
345 
346   // Otherwise, collect the constant indices into the tensor.
347   SmallVector<uint64_t, 8> indices;
  for (Attribute index : llvm::drop_begin(operands, 1)) {
    if (!index || !index.isa<IntegerAttr>())
      return {};
    indices.push_back(index.cast<IntegerAttr>().getInt());
352   }
353 
354   // If this is an elements attribute, query the value at the given indices.
355   auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
356   if (elementsAttr && elementsAttr.isValidIndex(indices))
357     return elementsAttr.getValues<Attribute>()[indices];
358   return {};
359 }
360 
361 //===----------------------------------------------------------------------===//
362 // FromElementsOp
363 //===----------------------------------------------------------------------===//
364 
365 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
366                            Type elementType, ValueRange elements) {
367   Type resultTy = RankedTensorType::get({static_cast<int64_t>(elements.size())},
368                                         elementType);
369   result.addOperands(elements);
370   result.addTypes(resultTy);
371 }
372 
373 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
374                            ValueRange elements) {
375   assert(!elements.empty() && "expected at least one element");
376   build(builder, result, elements.front().getType(), elements);
377 }
378 
379 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
380   if (!llvm::is_contained(operands, nullptr))
381     return DenseElementsAttr::get(getType(), operands);
382   return {};
383 }
384 
385 namespace {
386 
387 // Canonicalizes the pattern of the form
388 //
389 // %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
390 // %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
391 //
392 // to just %element.
393 struct ExtractElementFromTensorFromElements
394     : public OpRewritePattern<tensor::ExtractOp> {
395   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
396 
397   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
398                                 PatternRewriter &rewriter) const final {
399     if (extract.indices().size() != 1)
400       return failure();
401 
402     auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
403     if (tensorFromElements == nullptr)
404       return failure();
405 
406     APInt index;
407     if (!matchPattern(*extract.indices().begin(), m_ConstantInt(&index)))
408       return failure();
409     // Prevent out of bounds accesses. This can happen in invalid code that will
410     // never execute.
411     if (tensorFromElements->getNumOperands() <= index.getZExtValue() ||
412         index.getSExtValue() < 0)
413       return failure();
414     rewriter.replaceOp(extract,
415                        tensorFromElements.getOperand(index.getZExtValue()));
416     return success();
417   }
418 };
419 
420 } // namespace
421 
422 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
423                                                  MLIRContext *context) {
424   results.add<ExtractElementFromTensorFromElements>(context);
425 }
426 
427 //===----------------------------------------------------------------------===//
428 // InsertOp
429 //===----------------------------------------------------------------------===//
430 
431 static LogicalResult verify(InsertOp op) {
432   // Verify the # indices match if we have a ranked type.
433   if (auto destType = op.dest().getType().dyn_cast<RankedTensorType>())
434     if (destType.getRank() != static_cast<int64_t>(op.indices().size()))
435       return op.emitOpError("incorrect number of indices");
436   return success();
437 }
438 
439 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
440   Attribute scalar = operands[0];
441   Attribute dest = operands[1];
442   if (scalar && dest)
443     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
444       if (scalar == splatDest.getSplatValue<Attribute>())
445         return dest;
446   return {};
447 }
448 
449 //===----------------------------------------------------------------------===//
450 // GenerateOp
451 //===----------------------------------------------------------------------===//
452 
453 static LogicalResult verify(GenerateOp op) {
454   // Ensure that the tensor type has as many dynamic dimensions as are specified
455   // by the operands.
456   RankedTensorType resultTy = op.getType().cast<RankedTensorType>();
457   if (op.getNumOperands() != resultTy.getNumDynamicDims())
458     return op.emitError("must have as many index operands as dynamic extents "
459                         "in the result type");
460 
461   // Ensure that region arguments span the index space.
462   if (!llvm::all_of(op.body().getArgumentTypes(),
463                     [](Type ty) { return ty.isIndex(); }))
464     return op.emitError("all body arguments must be index");
465   if (op.body().getNumArguments() != resultTy.getRank())
466     return op.emitError("must have one body argument per input dimension");
467 
468   // Ensure that the region yields an element of the right type.
469   auto yieldOp =
470       llvm::cast<YieldOp>(op.body().getBlocks().front().getTerminator());
471   if (yieldOp.value().getType() != resultTy.getElementType())
472     return op.emitOpError(
473         "body must be terminated with a `yield` operation of the tensor "
474         "element type");
475 
476   return success();
477 }
478 
479 void GenerateOp::build(
480     OpBuilder &b, OperationState &result, Type resultTy,
481     ValueRange dynamicExtents,
482     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
483   build(b, result, resultTy, dynamicExtents);
484 
485   // Build and populate body.
486   OpBuilder::InsertionGuard guard(b);
487   Region *bodyRegion = result.regions.front().get();
488   auto rank = resultTy.cast<RankedTensorType>().getRank();
489   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
490   Block *bodyBlock =
491       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes);
492   bodyBuilder(b, result.location, bodyBlock->getArguments());
493 }
494 
495 namespace {
496 
/// Canonicalizes tensor.generate operations whose dynamic extents are defined
/// by constants into the equivalent operation with those extents folded into
/// the result type. A tensor.cast is also inserted so that the resulting IR
/// remains well-typed.
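///
/// A sketch of the rewrite (illustrative extents and types):
///
/// ```mlir
///   %size = arith.constant 16 : index
///   %0 = tensor.generate %size {
///     ...
///   } : tensor<?xindex>
/// ```
///
/// becomes
///
/// ```mlir
///   %new = tensor.generate {
///     ...
///   } : tensor<16xindex>
///   %0 = tensor.cast %new : tensor<16xindex> to tensor<?xindex>
/// ```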
501 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
502   using OpRewritePattern<GenerateOp>::OpRewritePattern;
503 
504   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
505                                 PatternRewriter &rewriter) const final {
506     auto resultType =
507         tensorFromElements.getResult().getType().cast<RankedTensorType>();
508 
509     if (resultType.hasStaticShape())
510       return failure();
511 
512     SmallVector<Value, 4> newOperands;
513     SmallVector<int64_t, 4> newShape;
514     auto operandsIt = tensorFromElements.dynamicExtents().begin();
515 
516     for (int64_t dim : resultType.getShape()) {
517       if (dim != RankedTensorType::kDynamicSize) {
518         newShape.push_back(dim);
519         continue;
520       }
521       APInt index;
522       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
523         newShape.push_back(RankedTensorType::kDynamicSize);
524         newOperands.push_back(*operandsIt++);
525         continue;
526       }
527       newShape.push_back(index.getSExtValue());
528       operandsIt++;
529     }
530 
531     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
532       return failure();
533 
534     auto loc = tensorFromElements.getLoc();
535     auto newOp = rewriter.create<GenerateOp>(
536         loc, RankedTensorType::get(newShape, resultType.getElementType()),
537         newOperands);
538     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
539                                 newOp.body().begin());
540     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
541                                                 newOp);
542     return success();
543   }
544 };
545 
546 /// Canonicalizes the pattern of the form
547 ///
548 /// %tensor = tensor.generate %x {
549 ///   ^bb0(%arg0: index):  // no predecessors
550 ///   <computation>
551 ///   yield %1 : index
552 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
554 ///
555 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
556 /// tensor.generate operation has no side-effects.
557 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
558   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
559 
560   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
561                                 PatternRewriter &rewriter) const final {
562     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
563     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
564       return failure();
565 
566     BlockAndValueMapping mapping;
567     Block *body = tensorFromElements.getBody();
568     mapping.map(body->getArguments(), extract.indices());
569     for (auto &op : body->without_terminator())
570       rewriter.clone(op, mapping);
571 
572     auto yield = cast<YieldOp>(body->getTerminator());
573 
574     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
575     return success();
576   }
577 };
578 
579 /// Canonicalizes the pattern of the form
580 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
582 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
583 ///
584 /// to
585 ///
586 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
587 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
588   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
589 
590   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
591                                 PatternRewriter &rewriter) const final {
592     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
593     if (!tensorCast)
594       return failure();
595 
596     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
597                                                    extract.indices());
598     return success();
599   }
600 };
601 
602 } // namespace
603 
604 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
605                                              MLIRContext *context) {
606   // TODO: Move extract patterns to tensor::ExtractOp.
607   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
608               StaticTensorGenerate>(context);
609 }
610 
611 //===----------------------------------------------------------------------===//
612 // ReshapeOp
613 //===----------------------------------------------------------------------===//
614 
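/// Returns the number of elements in `type`; all dimensions are assumed to be
/// static.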
static int64_t getNumElements(ShapedType type) {
616   int64_t numElements = 1;
617   for (auto dim : type.getShape())
618     numElements *= dim;
619   return numElements;
620 }
621 
622 static LogicalResult verify(ReshapeOp op) {
623   TensorType operandType = op.source().getType().cast<TensorType>();
624   TensorType resultType = op.result().getType().cast<TensorType>();
625 
626   if (operandType.getElementType() != resultType.getElementType())
627     return op.emitOpError("element types of source and destination tensor "
628                           "types should be the same");
629 
630   int64_t shapeSize =
631       op.shape().getType().cast<RankedTensorType>().getDimSize(0);
632   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
633   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
634 
635   if (resultRankedType) {
636     if (operandRankedType && resultRankedType.hasStaticShape() &&
637         operandRankedType.hasStaticShape()) {
      if (getNumElements(operandRankedType) !=
          getNumElements(resultRankedType))
639         return op.emitOpError("source and destination tensor should have the "
640                               "same number of elements");
641     }
642     if (shapeSize == TensorType::kDynamicSize)
643       return op.emitOpError("cannot use shape operand with dynamic length to "
644                             "reshape to statically-ranked tensor type");
645     if (shapeSize != resultRankedType.getRank())
646       return op.emitOpError(
647           "length of shape operand differs from the result's tensor rank");
648   }
649   return success();
650 }
651 
652 //===----------------------------------------------------------------------===//
653 // ExtractSliceOp
654 //===----------------------------------------------------------------------===//
655 
656 /// An extract_slice op result type can be fully inferred from the source type
657 /// and the static representation of offsets, sizes and strides. Special
658 /// sentinels encode the dynamic case.
659 RankedTensorType
660 ExtractSliceOp::inferResultType(RankedTensorType sourceRankedTensorType,
661                                 ArrayRef<int64_t> leadingStaticOffsets,
662                                 ArrayRef<int64_t> leadingStaticSizes,
663                                 ArrayRef<int64_t> leadingStaticStrides) {
  // An extract_slice op may specify only a leading subset of offset/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type and strides=1.
667   unsigned rank = sourceRankedTensorType.getRank();
668   assert(leadingStaticSizes.size() <= rank &&
669          "unexpected leadingStaticSizes overflow");
670   auto staticSizes = llvm::to_vector<4>(leadingStaticSizes);
671   unsigned numTrailingSizes = rank - staticSizes.size();
672   llvm::append_range(staticSizes, sourceRankedTensorType.getShape().take_back(
673                                       numTrailingSizes));
674   return RankedTensorType::get(staticSizes,
675                                sourceRankedTensorType.getElementType());
676 }
677 
678 RankedTensorType
679 ExtractSliceOp::inferResultType(RankedTensorType sourceRankedTensorType,
680                                 ArrayRef<OpFoldResult> leadingStaticOffsets,
681                                 ArrayRef<OpFoldResult> leadingStaticSizes,
682                                 ArrayRef<OpFoldResult> leadingStaticStrides) {
683   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
684   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
685   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
686                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
687   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
688                              ShapedType::kDynamicSize);
689   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
690                              staticStrides, ShapedType::kDynamicStrideOrOffset);
691   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
692                                          staticSizes, staticStrides);
693 }
694 
695 /// An extract_slice op result type can be fully inferred from the source type
696 /// and the static representation of offsets, sizes and strides. Special
697 /// sentinels encode the dynamic case.
698 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
699     unsigned resultRank, RankedTensorType sourceRankedTensorType,
700     ArrayRef<int64_t> leadingStaticOffsets,
701     ArrayRef<int64_t> leadingStaticSizes,
702     ArrayRef<int64_t> leadingStaticStrides) {
703   auto inferredType =
704       inferResultType(sourceRankedTensorType, leadingStaticOffsets,
705                       leadingStaticSizes, leadingStaticStrides)
706           .cast<RankedTensorType>();
707   int rankDiff = inferredType.getRank() - resultRank;
708   if (rankDiff > 0) {
709     auto shape = inferredType.getShape();
710     llvm::SmallDenseSet<unsigned> dimsToProject;
711     mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
712     SmallVector<int64_t> projectedShape;
713     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
714       if (!dimsToProject.contains(pos))
715         projectedShape.push_back(shape[pos]);
716     inferredType =
717         RankedTensorType::get(projectedShape, inferredType.getElementType());
718   }
719   return inferredType;
720 }
721 
722 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
723     unsigned resultRank, RankedTensorType sourceRankedTensorType,
724     ArrayRef<OpFoldResult> leadingStaticOffsets,
725     ArrayRef<OpFoldResult> leadingStaticSizes,
726     ArrayRef<OpFoldResult> leadingStaticStrides) {
727   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
728   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
729   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
730                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
731   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
732                              ShapedType::kDynamicSize);
733   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
734                              staticStrides, ShapedType::kDynamicStrideOrOffset);
735   return ExtractSliceOp::inferRankReducedResultType(
736       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
737       staticStrides);
738 }
739 
740 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
741 /// result type. If the type passed is nullptr, it is inferred.
742 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
743                            RankedTensorType resultType, Value source,
744                            ArrayRef<OpFoldResult> offsets,
745                            ArrayRef<OpFoldResult> sizes,
746                            ArrayRef<OpFoldResult> strides,
747                            ArrayRef<NamedAttribute> attrs) {
748   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
749   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
758   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
759   // Structuring implementation this way avoids duplication between builders.
760   if (!resultType) {
761     resultType =
762         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
763                                         staticSizes, staticStrides)
764             .cast<RankedTensorType>();
765   }
766   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
767         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
768         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
769   result.addAttributes(attrs);
770 }
771 
772 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
773 /// result type.
774 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
775                            ArrayRef<OpFoldResult> offsets,
776                            ArrayRef<OpFoldResult> sizes,
777                            ArrayRef<OpFoldResult> strides,
778                            ArrayRef<NamedAttribute> attrs) {
779   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
780 }
781 
782 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
783 /// type passed is nullptr, it is inferred.
784 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
785                            RankedTensorType resultType, Value source,
786                            ValueRange offsets, ValueRange sizes,
787                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
788   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
789       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
790   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
791       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
792   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
793       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
794   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
795 }
796 
797 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
798 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
799                            ValueRange offsets, ValueRange sizes,
800                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
801   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
802 }
803 
804 template <typename OpTy>
805 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
806                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
808   switch (result) {
809   case SliceVerificationResult::Success:
810     return success();
811   case SliceVerificationResult::RankTooLarge:
812     return op.emitError("expected rank to be smaller or equal to ")
813            << "the other rank. ";
814   case SliceVerificationResult::SizeMismatch:
815     return op.emitError("expected type to be ")
816            << expectedType << " or a rank-reduced version. (size mismatch) ";
817   case SliceVerificationResult::ElemTypeMismatch:
818     return op.emitError("expected element type to be ")
           << shapedType.getElementType();
820   default:
821     llvm_unreachable("unexpected extract_slice op verification result");
822   }
823 }
824 
825 /// Verifier for ExtractSliceOp.
826 static LogicalResult verify(ExtractSliceOp op) {
827   // Verify result type against inferred type.
828   auto expectedType =
829       ExtractSliceOp::inferResultType(op.getSourceType(), op.getMixedOffsets(),
830                                       op.getMixedSizes(), op.getMixedStrides());
831   auto result =
832       isRankReducedType(expectedType.cast<ShapedType>(), op.getType());
833   return produceSliceErrorMsg(result, op, expectedType);
834 }
835 
836 /// Infer the canonical type of the result of an extract_slice op. Returns a
837 /// type with rank `resultRank` that is either the rank of the rank-reduced
838 /// type, or the non-rank-reduced type.
839 static RankedTensorType
840 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
841                             ArrayRef<OpFoldResult> mixedOffsets,
842                             ArrayRef<OpFoldResult> mixedSizes,
843                             ArrayRef<OpFoldResult> mixedStrides) {
844   auto resultType =
845       ExtractSliceOp::inferRankReducedResultType(
846           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
847           .cast<RankedTensorType>();
848   if (resultType.getRank() != resultRank) {
849     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
850                                                  mixedSizes, mixedStrides)
851                      .cast<RankedTensorType>();
852   }
853   return resultType;
854 }
855 
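/// Returns the positions (relative to the mixed sizes, i.e. the source rank)
/// of the unit-size slice dimensions that are dropped from the result type of
/// this rank-reducing extract_slice.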
856 llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
857   llvm::SmallDenseSet<unsigned> droppedDims;
858   ArrayRef<int64_t> resultShape = getType().getShape();
859   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
860   unsigned shapePos = 0;
861   for (auto size : enumerate(mixedSizes)) {
862     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if its slice size is not statically 1, or if
    // it is 1 but the matching result dimension is also statically 1 (i.e. the
    // unit dimension was kept rather than dropped).
866     if (!sizeVal || sizeVal.getValue() != 1 ||
867         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
868       shapePos++;
869       continue;
870     }
871     droppedDims.insert(size.index());
872   }
873   return droppedDims;
874 }
875 
876 LogicalResult ExtractSliceOp::reifyResultShapes(
877     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
878   reifiedReturnShapes.resize(1);
879   reifiedReturnShapes[0].reserve(getType().getRank());
880   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
881   llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
882   Location loc = getLoc();
883   for (auto size : enumerate(mixedSizes)) {
884     if (droppedDims.count(size.index()))
885       continue;
886     if (auto attr = size.value().dyn_cast<Attribute>()) {
887       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
888           loc, attr.cast<IntegerAttr>().getInt()));
889       continue;
890     }
891     reifiedReturnShapes[0].push_back(size.value().get<Value>());
892   }
893   return success();
894 }
895 
896 namespace {
897 /// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
899 /// `canFoldIntoConsumerOp` is true.
900 ///
901 /// Example:
902 /// ```
903 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
///        tensor<3x4xf32>
/// ```
/// is rewritten into:
/// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///        tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
911 /// ```
912 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
913 public:
914   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
915 
916   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
917                                 PatternRewriter &rewriter) const override {
    // Any constant operand, just return to let the constant-argument folder
    // pattern kick in.
919     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
920           return matchPattern(operand, matchConstantIndex());
921         }))
922       return failure();
923 
924     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
925     if (!castOp)
926       return failure();
927 
928     if (!canFoldIntoConsumerOp(castOp))
929       return failure();
930 
931     /// Deduce the type of the result to use for the canonicalized operation.
932     RankedTensorType resultType = getCanonicalSliceResultType(
933         sliceOp.getType().getRank(), sliceOp.getSourceType(),
934         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
935         sliceOp.getMixedStrides());
936     Value newSlice = rewriter.create<ExtractSliceOp>(
937         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
938         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
939         sliceOp.static_sizes(), sliceOp.static_strides());
940     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
941                                                 newSlice);
942     return success();
943   }
944 };
945 } // namespace
946 
947 /// Return the canonical type of the result of an extract_slice op.
948 struct SliceReturnTypeCanonicalizer {
949   RankedTensorType operator()(ExtractSliceOp op,
950                               ArrayRef<OpFoldResult> mixedOffsets,
951                               ArrayRef<OpFoldResult> mixedSizes,
952                               ArrayRef<OpFoldResult> mixedStrides) {
953     return getCanonicalSliceResultType(op.getType().getRank(),
954                                        op.getSourceType(), mixedOffsets,
955                                        mixedSizes, mixedStrides);
956   }
957 };
958 
959 /// A canonicalizer wrapper to replace ExtractSliceOps.
960 struct SliceCanonicalizer {
961   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
962                   ExtractSliceOp newOp) {
963     Value replacement = newOp.getResult();
964     if (replacement.getType() != op.getType())
965       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
966                                                     replacement);
967     rewriter.replaceOp(op, replacement);
968   }
969 };
970 
971 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
972                                                  MLIRContext *context) {
973   results.add<
974       OpWithOffsetSizesAndStridesConstantArgumentFolder<
975           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
976       ExtractSliceOpCastFolder>(context);
977 }
978 
/// Returns success when `op` is a no-op slice of `shapedType`, i.e. all
/// offsets are 0, all strides are 1, and all sizes match the corresponding
/// dimensions of `shapedType`.
980 static LogicalResult
981 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
982                                            ShapedType shapedType) {
983   OpBuilder b(op.getContext());
984   for (OpFoldResult ofr : op.getMixedOffsets())
985     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
986       return failure();
987   // Rank-reducing noops only need to inspect the leading dimensions: llvm::zip
988   // is appropriate.
989   auto shape = shapedType.getShape();
990   for (auto it : llvm::zip(op.getMixedSizes(), shape))
991     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
992       return failure();
993   for (OpFoldResult ofr : op.getMixedStrides())
994     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
995       return failure();
996   return success();
997 }
998 
999 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1000 /// we can return the InsertSliceOp's source directly.
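///
/// A minimal (illustrative) case:
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dst[0, 1] [2, 2] [1, 1]
///       : tensor<2x2xf32> into tensor<4x4xf32>
///   %1 = tensor.extract_slice %0[0, 1] [2, 2] [1, 1]
///       : tensor<4x4xf32> to tensor<2x2xf32>
/// ```
///
/// Here %1 folds to %src.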
1001 // TODO: This only checks the immediate producer; extend to go up the
1002 // insert/extract chain if the slices are disjoint.
1003 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1004   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1005 
1006   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1007   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1008       insertOp.isSameAs(extractOp, isSame))
1009     return insertOp.source();
1010 
1011   return {};
1012 }
1013 
1014 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute>) {
1015   if (getSourceType() == getType() &&
1016       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1017     return this->source();
1018   if (Value slice = foldExtractAfterInsertSlice(*this))
1019     return slice;
1020   return OpFoldResult();
1021 }
1022 
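/// Creates an extract_slice that reads the full `tensor` (zero offsets, sizes
/// equal to the tensor's shape, unit strides) and produces the rank-reduced
/// `targetType`.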
1023 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1024     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1025   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1026   unsigned rank = rankedTensorType.getRank();
1027   auto shape = rankedTensorType.getShape();
1028   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1029   SmallVector<OpFoldResult> sizes;
1030   for (unsigned i = 0, e = rank; i < e; ++i) {
1031     OpFoldResult dim;
1032     if (rankedTensorType.isDynamicDim(i))
1033       dim = b.createOrFold<tensor::DimOp>(
1034           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1035     else
1036       dim = b.getIndexAttr(shape[i]);
1037     sizes.push_back(dim);
1038   }
1039   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1040   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1041                                                 offsets, sizes, strides);
1042 }
1043 
1044 //===----------------------------------------------------------------------===//
1045 // InsertSliceOp
1046 //===----------------------------------------------------------------------===//
1047 
// Build an InsertSliceOp with mixed static and dynamic entries.
1049 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1050                           Value dest, ArrayRef<OpFoldResult> offsets,
1051                           ArrayRef<OpFoldResult> sizes,
1052                           ArrayRef<OpFoldResult> strides,
1053                           ArrayRef<NamedAttribute> attrs) {
1054   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1055   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
1064   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1065         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1066         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1067   result.addAttributes(attrs);
1068 }
1069 
// Build an InsertSliceOp with dynamic entries.
1071 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1072                           Value dest, ValueRange offsets, ValueRange sizes,
1073                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1074   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1075       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1076   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1077       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1078   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1079       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1080   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1081 }
1082 
1083 /// Verifier for InsertSliceOp.
1084 static LogicalResult verify(InsertSliceOp op) {
1085   // insert_slice is the inverse of extract_slice, use the same type inference.
1086   auto expectedType = ExtractSliceOp::inferRankReducedResultType(
1087       op.getSourceType().getRank(), op.getType(),
1088       extractFromI64ArrayAttr(op.static_offsets()),
1089       extractFromI64ArrayAttr(op.static_sizes()),
1090       extractFromI64ArrayAttr(op.static_strides()));
1091   auto result =
1092       isRankReducedType(expectedType.cast<ShapedType>(), op.getSourceType());
1093   return produceSliceErrorMsg(result, op, expectedType);
1094 }
1095 
1096 /// If we have two consecutive InsertSliceOp writing to the same slice, we
1097 /// can mutate the second InsertSliceOp's destination to the first one's.
1098 ///
1099 /// Example:
1100 ///
1101 /// ```mlir
1102 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1103 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1104 /// ```
1105 ///
1106 /// folds into:
1107 ///
1108 /// ```mlir
1109 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1110 /// ```
1111 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1112   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1113 
1114   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1115   if (!prevInsertOp ||
1116       prevInsertOp.source().getType() != insertOp.source().getType() ||
1117       !prevInsertOp.isSameAs(insertOp, isSame))
1118     return failure();
1119 
1120   insertOp.destMutable().assign(prevInsertOp.dest());
1121   return success();
1122 }
1123 
1124 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
1125   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1126       getSourceType() == getType() &&
1127       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1128     return this->source();
1129   if (succeeded(foldInsertAfterInsertSlice(*this)))
1130     return getResult();
1131   return OpFoldResult();
1132 }
1133 
1134 LogicalResult InsertSliceOp::reifyResultShapes(
1135     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1136   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1137   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1138     reifiedReturnShapes[0][dim] =
1139         builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
1140   }
1141   return success();
1142 }
1143 
1144 namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
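///
/// A sketch of the rewrite (illustrative types and sizes):
///
/// ```mlir
///   %c4 = arith.constant 4 : index
///   %r = tensor.insert_slice %0 into %1[0, 0] [%c4, %c4] [1, 1]
///       : tensor<?x?xf32> into tensor<8x8xf32>
/// ```
///
/// becomes
///
/// ```mlir
///   %cast = tensor.cast %0 : tensor<?x?xf32> to tensor<4x4xf32>
///   %r = tensor.insert_slice %cast into %1[0, 0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```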
1146 class InsertSliceOpConstantArgumentFolder final
1147     : public OpRewritePattern<InsertSliceOp> {
1148 public:
1149   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1150 
1151   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1152                                 PatternRewriter &rewriter) const override {
1153     // No constant operand, just return.
1154     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1155           return matchPattern(operand, matchConstantIndex());
1156         }))
1157       return failure();
1158 
1159     // At least one of offsets/sizes/strides is a new constant.
1160     // Form the new list of operands and constant attributes from the
1161     // existing.
1162     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1163     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1164     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1165     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1166     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1167     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1168 
1169     // Create the new op in canonical form.
1170     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1171         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1172         mixedOffsets, mixedSizes, mixedStrides);
1173     Value toInsert = insertSliceOp.source();
1174     if (sourceType != insertSliceOp.getSourceType())
1175       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1176                                                  sourceType, toInsert);
1177     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1178         insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
1179         mixedStrides);
1180     return success();
1181   }
1182 };
1183 
/// Fold tensor.cast ops into insert_slice operations. If the source or
/// destination tensor is a tensor.cast that removes static type information,
/// the cast is folded into the insert_slice operation. E.g.:
1187 ///
1188 /// ```mlir
1189 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1190 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1191 /// ```
1192 ///
1193 /// folds into:
1194 ///
1195 /// ```mlir
1196 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1197 /// ```
1198 ///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back so that the type of the result does not
/// change.
1202 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1203   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1204 
1205   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1206                                 PatternRewriter &rewriter) const override {
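    // Bail if any offset/size/stride operand is a constant index;
    // InsertSliceOpConstantArgumentFolder handles that case first.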
1207     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1208           return matchPattern(operand, matchConstantIndex());
1209         }))
1210       return failure();
1211 
1212     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1213       auto castOp = v.getDefiningOp<tensor::CastOp>();
1214       if (!castOp || !canFoldIntoConsumerOp(castOp))
1215         return llvm::None;
1216       return castOp.source();
1217     };
1218     Optional<Value> sourceCastSource =
1219         getSourceOfCastOp(insertSliceOp.source());
1220     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
1221     if (!sourceCastSource && !destCastSource)
1222       return failure();
1223 
1224     Value replacement = rewriter.create<InsertSliceOp>(
1225         insertSliceOp.getLoc(),
1226         (sourceCastSource ? *sourceCastSource : insertSliceOp.source()),
1227         (destCastSource ? *destCastSource : insertSliceOp.dest()),
1228         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1229         insertSliceOp.getMixedStrides());
1230 
1231     if (replacement.getType() != insertSliceOp.getType()) {
1232       replacement = rewriter.create<tensor::CastOp>(
1233           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1234     }
1235     rewriter.replaceOp(insertSliceOp, replacement);
1236     return success();
1237   }
1238 };
1239 
/// If additional static type information can be deduced from an insert_slice's
1241 /// size operands, insert an explicit cast of the op's source operand. This
1242 /// enables other canonicalization patterns that are matching for tensor_cast
1243 /// ops such as `ForOpTensorCastFolder` in SCF.
1244 ///
1245 /// Example:
1246 ///
1247 /// ```mlir
1248 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1249 ///       : tensor<?x?xf32> into ...
1250 /// ```
1251 ///
1252 /// folds into:
1253 ///
1254 /// ```mlir
1255 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1256 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1257 ///       : tensor<64x64xf32> into ...
1258 /// ```
1259 struct InsertSliceOpSourceCastInserter final
1260     : public OpRewritePattern<InsertSliceOp> {
1261   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1262 
1263   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1264                                 PatternRewriter &rewriter) const override {
1265     RankedTensorType srcType = insertSliceOp.getSourceType();
1266     if (srcType.getRank() != insertSliceOp.getType().getRank())
1267       return failure();
1268     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1269                                      srcType.getShape().end());
1270     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1271       if (Optional<int64_t> constInt =
1272               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1273         newSrcShape[i] = *constInt;
1274     }
1275 
1276     RankedTensorType newSrcType =
1277         RankedTensorType::get(newSrcShape, srcType.getElementType());
1278     if (srcType == newSrcType ||
1279         !preservesStaticInformation(srcType, newSrcType) ||
1280         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1281       return failure();
1282 
1283     // newSrcType is:
1284     //   1) Different from srcType.
1285     //   2) "More static" than srcType.
1286     //   3) Cast-compatible with srcType.
1287     // Insert the cast.
1288     Value cast = rewriter.create<tensor::CastOp>(
1289         insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
1290     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1291         insertSliceOp, cast, insertSliceOp.dest(),
1292         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1293         insertSliceOp.getMixedStrides());
1294     return success();
1295   }
1296 };
1297 } // namespace
1298 
1299 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1300                                                 MLIRContext *context) {
1301   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1302               InsertSliceOpSourceCastInserter>(context);
1303 }
1304 
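/// Creates an insert_slice that writes the full `tensor` into `dest` (zero
/// offsets, sizes equal to the shape of `dest`, unit strides), rank-reducing
/// if `tensor` has a lower rank than `dest`.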
1305 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1306                                                              Location loc,
1307                                                              Value tensor,
1308                                                              Value dest) {
1309   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1310   unsigned rank = rankedTensorType.getRank();
1311   auto shape = rankedTensorType.getShape();
1312   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1313   SmallVector<OpFoldResult> sizes;
1314   for (unsigned i = 0, e = rank; i < e; ++i) {
1315     OpFoldResult dim;
1316     if (rankedTensorType.isDynamicDim(i))
1317       dim = b.createOrFold<tensor::DimOp>(
1318           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1319     else
1320       dim = b.getIndexAttr(shape[i]);
1321     sizes.push_back(dim);
1322   }
1323   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1324   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1325                                                sizes, strides);
1326 }
1327 
1328 //===----------------------------------------------------------------------===//
1329 // TableGen'd op method definitions
1330 //===----------------------------------------------------------------------===//
1331 
1332 #define GET_OP_CLASSES
1333 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
1334