1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/Arithmetic/Utils/Utils.h"
11 #include "mlir/Dialect/Complex/IR/Complex.h"
12 #include "mlir/Dialect/Tensor/IR/Tensor.h"
13 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
14 #include "mlir/Dialect/Utils/StaticValueUtils.h"
15 #include "mlir/IR/BlockAndValueMapping.h"
16 #include "mlir/IR/Builders.h"
17 #include "mlir/IR/BuiltinAttributeInterfaces.h"
18 #include "mlir/IR/Matchers.h"
19 #include "mlir/IR/TypeUtilities.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 
23 using namespace mlir;
24 using namespace mlir::tensor;
25 
26 /// Materialize a single constant operation from a given attribute value with
27 /// the desired resultant type.
28 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
29                                               Attribute value, Type type,
30                                               Location loc) {
31   if (arith::ConstantOp::isBuildableWith(value, type))
32     return builder.create<arith::ConstantOp>(loc, value, type);
33   if (complex::ConstantOp::isBuildableWith(value, type))
34     return builder.create<complex::ConstantOp>(loc, type,
35                                                value.cast<ArrayAttr>());
36   return nullptr;
37 }
38 
39 //===----------------------------------------------------------------------===//
40 // CastOp
41 //===----------------------------------------------------------------------===//
42 
43 /// Returns true if `target` is a ranked tensor type that preserves static
44 /// information available in the `source` ranked tensor type.
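///
/// For illustration, with hypothetical shapes: source = tensor<?x?xf32> and
/// target = tensor<8x16xf32> returns true, whereas source = tensor<8x16xf32>
/// and target = tensor<?x16xf32> returns false, since the static size 8 would
/// be lost.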
45 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
46   auto sourceType = source.dyn_cast<RankedTensorType>();
47   auto targetType = target.dyn_cast<RankedTensorType>();
48 
49   // Requires RankedTensorType.
50   if (!sourceType || !targetType)
51     return false;
52 
53   // Requires same elemental type.
54   if (sourceType.getElementType() != targetType.getElementType())
55     return false;
56 
57   // Requires same rank.
58   if (sourceType.getRank() != targetType.getRank())
59     return false;
60 
61   // If cast is towards more static sizes along any dimension, don't fold.
62   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
63     if (!ShapedType::isDynamic(std::get<0>(t)) &&
64         ShapedType::isDynamic(std::get<1>(t)))
65       return false;
66   }
67 
68   return true;
69 }
70 
71 /// Determines whether tensor::CastOp casts to a more dynamic version of the
72 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
73 /// implement canonicalization patterns for ops in different dialects that may
74 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted as `slice` ops and are canonicalized to
/// preserve the type compatibility of their uses.
77 ///
78 /// Returns true when all conditions are met:
79 /// 1. source and result are ranked tensors with same element type and rank.
/// 2. the source type has more static information than the result type.
81 ///
82 /// Example:
83 /// ```mlir
84 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
85 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
86 /// ```
87 ///
88 /// folds into:
89 ///
90 /// ```mlir
91 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
92 /// ```
93 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
94   if (!castOp)
95     return false;
96 
  // Can fold if the source of the cast has at least as much static
  // information as its result.
99   return preservesStaticInformation(castOp.getType(),
100                                     castOp.source().getType());
101 }
102 
103 /// Determines whether the tensor::CastOp casts to a more static version of the
104 /// source tensor. This is useful to fold into a producing op and implement
/// canonicalization patterns with the `tensor.cast` op as the root, even when
/// the producer is from a different dialect. Returns true when all conditions
/// are met:
/// 1. source and result are ranked tensors with same element type and rank.
/// 2. the result type has more static information than the source type.
109 ///
110 /// Example:
111 /// ```mlir
112 ///   %1 = producer ... : tensor<?x?xf32>
113 ///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<8x16xf32>
114 /// ```
115 ///
/// can be canonicalized to:
117 ///
118 /// ```mlir
119 ///   %2 = producer ... : tensor<8x16xf32>
120 /// ```
121 /// Not all ops might be canonicalizable this way, but for those that can be,
122 /// this method provides a check that it is worth doing the canonicalization.
123 bool mlir::tensor::canFoldIntoProducerOp(CastOp castOp) {
124   if (!castOp)
125     return false;
126   return preservesStaticInformation(castOp.source().getType(),
127                                     castOp.getType());
128 }
129 
130 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
131 /// that can be folded.
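///
/// For illustration, with a hypothetical operand defined by
///   %0 = tensor.cast %t : tensor<4x4xf32> to tensor<?x?xf32>
/// the operand is replaced by %t, so the consuming op sees the more static
/// type directly.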
132 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
133   bool folded = false;
134   for (OpOperand &operand : op->getOpOperands()) {
135     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
136     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
137       operand.set(castOp.getOperand());
138       folded = true;
139     }
140   }
141   return success(folded);
142 }
143 
144 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
145   if (inputs.size() != 1 || outputs.size() != 1)
146     return false;
147   Type a = inputs.front(), b = outputs.front();
148   auto aT = a.dyn_cast<TensorType>();
149   auto bT = b.dyn_cast<TensorType>();
150   if (!aT || !bT)
151     return false;
152 
153   if (aT.getElementType() != bT.getElementType())
154     return false;
155 
156   return succeeded(verifyCompatibleShape(aT, bT));
157 }
158 
159 /// Compute a TensorType that has the joined shape knowledge of the two
160 /// given TensorTypes. The element types need to match.
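///
/// For example, with hypothetical shapes, joining tensor<?x16xf32> and
/// tensor<8x?xf32> yields tensor<8x16xf32>, while tensor<8xf32> and
/// tensor<16xf32> have no join and a null type is returned.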
161 static TensorType joinShapes(TensorType one, TensorType two) {
162   assert(one.getElementType() == two.getElementType());
163 
164   if (!one.hasRank())
165     return two;
166   if (!two.hasRank())
167     return one;
168 
169   int64_t rank = one.getRank();
170   if (rank != two.getRank())
171     return {};
172 
173   SmallVector<int64_t, 4> join;
174   join.reserve(rank);
175   for (int64_t i = 0; i < rank; ++i) {
176     if (one.isDynamicDim(i)) {
177       join.push_back(two.getDimSize(i));
178       continue;
179     }
180     if (two.isDynamicDim(i)) {
181       join.push_back(one.getDimSize(i));
182       continue;
183     }
184     if (one.getDimSize(i) != two.getDimSize(i))
185       return {};
186     join.push_back(one.getDimSize(i));
187   }
188   return RankedTensorType::get(join, one.getElementType());
189 }
190 
191 namespace {
192 
193 /// Replaces chains of two tensor.cast operations by a single tensor.cast
194 /// operation if doing so does not remove runtime constraints.
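///
/// For illustration, with hypothetical shapes, the chain
///   %1 = tensor.cast %0 : tensor<?x?xf32> to tensor<4x?xf32>
///   %2 = tensor.cast %1 : tensor<4x?xf32> to tensor<4x8xf32>
/// collapses into a single cast from tensor<?x?xf32> to tensor<4x8xf32>,
/// whereas a chain tensor<?x?xf32> -> tensor<4x8xf32> -> tensor<?x?xf32> is
/// left alone, since dropping the middle cast would remove the runtime check
/// that %0 has shape 4x8.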
195 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
196   using OpRewritePattern<CastOp>::OpRewritePattern;
197 
198   LogicalResult matchAndRewrite(CastOp tensorCast,
199                                 PatternRewriter &rewriter) const final {
200     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
201 
202     if (!tensorCastOperand)
203       return failure();
204 
205     auto sourceType =
206         tensorCastOperand.getOperand().getType().cast<TensorType>();
207     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
208     auto resultType = tensorCast.getType().cast<TensorType>();
209 
210     // We can remove the intermediate cast if joining all three produces the
211     // same result as just joining the source and result shapes.
212     auto firstJoin =
213         joinShapes(joinShapes(sourceType, intermediateType), resultType);
214 
215     // The join might not exist if the cast sequence would fail at runtime.
216     if (!firstJoin)
217       return failure();
218 
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
222     auto newJoin = joinShapes(sourceType, resultType);
223     if (firstJoin != newJoin)
224       return failure();
225 
226     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
227                                         tensorCastOperand.getOperand());
228     return success();
229   }
230 };
231 
/// Fold tensor.cast into a tensor.extract_slice producer.
233 /// Example:
234 /// ```
235 ///  %0 = tensor.extract_slice %arg0[%o, 0] [%s, 512] [1, 1] :
236 ///    tensor<128x512xf32> to tensor<?x512xf32>
237 ///  %1 = tensor.cast %0 : tensor<?x512xf32> to tensor<16x512xf32>
238 /// ```
239 /// ->
240 /// ```
241 /// %1 = tensor.extract_slice %arg0[%o, 0] [16, 512] [1, 1] :
242 ///   tensor<128x512xf32> to tensor<16x512xf32>
243 /// ```
244 struct TensorCastExtractSlice : public OpRewritePattern<CastOp> {
245   using OpRewritePattern<CastOp>::OpRewritePattern;
246 
247   LogicalResult matchAndRewrite(CastOp tensorCast,
248                                 PatternRewriter &rewriter) const final {
249     auto extractOperand =
250         tensorCast.getOperand().getDefiningOp<ExtractSliceOp>();
251 
252     if (!extractOperand || !canFoldIntoProducerOp(tensorCast) ||
253         tensorCast.getType().getShape() ==
254             tensorCast.source().getType().cast<RankedTensorType>().getShape())
255       return failure();
256 
257     SmallVector<OpFoldResult, 4> sizes = extractOperand.getMixedSizes();
258     auto dimMask = computeRankReductionMask(
259         extractFromI64ArrayAttr(extractOperand.static_sizes()),
260         extractOperand.getType().getShape());
261     size_t dimIndex = 0;
262     for (size_t i = 0, e = sizes.size(); i < e; i++) {
263       if (dimMask && dimMask->count(i))
264         continue;
265       int64_t dim = tensorCast.getType().getShape()[dimIndex++];
266       if (ShapedType::isDynamic(dim))
267         continue;
268       sizes[i] = rewriter.getIndexAttr(dim);
269     }
270 
271     rewriter.replaceOpWithNewOp<ExtractSliceOp>(
272         tensorCast, tensorCast.getType().cast<RankedTensorType>(),
273         extractOperand.source(), extractOperand.getMixedOffsets(), sizes,
274         extractOperand.getMixedStrides());
275     return success();
276   }
277 };
278 
279 } // namespace
280 
281 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
282                                          MLIRContext *context) {
283   results.add<ChainedTensorCast, TensorCastExtractSlice>(context);
284 }
285 
286 //===----------------------------------------------------------------------===//
287 // DimOp
288 //===----------------------------------------------------------------------===//
289 
290 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
291                   int64_t index) {
292   auto loc = result.location;
293   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
294   build(builder, result, source, indexValue);
295 }
296 
297 Optional<int64_t> DimOp::getConstantIndex() {
298   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
299     return constantOp.getValue().cast<IntegerAttr>().getInt();
300   return {};
301 }
302 
303 LogicalResult DimOp::verify() {
304   // Assume unknown index to be in range.
305   Optional<int64_t> index = getConstantIndex();
306   if (!index)
307     return success();
308 
309   // Check that constant index is not knowingly out of range.
310   auto type = source().getType();
311   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
312     if (*index >= tensorType.getRank())
313       return emitOpError("index is out of range");
314   } else if (type.isa<UnrankedTensorType>()) {
315     // Assume index to be in range.
316   } else {
317     llvm_unreachable("expected operand with tensor type");
318   }
319   return success();
320 }
321 
322 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
323   // All forms of folding require a known index.
324   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
325   if (!index)
326     return {};
327 
328   // Folding for unranked types (UnrankedTensorType) is not supported.
329   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
330   if (!tensorType)
331     return {};
332 
333   // Fold if the shape extent along the given index is known.
334   if (!tensorType.isDynamicDim(index.getInt())) {
335     Builder builder(getContext());
336     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
337   }
338 
339   Operation *definingOp = source().getDefiningOp();
340 
341   // Fold dim to the operand of tensor.generate.
342   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
343     auto resultType =
344         fromElements.getResult().getType().cast<RankedTensorType>();
345     // The case where the type encodes the size of the dimension is handled
346     // above.
347     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
348 
349     // Find the operand of the fromElements that corresponds to this index.
350     auto dynExtents = fromElements.dynamicExtents().begin();
351     for (auto dim : resultType.getShape().take_front(index.getInt()))
352       if (ShapedType::isDynamic(dim))
353         dynExtents++;
354 
355     return Value{*dynExtents};
356   }
357 
358   // The size at the given index is now known to be a dynamic size.
359   unsigned unsignedIndex = index.getValue().getZExtValue();
360 
361   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
362     // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
363     // `resolve-shaped-type-result-dims` pass.
364     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
365         sliceOp.isDynamicSize(unsignedIndex)) {
366       return {sliceOp.getDynamicSize(unsignedIndex)};
367     }
368   }
369 
370   // dim(cast) -> dim
371   if (succeeded(foldTensorCast(*this)))
372     return getResult();
373 
374   return {};
375 }
376 
377 namespace {
378 /// Fold dim of a cast into the dim of the source of the tensor cast.
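/// For illustration, with hypothetical IR:
///
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %c1 : tensor<?x?xf32>
///
/// the dim is rewritten to read directly from %t : tensor<4x?xf32>.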
379 struct DimOfCastOp : public OpRewritePattern<DimOp> {
380   using OpRewritePattern<DimOp>::OpRewritePattern;
381 
382   LogicalResult matchAndRewrite(DimOp dimOp,
383                                 PatternRewriter &rewriter) const override {
384     auto castOp = dimOp.source().getDefiningOp<CastOp>();
385     if (!castOp)
386       return failure();
387     Value newSource = castOp.getOperand();
388     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
389     return success();
390   }
391 };
392 } // namespace
393 
394 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
395                                         MLIRContext *context) {
396   results.add<DimOfCastOp>(context);
397 }
398 
399 //===----------------------------------------------------------------------===//
400 // ExtractOp
401 //===----------------------------------------------------------------------===//
402 
403 LogicalResult ExtractOp::verify() {
404   // Verify the # indices match if we have a ranked type.
405   if (auto tensorType = tensor().getType().dyn_cast<RankedTensorType>())
406     if (tensorType.getRank() != static_cast<int64_t>(indices().size()))
407       return emitOpError("incorrect number of indices for extract_element");
408 
409   return success();
410 }
411 
412 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
413   // If this is a splat elements attribute, simply return the value. All of the
414   // elements of a splat attribute are the same.
415   if (Attribute tensor = operands.front())
416     if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
417       return splatTensor.getSplatValue<Attribute>();
418 
419   // Collect the constant indices into the tensor.
420   SmallVector<uint64_t, 8> indices;
  for (Attribute indexAttr : llvm::drop_begin(operands, 1)) {
    if (!indexAttr || !indexAttr.isa<IntegerAttr>())
      return {};
    indices.push_back(indexAttr.cast<IntegerAttr>().getInt());
425   }
426 
427   // Fold extract(from_elements(...)).
428   if (auto fromElementsOp = tensor().getDefiningOp<FromElementsOp>()) {
429     auto tensorType = fromElementsOp.getType().cast<RankedTensorType>();
430     auto rank = tensorType.getRank();
431     assert(static_cast<int64_t>(indices.size()) == tensorType.getRank() &&
432            "rank mismatch");
433     int flatIndex = 0;
434     int stride = 1;
435     for (int i = rank - 1; i >= 0; --i) {
436       if (i < rank - 1)
437         stride *= tensorType.getDimSize(i);
438       flatIndex += indices[i] * stride;
439     }
440     // Prevent out of bounds accesses. This can happen in invalid code that will
441     // never execute.
442     if (static_cast<int>(fromElementsOp.elements().size()) <= flatIndex ||
443         flatIndex < 0)
444       return {};
445     return fromElementsOp.elements()[flatIndex];
446   }
447 
448   // If this is an elements attribute, query the value at the given indices.
449   if (Attribute tensor = operands.front()) {
450     auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
451     if (elementsAttr && elementsAttr.isValidIndex(indices))
452       return elementsAttr.getValues<Attribute>()[indices];
453   }
454 
455   return {};
456 }
457 
458 //===----------------------------------------------------------------------===//
459 // FromElementsOp
460 //===----------------------------------------------------------------------===//
461 
462 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
463                            Type resultType, ValueRange elements) {
464   result.addOperands(elements);
465   result.addTypes(resultType);
466 }
467 
468 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
469                            ValueRange elements) {
470   assert(!elements.empty() && "expected at least one element");
471   Type resultType = RankedTensorType::get(
472       {static_cast<int64_t>(elements.size())}, elements.front().getType());
473   build(builder, result, resultType, elements);
474 }
475 
476 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
477   if (!llvm::is_contained(operands, nullptr))
478     return DenseElementsAttr::get(getType(), operands);
479   return {};
480 }
481 
482 namespace {
483 
484 // Pushes the index_casts that occur before extractions to after the extract.
485 // This minimizes type conversion in some cases and enables the extract
486 // canonicalizer. This changes:
487 //
488 // %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
489 // %extract = tensor.extract %cast[%index] : tensor<1xindex>
490 //
491 // to the following:
492 //
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template and handling all tensor cast
// operations.
499 struct ExtractElementFromIndexCast
500     : public OpRewritePattern<tensor::ExtractOp> {
501   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
502 
503   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
504                                 PatternRewriter &rewriter) const final {
505     Location loc = extract.getLoc();
506     auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
507     if (!indexCast)
508       return failure();
509 
510     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
511 
512     auto newExtract = rewriter.create<tensor::ExtractOp>(
513         loc, elementTy, indexCast.getIn(), extract.indices());
514 
515     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
516                                                     newExtract);
517 
518     return success();
519   }
520 };
521 
522 } // namespace
523 
524 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
525                                                  MLIRContext *context) {
526   results.add<ExtractElementFromIndexCast>(context);
527 }
528 
529 //===----------------------------------------------------------------------===//
530 // InsertOp
531 //===----------------------------------------------------------------------===//
532 
533 LogicalResult InsertOp::verify() {
534   // Verify the # indices match if we have a ranked type.
535   if (auto destType = dest().getType().dyn_cast<RankedTensorType>())
536     if (destType.getRank() != static_cast<int64_t>(indices().size()))
537       return emitOpError("incorrect number of indices");
538   return success();
539 }
540 
541 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
542   Attribute scalar = operands[0];
543   Attribute dest = operands[1];
544   if (scalar && dest)
545     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
546       if (scalar == splatDest.getSplatValue<Attribute>())
547         return dest;
548   return {};
549 }
550 
551 //===----------------------------------------------------------------------===//
552 // GenerateOp
553 //===----------------------------------------------------------------------===//
554 
555 LogicalResult GenerateOp::reifyResultShapes(
556     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
557   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
558   int idx = 0;
559   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
560     if (getType().isDynamicDim(dim)) {
561       reifiedReturnShapes[0][dim] = getOperand(idx++);
562     } else {
563       reifiedReturnShapes[0][dim] = builder.create<arith::ConstantIndexOp>(
564           getLoc(), getType().getDimSize(dim));
565     }
566   }
567   return success();
568 }
569 
570 LogicalResult GenerateOp::verify() {
571   // Ensure that the tensor type has as many dynamic dimensions as are specified
572   // by the operands.
573   RankedTensorType resultTy = getType().cast<RankedTensorType>();
574   if (getNumOperands() != resultTy.getNumDynamicDims())
575     return emitError("must have as many index operands as dynamic extents "
576                      "in the result type");
577 
578   return success();
579 }
580 
581 LogicalResult GenerateOp::verifyRegions() {
582   RankedTensorType resultTy = getType().cast<RankedTensorType>();
583   // Ensure that region arguments span the index space.
584   if (!llvm::all_of(body().getArgumentTypes(),
585                     [](Type ty) { return ty.isIndex(); }))
586     return emitError("all body arguments must be index");
587   if (body().getNumArguments() != resultTy.getRank())
588     return emitError("must have one body argument per input dimension");
589 
590   // Ensure that the region yields an element of the right type.
591   auto yieldOp = cast<YieldOp>(body().getBlocks().front().getTerminator());
592 
593   if (yieldOp.value().getType() != resultTy.getElementType())
594     return emitOpError(
595         "body must be terminated with a `yield` operation of the tensor "
596         "element type");
597 
598   return success();
599 }
600 
601 void GenerateOp::build(
602     OpBuilder &b, OperationState &result, Type resultTy,
603     ValueRange dynamicExtents,
604     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
605   build(b, result, resultTy, dynamicExtents);
606 
607   // Build and populate body.
608   OpBuilder::InsertionGuard guard(b);
609   Region *bodyRegion = result.regions.front().get();
610   auto rank = resultTy.cast<RankedTensorType>().getRank();
611   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
612   SmallVector<Location, 2> argumentLocs(rank, result.location);
613   Block *bodyBlock =
614       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
615   bodyBuilder(b, result.location, bodyBlock->getArguments());
616 }
617 
618 namespace {
619 
/// Canonicalizes tensor.generate operations with constant extent operands into
/// the equivalent operation with those extents expressed in the result type.
/// A tensor.cast back to the original type is inserted to keep the resulting
/// IR well-typed.
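///
/// For illustration, with a hypothetical constant extent %c16, a
/// tensor.generate %c16 producing tensor<?xindex> becomes a tensor.generate
/// producing tensor<16xindex>, followed by a tensor.cast back to
/// tensor<?xindex>.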
624 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
625   using OpRewritePattern<GenerateOp>::OpRewritePattern;
626 
627   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
628                                 PatternRewriter &rewriter) const final {
629     auto resultType =
630         tensorFromElements.getResult().getType().cast<RankedTensorType>();
631 
632     if (resultType.hasStaticShape())
633       return failure();
634 
635     SmallVector<Value, 4> newOperands;
636     SmallVector<int64_t, 4> newShape;
637     auto operandsIt = tensorFromElements.dynamicExtents().begin();
638 
639     for (int64_t dim : resultType.getShape()) {
640       if (!ShapedType::isDynamic(dim)) {
641         newShape.push_back(dim);
642         continue;
643       }
644       APInt index;
645       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
646         newShape.push_back(ShapedType::kDynamicSize);
647         newOperands.push_back(*operandsIt++);
648         continue;
649       }
650       newShape.push_back(index.getSExtValue());
651       operandsIt++;
652     }
653 
654     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
655       return failure();
656 
657     auto loc = tensorFromElements.getLoc();
658     auto newOp = rewriter.create<GenerateOp>(
659         loc, RankedTensorType::get(newShape, resultType.getElementType()),
660         newOperands);
661     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
662                                 newOp.body().begin());
663     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
664                                                 newOp);
665     return success();
666   }
667 };
668 
669 /// Canonicalizes the pattern of the form
670 ///
671 /// %tensor = tensor.generate %x {
672 ///   ^bb0(%arg0: index):
673 ///   <computation>
674 ///   yield %1 : index
675 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
677 ///
678 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
679 /// tensor.generate operation has no side-effects.
680 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
681   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
682 
683   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
684                                 PatternRewriter &rewriter) const final {
685     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
686     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
687       return failure();
688 
689     BlockAndValueMapping mapping;
690     Block *body = &tensorFromElements.getBody().front();
691     mapping.map(body->getArguments(), extract.indices());
692     for (auto &op : body->without_terminator())
693       rewriter.clone(op, mapping);
694 
695     auto yield = cast<YieldOp>(body->getTerminator());
696 
697     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
698     return success();
699   }
700 };
701 
702 /// Canonicalizes the pattern of the form
703 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
705 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
706 ///
707 /// to
708 ///
709 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
710 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
711   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
712 
713   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
714                                 PatternRewriter &rewriter) const final {
715     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
716     if (!tensorCast)
717       return failure();
718 
719     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
720                                                    extract.indices());
721     return success();
722   }
723 };
724 
725 } // namespace
726 
727 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
728                                              MLIRContext *context) {
729   // TODO: Move extract patterns to tensor::ExtractOp.
730   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
731               StaticTensorGenerate>(context);
732 }
733 
734 //===----------------------------------------------------------------------===//
735 // RankOp
736 //===----------------------------------------------------------------------===//
737 
738 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
739   // Constant fold rank when the rank of the operand is known.
740   auto type = getOperand().getType();
741   auto shapedType = type.dyn_cast<ShapedType>();
742   if (shapedType && shapedType.hasRank())
743     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
744   return IntegerAttr();
745 }
746 
747 //===----------------------------------------------------------------------===//
748 // ReshapeOp
749 //===----------------------------------------------------------------------===//
750 
751 static int64_t getNumElements(ShapedType type) {
752   int64_t numElements = 1;
753   for (auto dim : type.getShape())
754     numElements *= dim;
755   return numElements;
756 }
757 
758 LogicalResult ReshapeOp::verify() {
759   TensorType operandType = source().getType().cast<TensorType>();
760   TensorType resultType = result().getType().cast<TensorType>();
761 
762   if (operandType.getElementType() != resultType.getElementType())
763     return emitOpError("element types of source and destination tensor "
764                        "types should be the same");
765 
766   int64_t shapeSize = shape().getType().cast<RankedTensorType>().getDimSize(0);
767   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
768   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
769 
770   if (resultRankedType) {
771     if (operandRankedType && resultRankedType.hasStaticShape() &&
772         operandRankedType.hasStaticShape()) {
773       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
774         return emitOpError("source and destination tensor should have the "
775                            "same number of elements");
776     }
777     if (ShapedType::isDynamic(shapeSize))
778       return emitOpError("cannot use shape operand with dynamic length to "
779                          "reshape to statically-ranked tensor type");
780     if (shapeSize != resultRankedType.getRank())
781       return emitOpError(
782           "length of shape operand differs from the result's tensor rank");
783   }
784   return success();
785 }
786 
787 //===----------------------------------------------------------------------===//
788 // Reassociative reshape ops
789 //===----------------------------------------------------------------------===//
790 
791 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
792   return getSymbolLessAffineMaps(getReassociationExprs());
793 }
794 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
795   return convertReassociationIndicesToExprs(getContext(),
796                                             getReassociationIndices());
797 }
798 
799 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
800   return getSymbolLessAffineMaps(getReassociationExprs());
801 }
802 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
803   return convertReassociationIndicesToExprs(getContext(),
804                                             getReassociationIndices());
805 }
806 
807 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
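/// For example, with a hypothetical reassociation [[0, 1], [2]], a
/// tensor<4x?x8xf32> collapses to tensor<?x8xf32>: any dynamic size within a
/// group makes the whole collapsed dimension dynamic.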
808 static RankedTensorType
809 computeTensorReshapeCollapsedType(RankedTensorType type,
810                                   ArrayRef<AffineMap> reassociation) {
811   auto shape = type.getShape();
812   SmallVector<int64_t, 4> newShape;
813   newShape.reserve(reassociation.size());
814 
815   // Use the fact that reassociation is valid to simplify the logic: only use
816   // each map's rank.
817   assert(isReassociationValid(reassociation) && "invalid reassociation");
818   unsigned currentDim = 0;
819   for (AffineMap m : reassociation) {
820     unsigned dim = m.getNumResults();
821     auto band = shape.slice(currentDim, dim);
822     int64_t size = 1;
823     if (llvm::is_contained(band, ShapedType::kDynamicSize))
824       size = ShapedType::kDynamicSize;
825     else
826       for (unsigned d = 0; d < dim; ++d)
827         size *= shape[currentDim + d];
828     newShape.push_back(size);
829     currentDim += dim;
830   }
831 
832   return RankedTensorType::get(newShape, type.getElementType());
833 }
834 
835 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
836                             ArrayRef<ReassociationIndices> reassociation,
837                             ArrayRef<NamedAttribute> attrs) {
838   auto resultType = computeTensorReshapeCollapsedType(
839       src.getType().cast<RankedTensorType>(),
840       getSymbolLessAffineMaps(
841           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
842   build(b, result, resultType, src, attrs);
843   result.addAttribute(getReassociationAttrStrName(),
844                       getReassociationIndicesAttribute(b, reassociation));
845 }
846 
// Checks if types are the same, ignoring the encoding on ranked tensors.
848 static bool isSameTypesWithoutEncoding(Type tp1, Type tp2) {
849   if (auto rtp1 = tp1.dyn_cast<RankedTensorType>()) {
850     if (auto rtp2 = tp2.dyn_cast<RankedTensorType>())
851       return rtp1.getShape() == rtp2.getShape() &&
852              rtp1.getElementType() == rtp2.getElementType();
853     return false;
854   }
855   // Default implementation.
856   return tp1 == tp2;
857 }
858 
859 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
860                                         TensorReshapeOp, ExpandShapeOp>::value>
861 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
862                                            RankedTensorType expandedType,
863                                            RankedTensorType collapsedType) {
864   if (failed(
865           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
866     return failure();
867 
868   auto maps = op.getReassociationMaps();
869   RankedTensorType expectedType =
870       computeTensorReshapeCollapsedType(expandedType, maps);
871   if (!isSameTypesWithoutEncoding(collapsedType, expectedType))
872     return op.emitOpError("expected collapsed type to be ")
873            << expectedType << ", but got " << collapsedType;
874   return success();
875 }
876 
877 LogicalResult ExpandShapeOp::verify() {
878   return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
879 }
880 
881 LogicalResult CollapseShapeOp::verify() {
882   return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
883 }
884 
885 namespace {
886 /// Reshape of a splat constant can be replaced with a constant of the result
887 /// type.
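///
/// For illustration, with a hypothetical splat constant, collapsing
/// dense<1.0> : tensor<2x4xf32> through tensor.collapse_shape is replaced by
/// dense<1.0> : tensor<8xf32> directly.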
888 template <typename TensorReshapeOp>
889 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
890   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
891   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
892                                 PatternRewriter &rewriter) const override {
893     DenseElementsAttr attr;
894     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
895       return failure();
896     if (!attr || !attr.isSplat())
897       return failure();
898     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
899         reshapeOp.getResultType(), attr.getRawData());
900     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
901     return success();
902   }
903 };
904 
/// Reshape of a FromElements can be replaced with a FromElements of the result
/// type.
907 template <typename TensorReshapeOp>
908 struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
909   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
910   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
911                                 PatternRewriter &rewriter) const override {
912     auto fromElements =
913         reshapeOp.src().template getDefiningOp<FromElementsOp>();
914     if (!fromElements)
915       return failure();
916 
917     auto shapedTy = reshapeOp.getType().template cast<ShapedType>();
918 
919     if (!shapedTy.hasStaticShape())
920       return failure();
921 
922     rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
923                                                 fromElements.elements());
924     return success();
925   }
926 };
927 
928 } // namespace
929 
930 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
931                                                 MLIRContext *context) {
932   results.add<ComposeReassociativeReshapeOps<ExpandShapeOp>,
933               ComposeExpandOfCollapseOp<ExpandShapeOp, CollapseShapeOp>,
934               FoldReshapeWithConstant<ExpandShapeOp>,
935               FoldReshapeWithFromElements<ExpandShapeOp>>(context);
936 }
937 
938 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
939                                                   MLIRContext *context) {
940   results.add<ComposeReassociativeReshapeOps<CollapseShapeOp>,
941               ComposeCollapseOfExpandOp<CollapseShapeOp, ExpandShapeOp>,
942               FoldReshapeWithConstant<CollapseShapeOp>,
943               FoldReshapeWithFromElements<CollapseShapeOp>>(context);
944 }
945 
946 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
947   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
948 }
949 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
950   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
951 }
952 
953 //===----------------------------------------------------------------------===//
954 // ExtractSliceOp
955 //===----------------------------------------------------------------------===//
956 
957 /// An extract_slice op result type can be fully inferred from the source type
958 /// and the static representation of offsets, sizes and strides. Special
959 /// sentinels encode the dynamic case.
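///
/// For example, with a hypothetical source tensor<8x16x4xf32> and staticSizes
/// [8, ShapedType::kDynamicSize, 4], the inferred result type is
/// tensor<8x?x4xf32>.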
960 RankedTensorType ExtractSliceOp::inferResultType(
961     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
962     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offset/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type and strides=1.
966   unsigned rank = sourceRankedTensorType.getRank();
967   (void)rank;
968   assert(staticSizes.size() == rank &&
969          "unexpected staticSizes not equal to rank of source");
970   return RankedTensorType::get(staticSizes,
971                                sourceRankedTensorType.getElementType());
972 }
973 
974 RankedTensorType ExtractSliceOp::inferResultType(
975     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
976     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
977   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
978   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
979   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
980                              ShapedType::kDynamicStrideOrOffset);
981   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
982                              ShapedType::kDynamicSize);
983   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
984                              ShapedType::kDynamicStrideOrOffset);
985   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
986                                          staticSizes, staticStrides);
987 }
988 
/// Infer the rank-reduced result type of an extract_slice op: unit dimensions
/// of the fully inferred type are projected out until the requested
/// `resultRank` is reached. Special sentinels encode the dynamic case.
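///
/// For example, with hypothetical sizes [1, 4, 1] and resultRank == 1, the
/// fully inferred tensor<1x4x1xf32> is reduced to tensor<4xf32>.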
992 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
993     unsigned resultRank, RankedTensorType sourceRankedTensorType,
994     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
995     ArrayRef<int64_t> strides) {
996   auto inferredType =
997       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
998           .cast<RankedTensorType>();
999   int rankDiff = inferredType.getRank() - resultRank;
1000   if (rankDiff > 0) {
1001     auto shape = inferredType.getShape();
1002     llvm::SmallBitVector dimsToProject =
1003         getPositionsOfShapeOne(rankDiff, shape);
1004     SmallVector<int64_t> projectedShape;
1005     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
1006       if (!dimsToProject.test(pos))
1007         projectedShape.push_back(shape[pos]);
1008     inferredType =
1009         RankedTensorType::get(projectedShape, inferredType.getElementType());
1010   }
1011   return inferredType;
1012 }
1013 
1014 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
1015     unsigned resultRank, RankedTensorType sourceRankedTensorType,
1016     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
1017     ArrayRef<OpFoldResult> strides) {
1018   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1019   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1020   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1021                              ShapedType::kDynamicStrideOrOffset);
1022   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1023                              ShapedType::kDynamicSize);
1024   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1025                              ShapedType::kDynamicStrideOrOffset);
1026   return ExtractSliceOp::inferRankReducedResultType(
1027       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
1028       staticStrides);
1029 }
1030 
1031 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
1032 /// result type. If the type passed is nullptr, it is inferred.
1033 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1034                            RankedTensorType resultType, Value source,
1035                            ArrayRef<OpFoldResult> offsets,
1036                            ArrayRef<OpFoldResult> sizes,
1037                            ArrayRef<OpFoldResult> strides,
1038                            ArrayRef<NamedAttribute> attrs) {
1039   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1040   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1041   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1042                              ShapedType::kDynamicStrideOrOffset);
1043   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1044                              ShapedType::kDynamicSize);
1045   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1046                              ShapedType::kDynamicStrideOrOffset);
1047   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  // Structuring the implementation this way avoids duplication between
  // builders.
1049   if (!resultType) {
1050     resultType =
1051         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
1052                                         staticSizes, staticStrides)
1053             .cast<RankedTensorType>();
1054   }
1055   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1056         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1057         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1058   result.addAttributes(attrs);
1059 }
1060 
1061 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
1062 /// result type.
1063 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1064                            ArrayRef<OpFoldResult> offsets,
1065                            ArrayRef<OpFoldResult> sizes,
1066                            ArrayRef<OpFoldResult> strides,
1067                            ArrayRef<NamedAttribute> attrs) {
1068   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1069 }
1070 
1071 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
1072 /// type passed is nullptr, it is inferred.
1073 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1074                            RankedTensorType resultType, Value source,
1075                            ValueRange offsets, ValueRange sizes,
1076                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1077   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1078       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1079   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1080       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1081   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1082       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1083   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1084 }
1085 
1086 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
1087 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1088                            ValueRange offsets, ValueRange sizes,
1089                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1090   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1091 }
1092 
1093 template <typename OpTy>
1094 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
1095                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
1097   switch (result) {
1098   case SliceVerificationResult::Success:
1099     return success();
1100   case SliceVerificationResult::RankTooLarge:
1101     return op.emitError("expected rank to be smaller or equal to ")
1102            << "the other rank. ";
1103   case SliceVerificationResult::SizeMismatch:
1104     return op.emitError("expected type to be ")
1105            << expectedType << " or a rank-reduced version. (size mismatch) ";
1106   case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected element type to be ")
           << shapedType.getElementType();
1109   default:
1110     llvm_unreachable("unexpected extract_slice op verification result");
1111   }
1112 }
1113 
1114 /// Verifier for ExtractSliceOp.
1115 LogicalResult ExtractSliceOp::verify() {
1116   // Verify result type against inferred type.
1117   auto expectedType = ExtractSliceOp::inferResultType(
1118       getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
1119   auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
1120   return produceSliceErrorMsg(result, *this, expectedType);
1121 }
1122 
1123 /// Infer the canonical type of the result of an extract_slice op. Returns a
1124 /// type with rank `resultRank` that is either the rank of the rank-reduced
1125 /// type, or the non-rank-reduced type.
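///
/// For example, with hypothetical sizes [1, 4] on a 2-D f32 source, requesting
/// resultRank == 1 yields the rank-reduced tensor<4xf32>, while requesting
/// resultRank == 2 keeps the non-rank-reduced tensor<1x4xf32>.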
1126 static RankedTensorType
1127 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1128                             ArrayRef<OpFoldResult> mixedOffsets,
1129                             ArrayRef<OpFoldResult> mixedSizes,
1130                             ArrayRef<OpFoldResult> mixedStrides) {
1131   auto resultType =
1132       ExtractSliceOp::inferRankReducedResultType(
1133           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1134           .cast<RankedTensorType>();
1135   if (resultType.getRank() != resultRank) {
1136     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1137                                                  mixedSizes, mixedStrides)
1138                      .cast<RankedTensorType>();
1139   }
1140   return resultType;
1141 }
1142 
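/// Return a bit vector over the slice's size operands marking which unit
/// dimensions are dropped (rank-reduced away) in the result type. For example,
/// with hypothetical sizes [1, 4, 1] and a result of type tensor<4xf32>,
/// dimensions 0 and 2 are marked as dropped.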
1143 llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
1144   ArrayRef<int64_t> resultShape = getType().getShape();
1145   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1146   llvm::SmallBitVector droppedDims(mixedSizes.size());
1147   unsigned shapePos = 0;
1148   for (const auto &size : enumerate(mixedSizes)) {
1149     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if the slice size is not statically 1, or if
    // the matching result dimension itself has static size 1 (i.e. the unit
    // dimension was kept rather than rank-reduced away).
1153     if (!sizeVal || *sizeVal != 1 ||
1154         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1155       shapePos++;
1156       continue;
1157     }
1158     droppedDims.set(size.index());
1159   }
1160   return droppedDims;
1161 }
1162 
1163 LogicalResult ExtractSliceOp::reifyResultShapes(
1164     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1165   reifiedReturnShapes.resize(1);
1166   reifiedReturnShapes[0].reserve(getType().getRank());
1167   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1168   llvm::SmallBitVector droppedDims = getDroppedDims();
1169   Location loc = getLoc();
1170   for (const auto &size : enumerate(mixedSizes)) {
1171     if (droppedDims.test(size.index()))
1172       continue;
1173     if (auto attr = size.value().dyn_cast<Attribute>()) {
1174       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1175           loc, attr.cast<IntegerAttr>().getInt()));
1176       continue;
1177     }
1178     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1179   }
1180   return success();
1181 }
1182 
1183 namespace {
1184 /// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
1186 /// `canFoldIntoConsumerOp` is true.
1187 ///
1188 /// Example:
1189 /// ```
1190 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1191 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1192 ///   tensor<3x4xf32>
1193 /// ```
1194 /// is rewritten into:
1195 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///   tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1198 /// ```
1199 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1200 public:
1201   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1202 
1203   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1204                                 PatternRewriter &rewriter) const override {
    // Any constant operand, just return to let the constant-argument folder
    // kick in first.
1206     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1207           return matchPattern(operand, matchConstantIndex());
1208         }))
1209       return failure();
1210 
1211     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1212     if (!castOp)
1213       return failure();
1214 
1215     if (!canFoldIntoConsumerOp(castOp))
1216       return failure();
1217 
1218     /// Deduce the type of the result to use for the canonicalized operation.
1219     RankedTensorType resultType = getCanonicalSliceResultType(
1220         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1221         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1222         sliceOp.getMixedStrides());
1223     Value newSlice = rewriter.create<ExtractSliceOp>(
1224         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1225         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1226         sliceOp.static_sizes(), sliceOp.static_strides());
1227     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1228                                                 newSlice);
1229     return success();
1230   }
1231 };
1232 
/// Slice elements from `values` into `outValues`. For each dimension,
/// `counts` holds the number of elements covered by one step along that
/// dimension in the linearized `values` (i.e. the linearized stride). The
/// output values can be used to construct a DenseElementsAttr.
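///
/// For illustration, with hypothetical values: for a 2x3 source holding
/// [0, 1, 2, 3, 4, 5] in row-major order, counts = [3, 1], offsets = [0, 1],
/// sizes = [2, 2] and strides = [1, 1] append the values [1, 2, 4, 5] to
/// `outValues`.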
1236 template <typename IterTy, typename ElemTy>
1237 static void sliceElements(IterTy values, ArrayRef<int64_t> counts,
1238                           ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1239                           ArrayRef<int64_t> strides,
1240                           llvm::SmallVectorImpl<ElemTy> *outValues) {
1241   assert(offsets.size() == sizes.size());
1242   assert(offsets.size() == strides.size());
1243   if (offsets.empty())
1244     return;
1245 
1246   int64_t offset = offsets.front();
1247   int64_t size = sizes.front();
1248   int64_t stride = strides.front();
1249   if (offsets.size() == 1) {
1250     for (int64_t i = 0; i < size; ++i, offset += stride)
1251       outValues->push_back(*(values + offset));
1252 
1253     return;
1254   }
1255 
1256   for (int64_t i = 0; i < size; ++i, offset += stride) {
1257     auto begin = values + offset * counts.front();
1258     sliceElements<IterTy, ElemTy>(begin, counts.drop_front(),
1259                                   offsets.drop_front(), sizes.drop_front(),
1260                                   strides.drop_front(), outValues);
1261   }
1262 }
1263 
/// Fold arith.constant and tensor.extract_slice into arith.constant. The folded
/// operation might introduce more constant data; users can restrict when the
/// fold applies via the control function.
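///
/// For illustration, with a hypothetical constant: extracting offsets [1],
/// sizes [2], strides [1] from dense<[1, 2, 3, 4]> : tensor<4xi32> folds to
/// dense<[2, 3]> : tensor<2xi32>, subject to the control function.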
1267 class ConstantOpExtractSliceFolder final
1268     : public OpRewritePattern<ExtractSliceOp> {
1269 public:
1270   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1271 
1272   ConstantOpExtractSliceFolder(MLIRContext *context,
1273                                ControlConstantExtractSliceFusionFn controlFn)
1274       : OpRewritePattern<ExtractSliceOp>(context),
1275         controlFn(std::move(controlFn)) {}
1276 
1277   LogicalResult matchAndRewrite(ExtractSliceOp op,
1278                                 PatternRewriter &rewriter) const override {
1279     DenseElementsAttr attr;
1280     if (!matchPattern(op.source(), m_Constant(&attr)))
1281       return failure();
1282 
1283     // A constant splat is handled by fold().
1284     if (attr.isSplat())
1285       return failure();
1286 
1287     // Dynamic result shape is not supported.
1288     auto sourceType = op.source().getType().cast<ShapedType>();
1289     auto resultType = op.result().getType().cast<ShapedType>();
1290     if (!sourceType.hasStaticShape() || !resultType.hasStaticShape())
1291       return failure();
1292 
1293     // Customized control over the folding.
1294     if (!controlFn(op))
1295       return failure();
1296 
1297     int64_t count = sourceType.getNumElements();
1298     if (count == 0)
1299       return failure();
1300 
1301     // Check if there are any dynamic parts, which are not supported.
1302     auto offsets = extractFromI64ArrayAttr(op.static_offsets());
1303     if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
1304       return failure();
1305     auto sizes = extractFromI64ArrayAttr(op.static_sizes());
1306     if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
1307       return failure();
1308     auto strides = extractFromI64ArrayAttr(op.static_strides());
1309     if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
1310       return failure();
1311 
    // Compute the linearized stride (in number of elements) for each
    // dimension.
1313     SmallVector<int64_t> counts;
1314     ArrayRef<int64_t> shape = sourceType.getShape();
1315     counts.reserve(shape.size());
1316     for (int64_t v : shape) {
1317       count = count / v;
1318       counts.push_back(count);
1319     }
1320 
1321     // New attribute constructed by the sliced values.
1322     DenseElementsAttr newAttr;
1323 
1324     if (auto elems = attr.dyn_cast<DenseIntElementsAttr>()) {
1325       SmallVector<APInt> outValues;
1326       outValues.reserve(sourceType.getNumElements());
1327       sliceElements<DenseElementsAttr::IntElementIterator, APInt>(
1328           elems.begin(), counts, offsets, sizes, strides, &outValues);
1329       newAttr = DenseElementsAttr::get(resultType, outValues);
1330     } else if (auto elems = attr.dyn_cast<DenseFPElementsAttr>()) {
1331       SmallVector<APFloat> outValues;
1332       outValues.reserve(sourceType.getNumElements());
1333       sliceElements<DenseElementsAttr::FloatElementIterator, APFloat>(
1334           elems.begin(), counts, offsets, sizes, strides, &outValues);
1335       newAttr = DenseElementsAttr::get(resultType, outValues);
1336     }
1337 
1338     if (newAttr) {
1339       rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, resultType, newAttr);
1340       return success();
1341     }
1342 
1343     return failure();
1344   }
1345 
1346 private:
  /// Additionally controls whether the fold is applied; users can supply
  /// their own heuristics via this function.
1349   ControlConstantExtractSliceFusionFn controlFn;
1350 };
1351 
1352 } // namespace
1353 
1354 void mlir::tensor::populateFoldConstantExtractSlicePatterns(
1355     RewritePatternSet &patterns,
1356     const ControlConstantExtractSliceFusionFn &controlFn) {
1357   patterns.add<ConstantOpExtractSliceFolder>(patterns.getContext(), controlFn);
1358 }
1359 
1360 /// Return the canonical type of the result of an extract_slice op.
1361 struct SliceReturnTypeCanonicalizer {
1362   RankedTensorType operator()(ExtractSliceOp op,
1363                               ArrayRef<OpFoldResult> mixedOffsets,
1364                               ArrayRef<OpFoldResult> mixedSizes,
1365                               ArrayRef<OpFoldResult> mixedStrides) {
1366     return getCanonicalSliceResultType(op.getType().getRank(),
1367                                        op.getSourceType(), mixedOffsets,
1368                                        mixedSizes, mixedStrides);
1369   }
1370 };
1371 
1372 /// A canonicalizer wrapper to replace ExtractSliceOps.
1373 struct SliceCanonicalizer {
1374   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1375                   ExtractSliceOp newOp) {
1376     Value replacement = newOp.getResult();
1377     if (replacement.getType() != op.getType())
1378       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1379                                                     replacement);
1380     rewriter.replaceOp(op, replacement);
1381   }
1382 };
1383 
1384 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1385                                                  MLIRContext *context) {
1386   results.add<
1387       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1388           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1389       ExtractSliceOpCastFolder>(context);
1390 }
1391 
/// Return success if `op` is an identity slice of `shapedType`, i.e. all
/// offsets are 0, all strides are 1, and the sizes match the shape.
1393 static LogicalResult
1394 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1395                                            ShapedType shapedType) {
1396   OpBuilder b(op.getContext());
1397   for (OpFoldResult ofr : op.getMixedOffsets())
1398     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1399       return failure();
  // Rank-reducing no-ops only need to inspect the leading dimensions;
  // llvm::zip, which stops at the shorter range, is appropriate.
1402   auto shape = shapedType.getShape();
1403   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1404     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1405       return failure();
1406   for (OpFoldResult ofr : op.getMixedStrides())
1407     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1408       return failure();
1409   return success();
1410 }
1411 
1412 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1413 /// we can return the InsertSliceOp's source directly.
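///
/// For illustration (SSA names are hypothetical):
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dest[0, 0] [16, 16] [1, 1]
///       : tensor<16x16xf32> into tensor<64x64xf32>
///   %1 = tensor.extract_slice %0[0, 0] [16, 16] [1, 1]
///       : tensor<64x64xf32> to tensor<16x16xf32>
/// ```
///
/// Here %1 folds to %src.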
1414 // TODO: This only checks the immediate producer; extend to go up the
1415 // insert/extract chain if the slices are disjoint.
1416 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1417   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1418 
1419   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1420   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1421       insertOp.isSameAs(extractOp, isSame))
1422     return insertOp.source();
1423 
1424   return {};
1425 }
1426 
1427 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute> operands) {
1428   if (auto splat = operands[0].dyn_cast_or_null<SplatElementsAttr>()) {
1429     auto resultType = result().getType().cast<ShapedType>();
1430     if (resultType.hasStaticShape())
1431       return splat.resizeSplat(resultType);
1432   }
1433   if (getSourceType() == getType() &&
1434       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1435     return this->source();
1436   if (Value slice = foldExtractAfterInsertSlice(*this))
1437     return slice;
1438 
1439   return OpFoldResult();
1440 }
1441 
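/// Create an extract_slice that covers the full extent of `tensor` (zero
/// offsets, unit strides, sizes equal to the source dimensions) and uses the
/// potentially rank-reducing `targetType` as the result type.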
1442 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1443     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1444   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1445   unsigned rank = rankedTensorType.getRank();
1446   auto shape = rankedTensorType.getShape();
1447   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1448   SmallVector<OpFoldResult> sizes;
1449   for (unsigned i = 0, e = rank; i < e; ++i) {
1450     OpFoldResult dim;
1451     if (rankedTensorType.isDynamicDim(i))
1452       dim = b.createOrFold<tensor::DimOp>(
1453           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1454     else
1455       dim = b.getIndexAttr(shape[i]);
1456     sizes.push_back(dim);
1457   }
1458   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1459   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1460                                                 offsets, sizes, strides);
1461 }
1462 
1463 //===----------------------------------------------------------------------===//
1464 // InsertSliceOp
1465 //===----------------------------------------------------------------------===//
1466 
// Build an InsertSliceOp with mixed static and dynamic entries.
1468 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1469                           Value dest, ArrayRef<OpFoldResult> offsets,
1470                           ArrayRef<OpFoldResult> sizes,
1471                           ArrayRef<OpFoldResult> strides,
1472                           ArrayRef<NamedAttribute> attrs) {
1473   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1474   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1475   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1476                              ShapedType::kDynamicStrideOrOffset);
1477   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1478                              ShapedType::kDynamicSize);
1479   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1480                              ShapedType::kDynamicStrideOrOffset);
1481   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1482         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1483         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1484   result.addAttributes(attrs);
1485 }
1486 
// Build an InsertSliceOp with dynamic entries.
1488 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1489                           Value dest, ValueRange offsets, ValueRange sizes,
1490                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1491   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1492       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1493   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1494       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1495   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1496       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1497   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1498 }
1499 
1500 static SliceVerificationResult
1501 verifyInsertSliceOp(ShapedType srcType, ShapedType dstType,
1502                     ArrayAttr staticOffsets, ArrayAttr staticSizes,
1503                     ArrayAttr staticStrides,
1504                     ShapedType *expectedType = nullptr) {
  // insert_slice is the inverse of extract_slice; use the same type inference.
1506   auto expected = ExtractSliceOp::inferRankReducedResultType(
1507                       srcType.getRank(), dstType.cast<RankedTensorType>(),
1508                       extractFromI64ArrayAttr(staticOffsets),
1509                       extractFromI64ArrayAttr(staticSizes),
1510                       extractFromI64ArrayAttr(staticStrides))
1511                       .cast<ShapedType>();
1512   if (expectedType)
1513     *expectedType = expected;
1514   return isRankReducedType(expected, srcType);
1515 }
1516 
1517 /// Verifier for InsertSliceOp.
1518 LogicalResult InsertSliceOp::verify() {
1519   ShapedType expectedType;
1520   auto result =
1521       verifyInsertSliceOp(getSourceType(), getType(), static_offsets(),
1522                           static_sizes(), static_strides(), &expectedType);
1523   return produceSliceErrorMsg(result, *this, expectedType);
1524 }
1525 
/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can redirect the second InsertSliceOp's destination to the first one's
/// destination.
1528 ///
1529 /// Example:
1530 ///
1531 /// ```mlir
1532 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1533 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1534 /// ```
1535 ///
1536 /// folds into:
1537 ///
1538 /// ```mlir
1539 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1540 /// ```
1541 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1542   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1543 
1544   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1545   if (!prevInsertOp ||
1546       prevInsertOp.source().getType() != insertOp.source().getType() ||
1547       !prevInsertOp.isSameAs(insertOp, isSame))
1548     return failure();
1549 
1550   insertOp.destMutable().assign(prevInsertOp.dest());
1551   return success();
1552 }
1553 
1554 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
1555   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1556       getSourceType() == getType() &&
1557       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1558     return this->source();
1559   if (succeeded(foldInsertAfterInsertSlice(*this)))
1560     return getResult();
1561   return OpFoldResult();
1562 }
1563 
1564 LogicalResult InsertSliceOp::reifyResultShapes(
1565     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1566   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1567   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1568     reifiedReturnShapes[0][dim] =
1569         builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
1570   }
1571   return success();
1572 }
1573 
1574 namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
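///
/// For illustration (SSA names and the constant %c64 are hypothetical), with
/// `%c64 = arith.constant 64 : index` used as a size:
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dest[0, 0] [%c64, %c64] [1, 1]
///       : tensor<?x?xf32> into tensor<128x128xf32>
/// ```
///
/// canonicalizes to:
///
/// ```mlir
///   %cast = tensor.cast %src : tensor<?x?xf32> to tensor<64x64xf32>
///   %0 = tensor.insert_slice %cast into %dest[0, 0] [64, 64] [1, 1]
///       : tensor<64x64xf32> into tensor<128x128xf32>
/// ```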
1576 class InsertSliceOpConstantArgumentFolder final
1577     : public OpRewritePattern<InsertSliceOp> {
1578 public:
1579   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1580 
1581   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1582                                 PatternRewriter &rewriter) const override {
1583     // No constant operand, just return.
1584     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1585           return matchPattern(operand, matchConstantIndex());
1586         }))
1587       return failure();
1588 
    // At least one of offsets/sizes/strides is a new constant.
    // Form the new lists of operands and constant attributes from the
    // existing ones.
1592     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1593     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1594     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1595     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1596     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1597     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1598 
1599     // Create the new op in canonical form.
1600     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1601         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1602         mixedOffsets, mixedSizes, mixedStrides);
1603     Value toInsert = insertSliceOp.source();
1604     if (sourceType != insertSliceOp.getSourceType())
1605       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1606                                                  sourceType, toInsert);
1607     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1608         insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
1609         mixedStrides);
1610     return success();
1611   }
1612 };
1613 
1614 /// Fold tensor_casts with insert_slice operations. If the source or destination
1615 /// tensor is a tensor_cast that removes static type information, the cast is
1616 /// folded into the insert_slice operation. E.g.:
1617 ///
1618 /// ```mlir
1619 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1620 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1621 /// ```
1622 ///
1623 /// folds into:
1624 ///
1625 /// ```mlir
1626 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1627 /// ```
1628 ///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back to the original result type so that
/// the type of the result does not change.
1632 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1633   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1634 
1635   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1636                                 PatternRewriter &rewriter) const override {
1637     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1638           return matchPattern(operand, matchConstantIndex());
1639         }))
1640       return failure();
1641 
1642     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1643       auto castOp = v.getDefiningOp<tensor::CastOp>();
1644       if (!castOp || !canFoldIntoConsumerOp(castOp))
1645         return llvm::None;
1646       return castOp.source();
1647     };
1648     Optional<Value> sourceCastSource =
1649         getSourceOfCastOp(insertSliceOp.source());
1650     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
1651     if (!sourceCastSource && !destCastSource)
1652       return failure();
1653 
1654     auto src = (sourceCastSource ? *sourceCastSource : insertSliceOp.source());
1655     auto dst = (destCastSource ? *destCastSource : insertSliceOp.dest());
1656 
1657     auto srcType = src.getType().cast<ShapedType>();
1658     auto dstType = dst.getType().cast<ShapedType>();
1659     if (verifyInsertSliceOp(srcType, dstType, insertSliceOp.static_offsets(),
1660                             insertSliceOp.static_sizes(),
1661                             insertSliceOp.static_strides()) !=
1662         SliceVerificationResult::Success)
1663       return failure();
1664 
1665     Value replacement = rewriter.create<InsertSliceOp>(
1666         insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
1667         insertSliceOp.getMixedSizes(), insertSliceOp.getMixedStrides());
1668 
1669     if (replacement.getType() != insertSliceOp.getType()) {
1670       replacement = rewriter.create<tensor::CastOp>(
1671           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1672     }
1673     rewriter.replaceOp(insertSliceOp, replacement);
1674     return success();
1675   }
1676 };
1677 
/// If additional static type information can be deduced from an insert_slice's
1679 /// size operands, insert an explicit cast of the op's source operand. This
1680 /// enables other canonicalization patterns that are matching for tensor_cast
1681 /// ops such as `ForOpTensorCastFolder` in SCF.
1682 ///
1683 /// Example:
1684 ///
1685 /// ```mlir
1686 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1687 ///       : tensor<?x?xf32> into ...
1688 /// ```
1689 ///
1690 /// folds into:
1691 ///
1692 /// ```mlir
1693 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1694 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1695 ///       : tensor<64x64xf32> into ...
1696 /// ```
1697 struct InsertSliceOpSourceCastInserter final
1698     : public OpRewritePattern<InsertSliceOp> {
1699   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1700 
1701   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1702                                 PatternRewriter &rewriter) const override {
1703     RankedTensorType srcType = insertSliceOp.getSourceType();
1704     if (srcType.getRank() != insertSliceOp.getType().getRank())
1705       return failure();
1706     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1707                                      srcType.getShape().end());
1708     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1709       if (Optional<int64_t> constInt =
1710               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1711         newSrcShape[i] = *constInt;
1712     }
1713 
1714     RankedTensorType newSrcType =
1715         RankedTensorType::get(newSrcShape, srcType.getElementType());
1716     if (srcType == newSrcType ||
1717         !preservesStaticInformation(srcType, newSrcType) ||
1718         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1719       return failure();
1720 
1721     // newSrcType is:
1722     //   1) Different from srcType.
1723     //   2) "More static" than srcType.
1724     //   3) Cast-compatible with srcType.
1725     // Insert the cast.
1726     Value cast = rewriter.create<tensor::CastOp>(
1727         insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
1728     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1729         insertSliceOp, cast, insertSliceOp.dest(),
1730         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1731         insertSliceOp.getMixedStrides());
1732     return success();
1733   }
1734 };
1735 } // namespace
1736 
1737 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1738                                                 MLIRContext *context) {
1739   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1740               InsertSliceOpSourceCastInserter>(context);
1741 }
1742 
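/// Create an insert_slice that writes `tensor` into the full extent of `dest`
/// (zero offsets, unit strides, sizes equal to the destination dimensions);
/// the source may have a lower, rank-reduced rank than the destination.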
1743 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1744                                                              Location loc,
1745                                                              Value tensor,
1746                                                              Value dest) {
1747   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1748   unsigned rank = rankedTensorType.getRank();
1749   auto shape = rankedTensorType.getShape();
1750   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1751   SmallVector<OpFoldResult> sizes;
1752   for (unsigned i = 0, e = rank; i < e; ++i) {
1753     OpFoldResult dim;
1754     if (rankedTensorType.isDynamicDim(i))
1755       dim = b.createOrFold<tensor::DimOp>(
1756           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1757     else
1758       dim = b.getIndexAttr(shape[i]);
1759     sizes.push_back(dim);
1760   }
1761   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1762   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1763                                                sizes, strides);
1764 }
1765 
1766 //===----------------------------------------------------------------------===//
1767 // PadOp
1768 //===----------------------------------------------------------------------===//
1769 
1770 // TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
1771 // supports optional types.
1772 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
1773                     Type typeToInfer, Type typeToInferFrom) {}
1774 
1775 ParseResult parseInferType(OpAsmParser &parser,
1776                            Optional<OpAsmParser::UnresolvedOperand> optOperand,
1777                            Type &typeToInfer, Type typeToInferFrom) {
1778   if (optOperand)
1779     typeToInfer = typeToInferFrom;
1780   return success();
1781 }
1782 
1783 LogicalResult PadOp::verify() {
1784   auto sourceType = source().getType().cast<RankedTensorType>();
1785   auto resultType = result().getType().cast<RankedTensorType>();
1786   auto expectedType =
1787       PadOp::inferResultType(sourceType, extractFromI64ArrayAttr(static_low()),
1788                              extractFromI64ArrayAttr(static_high()));
1789   for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
1790     if (resultType.getDimSize(i) == expectedType.getDimSize(i))
1791       continue;
1792     if (expectedType.isDynamicDim(i))
1793       continue;
1794     return emitError("specified type ")
1795            << resultType << " does not match the inferred type "
1796            << expectedType;
1797   }
1798 
1799   return success();
1800 }
1801 
1802 LogicalResult PadOp::verifyRegions() {
1803   auto &region = getRegion();
1804   unsigned rank = result().getType().cast<RankedTensorType>().getRank();
1805   Block &block = region.front();
1806   if (block.getNumArguments() != rank)
1807     return emitError("expected the block to have ") << rank << " arguments";
1808 
1809   // Note: the number and type of yield values are checked in the YieldOp.
1810   for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
1811     if (!en.value().isIndex())
1812       return emitOpError("expected block argument ")
1813              << (en.index() + 1) << " to be an index";
1814   }
1815 
1816   // Ensure that the region yields an element of the right type.
1817   auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
1818   if (yieldOp.value().getType() !=
1819       getType().cast<ShapedType>().getElementType())
1820     return emitOpError("expected yield type to match shape element type");
1821 
1822   return success();
1823 }
1824 
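/// Infer the result type of a PadOp from `sourceType` and the static low/high
/// padding amounts. A dimension is dynamic in the result if it is dynamic in
/// the source or has a dynamic padding amount; otherwise it is the source
/// size plus the low and high padding. If `resultShape` is provided, it
/// supplies the sizes used for the dynamic dimensions.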
1825 RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
1826                                         ArrayRef<int64_t> staticLow,
1827                                         ArrayRef<int64_t> staticHigh,
1828                                         ArrayRef<int64_t> resultShape) {
1829   unsigned rank = sourceType.getRank();
1830   assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
1831   assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
1832   assert((resultShape.empty() || resultShape.size() == rank) &&
1833          "unexpected resultShape size mismatch");
1834 
1835   SmallVector<int64_t, 4> inferredShape;
1836   for (auto i : llvm::seq<unsigned>(0, rank)) {
1837     if (sourceType.isDynamicDim(i) ||
1838         staticLow[i] == ShapedType::kDynamicSize ||
1839         staticHigh[i] == ShapedType::kDynamicSize) {
1840       inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
1841                                                   : resultShape[i]);
1842     } else {
1843       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
1844       assert((resultShape.empty() || size == resultShape[i] ||
1845               resultShape[i] == ShapedType::kDynamicSize) &&
1846              "mismatch between inferred shape and result shape");
1847       inferredShape.push_back(size);
1848     }
1849   }
1850 
1851   return RankedTensorType::get(inferredShape, sourceType.getElementType());
1852 }
1853 
1854 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1855                   ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
1856                   ValueRange low, ValueRange high, bool nofold,
1857                   ArrayRef<NamedAttribute> attrs) {
1858   auto sourceType = source.getType().cast<RankedTensorType>();
1859   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
1860   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
1861         b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
1862   result.addAttributes(attrs);
1863 }
1864 
1865 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1866                   ValueRange low, ValueRange high, bool nofold,
1867                   ArrayRef<NamedAttribute> attrs) {
1868   auto sourceType = source.getType().cast<RankedTensorType>();
1869   unsigned rank = sourceType.getRank();
1870   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
1871   build(b, result, source, staticVector, staticVector, low, high, nofold,
1872         attrs);
1873 }
1874 
1875 void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
1876                   Value source, ArrayRef<OpFoldResult> low,
1877                   ArrayRef<OpFoldResult> high, bool nofold,
1878                   ArrayRef<NamedAttribute> attrs) {
1879   assert(resultType.isa<RankedTensorType>());
1880   auto sourceType = source.getType().cast<RankedTensorType>();
1881   SmallVector<Value, 4> dynamicLow, dynamicHigh;
1882   SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh carry the full padding configuration. Each entry
  // in `low`/`high` appends one value to staticLow/staticHigh; if the entry
  // is dynamic (i.e. not a constant), one value is appended to
  // dynamicLow/dynamicHigh as well.
1887   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
1888                              ShapedType::kDynamicSize);
1889   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
1890                              ShapedType::kDynamicSize);
1891   if (!resultType) {
1892     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
1893   }
1894   build(b, result, resultType, source, dynamicLow, dynamicHigh,
1895         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
1896         nofold ? b.getUnitAttr() : UnitAttr());
1897   result.addAttributes(attrs);
1898 }
1899 
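/// Return a bit vector with one bit per source dimension; a bit is set if the
/// corresponding dimension has a non-zero (or non-constant) low or high
/// padding amount.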
1900 llvm::SmallBitVector PadOp::getPaddedDims() {
1901   llvm::SmallBitVector paddedDims(getSourceType().getRank());
1902   auto extractPaddedDims = [&](ArrayRef<OpFoldResult> paddingWidths) {
1903     for (const auto &en : enumerate(paddingWidths))
1904       if (getConstantIntValue(en.value()) != static_cast<int64_t>(0))
1905         paddedDims.set(en.index());
1906   };
1907   extractPaddedDims(getMixedLowPad());
1908   extractPaddedDims(getMixedHighPad());
1909   return paddedDims;
1910 }
1911 
1912 namespace {
// Folds tensor.pad when all padding amounts are statically zero, unless the
// `nofold` attribute requests otherwise.
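//
// For illustration (SSA names are hypothetical):
//
//   %0 = tensor.pad %src low[0, 0] high[0, 0] { ... }
//       : tensor<?x4xf32> to tensor<4x4xf32>
//
// is replaced by `tensor.cast %src : tensor<?x4xf32> to tensor<4x4xf32>`,
// unless the `nofold` attribute is set.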
1915 struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
1916   using OpRewritePattern<PadOp>::OpRewritePattern;
1917 
1918   LogicalResult matchAndRewrite(PadOp padTensorOp,
1919                                 PatternRewriter &rewriter) const override {
1920     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
1921       return failure();
1922     if (padTensorOp.nofold())
1923       return failure();
1924     rewriter.replaceOpWithNewOp<tensor::CastOp>(
1925         padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
1926     return success();
1927   }
1928 };
1929 
// Fold a source tensor.cast into its consuming PadOp when doing so adds
// static information.
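//
// For illustration (SSA names are hypothetical):
//
//   %0 = tensor.cast %src : tensor<8x16xf32> to tensor<?x?xf32>
//   %1 = tensor.pad %0 low[0, 0] high[2, 2] { ... }
//       : tensor<?x?xf32> to tensor<?x?xf32>
//
// becomes a pad of %src with the inferred type tensor<10x18xf32>, followed
// by a tensor.cast back to the original tensor<?x?xf32> result type.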
1931 struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
1932   using OpRewritePattern<PadOp>::OpRewritePattern;
1933 
1934   LogicalResult matchAndRewrite(PadOp padTensorOp,
1935                                 PatternRewriter &rewriter) const override {
1936     auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
1937     if (!tensor::canFoldIntoConsumerOp(castOp))
1938       return failure();
1939 
1940     auto newResultType = PadOp::inferResultType(
1941         castOp.source().getType().cast<RankedTensorType>(),
1942         extractFromI64ArrayAttr(padTensorOp.static_low()),
1943         extractFromI64ArrayAttr(padTensorOp.static_high()),
1944         padTensorOp.getResultType().getShape());
1945 
1946     if (newResultType == padTensorOp.getResultType()) {
1947       rewriter.updateRootInPlace(padTensorOp, [&]() {
1948         padTensorOp.sourceMutable().assign(castOp.source());
1949       });
1950     } else {
1951       auto newOp = rewriter.create<PadOp>(
1952           padTensorOp->getLoc(), newResultType, padTensorOp.source(),
1953           padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
1954           padTensorOp.static_high(), padTensorOp.nofold());
1955       BlockAndValueMapping mapper;
1956       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
1957 
1958       rewriter.replaceOpWithNewOp<tensor::CastOp>(
1959           padTensorOp, padTensorOp.getResultType(), newOp);
1960     }
1961     return success();
1962   }
1963 };
1964 
// Fold a tensor.cast of a PadOp's result back into the PadOp if the cast
// adds static information.
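//
// For illustration (SSA names and the dynamic pad amounts %h0/%h1 are
// hypothetical):
//
//   %0 = tensor.pad %src low[0, 0] high[%h0, %h1] { ... }
//       : tensor<8x8xf32> to tensor<?x?xf32>
//   %1 = tensor.cast %0 : tensor<?x?xf32> to tensor<10x10xf32>
//
// becomes a single tensor.pad producing tensor<10x10xf32> directly.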
1967 struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
1968   using OpRewritePattern<PadOp>::OpRewritePattern;
1969 
1970   LogicalResult matchAndRewrite(PadOp padTensorOp,
1971                                 PatternRewriter &rewriter) const override {
1972     if (!padTensorOp.result().hasOneUse())
1973       return failure();
1974     auto tensorCastOp =
1975         dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
1976     if (!tensorCastOp)
1977       return failure();
1978     if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
1979                                             tensorCastOp.dest().getType()))
1980       return failure();
1981 
1982     auto replacementOp = rewriter.create<PadOp>(
1983         padTensorOp.getLoc(), tensorCastOp.dest().getType(),
1984         padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
1985         padTensorOp.static_low(), padTensorOp.static_high(),
1986         padTensorOp.nofold());
1987     replacementOp.region().takeBody(padTensorOp.region());
1988 
1989     rewriter.replaceOp(padTensorOp, replacementOp.result());
1990     rewriter.replaceOp(tensorCastOp, replacementOp.result());
1991     return success();
1992   }
1993 };
1994 
1995 /// Fold chains of tensor::ExtractSliceOp, tensor::PadOp pairs that pad
1996 /// different dimensions. The pattern applies if the following preconditions
1997 /// hold:
1998 ///   1) the tensor::ExtractSliceOps are not rank-reducing,
1999 ///   2) the tensor::ExtractSliceOps have only unit-strides,
2000 ///   3) the tensor::PadOps perform only high-padding,
2001 ///   4) the tensor::PadOps have the same constant padding value,
2002 ///   5) the tensor::PadOps do not have common padding dimensions,
2003 ///   6) one tensor::ExtractSliceOp, tensor::PadOp pair has zero-padding and
2004 ///      zero-offset for every dimension.
2005 ///   7) the tensor::ExtractSliceOp sizes match the source tensor sizes for the
2006 ///      padded source dimensions.
2007 ///
2008 /// Example:
2009 ///
2010 /// ```mlir
2011 ///   %0 = tensor.extract_slice %input[16, 0] [%sz0, 64] [1, 1]
2012 ///       : tensor<64x64xf32> to tensor<?x64xf32>
2013 ///   %1 = tensor.pad %0 low[0, 0] high[%pw0, 0] { ...
2014 ///     } : tensor<?x64xf32> to tensor<8x64xf32>
2015 ///   %2 = tensor.extract_slice %1[0, 4] [8, %sz1] [1, 1]
2016 ///        : tensor<8x64xf32> to tensor<8x?xf32>
2017 ///   %res = tensor.pad %2 nofold low[0, 0] high[0, %pw1] { ...
2018 ///     } : tensor<8x?xf32> to tensor<8x4xf32>
2019 /// ```
2020 ///
2021 /// folds into:
2022 ///
2023 /// ```mlir
2024 ///   %0 = tensor.extract_slice %input[16, 4] [%sz0, %sz1] [1, 1]
2025 ///        : tensor<64x64xf32> to tensor<?x?xf32>
2026 ///   %res = tensor.pad %0 nofold low[0, 0] high[%pw0, %pw1] { ...
2027 ///     } : tensor<?x?xf32> to tensor<8x4xf32>
2028 /// ```
2029 struct FoldOrthogonalPaddings : public OpRewritePattern<PadOp> {
2030   using OpRewritePattern<PadOp>::OpRewritePattern;
2031 
2032   LogicalResult matchAndRewrite(PadOp padOp,
2033                                 PatternRewriter &rewriter) const override {
2034     auto innerSliceOp = padOp.source().getDefiningOp<ExtractSliceOp>();
2035     if (!innerSliceOp)
2036       return failure();
2037     auto outerPadOp = innerSliceOp.source().getDefiningOp<PadOp>();
2038     if (!outerPadOp || outerPadOp.nofold())
2039       return failure();
2040     auto outerSliceOp = outerPadOp.source().getDefiningOp<ExtractSliceOp>();
2041     if (!outerSliceOp)
2042       return failure();
2043 
2044     // 1) Fail if the chain is rank-reducing.
2045     int64_t rank = padOp.getSourceType().getRank();
2046     if (outerSliceOp.getSourceType().getRank() != rank) {
2047       return rewriter.notifyMatchFailure(padOp,
2048                                          "cannot fold rank-reducing chain");
2049     }
2050 
2051     // 2) Fail if the tensor::ExtractSliceOps have non-unit strides.
2052     if (!innerSliceOp.hasUnitStride() || !outerSliceOp.hasUnitStride()) {
2053       return rewriter.notifyMatchFailure(
2054           padOp, "cannot fold non-unit stride ExtractSliceOps");
2055     }
2056 
2057     // 3) Fail if the tensor::PadOps have non-zero low padding.
2058     if (!padOp.hasZeroLowPad() || !outerPadOp.hasZeroLowPad()) {
2059       return rewriter.notifyMatchFailure(padOp,
2060                                          "cannot fold PadOps with low padding");
2061     }
2062 
2063     // 4) Fail if the tensor::PadOps padding values do not match.
2064     Attribute innerAttr, outerAttr;
2065     Value innerValue = padOp.getConstantPaddingValue();
2066     Value outerValue = outerPadOp.getConstantPaddingValue();
2067     if (!innerValue || !outerValue ||
2068         !matchPattern(innerValue, m_Constant(&innerAttr)) ||
2069         !matchPattern(outerValue, m_Constant(&outerAttr)) ||
2070         innerAttr != outerAttr) {
2071       return rewriter.notifyMatchFailure(
2072           padOp, "cannot fold PadOps with different padding values");
2073     }
2074 
2075     // 5) Fail if a dimension is padded by both tensor::PadOps.
2076     llvm::SmallBitVector innerDims = padOp.getPaddedDims();
2077     llvm::SmallBitVector outerDims = outerPadOp.getPaddedDims();
2078     if (innerDims.anyCommon(outerDims)) {
2079       return rewriter.notifyMatchFailure(
2080           padOp, "cannot fold PadOps with common padding dimensions");
2081     }
2082 
2083     // 6) Combine the offsets of the two tensor::ExtractSliceOps. Find the
2084     // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
    // for every dimension, and use the offset of the other pair. Fail if no
2086     // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
2087     // exists.
2088     SmallVector<OpFoldResult> newOffsets(rank, rewriter.getIndexAttr(0));
2089     for (auto &en : enumerate(newOffsets)) {
2090       OpFoldResult innerOffset = innerSliceOp.getMixedOffsets()[en.index()];
2091       OpFoldResult outerOffset = outerSliceOp.getMixedOffsets()[en.index()];
2092       if (!innerDims.test(en.index()) &&
2093           (getConstantIntValue(innerOffset) == static_cast<int64_t>(0))) {
2094         en.value() = outerOffset;
2095         continue;
2096       }
2097       if (!outerDims.test(en.index()) &&
2098           (getConstantIntValue(outerOffset) == static_cast<int64_t>(0))) {
2099         en.value() = innerOffset;
2100         continue;
2101       }
2102       return rewriter.notifyMatchFailure(
2103           padOp, "cannot find zero-offset and zero-padding pair");
2104     }
2105 
2106     // 7) Combine the sizes of the two tensor::ExtractSliceOps. Take the size of
2107     // the outer tensor::ExtractSliceOp for the dimensions padded by the outer
2108     // tensor::PadOp and fail if the size of the inner tensor::ExtractSliceOp
2109     // does not match the size of the padded dimension. Otherwise, take the size
2110     // of the inner tensor::ExtractSliceOp.
2111     SmallVector<OpFoldResult> newSizes = innerSliceOp.getMixedSizes();
2112     for (auto &en : enumerate(newSizes)) {
2113       if (!outerDims.test(en.index()))
2114         continue;
2115       OpFoldResult sliceSize = innerSliceOp.getMixedSizes()[en.index()];
2116       int64_t sourceSize = innerSliceOp.getSourceType().getShape()[en.index()];
2117       assert(!ShapedType::isDynamic(sourceSize) &&
2118              "expected padded dimension to have a static size");
2119       if (getConstantIntValue(sliceSize) != sourceSize) {
2120         return rewriter.notifyMatchFailure(
2121             padOp, "cannot fold since the inner ExtractSliceOp size does not "
2122                    "match the size of the outer padding");
2123       }
2124       en.value() = outerSliceOp.getMixedSizes()[en.index()];
2125     }
2126 
2127     // Combine the high paddings of the two tensor::PadOps.
2128     SmallVector<OpFoldResult> newHighPad(rank, rewriter.getIndexAttr(0));
2129     for (auto &en : enumerate(newHighPad)) {
2130       if (innerDims.test(en.index()))
2131         newHighPad[en.index()] = padOp.getMixedHighPad()[en.index()];
2132       if (outerDims.test(en.index()))
2133         newHighPad[en.index()] = outerPadOp.getMixedHighPad()[en.index()];
2134     }
2135 
2136     // Create a new tensor::ExtractSliceOp, tensor::PadOp pair that performs the
2137     // two paddings in one step.
2138     auto newSliceOp = rewriter.create<ExtractSliceOp>(
2139         padOp.getLoc(), outerSliceOp.source(), newOffsets, newSizes,
2140         innerSliceOp.getMixedStrides());
2141     auto newPadOp = rewriter.create<PadOp>(
2142         padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
2143         padOp.getMixedLowPad(), newHighPad, padOp.nofold());
2144     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
2145                                 newPadOp.getRegion().begin());
2146     rewriter.replaceOp(padOp, newPadOp.getResult());
2147     return success();
2148   }
2149 };
2150 
2151 } // namespace
2152 
2153 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
2154                                         MLIRContext *context) {
2155   results.add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast,
2156               FoldOrthogonalPaddings>(context);
2157 }
2158 
/// Return the padding value of the PadOp if it is constant. In this context,
2160 /// "constant" means an actual constant or "defined outside of the block".
2161 ///
2162 /// Values are considered constant in three cases:
2163 ///  - A ConstantLike value.
2164 ///  - A basic block argument from a different block.
2165 ///  - A value defined outside of the block.
2166 ///
2167 /// If the padding value is not constant, an empty Value is returned.
2168 Value PadOp::getConstantPaddingValue() {
2169   auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
2170   if (!yieldOp)
2171     return {};
2172   Value padValue = yieldOp.value();
2173   // Check if yield value is a constant.
2174   if (matchPattern(padValue, m_Constant()))
2175     return padValue;
2176   // Check if yield value is defined inside the PadOp block.
2177   if (padValue.getParentBlock() == &getRegion().front())
2178     return {};
2179   // Else: Yield value defined outside of the PadOp block.
2180   return padValue;
2181 }
2182 
2183 OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
2184   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
2185       !nofold())
2186     return source();
2187   return {};
2188 }
2189 
2190 //===----------------------------------------------------------------------===//
2191 // SplatOp
2192 //===----------------------------------------------------------------------===//
2193 
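// For illustration (SSA names are hypothetical): if %cst is
// `arith.constant 1.0 : f32`, then `tensor.splat %cst : tensor<4xf32>` folds
// to the splat constant `dense<1.0> : tensor<4xf32>`.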
2194 OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
2195   auto constOperand = operands.front();
2196   if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
2197     return {};
2198 
  // SplatElementsAttr::get treats a single value for the second arg as a splat.
2200   return SplatElementsAttr::get(getType(), {constOperand});
2201 }
2202 
2203 //===----------------------------------------------------------------------===//
2204 // TableGen'd op method definitions
2205 //===----------------------------------------------------------------------===//
2206 
2207 #define GET_OP_CLASSES
2208 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
2209