1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/Arithmetic/Utils/Utils.h"
11 #include "mlir/Dialect/Complex/IR/Complex.h"
12 #include "mlir/Dialect/Tensor/IR/Tensor.h"
13 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
14 #include "mlir/Dialect/Utils/StaticValueUtils.h"
15 #include "mlir/IR/BlockAndValueMapping.h"
16 #include "mlir/IR/Builders.h"
17 #include "mlir/IR/BuiltinAttributeInterfaces.h"
18 #include "mlir/IR/Matchers.h"
19 #include "mlir/IR/TypeUtilities.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 
23 using namespace mlir;
24 using namespace mlir::tensor;
25 
26 /// Materialize a single constant operation from a given attribute value with
27 /// the desired resultant type.
28 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
29                                               Attribute value, Type type,
30                                               Location loc) {
31   if (arith::ConstantOp::isBuildableWith(value, type))
32     return builder.create<arith::ConstantOp>(loc, value, type);
33   if (complex::ConstantOp::isBuildableWith(value, type))
34     return builder.create<complex::ConstantOp>(loc, type,
35                                                value.cast<ArrayAttr>());
36   return nullptr;
37 }
38 
39 //===----------------------------------------------------------------------===//
40 // CastOp
41 //===----------------------------------------------------------------------===//
42 
43 /// Returns true if `target` is a ranked tensor type that preserves static
44 /// information available in the `source` ranked tensor type.
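/// For example (illustrative), this holds for a source of tensor<?x8xf32> and
/// a target of tensor<4x8xf32>, but not with the two types swapped, since the
/// static size 4 would then be dropped.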
45 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
46   auto sourceType = source.dyn_cast<RankedTensorType>();
47   auto targetType = target.dyn_cast<RankedTensorType>();
48 
49   // Requires RankedTensorType.
50   if (!sourceType || !targetType)
51     return false;
52 
53   // Requires same elemental type.
54   if (sourceType.getElementType() != targetType.getElementType())
55     return false;
56 
57   // Requires same rank.
58   if (sourceType.getRank() != targetType.getRank())
59     return false;
60 
61   // If cast is towards more static sizes along any dimension, don't fold.
62   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
63     if (!ShapedType::isDynamic(std::get<0>(t)) &&
64         ShapedType::isDynamic(std::get<1>(t)))
65       return false;
66   }
67 
68   return true;
69 }
70 
71 /// Determines whether tensor::CastOp casts to a more dynamic version of the
72 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
73 /// implement canonicalization patterns for ops in different dialects that may
74 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted as `slice` ops and are canonicalized to
/// preserve the type compatibility of their uses.
77 ///
78 /// Returns true when all conditions are met:
79 /// 1. source and result are ranked tensors with same element type and rank.
/// 2. the source type has more static information than the result type.
81 ///
82 /// Example:
83 /// ```mlir
84 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
85 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
86 /// ```
87 ///
88 /// folds into:
89 ///
90 /// ```mlir
91 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
92 /// ```
93 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
94   if (!castOp)
95     return false;
96 
  // Can fold if the source of the cast has at least as much static information
  // as its result.
99   return preservesStaticInformation(castOp.getType(),
100                                     castOp.getSource().getType());
101 }
102 
/// Determines whether the tensor::CastOp casts to a more static version of the
/// source tensor. This is useful to fold into a producing op and implement
/// canonicalization patterns with the `tensor.cast` op as the root, but the
/// producer being from a different dialect. Returns true when all conditions
/// are met:
/// 1. source and result are ranked tensors with same element type and rank.
108 /// 2. the result type has more static information than the source.
109 ///
110 /// Example:
111 /// ```mlir
112 ///   %1 = producer ... : tensor<?x?xf32>
113 ///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<8x16xf32>
114 /// ```
115 ///
/// can be canonicalized to:
117 ///
118 /// ```mlir
119 ///   %2 = producer ... : tensor<8x16xf32>
120 /// ```
121 /// Not all ops might be canonicalizable this way, but for those that can be,
122 /// this method provides a check that it is worth doing the canonicalization.
123 bool mlir::tensor::canFoldIntoProducerOp(CastOp castOp) {
124   if (!castOp)
125     return false;
126   return preservesStaticInformation(castOp.getSource().getType(),
127                                     castOp.getType());
128 }
129 
130 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
131 /// that can be folded.
132 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
133   bool folded = false;
134   for (OpOperand &operand : op->getOpOperands()) {
135     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
136     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
137       operand.set(castOp.getOperand());
138       folded = true;
139     }
140   }
141   return success(folded);
142 }
143 
144 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
145   if (inputs.size() != 1 || outputs.size() != 1)
146     return false;
147   Type a = inputs.front(), b = outputs.front();
148   auto aT = a.dyn_cast<TensorType>();
149   auto bT = b.dyn_cast<TensorType>();
150   if (!aT || !bT)
151     return false;
152 
153   if (aT.getElementType() != bT.getElementType())
154     return false;
155 
156   return succeeded(verifyCompatibleShape(aT, bT));
157 }
158 
159 /// Compute a TensorType that has the joined shape knowledge of the two
160 /// given TensorTypes. The element types need to match.
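/// For example, the join of tensor<?x8xf32> and tensor<4x?xf32> is
/// tensor<4x8xf32>; joining tensor<4x8xf32> with tensor<4x16xf32> fails and
/// returns a null type.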
161 static TensorType joinShapes(TensorType one, TensorType two) {
162   assert(one.getElementType() == two.getElementType());
163 
164   if (!one.hasRank())
165     return two;
166   if (!two.hasRank())
167     return one;
168 
169   int64_t rank = one.getRank();
170   if (rank != two.getRank())
171     return {};
172 
173   SmallVector<int64_t, 4> join;
174   join.reserve(rank);
175   for (int64_t i = 0; i < rank; ++i) {
176     if (one.isDynamicDim(i)) {
177       join.push_back(two.getDimSize(i));
178       continue;
179     }
180     if (two.isDynamicDim(i)) {
181       join.push_back(one.getDimSize(i));
182       continue;
183     }
184     if (one.getDimSize(i) != two.getDimSize(i))
185       return {};
186     join.push_back(one.getDimSize(i));
187   }
188   return RankedTensorType::get(join, one.getElementType());
189 }
190 
191 namespace {
192 
193 /// Replaces chains of two tensor.cast operations by a single tensor.cast
194 /// operation if doing so does not remove runtime constraints.
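///
/// Example (illustrative):
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4x4xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<4x?xf32>
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = tensor.cast %0 : tensor<4x4xf32> to tensor<4x?xf32>
/// ```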
195 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
196   using OpRewritePattern<CastOp>::OpRewritePattern;
197 
198   LogicalResult matchAndRewrite(CastOp tensorCast,
199                                 PatternRewriter &rewriter) const final {
200     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
201 
202     if (!tensorCastOperand)
203       return failure();
204 
205     auto sourceType =
206         tensorCastOperand.getOperand().getType().cast<TensorType>();
207     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
208     auto resultType = tensorCast.getType().cast<TensorType>();
209 
210     // We can remove the intermediate cast if joining all three produces the
211     // same result as just joining the source and result shapes.
212     auto firstJoin =
213         joinShapes(joinShapes(sourceType, intermediateType), resultType);
214 
215     // The join might not exist if the cast sequence would fail at runtime.
216     if (!firstJoin)
217       return failure();
218 
    // The newJoin always exists if the above join exists, but it might contain
    // less information. If so, we cannot drop the intermediate cast, as doing
    // so would remove runtime checks.
222     auto newJoin = joinShapes(sourceType, resultType);
223     if (firstJoin != newJoin)
224       return failure();
225 
226     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
227                                         tensorCastOperand.getOperand());
228     return success();
229   }
230 };
231 
/// Fold tensor.cast into its tensor.extract_slice producer.
233 /// Example:
234 /// ```
235 ///  %0 = tensor.extract_slice %arg0[%o, 0] [%s, 512] [1, 1] :
236 ///    tensor<128x512xf32> to tensor<?x512xf32>
237 ///  %1 = tensor.cast %0 : tensor<?x512xf32> to tensor<16x512xf32>
238 /// ```
239 /// ->
240 /// ```
241 /// %1 = tensor.extract_slice %arg0[%o, 0] [16, 512] [1, 1] :
242 ///   tensor<128x512xf32> to tensor<16x512xf32>
243 /// ```
244 struct TensorCastExtractSlice : public OpRewritePattern<CastOp> {
245   using OpRewritePattern<CastOp>::OpRewritePattern;
246 
247   LogicalResult matchAndRewrite(CastOp tensorCast,
248                                 PatternRewriter &rewriter) const final {
249     auto extractOperand =
250         tensorCast.getOperand().getDefiningOp<ExtractSliceOp>();
251 
252     if (!extractOperand || !canFoldIntoProducerOp(tensorCast) ||
253         tensorCast.getType().getShape() == tensorCast.getSource()
254                                                .getType()
255                                                .cast<RankedTensorType>()
256                                                .getShape())
257       return failure();
258 
259     SmallVector<OpFoldResult, 4> sizes = extractOperand.getMixedSizes();
260     auto dimMask = computeRankReductionMask(
261         extractFromI64ArrayAttr(extractOperand.getStaticSizes()),
262         extractOperand.getType().getShape());
263     size_t dimIndex = 0;
264     for (size_t i = 0, e = sizes.size(); i < e; i++) {
265       if (dimMask && dimMask->count(i))
266         continue;
267       int64_t dim = tensorCast.getType().getShape()[dimIndex++];
268       if (ShapedType::isDynamic(dim))
269         continue;
270       sizes[i] = rewriter.getIndexAttr(dim);
271     }
272 
273     rewriter.replaceOpWithNewOp<ExtractSliceOp>(
274         tensorCast, tensorCast.getType().cast<RankedTensorType>(),
275         extractOperand.getSource(), extractOperand.getMixedOffsets(), sizes,
276         extractOperand.getMixedStrides());
277     return success();
278   }
279 };
280 
281 } // namespace
282 
283 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
284                                          MLIRContext *context) {
285   results.add<ChainedTensorCast, TensorCastExtractSlice>(context);
286 }
287 
288 //===----------------------------------------------------------------------===//
289 // DimOp
290 //===----------------------------------------------------------------------===//
291 
292 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
293                   int64_t index) {
294   auto loc = result.location;
295   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
296   build(builder, result, source, indexValue);
297 }
298 
299 Optional<int64_t> DimOp::getConstantIndex() {
300   if (auto constantOp = getIndex().getDefiningOp<arith::ConstantOp>())
301     return constantOp.getValue().cast<IntegerAttr>().getInt();
302   return {};
303 }
304 
305 LogicalResult DimOp::verify() {
306   // Assume unknown index to be in range.
307   Optional<int64_t> index = getConstantIndex();
308   if (!index)
309     return success();
310 
311   // Check that constant index is not knowingly out of range.
312   auto type = getSource().getType();
313   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
314     if (*index >= tensorType.getRank())
315       return emitOpError("index is out of range");
316   } else if (type.isa<UnrankedTensorType>()) {
317     // Assume index to be in range.
318   } else {
319     llvm_unreachable("expected operand with tensor type");
320   }
321   return success();
322 }
323 
324 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
325   // All forms of folding require a known index.
326   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
327   if (!index)
328     return {};
329 
330   // Folding for unranked types (UnrankedTensorType) is not supported.
331   auto tensorType = getSource().getType().dyn_cast<RankedTensorType>();
332   if (!tensorType)
333     return {};
334 
335   // Fold if the shape extent along the given index is known.
336   if (!tensorType.isDynamicDim(index.getInt())) {
337     Builder builder(getContext());
338     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
339   }
340 
341   Operation *definingOp = getSource().getDefiningOp();
342 
343   // Fold dim to the operand of tensor.generate.
344   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
345     auto resultType =
346         fromElements.getResult().getType().cast<RankedTensorType>();
347     // The case where the type encodes the size of the dimension is handled
348     // above.
349     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
350 
351     // Find the operand of the fromElements that corresponds to this index.
352     auto dynExtents = fromElements.getDynamicExtents().begin();
353     for (auto dim : resultType.getShape().take_front(index.getInt()))
354       if (ShapedType::isDynamic(dim))
355         dynExtents++;
356 
357     return Value{*dynExtents};
358   }
359 
360   // The size at the given index is now known to be a dynamic size.
361   unsigned unsignedIndex = index.getValue().getZExtValue();
362 
363   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
    // Fold only for non-rank-reduced ops. For the rank-reduced version, rely
    // on the `resolve-shaped-type-result-dims` pass.
366     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
367         sliceOp.isDynamicSize(unsignedIndex)) {
368       return {sliceOp.getDynamicSize(unsignedIndex)};
369     }
370   }
371 
372   // dim(cast) -> dim
373   if (succeeded(foldTensorCast(*this)))
374     return getResult();
375 
376   return {};
377 }
378 
379 namespace {
380 /// Fold dim of a cast into the dim of the source of the tensor cast.
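///
/// For example (illustrative):
///
/// ```mlir
///   %0 = tensor.cast %arg0 : tensor<4x?xf32> to tensor<?x?xf32>
///   %1 = tensor.dim %0, %c0 : tensor<?x?xf32>
/// ```
///
/// is rewritten into:
///
/// ```mlir
///   %1 = tensor.dim %arg0, %c0 : tensor<4x?xf32>
/// ```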
381 struct DimOfCastOp : public OpRewritePattern<DimOp> {
382   using OpRewritePattern<DimOp>::OpRewritePattern;
383 
384   LogicalResult matchAndRewrite(DimOp dimOp,
385                                 PatternRewriter &rewriter) const override {
386     auto castOp = dimOp.getSource().getDefiningOp<CastOp>();
387     if (!castOp)
388       return failure();
389     Value newSource = castOp.getOperand();
390     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.getIndex());
391     return success();
392   }
393 };
394 } // namespace
395 
396 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
397                                         MLIRContext *context) {
398   results.add<DimOfCastOp>(context);
399 }
400 
401 //===----------------------------------------------------------------------===//
402 // ExtractOp
403 //===----------------------------------------------------------------------===//
404 
405 LogicalResult ExtractOp::verify() {
406   // Verify the # indices match if we have a ranked type.
407   if (auto tensorType = getTensor().getType().dyn_cast<RankedTensorType>())
408     if (tensorType.getRank() != static_cast<int64_t>(getIndices().size()))
409       return emitOpError("incorrect number of indices for extract_element");
410 
411   return success();
412 }
413 
414 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
415   // If this is a splat elements attribute, simply return the value. All of the
416   // elements of a splat attribute are the same.
417   if (Attribute tensor = operands.front())
418     if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
419       return splatTensor.getSplatValue<Attribute>();
420 
421   // Collect the constant indices into the tensor.
422   SmallVector<uint64_t, 8> indices;
423   for (Attribute indice : llvm::drop_begin(operands, 1)) {
424     if (!indice || !indice.isa<IntegerAttr>())
425       return {};
426     indices.push_back(indice.cast<IntegerAttr>().getInt());
427   }
428 
429   // Fold extract(from_elements(...)).
430   if (auto fromElementsOp = getTensor().getDefiningOp<FromElementsOp>()) {
431     auto tensorType = fromElementsOp.getType().cast<RankedTensorType>();
432     auto rank = tensorType.getRank();
433     assert(static_cast<int64_t>(indices.size()) == tensorType.getRank() &&
434            "rank mismatch");
435     int flatIndex = 0;
436     int stride = 1;
    for (int i = rank - 1; i >= 0; --i) {
      // Accumulate the row-major linearized index; the stride of dimension i
      // is the product of the sizes of all trailing dimensions.
      flatIndex += indices[i] * stride;
      stride *= tensorType.getDimSize(i);
    }
442     // Prevent out of bounds accesses. This can happen in invalid code that will
443     // never execute.
444     if (static_cast<int>(fromElementsOp.getElements().size()) <= flatIndex ||
445         flatIndex < 0)
446       return {};
447     return fromElementsOp.getElements()[flatIndex];
448   }
449 
450   // If this is an elements attribute, query the value at the given indices.
451   if (Attribute tensor = operands.front()) {
452     auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
453     if (elementsAttr && elementsAttr.isValidIndex(indices))
454       return elementsAttr.getValues<Attribute>()[indices];
455   }
456 
457   return {};
458 }
459 
460 //===----------------------------------------------------------------------===//
461 // FromElementsOp
462 //===----------------------------------------------------------------------===//
463 
464 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
465                            Type resultType, ValueRange elements) {
466   result.addOperands(elements);
467   result.addTypes(resultType);
468 }
469 
470 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
471                            ValueRange elements) {
472   assert(!elements.empty() && "expected at least one element");
473   Type resultType = RankedTensorType::get(
474       {static_cast<int64_t>(elements.size())}, elements.front().getType());
475   build(builder, result, resultType, elements);
476 }
477 
478 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
479   if (!llvm::is_contained(operands, nullptr))
480     return DenseElementsAttr::get(getType(), operands);
481   return {};
482 }
483 
484 namespace {
485 
// Pushes the index_casts that occur before extractions to after the extract.
// This minimizes type conversion in some cases and enables the extract
// canonicalizer. This changes:
//
// %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
// %extract = tensor.extract %cast[%index] : tensor<1xindex>
//
// to the following:
//
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template to handle all tensor cast
// operations.
501 struct ExtractElementFromIndexCast
502     : public OpRewritePattern<tensor::ExtractOp> {
503   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
504 
505   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
506                                 PatternRewriter &rewriter) const final {
507     Location loc = extract.getLoc();
508     auto indexCast = extract.getTensor().getDefiningOp<arith::IndexCastOp>();
509     if (!indexCast)
510       return failure();
511 
512     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
513 
514     auto newExtract = rewriter.create<tensor::ExtractOp>(
515         loc, elementTy, indexCast.getIn(), extract.getIndices());
516 
517     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
518                                                     newExtract);
519 
520     return success();
521   }
522 };
523 
524 } // namespace
525 
526 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
527                                                  MLIRContext *context) {
528   results.add<ExtractElementFromIndexCast>(context);
529 }
530 
531 //===----------------------------------------------------------------------===//
532 // InsertOp
533 //===----------------------------------------------------------------------===//
534 
535 LogicalResult InsertOp::verify() {
536   // Verify the # indices match if we have a ranked type.
537   if (auto destType = getDest().getType().dyn_cast<RankedTensorType>())
538     if (destType.getRank() != static_cast<int64_t>(getIndices().size()))
539       return emitOpError("incorrect number of indices");
540   return success();
541 }
542 
543 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
544   Attribute scalar = operands[0];
545   Attribute dest = operands[1];
546   if (scalar && dest)
547     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
548       if (scalar == splatDest.getSplatValue<Attribute>())
549         return dest;
550   return {};
551 }
552 
553 //===----------------------------------------------------------------------===//
554 // GenerateOp
555 //===----------------------------------------------------------------------===//
556 
557 LogicalResult GenerateOp::reifyResultShapes(
558     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
559   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
560   int idx = 0;
561   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
562     if (getType().isDynamicDim(dim)) {
563       reifiedReturnShapes[0][dim] = getOperand(idx++);
564     } else {
565       reifiedReturnShapes[0][dim] = builder.create<arith::ConstantIndexOp>(
566           getLoc(), getType().getDimSize(dim));
567     }
568   }
569   return success();
570 }
571 
572 LogicalResult GenerateOp::verify() {
573   // Ensure that the tensor type has as many dynamic dimensions as are specified
574   // by the operands.
575   RankedTensorType resultTy = getType().cast<RankedTensorType>();
576   if (getNumOperands() != resultTy.getNumDynamicDims())
577     return emitError("must have as many index operands as dynamic extents "
578                      "in the result type");
579 
580   return success();
581 }
582 
583 LogicalResult GenerateOp::verifyRegions() {
584   RankedTensorType resultTy = getType().cast<RankedTensorType>();
585   // Ensure that region arguments span the index space.
586   if (!llvm::all_of(getBody().getArgumentTypes(),
587                     [](Type ty) { return ty.isIndex(); }))
588     return emitError("all body arguments must be index");
589   if (getBody().getNumArguments() != resultTy.getRank())
590     return emitError("must have one body argument per input dimension");
591 
592   // Ensure that the region yields an element of the right type.
593   auto yieldOp = cast<YieldOp>(getBody().getBlocks().front().getTerminator());
594 
595   if (yieldOp.getValue().getType() != resultTy.getElementType())
596     return emitOpError(
597         "body must be terminated with a `yield` operation of the tensor "
598         "element type");
599 
600   return success();
601 }
602 
603 void GenerateOp::build(
604     OpBuilder &b, OperationState &result, Type resultTy,
605     ValueRange dynamicExtents,
606     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
607   build(b, result, resultTy, dynamicExtents);
608 
609   // Build and populate body.
610   OpBuilder::InsertionGuard guard(b);
611   Region *bodyRegion = result.regions.front().get();
612   auto rank = resultTy.cast<RankedTensorType>().getRank();
613   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
614   SmallVector<Location, 2> argumentLocs(rank, result.location);
615   Block *bodyBlock =
616       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
617   bodyBuilder(b, result.location, bodyBlock->getArguments());
618 }
619 
620 namespace {
621 
/// Canonicalizes tensor.generate operations with constant dynamic extent
/// operands into the equivalent operation with those extents folded into the
/// result type. We also insert a tensor.cast so that the uses of the result
/// keep seeing the original type.
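///
/// Example (a sketch; the generate body is left unchanged):
///
/// ```mlir
///   %c5 = arith.constant 5 : index
///   %0 = tensor.generate %c5 {
///     ...
///   } : tensor<?xindex>
/// ```
///
/// becomes
///
/// ```mlir
///   %new = tensor.generate {
///     ...
///   } : tensor<5xindex>
///   %0 = tensor.cast %new : tensor<5xindex> to tensor<?xindex>
/// ```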
626 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
627   using OpRewritePattern<GenerateOp>::OpRewritePattern;
628 
629   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
630                                 PatternRewriter &rewriter) const final {
631     auto resultType =
632         tensorFromElements.getResult().getType().cast<RankedTensorType>();
633 
634     if (resultType.hasStaticShape())
635       return failure();
636 
637     SmallVector<Value, 4> newOperands;
638     SmallVector<int64_t, 4> newShape;
639     auto operandsIt = tensorFromElements.getDynamicExtents().begin();
640 
641     for (int64_t dim : resultType.getShape()) {
642       if (!ShapedType::isDynamic(dim)) {
643         newShape.push_back(dim);
644         continue;
645       }
646       APInt index;
647       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
648         newShape.push_back(ShapedType::kDynamicSize);
649         newOperands.push_back(*operandsIt++);
650         continue;
651       }
652       newShape.push_back(index.getSExtValue());
653       operandsIt++;
654     }
655 
656     if (newOperands.size() == tensorFromElements.getDynamicExtents().size())
657       return failure();
658 
659     auto loc = tensorFromElements.getLoc();
660     auto newOp = rewriter.create<GenerateOp>(
661         loc, RankedTensorType::get(newShape, resultType.getElementType()),
662         newOperands);
663     rewriter.inlineRegionBefore(tensorFromElements.getBody(), newOp.getBody(),
664                                 newOp.getBody().begin());
665     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
666                                                 newOp);
667     return success();
668   }
669 };
670 
671 /// Canonicalizes the pattern of the form
672 ///
673 /// %tensor = tensor.generate %x {
674 ///   ^bb0(%arg0: index):
675 ///   <computation>
676 ///   yield %1 : index
677 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
679 ///
680 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
681 /// tensor.generate operation has no side-effects.
682 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
683   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
684 
685   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
686                                 PatternRewriter &rewriter) const final {
687     auto tensorFromElements = extract.getTensor().getDefiningOp<GenerateOp>();
688     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
689       return failure();
690 
691     BlockAndValueMapping mapping;
692     Block *body = &tensorFromElements.getBody().front();
693     mapping.map(body->getArguments(), extract.getIndices());
694     for (auto &op : body->without_terminator())
695       rewriter.clone(op, mapping);
696 
697     auto yield = cast<YieldOp>(body->getTerminator());
698 
699     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.getValue()));
700     return success();
701   }
702 };
703 
704 /// Canonicalizes the pattern of the form
705 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
707 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
708 ///
709 /// to
710 ///
711 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
712 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
713   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
714 
715   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
716                                 PatternRewriter &rewriter) const final {
717     auto tensorCast = extract.getTensor().getDefiningOp<tensor::CastOp>();
718     if (!tensorCast)
719       return failure();
720 
721     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(
722         extract, tensorCast.getSource(), extract.getIndices());
723     return success();
724   }
725 };
726 
727 } // namespace
728 
729 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
730                                              MLIRContext *context) {
731   // TODO: Move extract patterns to tensor::ExtractOp.
732   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
733               StaticTensorGenerate>(context);
734 }
735 
736 //===----------------------------------------------------------------------===//
737 // RankOp
738 //===----------------------------------------------------------------------===//
739 
740 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
741   // Constant fold rank when the rank of the operand is known.
742   auto type = getOperand().getType();
743   auto shapedType = type.dyn_cast<ShapedType>();
744   if (shapedType && shapedType.hasRank())
745     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
746   return IntegerAttr();
747 }
748 
749 //===----------------------------------------------------------------------===//
750 // ReshapeOp
751 //===----------------------------------------------------------------------===//
752 
753 static int64_t getNumElements(ShapedType type) {
754   int64_t numElements = 1;
755   for (auto dim : type.getShape())
756     numElements *= dim;
757   return numElements;
758 }
759 
760 LogicalResult ReshapeOp::verify() {
761   TensorType operandType = getSource().getType().cast<TensorType>();
762   TensorType resultType = getResult().getType().cast<TensorType>();
763 
764   if (operandType.getElementType() != resultType.getElementType())
765     return emitOpError("element types of source and destination tensor "
766                        "types should be the same");
767 
768   int64_t shapeSize =
769       getShape().getType().cast<RankedTensorType>().getDimSize(0);
770   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
771   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
772 
773   if (resultRankedType) {
774     if (operandRankedType && resultRankedType.hasStaticShape() &&
775         operandRankedType.hasStaticShape()) {
776       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
777         return emitOpError("source and destination tensor should have the "
778                            "same number of elements");
779     }
780     if (ShapedType::isDynamic(shapeSize))
781       return emitOpError("cannot use shape operand with dynamic length to "
782                          "reshape to statically-ranked tensor type");
783     if (shapeSize != resultRankedType.getRank())
784       return emitOpError(
785           "length of shape operand differs from the result's tensor rank");
786   }
787   return success();
788 }
789 
790 //===----------------------------------------------------------------------===//
791 // Reassociative reshape ops
792 //===----------------------------------------------------------------------===//
793 
794 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
795   return getSymbolLessAffineMaps(getReassociationExprs());
796 }
797 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
798   return convertReassociationIndicesToExprs(getContext(),
799                                             getReassociationIndices());
800 }
801 
802 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
803   return getSymbolLessAffineMaps(getReassociationExprs());
804 }
805 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
806   return convertReassociationIndicesToExprs(getContext(),
807                                             getReassociationIndices());
808 }
809 
810 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
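/// For example, tensor<?x4x5xf32> with the reassociation [[0, 1], [2]]
/// collapses to tensor<?x5xf32>, since any group that contains a dynamic
/// dimension collapses to a dynamic dimension.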
811 static RankedTensorType
812 computeTensorReshapeCollapsedType(RankedTensorType type,
813                                   ArrayRef<AffineMap> reassociation) {
814   auto shape = type.getShape();
815   SmallVector<int64_t, 4> newShape;
816   newShape.reserve(reassociation.size());
817 
818   // Use the fact that reassociation is valid to simplify the logic: only use
819   // each map's rank.
820   assert(isReassociationValid(reassociation) && "invalid reassociation");
821   unsigned currentDim = 0;
822   for (AffineMap m : reassociation) {
823     unsigned dim = m.getNumResults();
824     auto band = shape.slice(currentDim, dim);
825     int64_t size = 1;
826     if (llvm::is_contained(band, ShapedType::kDynamicSize))
827       size = ShapedType::kDynamicSize;
828     else
829       for (unsigned d = 0; d < dim; ++d)
830         size *= shape[currentDim + d];
831     newShape.push_back(size);
832     currentDim += dim;
833   }
834 
835   return RankedTensorType::get(newShape, type.getElementType());
836 }
837 
838 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
839                             ArrayRef<ReassociationIndices> reassociation,
840                             ArrayRef<NamedAttribute> attrs) {
841   auto resultType = computeTensorReshapeCollapsedType(
842       src.getType().cast<RankedTensorType>(),
843       getSymbolLessAffineMaps(
844           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
845   build(b, result, resultType, src, attrs);
846   result.addAttribute(getReassociationAttrStrName(),
847                       getReassociationIndicesAttribute(b, reassociation));
848 }
849 
// Checks if types are the same, ignoring the encoding on ranked tensors.
851 static bool isSameTypesWithoutEncoding(Type tp1, Type tp2) {
852   if (auto rtp1 = tp1.dyn_cast<RankedTensorType>()) {
853     if (auto rtp2 = tp2.dyn_cast<RankedTensorType>())
854       return rtp1.getShape() == rtp2.getShape() &&
855              rtp1.getElementType() == rtp2.getElementType();
856     return false;
857   }
858   // Default implementation.
859   return tp1 == tp2;
860 }
861 
862 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
863                                         TensorReshapeOp, ExpandShapeOp>::value>
864 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
865                                            RankedTensorType expandedType,
866                                            RankedTensorType collapsedType) {
867   if (failed(
868           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
869     return failure();
870 
871   auto maps = op.getReassociationMaps();
872   RankedTensorType expectedType =
873       computeTensorReshapeCollapsedType(expandedType, maps);
874   if (!isSameTypesWithoutEncoding(collapsedType, expectedType))
875     return op.emitOpError("expected collapsed type to be ")
876            << expectedType << ", but got " << collapsedType;
877   return success();
878 }
879 
880 LogicalResult ExpandShapeOp::verify() {
881   return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
882 }
883 
884 LogicalResult CollapseShapeOp::verify() {
885   return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
886 }
887 
888 namespace {
889 /// Reshape of a splat constant can be replaced with a constant of the result
890 /// type.
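///
/// Example (illustrative):
///
/// ```mlir
///   %cst = arith.constant dense<1.0> : tensor<2x4xf32>
///   %0 = tensor.collapse_shape %cst [[0, 1]]
///       : tensor<2x4xf32> into tensor<8xf32>
/// ```
///
/// becomes
///
/// ```mlir
///   %0 = arith.constant dense<1.0> : tensor<8xf32>
/// ```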
891 template <typename TensorReshapeOp>
892 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
893   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
894   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
895                                 PatternRewriter &rewriter) const override {
896     DenseElementsAttr attr;
897     if (!matchPattern(reshapeOp.getSrc(), m_Constant(&attr)))
898       return failure();
899     if (!attr || !attr.isSplat())
900       return failure();
901     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
902         reshapeOp.getResultType(), attr.getRawData());
903     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
904     return success();
905   }
906 };
907 
/// Reshape of a FromElements can be replaced with a FromElements of the
/// result type.
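///
/// Example (illustrative):
///
/// ```mlir
///   %0 = tensor.from_elements %a, %b, %c, %d : tensor<4xf32>
///   %1 = tensor.expand_shape %0 [[0, 1]] : tensor<4xf32> into tensor<2x2xf32>
/// ```
///
/// becomes
///
/// ```mlir
///   %1 = tensor.from_elements %a, %b, %c, %d : tensor<2x2xf32>
/// ```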
910 template <typename TensorReshapeOp>
911 struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
912   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
913   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
914                                 PatternRewriter &rewriter) const override {
915     auto fromElements =
916         reshapeOp.getSrc().template getDefiningOp<FromElementsOp>();
917     if (!fromElements)
918       return failure();
919 
920     auto shapedTy = reshapeOp.getType().template cast<ShapedType>();
921 
922     if (!shapedTy.hasStaticShape())
923       return failure();
924 
925     rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
926                                                 fromElements.getElements());
927     return success();
928   }
929 };
930 
931 } // namespace
932 
933 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
934                                                 MLIRContext *context) {
935   results.add<ComposeReassociativeReshapeOps<ExpandShapeOp>,
936               ComposeExpandOfCollapseOp<ExpandShapeOp, CollapseShapeOp>,
937               FoldReshapeWithConstant<ExpandShapeOp>,
938               FoldReshapeWithFromElements<ExpandShapeOp>>(context);
939 }
940 
941 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
942                                                   MLIRContext *context) {
943   results.add<ComposeReassociativeReshapeOps<CollapseShapeOp>,
944               ComposeCollapseOfExpandOp<CollapseShapeOp, ExpandShapeOp>,
945               FoldReshapeWithConstant<CollapseShapeOp>,
946               FoldReshapeWithFromElements<CollapseShapeOp>>(context);
947 }
948 
949 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
950   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
951 }
952 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
953   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
954 }
955 
956 //===----------------------------------------------------------------------===//
957 // ExtractSliceOp
958 //===----------------------------------------------------------------------===//
959 
960 /// An extract_slice op result type can be fully inferred from the source type
961 /// and the static representation of offsets, sizes and strides. Special
962 /// sentinels encode the dynamic case.
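/// For example (illustrative), static sizes [4, ShapedType::kDynamicSize, 8]
/// on a tensor<8x16x32xf32> source infer the type tensor<4x?x8xf32>.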
963 RankedTensorType ExtractSliceOp::inferResultType(
964     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
965     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offset/sizes/
  // strides, in which case we complete with offset=0, sizes from the source
  // tensor type, and strides=1.
969   unsigned rank = sourceRankedTensorType.getRank();
970   (void)rank;
971   assert(staticSizes.size() == rank &&
972          "unexpected staticSizes not equal to rank of source");
973   return RankedTensorType::get(staticSizes,
974                                sourceRankedTensorType.getElementType());
975 }
976 
977 RankedTensorType ExtractSliceOp::inferResultType(
978     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
979     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
980   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
981   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
982   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
983                              ShapedType::kDynamicStrideOrOffset);
984   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
985                              ShapedType::kDynamicSize);
986   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
987                              ShapedType::kDynamicStrideOrOffset);
988   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
989                                          staticSizes, staticStrides);
990 }
991 
/// Compute the result type of an extract_slice op in the rank-reduced case:
/// infer the full type from the static offsets, sizes and strides, then drop
/// as many size-1 dimensions as needed to reach `resultRank`.
995 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
996     unsigned resultRank, RankedTensorType sourceRankedTensorType,
997     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
998     ArrayRef<int64_t> strides) {
999   auto inferredType =
1000       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
1001           .cast<RankedTensorType>();
1002   int rankDiff = inferredType.getRank() - resultRank;
1003   if (rankDiff > 0) {
1004     auto shape = inferredType.getShape();
1005     llvm::SmallBitVector dimsToProject =
1006         getPositionsOfShapeOne(rankDiff, shape);
1007     SmallVector<int64_t> projectedShape;
1008     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
1009       if (!dimsToProject.test(pos))
1010         projectedShape.push_back(shape[pos]);
1011     inferredType =
1012         RankedTensorType::get(projectedShape, inferredType.getElementType());
1013   }
1014   return inferredType;
1015 }
1016 
1017 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
1018     unsigned resultRank, RankedTensorType sourceRankedTensorType,
1019     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
1020     ArrayRef<OpFoldResult> strides) {
1021   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1022   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1023   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1024                              ShapedType::kDynamicStrideOrOffset);
1025   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1026                              ShapedType::kDynamicSize);
1027   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1028                              ShapedType::kDynamicStrideOrOffset);
1029   return ExtractSliceOp::inferRankReducedResultType(
1030       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
1031       staticStrides);
1032 }
1033 
1034 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
1035 /// result type. If the type passed is nullptr, it is inferred.
1036 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1037                            RankedTensorType resultType, Value source,
1038                            ArrayRef<OpFoldResult> offsets,
1039                            ArrayRef<OpFoldResult> sizes,
1040                            ArrayRef<OpFoldResult> strides,
1041                            ArrayRef<NamedAttribute> attrs) {
1042   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1043   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1044   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1045                              ShapedType::kDynamicStrideOrOffset);
1046   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1047                              ShapedType::kDynamicSize);
1048   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1049                              ShapedType::kDynamicStrideOrOffset);
1050   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
1051   // Structuring implementation this way avoids duplication between builders.
1052   if (!resultType) {
1053     resultType =
1054         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
1055                                         staticSizes, staticStrides)
1056             .cast<RankedTensorType>();
1057   }
1058   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1059         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1060         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1061   result.addAttributes(attrs);
1062 }
1063 
1064 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
1065 /// result type.
1066 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1067                            ArrayRef<OpFoldResult> offsets,
1068                            ArrayRef<OpFoldResult> sizes,
1069                            ArrayRef<OpFoldResult> strides,
1070                            ArrayRef<NamedAttribute> attrs) {
1071   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1072 }
1073 
1074 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
1075 /// type passed is nullptr, it is inferred.
1076 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1077                            RankedTensorType resultType, Value source,
1078                            ValueRange offsets, ValueRange sizes,
1079                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1080   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1081       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1082   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1083       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1084   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1085       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1086   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1087 }
1088 
1089 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
1090 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1091                            ValueRange offsets, ValueRange sizes,
1092                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1093   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1094 }
1095 
1096 template <typename OpTy>
1097 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
1098                                           OpTy op, Type expectedType) {
1099   auto memrefType = expectedType.cast<ShapedType>();
1100   switch (result) {
1101   case SliceVerificationResult::Success:
1102     return success();
1103   case SliceVerificationResult::RankTooLarge:
1104     return op.emitError("expected rank to be smaller or equal to ")
1105            << "the other rank. ";
1106   case SliceVerificationResult::SizeMismatch:
1107     return op.emitError("expected type to be ")
1108            << expectedType << " or a rank-reduced version. (size mismatch) ";
1109   case SliceVerificationResult::ElemTypeMismatch:
1110     return op.emitError("expected element type to be ")
1111            << memrefType.getElementType();
1112   default:
1113     llvm_unreachable("unexpected extract_slice op verification result");
1114   }
1115 }
1116 
1117 /// Verifier for ExtractSliceOp.
1118 LogicalResult ExtractSliceOp::verify() {
1119   // Verify result type against inferred type.
1120   auto expectedType = ExtractSliceOp::inferResultType(
1121       getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
1122   auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
1123   return produceSliceErrorMsg(result, *this, expectedType);
1124 }
1125 
1126 /// Infer the canonical type of the result of an extract_slice op. Returns a
1127 /// type with rank `resultRank` that is either the rank of the rank-reduced
1128 /// type, or the non-rank-reduced type.
1129 static RankedTensorType
1130 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1131                             ArrayRef<OpFoldResult> mixedOffsets,
1132                             ArrayRef<OpFoldResult> mixedSizes,
1133                             ArrayRef<OpFoldResult> mixedStrides) {
1134   auto resultType =
1135       ExtractSliceOp::inferRankReducedResultType(
1136           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1137           .cast<RankedTensorType>();
1138   if (resultType.getRank() != resultRank) {
1139     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1140                                                  mixedSizes, mixedStrides)
1141                      .cast<RankedTensorType>();
1142   }
1143   return resultType;
1144 }
1145 
1146 llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
1147   ArrayRef<int64_t> resultShape = getType().getShape();
1148   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1149   llvm::SmallBitVector droppedDims(mixedSizes.size());
1150   unsigned shapePos = 0;
1151   for (const auto &size : enumerate(mixedSizes)) {
1152     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if its size is not statically 1, or if it is
    // 1 but the corresponding result dimension is also statically 1.
1156     if (!sizeVal || *sizeVal != 1 ||
1157         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1158       shapePos++;
1159       continue;
1160     }
1161     droppedDims.set(size.index());
1162   }
1163   return droppedDims;
1164 }
1165 
1166 LogicalResult ExtractSliceOp::reifyResultShapes(
1167     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1168   reifiedReturnShapes.resize(1);
1169   reifiedReturnShapes[0].reserve(getType().getRank());
1170   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1171   llvm::SmallBitVector droppedDims = getDroppedDims();
1172   Location loc = getLoc();
1173   for (const auto &size : enumerate(mixedSizes)) {
1174     if (droppedDims.test(size.index()))
1175       continue;
1176     if (auto attr = size.value().dyn_cast<Attribute>()) {
1177       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1178           loc, attr.cast<IntegerAttr>().getInt()));
1179       continue;
1180     }
1181     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1182   }
1183   return success();
1184 }
1185 
1186 namespace {
1187 /// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
1189 /// `canFoldIntoConsumerOp` is true.
1190 ///
1191 /// Example:
1192 /// ```
1193 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1194 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1195 ///   tensor<3x4xf32>
1196 /// ```
1197 /// is rewritten into:
1198 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///     tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1201 /// ```
1202 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1203 public:
1204   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1205 
1206   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1207                                 PatternRewriter &rewriter) const override {
    // If any operand is a constant index, bail and let the folder for constant
    // offsets/sizes/strides kick in first.
1209     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1210           return matchPattern(operand, matchConstantIndex());
1211         }))
1212       return failure();
1213 
1214     auto castOp = sliceOp.getSource().getDefiningOp<tensor::CastOp>();
1215     if (!castOp)
1216       return failure();
1217 
1218     if (!canFoldIntoConsumerOp(castOp))
1219       return failure();
1220 
1221     /// Deduce the type of the result to use for the canonicalized operation.
1222     RankedTensorType resultType = getCanonicalSliceResultType(
1223         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1224         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1225         sliceOp.getMixedStrides());
1226     Value newSlice = rewriter.create<ExtractSliceOp>(
1227         sliceOp.getLoc(), resultType, castOp.getSource(), sliceOp.getOffsets(),
1228         sliceOp.getSizes(), sliceOp.getStrides(), sliceOp.getStaticOffsets(),
1229         sliceOp.getStaticSizes(), sliceOp.getStaticStrides());
1230     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1231                                                 newSlice);
1232     return success();
1233   }
1234 };
1235 
/// Slice elements from `values` into `outValues`. `counts` holds, for each
/// dimension, the number of elements in the original values that a unit step
/// along that dimension spans (i.e. the linearized stride). The output values
/// can be used to construct a DenseElementsAttr.
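///
/// For example (illustrative), slicing a 2x3 source (counts = [3, 1]) with
/// offsets = [0, 1], sizes = [2, 2], strides = [1, 1] copies the elements at
/// flat indices {1, 2, 4, 5}.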
1239 template <typename IterTy, typename ElemTy>
1240 static void sliceElements(IterTy values, ArrayRef<int64_t> counts,
1241                           ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1242                           ArrayRef<int64_t> strides,
1243                           llvm::SmallVectorImpl<ElemTy> *outValues) {
1244   assert(offsets.size() == sizes.size());
1245   assert(offsets.size() == strides.size());
1246   if (offsets.empty())
1247     return;
1248 
1249   int64_t offset = offsets.front();
1250   int64_t size = sizes.front();
1251   int64_t stride = strides.front();
1252   if (offsets.size() == 1) {
1253     for (int64_t i = 0; i < size; ++i, offset += stride)
1254       outValues->push_back(*(values + offset));
1255 
1256     return;
1257   }
1258 
1259   for (int64_t i = 0; i < size; ++i, offset += stride) {
1260     auto begin = values + offset * counts.front();
1261     sliceElements<IterTy, ElemTy>(begin, counts.drop_front(),
1262                                   offsets.drop_front(), sizes.drop_front(),
1263                                   strides.drop_front(), outValues);
1264   }
1265 }
1266 
/// Fold arith.constant and tensor.extract_slice into arith.constant. The
/// folded operation might introduce more constant data; users can control
/// this heuristic via the control function.
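///
/// Example (illustrative, assuming the control function allows the fold):
///
/// ```mlir
///   %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
///   %0 = tensor.extract_slice %cst[1] [2] [1] : tensor<4xi32> to tensor<2xi32>
/// ```
///
/// becomes
///
/// ```mlir
///   %0 = arith.constant dense<[2, 3]> : tensor<2xi32>
/// ```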
1270 class ConstantOpExtractSliceFolder final
1271     : public OpRewritePattern<ExtractSliceOp> {
1272 public:
1273   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1274 
1275   ConstantOpExtractSliceFolder(MLIRContext *context,
1276                                ControlConstantExtractSliceFusionFn controlFn)
1277       : OpRewritePattern<ExtractSliceOp>(context),
1278         controlFn(std::move(controlFn)) {}
1279 
1280   LogicalResult matchAndRewrite(ExtractSliceOp op,
1281                                 PatternRewriter &rewriter) const override {
1282     DenseElementsAttr attr;
1283     if (!matchPattern(op.getSource(), m_Constant(&attr)))
1284       return failure();
1285 
1286     // A constant splat is handled by fold().
1287     if (attr.isSplat())
1288       return failure();
1289 
1290     // Dynamic result shape is not supported.
1291     auto sourceType = op.getSource().getType().cast<ShapedType>();
1292     auto resultType = op.getResult().getType().cast<ShapedType>();
1293     if (!sourceType.hasStaticShape() || !resultType.hasStaticShape())
1294       return failure();
1295 
1296     // Customized control over the folding.
1297     if (!controlFn(op))
1298       return failure();
1299 
1300     int64_t count = sourceType.getNumElements();
1301     if (count == 0)
1302       return failure();
1303 
1304     // Check if there are any dynamic parts, which are not supported.
1305     auto offsets = extractFromI64ArrayAttr(op.getStaticOffsets());
1306     if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
1307       return failure();
1308     auto sizes = extractFromI64ArrayAttr(op.getStaticSizes());
1309     if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
1310       return failure();
1311     auto strides = extractFromI64ArrayAttr(op.getStaticStrides());
1312     if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
1313       return failure();
1314 
1315     // Compute the stride for each dimension.
1316     SmallVector<int64_t> counts;
1317     ArrayRef<int64_t> shape = sourceType.getShape();
1318     counts.reserve(shape.size());
1319     for (int64_t v : shape) {
1320       count = count / v;
1321       counts.push_back(count);
1322     }
1323 
1324     // New attribute constructed by the sliced values.
1325     DenseElementsAttr newAttr;
1326 
1327     if (auto elems = attr.dyn_cast<DenseIntElementsAttr>()) {
1328       SmallVector<APInt> outValues;
1329       outValues.reserve(sourceType.getNumElements());
1330       sliceElements<DenseElementsAttr::IntElementIterator, APInt>(
1331           elems.begin(), counts, offsets, sizes, strides, &outValues);
1332       newAttr = DenseElementsAttr::get(resultType, outValues);
1333     } else if (auto elems = attr.dyn_cast<DenseFPElementsAttr>()) {
1334       SmallVector<APFloat> outValues;
1335       outValues.reserve(sourceType.getNumElements());
1336       sliceElements<DenseElementsAttr::FloatElementIterator, APFloat>(
1337           elems.begin(), counts, offsets, sizes, strides, &outValues);
1338       newAttr = DenseElementsAttr::get(resultType, outValues);
1339     }
1340 
1341     if (newAttr) {
1342       rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, resultType, newAttr);
1343       return success();
1344     }
1345 
1346     return failure();
1347   }
1348 
1349 private:
  /// Controls whether the fold applies; users can impose their own heuristics
  /// in this function.
1352   ControlConstantExtractSliceFusionFn controlFn;
1353 };
1354 
1355 } // namespace
1356 
1357 void mlir::tensor::populateFoldConstantExtractSlicePatterns(
1358     RewritePatternSet &patterns,
1359     const ControlConstantExtractSliceFusionFn &controlFn) {
1360   patterns.add<ConstantOpExtractSliceFolder>(patterns.getContext(), controlFn);
1361 }
1362 
1363 /// Return the canonical type of the result of an extract_slice op.
1364 struct SliceReturnTypeCanonicalizer {
1365   RankedTensorType operator()(ExtractSliceOp op,
1366                               ArrayRef<OpFoldResult> mixedOffsets,
1367                               ArrayRef<OpFoldResult> mixedSizes,
1368                               ArrayRef<OpFoldResult> mixedStrides) {
1369     return getCanonicalSliceResultType(op.getType().getRank(),
1370                                        op.getSourceType(), mixedOffsets,
1371                                        mixedSizes, mixedStrides);
1372   }
1373 };
1374 
1375 /// A canonicalizer wrapper to replace ExtractSliceOps.
1376 struct SliceCanonicalizer {
1377   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1378                   ExtractSliceOp newOp) {
1379     Value replacement = newOp.getResult();
1380     if (replacement.getType() != op.getType())
1381       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1382                                                     replacement);
1383     rewriter.replaceOp(op, replacement);
1384   }
1385 };
1386 
1387 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1388                                                  MLIRContext *context) {
1389   results.add<
1390       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1391           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1392       ExtractSliceOpCastFolder>(context);
1393 }
1394 
/// Return success if `op` describes an identity slice of `shapedType`, i.e.
/// all offsets are zero, all strides are one, and the sizes match the leading
/// dimensions of `shapedType`.
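///
/// For example (illustrative), this extract_slice is such an identity slice
/// and can fold to its source:
///
/// ```mlir
///   %0 = tensor.extract_slice %t[0, 0] [4, 8] [1, 1]
///       : tensor<4x8xf32> to tensor<4x8xf32>
/// ```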
1396 static LogicalResult
1397 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1398                                            ShapedType shapedType) {
1399   OpBuilder b(op.getContext());
1400   for (OpFoldResult ofr : op.getMixedOffsets())
1401     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1402       return failure();
1403   // Rank-reducing noops only need to inspect the leading dimensions: llvm::zip
1404   // is appropriate.
1405   auto shape = shapedType.getShape();
1406   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1407     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1408       return failure();
1409   for (OpFoldResult ofr : op.getMixedStrides())
1410     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1411       return failure();
1412   return success();
1413 }
1414 
1415 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1416 /// we can return the InsertSliceOp's source directly.
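///
/// For example (illustrative):
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dest[0, 1] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
///   %1 = tensor.extract_slice %0[0, 1] [4, 4] [1, 1]
///       : tensor<8x8xf32> to tensor<4x4xf32>
/// ```
///
/// Here %1 folds to %src.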
1417 // TODO: This only checks the immediate producer; extend to go up the
1418 // insert/extract chain if the slices are disjoint.
1419 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1420   auto insertOp = extractOp.getSource().getDefiningOp<InsertSliceOp>();
1421 
1422   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1423   if (insertOp && insertOp.getSource().getType() == extractOp.getType() &&
1424       insertOp.isSameAs(extractOp, isSame))
1425     return insertOp.getSource();
1426 
1427   return {};
1428 }
1429 
1430 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute> operands) {
1431   if (auto splat = operands[0].dyn_cast_or_null<SplatElementsAttr>()) {
1432     auto resultType = getResult().getType().cast<ShapedType>();
1433     if (resultType.hasStaticShape())
1434       return splat.resizeSplat(resultType);
1435   }
1436   if (getSourceType() == getType() &&
1437       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1438     return this->getSource();
1439   if (Value slice = foldExtractAfterInsertSlice(*this))
1440     return slice;
1441 
1442   return OpFoldResult();
1443 }
1444 
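/// Create a rank-reducing ExtractSliceOp that extracts all of `tensor` (zero
/// offsets, full sizes, unit strides) with the given `targetType`. A sketch of
/// the IR this produces (illustrative; %t and the types are hypothetical):
///
/// ```mlir
///   // For %t : tensor<1x?x16xf32> and targetType = tensor<?x16xf32>:
///   %c1 = arith.constant 1 : index
///   %d1 = tensor.dim %t, %c1 : tensor<1x?x16xf32>
///   %0 = tensor.extract_slice %t[0, 0, 0] [1, %d1, 16] [1, 1, 1]
///       : tensor<1x?x16xf32> to tensor<?x16xf32>
/// ```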
1445 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1446     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1447   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1448   unsigned rank = rankedTensorType.getRank();
1449   auto shape = rankedTensorType.getShape();
1450   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1451   SmallVector<OpFoldResult> sizes;
1452   for (unsigned i = 0, e = rank; i < e; ++i) {
1453     OpFoldResult dim;
1454     if (rankedTensorType.isDynamicDim(i))
1455       dim = b.createOrFold<tensor::DimOp>(
1456           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1457     else
1458       dim = b.getIndexAttr(shape[i]);
1459     sizes.push_back(dim);
1460   }
1461   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1462   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1463                                                 offsets, sizes, strides);
1464 }
1465 
1466 //===----------------------------------------------------------------------===//
1467 // InsertSliceOp
1468 //===----------------------------------------------------------------------===//
1469 
// Build an InsertSliceOp with mixed static and dynamic entries.
1471 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1472                           Value dest, ArrayRef<OpFoldResult> offsets,
1473                           ArrayRef<OpFoldResult> sizes,
1474                           ArrayRef<OpFoldResult> strides,
1475                           ArrayRef<NamedAttribute> attrs) {
1476   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1477   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1478   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1479                              ShapedType::kDynamicStrideOrOffset);
1480   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1481                              ShapedType::kDynamicSize);
1482   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1483                              ShapedType::kDynamicStrideOrOffset);
1484   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1485         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1486         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1487   result.addAttributes(attrs);
1488 }
1489 
// Build an InsertSliceOp with dynamic entries.
1491 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1492                           Value dest, ValueRange offsets, ValueRange sizes,
1493                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1494   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1495       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1496   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1497       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1498   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1499       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1500   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1501 }
1502 
1503 static SliceVerificationResult
1504 verifyInsertSliceOp(ShapedType srcType, ShapedType dstType,
1505                     ArrayAttr staticOffsets, ArrayAttr staticSizes,
1506                     ArrayAttr staticStrides,
1507                     ShapedType *expectedType = nullptr) {
1508   // insert_slice is the inverse of extract_slice, use the same type inference.
1509   auto expected = ExtractSliceOp::inferRankReducedResultType(
1510                       srcType.getRank(), dstType.cast<RankedTensorType>(),
1511                       extractFromI64ArrayAttr(staticOffsets),
1512                       extractFromI64ArrayAttr(staticSizes),
1513                       extractFromI64ArrayAttr(staticStrides))
1514                       .cast<ShapedType>();
1515   if (expectedType)
1516     *expectedType = expected;
1517   return isRankReducedType(expected, srcType);
1518 }
1519 
1520 /// Verifier for InsertSliceOp.
1521 LogicalResult InsertSliceOp::verify() {
1522   ShapedType expectedType;
1523   auto result =
1524       verifyInsertSliceOp(getSourceType(), getType(), getStaticOffsets(),
1525                           getStaticSizes(), getStaticStrides(), &expectedType);
1526   return produceSliceErrorMsg(result, *this, expectedType);
1527 }
1528 
/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can replace the second InsertSliceOp's destination with the first one's
/// destination.
1531 ///
1532 /// Example:
1533 ///
1534 /// ```mlir
1535 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1536 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1537 /// ```
1538 ///
1539 /// folds into:
1540 ///
1541 /// ```mlir
1542 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1543 /// ```
1544 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1545   auto prevInsertOp = insertOp.getDest().getDefiningOp<InsertSliceOp>();
1546 
1547   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1548   if (!prevInsertOp ||
1549       prevInsertOp.getSource().getType() != insertOp.getSource().getType() ||
1550       !prevInsertOp.isSameAs(insertOp, isSame))
1551     return failure();
1552 
1553   insertOp.getDestMutable().assign(prevInsertOp.getDest());
1554   return success();
1555 }
1556 
1557 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
1558   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1559       getSourceType() == getType() &&
1560       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1561     return this->getSource();
1562   if (succeeded(foldInsertAfterInsertSlice(*this)))
1563     return getResult();
1564   return OpFoldResult();
1565 }
1566 
1567 LogicalResult InsertSliceOp::reifyResultShapes(
1568     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1569   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1570   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1571     reifiedReturnShapes[0][dim] =
1572         builder.createOrFold<tensor::DimOp>(getLoc(), getDest(), dim);
1573   }
1574   return success();
1575 }
1576 
1577 namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
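///
/// For example (illustrative), constant index operands are folded into the
/// static attributes:
///
/// ```mlir
///   %c1 = arith.constant 1 : index
///   %0 = tensor.insert_slice %src into %dest[0, %c1] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```
///
/// canonicalizes to:
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dest[0, 1] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```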
1579 class InsertSliceOpConstantArgumentFolder final
1580     : public OpRewritePattern<InsertSliceOp> {
1581 public:
1582   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1583 
1584   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1585                                 PatternRewriter &rewriter) const override {
1586     // No constant operand, just return.
1587     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1588           return matchPattern(operand, matchConstantIndex());
1589         }))
1590       return failure();
1591 
    // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
1595     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1596     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1597     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1598     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1599     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1600     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1601 
1602     // Create the new op in canonical form.
1603     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1604         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1605         mixedOffsets, mixedSizes, mixedStrides);
1606     Value toInsert = insertSliceOp.getSource();
1607     if (sourceType != insertSliceOp.getSourceType())
1608       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1609                                                  sourceType, toInsert);
1610     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1611         insertSliceOp, toInsert, insertSliceOp.getDest(), mixedOffsets,
1612         mixedSizes, mixedStrides);
1613     return success();
1614   }
1615 };
1616 
1617 /// Fold tensor_casts with insert_slice operations. If the source or destination
1618 /// tensor is a tensor_cast that removes static type information, the cast is
1619 /// folded into the insert_slice operation. E.g.:
1620 ///
1621 /// ```mlir
1622 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1623 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1624 /// ```
1625 ///
1626 /// folds into:
1627 ///
1628 /// ```mlir
1629 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1630 /// ```
1631 ///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back so that the type of the result does
/// not change.
1635 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1636   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1637 
1638   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1639                                 PatternRewriter &rewriter) const override {
1640     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1641           return matchPattern(operand, matchConstantIndex());
1642         }))
1643       return failure();
1644 
1645     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1646       auto castOp = v.getDefiningOp<tensor::CastOp>();
1647       if (!castOp || !canFoldIntoConsumerOp(castOp))
1648         return llvm::None;
1649       return castOp.getSource();
1650     };
1651     Optional<Value> sourceCastSource =
1652         getSourceOfCastOp(insertSliceOp.getSource());
1653     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.getDest());
1654     if (!sourceCastSource && !destCastSource)
1655       return failure();
1656 
1657     auto src =
1658         (sourceCastSource ? *sourceCastSource : insertSliceOp.getSource());
1659     auto dst = (destCastSource ? *destCastSource : insertSliceOp.getDest());
1660 
1661     auto srcType = src.getType().cast<ShapedType>();
1662     auto dstType = dst.getType().cast<ShapedType>();
1663     if (verifyInsertSliceOp(srcType, dstType, insertSliceOp.getStaticOffsets(),
1664                             insertSliceOp.getStaticSizes(),
1665                             insertSliceOp.getStaticStrides()) !=
1666         SliceVerificationResult::Success)
1667       return failure();
1668 
1669     Value replacement = rewriter.create<InsertSliceOp>(
1670         insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
1671         insertSliceOp.getMixedSizes(), insertSliceOp.getMixedStrides());
1672 
1673     if (replacement.getType() != insertSliceOp.getType()) {
1674       replacement = rewriter.create<tensor::CastOp>(
1675           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1676     }
1677     rewriter.replaceOp(insertSliceOp, replacement);
1678     return success();
1679   }
1680 };
1681 
/// If additional static type information can be deduced from an insert_slice's
/// size operands, insert an explicit cast of the op's source operand. This
/// enables other canonicalization patterns that match tensor_cast ops, such as
/// `ForOpTensorCastFolder` in SCF.
1686 ///
1687 /// Example:
1688 ///
1689 /// ```mlir
1690 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1691 ///       : tensor<?x?xf32> into ...
1692 /// ```
1693 ///
1694 /// folds into:
1695 ///
1696 /// ```mlir
1697 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1698 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1699 ///       : tensor<64x64xf32> into ...
1700 /// ```
1701 struct InsertSliceOpSourceCastInserter final
1702     : public OpRewritePattern<InsertSliceOp> {
1703   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1704 
1705   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1706                                 PatternRewriter &rewriter) const override {
1707     RankedTensorType srcType = insertSliceOp.getSourceType();
1708     if (srcType.getRank() != insertSliceOp.getType().getRank())
1709       return failure();
1710     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1711                                      srcType.getShape().end());
1712     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1713       if (Optional<int64_t> constInt =
1714               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1715         newSrcShape[i] = *constInt;
1716     }
1717 
1718     RankedTensorType newSrcType =
1719         RankedTensorType::get(newSrcShape, srcType.getElementType());
1720     if (srcType == newSrcType ||
1721         !preservesStaticInformation(srcType, newSrcType) ||
1722         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1723       return failure();
1724 
1725     // newSrcType is:
1726     //   1) Different from srcType.
1727     //   2) "More static" than srcType.
1728     //   3) Cast-compatible with srcType.
1729     // Insert the cast.
1730     Value cast = rewriter.create<tensor::CastOp>(
1731         insertSliceOp.getLoc(), newSrcType, insertSliceOp.getSource());
1732     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1733         insertSliceOp, cast, insertSliceOp.getDest(),
1734         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1735         insertSliceOp.getMixedStrides());
1736     return success();
1737   }
1738 };
1739 } // namespace
1740 
1741 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1742                                                 MLIRContext *context) {
1743   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1744               InsertSliceOpSourceCastInserter>(context);
1745 }
1746 
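/// Create a rank-reducing InsertSliceOp that writes all of `tensor` into
/// `dest` (zero offsets, full sizes of `dest`, unit strides). A sketch of the
/// IR this produces (illustrative; %src, %dest, and the types are
/// hypothetical):
///
/// ```mlir
///   // For %src : tensor<?x16xf32> and %dest : tensor<1x?x16xf32>:
///   %c1 = arith.constant 1 : index
///   %d1 = tensor.dim %dest, %c1 : tensor<1x?x16xf32>
///   %0 = tensor.insert_slice %src into %dest[0, 0, 0] [1, %d1, 16] [1, 1, 1]
///       : tensor<?x16xf32> into tensor<1x?x16xf32>
/// ```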
1747 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1748                                                              Location loc,
1749                                                              Value tensor,
1750                                                              Value dest) {
1751   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1752   unsigned rank = rankedTensorType.getRank();
1753   auto shape = rankedTensorType.getShape();
1754   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1755   SmallVector<OpFoldResult> sizes;
1756   for (unsigned i = 0, e = rank; i < e; ++i) {
1757     OpFoldResult dim;
1758     if (rankedTensorType.isDynamicDim(i))
1759       dim = b.createOrFold<tensor::DimOp>(
1760           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1761     else
1762       dim = b.getIndexAttr(shape[i]);
1763     sizes.push_back(dim);
1764   }
1765   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1766   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1767                                                sizes, strides);
1768 }
1769 
1770 //===----------------------------------------------------------------------===//
1771 // PadOp
1772 //===----------------------------------------------------------------------===//
1773 
1774 // TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
1775 // supports optional types.
1776 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
1777                     Type typeToInfer, Type typeToInferFrom) {}
1778 
1779 ParseResult parseInferType(OpAsmParser &parser,
1780                            Optional<OpAsmParser::UnresolvedOperand> optOperand,
1781                            Type &typeToInfer, Type typeToInferFrom) {
1782   if (optOperand)
1783     typeToInfer = typeToInferFrom;
1784   return success();
1785 }
1786 
1787 LogicalResult PadOp::verify() {
1788   auto sourceType = getSource().getType().cast<RankedTensorType>();
1789   auto resultType = getResult().getType().cast<RankedTensorType>();
1790   auto expectedType = PadOp::inferResultType(
1791       sourceType, extractFromI64ArrayAttr(getStaticLow()),
1792       extractFromI64ArrayAttr(getStaticHigh()));
1793   for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
1794     if (resultType.getDimSize(i) == expectedType.getDimSize(i))
1795       continue;
1796     if (expectedType.isDynamicDim(i))
1797       continue;
1798     return emitError("specified type ")
1799            << resultType << " does not match the inferred type "
1800            << expectedType;
1801   }
1802 
1803   return success();
1804 }
1805 
1806 LogicalResult PadOp::verifyRegions() {
1807   auto &region = getRegion();
1808   unsigned rank = getResult().getType().cast<RankedTensorType>().getRank();
1809   Block &block = region.front();
1810   if (block.getNumArguments() != rank)
1811     return emitError("expected the block to have ") << rank << " arguments";
1812 
1813   // Note: the number and type of yield values are checked in the YieldOp.
1814   for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
1815     if (!en.value().isIndex())
1816       return emitOpError("expected block argument ")
1817              << (en.index() + 1) << " to be an index";
1818   }
1819 
1820   // Ensure that the region yields an element of the right type.
1821   auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
1822   if (yieldOp.getValue().getType() !=
1823       getType().cast<ShapedType>().getElementType())
1824     return emitOpError("expected yield type to match shape element type");
1825 
1826   return success();
1827 }
1828 
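/// Infer the result type of PadOp from the source type and the static low/high
/// padding: each statically known dimension grows by the corresponding low and
/// high padding, and any dynamic quantity yields a dynamic dimension. For
/// example (illustrative):
///
/// ```mlir
///   %0 = tensor.pad %t low[1, 0] high[1, 2] { ...
///     } : tensor<4x?xf32> to tensor<6x?xf32>
/// ```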
1829 RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
1830                                         ArrayRef<int64_t> staticLow,
1831                                         ArrayRef<int64_t> staticHigh,
1832                                         ArrayRef<int64_t> resultShape) {
1833   unsigned rank = sourceType.getRank();
1834   assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
1835   assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
1836   assert((resultShape.empty() || resultShape.size() == rank) &&
1837          "unexpected resultShape size mismatch");
1838 
1839   SmallVector<int64_t, 4> inferredShape;
1840   for (auto i : llvm::seq<unsigned>(0, rank)) {
1841     if (sourceType.isDynamicDim(i) ||
1842         staticLow[i] == ShapedType::kDynamicSize ||
1843         staticHigh[i] == ShapedType::kDynamicSize) {
1844       inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
1845                                                   : resultShape[i]);
1846     } else {
1847       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
1848       assert((resultShape.empty() || size == resultShape[i] ||
1849               resultShape[i] == ShapedType::kDynamicSize) &&
1850              "mismatch between inferred shape and result shape");
1851       inferredShape.push_back(size);
1852     }
1853   }
1854 
1855   return RankedTensorType::get(inferredShape, sourceType.getElementType());
1856 }
1857 
1858 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1859                   ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
1860                   ValueRange low, ValueRange high, bool nofold,
1861                   ArrayRef<NamedAttribute> attrs) {
1862   auto sourceType = source.getType().cast<RankedTensorType>();
1863   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
1864   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
1865         b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
1866   result.addAttributes(attrs);
1867 }
1868 
1869 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1870                   ValueRange low, ValueRange high, bool nofold,
1871                   ArrayRef<NamedAttribute> attrs) {
1872   auto sourceType = source.getType().cast<RankedTensorType>();
1873   unsigned rank = sourceType.getRank();
1874   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
1875   build(b, result, source, staticVector, staticVector, low, high, nofold,
1876         attrs);
1877 }
1878 
1879 void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
1880                   Value source, ArrayRef<OpFoldResult> low,
1881                   ArrayRef<OpFoldResult> high, bool nofold,
1882                   ArrayRef<NamedAttribute> attrs) {
1883   assert(resultType.isa<RankedTensorType>());
1884   auto sourceType = source.getType().cast<RankedTensorType>();
1885   SmallVector<Value, 4> dynamicLow, dynamicHigh;
1886   SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh carry the complete padding configuration. Each
  // entry in `low`/`high` grows staticLow/staticHigh by one value. If that
  // entry is dynamic (i.e. not a constant), dynamicLow/dynamicHigh grow by one
  // value as well.
1891   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
1892                              ShapedType::kDynamicSize);
1893   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
1894                              ShapedType::kDynamicSize);
1895   if (!resultType) {
1896     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
1897   }
1898   build(b, result, resultType, source, dynamicLow, dynamicHigh,
1899         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
1900         nofold ? b.getUnitAttr() : UnitAttr());
1901   result.addAttributes(attrs);
1902 }
1903 
1904 llvm::SmallBitVector PadOp::getPaddedDims() {
1905   llvm::SmallBitVector paddedDims(getSourceType().getRank());
1906   auto extractPaddedDims = [&](ArrayRef<OpFoldResult> paddingWidths) {
1907     for (const auto &en : enumerate(paddingWidths))
1908       if (getConstantIntValue(en.value()) != static_cast<int64_t>(0))
1909         paddedDims.set(en.index());
1910   };
1911   extractPaddedDims(getMixedLowPad());
1912   extractPaddedDims(getMixedHighPad());
1913   return paddedDims;
1914 }
1915 
1916 namespace {
// Folds tensor.pad ops with zero static low/high padding into a tensor.cast,
// unless the `nofold` attribute requests otherwise.
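//
// For example (illustrative):
//
// ```mlir
//   %0 = tensor.pad %t low[0, 0] high[0, 0] { ...
//     } : tensor<?x8xf32> to tensor<4x8xf32>
// ```
//
// rewrites to:
//
// ```mlir
//   %0 = tensor.cast %t : tensor<?x8xf32> to tensor<4x8xf32>
// ```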
1919 struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
1920   using OpRewritePattern<PadOp>::OpRewritePattern;
1921 
1922   LogicalResult matchAndRewrite(PadOp padTensorOp,
1923                                 PatternRewriter &rewriter) const override {
1924     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
1925       return failure();
1926     if (padTensorOp.getNofold())
1927       return failure();
1928     rewriter.replaceOpWithNewOp<tensor::CastOp>(
1929         padTensorOp, padTensorOp.getResult().getType(),
1930         padTensorOp.getSource());
1931     return success();
1932   }
1933 };
1934 
// Fold a tensor.cast on the source of a PadOp into the PadOp when doing so
// adds static information.
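//
// For example (illustrative):
//
// ```mlir
//   %0 = tensor.cast %t : tensor<4x8xf32> to tensor<?x8xf32>
//   %1 = tensor.pad %0 low[0, 0] high[1, 2] { ...
//     } : tensor<?x8xf32> to tensor<5x10xf32>
// ```
//
// folds into:
//
// ```mlir
//   %1 = tensor.pad %t low[0, 0] high[1, 2] { ...
//     } : tensor<4x8xf32> to tensor<5x10xf32>
// ```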
1936 struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
1937   using OpRewritePattern<PadOp>::OpRewritePattern;
1938 
1939   LogicalResult matchAndRewrite(PadOp padTensorOp,
1940                                 PatternRewriter &rewriter) const override {
1941     auto castOp = padTensorOp.getSource().getDefiningOp<tensor::CastOp>();
1942     if (!tensor::canFoldIntoConsumerOp(castOp))
1943       return failure();
1944 
1945     auto newResultType = PadOp::inferResultType(
1946         castOp.getSource().getType().cast<RankedTensorType>(),
1947         extractFromI64ArrayAttr(padTensorOp.getStaticLow()),
1948         extractFromI64ArrayAttr(padTensorOp.getStaticHigh()),
1949         padTensorOp.getResultType().getShape());
1950 
1951     if (newResultType == padTensorOp.getResultType()) {
1952       rewriter.updateRootInPlace(padTensorOp, [&]() {
1953         padTensorOp.getSourceMutable().assign(castOp.getSource());
1954       });
1955     } else {
1956       auto newOp = rewriter.create<PadOp>(
1957           padTensorOp->getLoc(), newResultType, padTensorOp.getSource(),
1958           padTensorOp.getLow(), padTensorOp.getHigh(),
1959           padTensorOp.getStaticLow(), padTensorOp.getStaticHigh(),
1960           padTensorOp.getNofold());
1961       BlockAndValueMapping mapper;
1962       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
1963 
1964       rewriter.replaceOpWithNewOp<tensor::CastOp>(
1965           padTensorOp, padTensorOp.getResultType(), newOp);
1966     }
1967     return success();
1968   }
1969 };
1970 
// Fold a tensor.cast of a PadOp's result back into the PadOp if the cast adds
// static information.
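//
// For example (illustrative):
//
// ```mlir
//   %0 = tensor.pad %t low[0, 0] high[%h, 2] { ...
//     } : tensor<4x8xf32> to tensor<?x10xf32>
//   %1 = tensor.cast %0 : tensor<?x10xf32> to tensor<6x10xf32>
// ```
//
// rewrites to:
//
// ```mlir
//   %1 = tensor.pad %t low[0, 0] high[%h, 2] { ...
//     } : tensor<4x8xf32> to tensor<6x10xf32>
// ```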
1973 struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
1974   using OpRewritePattern<PadOp>::OpRewritePattern;
1975 
1976   LogicalResult matchAndRewrite(PadOp padTensorOp,
1977                                 PatternRewriter &rewriter) const override {
1978     if (!padTensorOp.getResult().hasOneUse())
1979       return failure();
1980     auto tensorCastOp =
1981         dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
1982     if (!tensorCastOp)
1983       return failure();
1984     if (!tensor::preservesStaticInformation(padTensorOp.getResult().getType(),
1985                                             tensorCastOp.getDest().getType()))
1986       return failure();
1987 
1988     auto replacementOp = rewriter.create<PadOp>(
1989         padTensorOp.getLoc(), tensorCastOp.getDest().getType(),
1990         padTensorOp.getSource(), padTensorOp.getLow(), padTensorOp.getHigh(),
1991         padTensorOp.getStaticLow(), padTensorOp.getStaticHigh(),
1992         padTensorOp.getNofold());
1993     replacementOp.getRegion().takeBody(padTensorOp.getRegion());
1994 
1995     rewriter.replaceOp(padTensorOp, replacementOp.getResult());
1996     rewriter.replaceOp(tensorCastOp, replacementOp.getResult());
1997     return success();
1998   }
1999 };
2000 
2001 /// Fold chains of tensor::ExtractSliceOp, tensor::PadOp pairs that pad
2002 /// different dimensions. The pattern applies if the following preconditions
2003 /// hold:
2004 ///   1) the tensor::ExtractSliceOps are not rank-reducing,
2005 ///   2) the tensor::ExtractSliceOps have only unit-strides,
2006 ///   3) the tensor::PadOps perform only high-padding,
2007 ///   4) the tensor::PadOps have the same constant padding value,
2008 ///   5) the tensor::PadOps do not have common padding dimensions,
2009 ///   6) one tensor::ExtractSliceOp, tensor::PadOp pair has zero-padding and
2010 ///      zero-offset for every dimension.
2011 ///   7) the tensor::ExtractSliceOp sizes match the source tensor sizes for the
2012 ///      padded source dimensions.
2013 ///
2014 /// Example:
2015 ///
2016 /// ```mlir
2017 ///   %0 = tensor.extract_slice %input[16, 0] [%sz0, 64] [1, 1]
2018 ///       : tensor<64x64xf32> to tensor<?x64xf32>
2019 ///   %1 = tensor.pad %0 low[0, 0] high[%pw0, 0] { ...
2020 ///     } : tensor<?x64xf32> to tensor<8x64xf32>
2021 ///   %2 = tensor.extract_slice %1[0, 4] [8, %sz1] [1, 1]
2022 ///        : tensor<8x64xf32> to tensor<8x?xf32>
2023 ///   %res = tensor.pad %2 nofold low[0, 0] high[0, %pw1] { ...
2024 ///     } : tensor<8x?xf32> to tensor<8x4xf32>
2025 /// ```
2026 ///
2027 /// folds into:
2028 ///
2029 /// ```mlir
2030 ///   %0 = tensor.extract_slice %input[16, 4] [%sz0, %sz1] [1, 1]
2031 ///        : tensor<64x64xf32> to tensor<?x?xf32>
2032 ///   %res = tensor.pad %0 nofold low[0, 0] high[%pw0, %pw1] { ...
2033 ///     } : tensor<?x?xf32> to tensor<8x4xf32>
2034 /// ```
2035 struct FoldOrthogonalPaddings : public OpRewritePattern<PadOp> {
2036   using OpRewritePattern<PadOp>::OpRewritePattern;
2037 
2038   LogicalResult matchAndRewrite(PadOp padOp,
2039                                 PatternRewriter &rewriter) const override {
2040     auto innerSliceOp = padOp.getSource().getDefiningOp<ExtractSliceOp>();
2041     if (!innerSliceOp)
2042       return failure();
2043     auto outerPadOp = innerSliceOp.getSource().getDefiningOp<PadOp>();
2044     if (!outerPadOp || outerPadOp.getNofold())
2045       return failure();
2046     auto outerSliceOp = outerPadOp.getSource().getDefiningOp<ExtractSliceOp>();
2047     if (!outerSliceOp)
2048       return failure();
2049 
2050     // 1) Fail if the chain is rank-reducing.
2051     int64_t rank = padOp.getSourceType().getRank();
2052     if (outerSliceOp.getSourceType().getRank() != rank) {
2053       return rewriter.notifyMatchFailure(padOp,
2054                                          "cannot fold rank-reducing chain");
2055     }
2056 
2057     // 2) Fail if the tensor::ExtractSliceOps have non-unit strides.
2058     if (!innerSliceOp.hasUnitStride() || !outerSliceOp.hasUnitStride()) {
2059       return rewriter.notifyMatchFailure(
2060           padOp, "cannot fold non-unit stride ExtractSliceOps");
2061     }
2062 
2063     // 3) Fail if the tensor::PadOps have non-zero low padding.
2064     if (!padOp.hasZeroLowPad() || !outerPadOp.hasZeroLowPad()) {
2065       return rewriter.notifyMatchFailure(padOp,
2066                                          "cannot fold PadOps with low padding");
2067     }
2068 
2069     // 4) Fail if the tensor::PadOps padding values do not match.
2070     Attribute innerAttr, outerAttr;
2071     Value innerValue = padOp.getConstantPaddingValue();
2072     Value outerValue = outerPadOp.getConstantPaddingValue();
2073     if (!innerValue || !outerValue ||
2074         !matchPattern(innerValue, m_Constant(&innerAttr)) ||
2075         !matchPattern(outerValue, m_Constant(&outerAttr)) ||
2076         innerAttr != outerAttr) {
2077       return rewriter.notifyMatchFailure(
2078           padOp, "cannot fold PadOps with different padding values");
2079     }
2080 
2081     // 5) Fail if a dimension is padded by both tensor::PadOps.
2082     llvm::SmallBitVector innerDims = padOp.getPaddedDims();
2083     llvm::SmallBitVector outerDims = outerPadOp.getPaddedDims();
2084     if (innerDims.anyCommon(outerDims)) {
2085       return rewriter.notifyMatchFailure(
2086           padOp, "cannot fold PadOps with common padding dimensions");
2087     }
2088 
    // 6) Combine the offsets of the two tensor::ExtractSliceOps. Find the
    // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
    // for every dimension, and use the offset of the other pair. Fail if no
    // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
    // exists.
2094     SmallVector<OpFoldResult> newOffsets(rank, rewriter.getIndexAttr(0));
2095     for (auto &en : enumerate(newOffsets)) {
2096       OpFoldResult innerOffset = innerSliceOp.getMixedOffsets()[en.index()];
2097       OpFoldResult outerOffset = outerSliceOp.getMixedOffsets()[en.index()];
2098       if (!innerDims.test(en.index()) &&
2099           (getConstantIntValue(innerOffset) == static_cast<int64_t>(0))) {
2100         en.value() = outerOffset;
2101         continue;
2102       }
2103       if (!outerDims.test(en.index()) &&
2104           (getConstantIntValue(outerOffset) == static_cast<int64_t>(0))) {
2105         en.value() = innerOffset;
2106         continue;
2107       }
2108       return rewriter.notifyMatchFailure(
2109           padOp, "cannot find zero-offset and zero-padding pair");
2110     }
2111 
2112     // 7) Combine the sizes of the two tensor::ExtractSliceOps. Take the size of
2113     // the outer tensor::ExtractSliceOp for the dimensions padded by the outer
2114     // tensor::PadOp and fail if the size of the inner tensor::ExtractSliceOp
2115     // does not match the size of the padded dimension. Otherwise, take the size
2116     // of the inner tensor::ExtractSliceOp.
2117     SmallVector<OpFoldResult> newSizes = innerSliceOp.getMixedSizes();
2118     for (auto &en : enumerate(newSizes)) {
2119       if (!outerDims.test(en.index()))
2120         continue;
2121       OpFoldResult sliceSize = innerSliceOp.getMixedSizes()[en.index()];
2122       int64_t sourceSize = innerSliceOp.getSourceType().getShape()[en.index()];
2123       assert(!ShapedType::isDynamic(sourceSize) &&
2124              "expected padded dimension to have a static size");
2125       if (getConstantIntValue(sliceSize) != sourceSize) {
2126         return rewriter.notifyMatchFailure(
2127             padOp, "cannot fold since the inner ExtractSliceOp size does not "
2128                    "match the size of the outer padding");
2129       }
2130       en.value() = outerSliceOp.getMixedSizes()[en.index()];
2131     }
2132 
2133     // Combine the high paddings of the two tensor::PadOps.
2134     SmallVector<OpFoldResult> newHighPad(rank, rewriter.getIndexAttr(0));
2135     for (auto &en : enumerate(newHighPad)) {
2136       if (innerDims.test(en.index()))
2137         newHighPad[en.index()] = padOp.getMixedHighPad()[en.index()];
2138       if (outerDims.test(en.index()))
2139         newHighPad[en.index()] = outerPadOp.getMixedHighPad()[en.index()];
2140     }
2141 
2142     // Create a new tensor::ExtractSliceOp, tensor::PadOp pair that performs the
2143     // two paddings in one step.
2144     auto newSliceOp = rewriter.create<ExtractSliceOp>(
2145         padOp.getLoc(), outerSliceOp.getSource(), newOffsets, newSizes,
2146         innerSliceOp.getMixedStrides());
2147     auto newPadOp = rewriter.create<PadOp>(
2148         padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
2149         padOp.getMixedLowPad(), newHighPad, padOp.getNofold());
2150     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
2151                                 newPadOp.getRegion().begin());
2152     rewriter.replaceOp(padOp, newPadOp.getResult());
2153     return success();
2154   }
2155 };
2156 
2157 } // namespace
2158 
2159 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
2160                                         MLIRContext *context) {
2161   results.add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast,
2162               FoldOrthogonalPaddings>(context);
2163 }
2164 
/// Return the padding value of the PadOp if it is constant. In this context,
2166 /// "constant" means an actual constant or "defined outside of the block".
2167 ///
2168 /// Values are considered constant in three cases:
2169 ///  - A ConstantLike value.
2170 ///  - A basic block argument from a different block.
2171 ///  - A value defined outside of the block.
2172 ///
2173 /// If the padding value is not constant, an empty Value is returned.
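///
/// For example (illustrative), getConstantPaddingValue() on the tensor.pad
/// below returns %cst:
///
/// ```mlir
///   %cst = arith.constant 0.0 : f32
///   %0 = tensor.pad %t low[0, 0] high[1, 1] {
///   ^bb0(%i: index, %j: index):
///     tensor.yield %cst : f32
///   } : tensor<4x4xf32> to tensor<5x5xf32>
/// ```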
2174 Value PadOp::getConstantPaddingValue() {
2175   auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
2176   if (!yieldOp)
2177     return {};
2178   Value padValue = yieldOp.getValue();
2179   // Check if yield value is a constant.
2180   if (matchPattern(padValue, m_Constant()))
2181     return padValue;
2182   // Check if yield value is defined inside the PadOp block.
2183   if (padValue.getParentBlock() == &getRegion().front())
2184     return {};
2185   // Else: Yield value defined outside of the PadOp block.
2186   return padValue;
2187 }
2188 
2189 OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
2190   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
2191       !getNofold())
2192     return getSource();
2193   return {};
2194 }
2195 
2196 //===----------------------------------------------------------------------===//
2197 // SplatOp
2198 //===----------------------------------------------------------------------===//
2199 
2200 OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
2201   auto constOperand = operands.front();
2202   if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
2203     return {};
2204 
  // SplatElementsAttr::get treats a single value for the second arg as a
  // splat.
2206   return SplatElementsAttr::get(getType(), {constOperand});
2207 }
2208 
2209 //===----------------------------------------------------------------------===//
2210 // TableGen'd op method definitions
2211 //===----------------------------------------------------------------------===//
2212 
2213 #define GET_OP_CLASSES
2214 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
2215