1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/Utils/Utils.h"
10 #include "mlir/Dialect/Tensor/IR/Tensor.h"
11 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
12 #include "mlir/Dialect/Utils/StaticValueUtils.h"
13 #include "mlir/IR/BlockAndValueMapping.h"
14 #include "mlir/IR/Builders.h"
15 #include "mlir/IR/BuiltinAttributeInterfaces.h"
16 #include "mlir/IR/Matchers.h"
17 #include "mlir/IR/TypeUtilities.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallBitVector.h"
20 
21 using namespace mlir;
22 using namespace mlir::tensor;
23 
24 /// Materialize a single constant operation from a given attribute value with
25 /// the desired resultant type.
26 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
27                                               Attribute value, Type type,
28                                               Location loc) {
29   if (arith::ConstantOp::isBuildableWith(value, type))
30     return builder.create<arith::ConstantOp>(loc, value, type);
31   if (complex::ConstantOp::isBuildableWith(value, type))
32     return builder.create<complex::ConstantOp>(loc, type,
33                                                value.cast<ArrayAttr>());
34   return nullptr;
35 }
36 
37 //===----------------------------------------------------------------------===//
38 // CastOp
39 //===----------------------------------------------------------------------===//
40 
41 /// Returns true if `target` is a ranked tensor type that preserves static
42 /// information available in the `source` ranked tensor type.
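///
/// For example:
///   preservesStaticInformation(tensor<8x?xf32>, tensor<8x16xf32>) == true
///   preservesStaticInformation(tensor<8x16xf32>, tensor<8x?xf32>) == false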
43 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
44   auto sourceType = source.dyn_cast<RankedTensorType>();
45   auto targetType = target.dyn_cast<RankedTensorType>();
46 
47   // Requires RankedTensorType.
48   if (!sourceType || !targetType)
49     return false;
50 
51   // Requires same elemental type.
52   if (sourceType.getElementType() != targetType.getElementType())
53     return false;
54 
55   // Requires same rank.
56   if (sourceType.getRank() != targetType.getRank())
57     return false;
58 
  // If the source is static along a dimension where the target is dynamic, the
  // target loses that static information.
60   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
61     if (!ShapedType::isDynamic(std::get<0>(t)) &&
62         ShapedType::isDynamic(std::get<1>(t)))
63       return false;
64   }
65 
66   return true;
67 }
68 
69 /// Determines whether tensor::CastOp casts to a more dynamic version of the
70 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
71 /// implement canonicalization patterns for ops in different dialects that may
72 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted as `slice` ops and are canonicalized
/// to preserve the type compatibility of their uses.
75 ///
76 /// Returns true when all conditions are met:
77 /// 1. source and result are ranked tensors with same element type and rank.
/// 2. the source type has at least as much static information as the result.
79 ///
80 /// Example:
81 /// ```mlir
82 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
83 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
84 /// ```
85 ///
86 /// folds into:
87 ///
88 /// ```mlir
89 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
90 /// ```
91 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
92   if (!castOp)
93     return false;
94 
95   // Can fold if the source of cast has at least as much static information as
96   // its results.
97   return preservesStaticInformation(castOp.getType(),
98                                     castOp.source().getType());
99 }
100 
101 /// Determines whether the tensor::CastOp casts to a more static version of the
/// source tensor. This is useful to fold into a producing op and implement
/// canonicalization patterns with the `tensor.cast` op as the root, but with
/// the producer being from a different dialect. Returns true when all
/// conditions are met:
/// 1. source and result are ranked tensors with same element type and rank.
106 /// 2. the result type has more static information than the source.
107 ///
108 /// Example:
109 /// ```mlir
110 ///   %1 = producer ... : tensor<?x?xf32>
111 ///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<8x16xf32>
112 /// ```
113 ///
/// can be canonicalized to:
115 ///
116 /// ```mlir
117 ///   %2 = producer ... : tensor<8x16xf32>
118 /// ```
/// Not all ops can be canonicalized this way, but for those that can be, this
/// method provides a check that the canonicalization is worth doing.
121 bool mlir::tensor::canFoldIntoProducerOp(CastOp castOp) {
122   if (!castOp)
123     return false;
124   return preservesStaticInformation(castOp.source().getType(),
125                                     castOp.getType());
126 }
127 
128 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
129 /// that can be folded.
130 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
131   bool folded = false;
132   for (OpOperand &operand : op->getOpOperands()) {
133     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
134     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
135       operand.set(castOp.getOperand());
136       folded = true;
137     }
138   }
139   return success(folded);
140 }
141 
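/// Two tensor types are cast compatible when they have the same element type
/// and compatible shapes: unranked tensors are compatible with everything,
/// and ranked tensors must have equal rank with each dimension pair equal or
/// at least one side dynamic. For example, tensor<4x?xf32> can be cast to
/// tensor<?x8xf32>, but not to tensor<4xf32> (rank mismatch) or
/// tensor<4x8xi32> (element type mismatch).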
142 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
143   if (inputs.size() != 1 || outputs.size() != 1)
144     return false;
145   Type a = inputs.front(), b = outputs.front();
146   auto aT = a.dyn_cast<TensorType>();
147   auto bT = b.dyn_cast<TensorType>();
148   if (!aT || !bT)
149     return false;
150 
151   if (aT.getElementType() != bT.getElementType())
152     return false;
153 
154   return succeeded(verifyCompatibleShape(aT, bT));
155 }
156 
157 /// Compute a TensorType that has the joined shape knowledge of the two
158 /// given TensorTypes. The element types need to match.
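/// For example, joining tensor<?x8xf32> with tensor<4x?xf32> yields
/// tensor<4x8xf32>, while no join exists (the null type is returned) for
/// tensor<4x8xf32> and tensor<4x9xf32>.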
159 static TensorType joinShapes(TensorType one, TensorType two) {
160   assert(one.getElementType() == two.getElementType());
161 
162   if (!one.hasRank())
163     return two;
164   if (!two.hasRank())
165     return one;
166 
167   int64_t rank = one.getRank();
168   if (rank != two.getRank())
169     return {};
170 
171   SmallVector<int64_t, 4> join;
172   join.reserve(rank);
173   for (int64_t i = 0; i < rank; ++i) {
174     if (one.isDynamicDim(i)) {
175       join.push_back(two.getDimSize(i));
176       continue;
177     }
178     if (two.isDynamicDim(i)) {
179       join.push_back(one.getDimSize(i));
180       continue;
181     }
182     if (one.getDimSize(i) != two.getDimSize(i))
183       return {};
184     join.push_back(one.getDimSize(i));
185   }
186   return RankedTensorType::get(join, one.getElementType());
187 }
188 
189 namespace {
190 
191 /// Replaces chains of two tensor.cast operations by a single tensor.cast
192 /// operation if doing so does not remove runtime constraints.
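///
/// For example, casting tensor<?x?xf32> to tensor<4x?xf32> and then to
/// tensor<?x8xf32> cannot be collapsed into a single cast to tensor<?x8xf32>,
/// because that would drop the runtime check that the first dimension is 4.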
193 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
194   using OpRewritePattern<CastOp>::OpRewritePattern;
195 
196   LogicalResult matchAndRewrite(CastOp tensorCast,
197                                 PatternRewriter &rewriter) const final {
198     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
199 
200     if (!tensorCastOperand)
201       return failure();
202 
203     auto sourceType =
204         tensorCastOperand.getOperand().getType().cast<TensorType>();
205     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
206     auto resultType = tensorCast.getType().cast<TensorType>();
207 
208     // We can remove the intermediate cast if joining all three produces the
209     // same result as just joining the source and result shapes.
210     auto firstJoin =
211         joinShapes(joinShapes(sourceType, intermediateType), resultType);
212 
213     // The join might not exist if the cast sequence would fail at runtime.
214     if (!firstJoin)
215       return failure();
216 
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
220     auto newJoin = joinShapes(sourceType, resultType);
221     if (firstJoin != newJoin)
222       return failure();
223 
224     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
225                                         tensorCastOperand.getOperand());
226     return success();
227   }
228 };
229 
230 } // namespace
231 
232 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
233                                          MLIRContext *context) {
234   results.add<ChainedTensorCast>(context);
235 }
236 
237 //===----------------------------------------------------------------------===//
238 // DimOp
239 //===----------------------------------------------------------------------===//
240 
241 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
242                   int64_t index) {
243   auto loc = result.location;
244   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
245   build(builder, result, source, indexValue);
246 }
247 
248 Optional<int64_t> DimOp::getConstantIndex() {
249   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
250     return constantOp.getValue().cast<IntegerAttr>().getInt();
251   return {};
252 }
253 
254 LogicalResult DimOp::verify() {
255   // Assume unknown index to be in range.
256   Optional<int64_t> index = getConstantIndex();
257   if (!index.hasValue())
258     return success();
259 
260   // Check that constant index is not knowingly out of range.
261   auto type = source().getType();
262   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
263     if (index.getValue() >= tensorType.getRank())
264       return emitOpError("index is out of range");
265   } else if (type.isa<UnrankedTensorType>()) {
266     // Assume index to be in range.
267   } else {
268     llvm_unreachable("expected operand with tensor type");
269   }
270   return success();
271 }
272 
273 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
274   // All forms of folding require a known index.
275   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
276   if (!index)
277     return {};
278 
279   // Folding for unranked types (UnrankedTensorType) is not supported.
280   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
281   if (!tensorType)
282     return {};
283 
284   // Fold if the shape extent along the given index is known.
285   if (!tensorType.isDynamicDim(index.getInt())) {
286     Builder builder(getContext());
287     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
288   }
289 
290   Operation *definingOp = source().getDefiningOp();
291 
292   // Fold dim to the operand of tensor.generate.
293   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
294     auto resultType =
295         fromElements.getResult().getType().cast<RankedTensorType>();
296     // The case where the type encodes the size of the dimension is handled
297     // above.
298     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
299 
300     // Find the operand of the fromElements that corresponds to this index.
301     auto dynExtents = fromElements.dynamicExtents().begin();
302     for (auto dim : resultType.getShape().take_front(index.getInt()))
303       if (ShapedType::isDynamic(dim))
304         dynExtents++;
305 
306     return Value{*dynExtents};
307   }
308 
309   // The size at the given index is now known to be a dynamic size.
310   unsigned unsignedIndex = index.getValue().getZExtValue();
311 
312   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
313     // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
314     // `resolve-shaped-type-result-dims` pass.
315     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
316         sliceOp.isDynamicSize(unsignedIndex)) {
317       return {sliceOp.getDynamicSize(unsignedIndex)};
318     }
319   }
320 
321   // dim(cast) -> dim
322   if (succeeded(foldTensorCast(*this)))
323     return getResult();
324 
325   return {};
326 }
327 
328 namespace {
329 /// Fold dim of a cast into the dim of the source of the tensor cast.
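///
/// Example:
///
/// ```mlir
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %1 = tensor.dim %0, %c1 : tensor<?x?xf32>
/// ```
///
/// is rewritten to:
///
/// ```mlir
///   %1 = tensor.dim %t, %c1 : tensor<4x?xf32>
/// ```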
330 struct DimOfCastOp : public OpRewritePattern<DimOp> {
331   using OpRewritePattern<DimOp>::OpRewritePattern;
332 
333   LogicalResult matchAndRewrite(DimOp dimOp,
334                                 PatternRewriter &rewriter) const override {
335     auto castOp = dimOp.source().getDefiningOp<CastOp>();
336     if (!castOp)
337       return failure();
338     Value newSource = castOp.getOperand();
339     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
340     return success();
341   }
342 };
343 } // namespace
344 
345 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
346                                         MLIRContext *context) {
347   results.add<DimOfCastOp>(context);
348 }
349 
350 //===----------------------------------------------------------------------===//
351 // ExtractOp
352 //===----------------------------------------------------------------------===//
353 
354 LogicalResult ExtractOp::verify() {
355   // Verify the # indices match if we have a ranked type.
356   if (auto tensorType = tensor().getType().dyn_cast<RankedTensorType>())
357     if (tensorType.getRank() != static_cast<int64_t>(indices().size()))
358       return emitOpError("incorrect number of indices for extract_element");
359 
360   return success();
361 }
362 
363 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
364   // If this is a splat elements attribute, simply return the value. All of the
365   // elements of a splat attribute are the same.
366   if (Attribute tensor = operands.front())
367     if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
368       return splatTensor.getSplatValue<Attribute>();
369 
370   // Collect the constant indices into the tensor.
371   SmallVector<uint64_t, 8> indices;
  for (Attribute indexAttr : llvm::drop_begin(operands, 1)) {
    if (!indexAttr || !indexAttr.isa<IntegerAttr>())
      return {};
    indices.push_back(indexAttr.cast<IntegerAttr>().getInt());
  }
376   }
377 
378   // Fold extract(from_elements(...)).
379   if (auto fromElementsOp = tensor().getDefiningOp<FromElementsOp>()) {
380     auto tensorType = fromElementsOp.getType().cast<RankedTensorType>();
381     auto rank = tensorType.getRank();
382     assert(static_cast<int64_t>(indices.size()) == tensorType.getRank() &&
383            "rank mismatch");
    int flatIndex = 0;
    int stride = 1;
    for (int i = rank - 1; i >= 0; --i) {
      flatIndex += indices[i] * stride;
      stride *= tensorType.getDimSize(i);
    }
391     // Prevent out of bounds accesses. This can happen in invalid code that will
392     // never execute.
393     if (static_cast<int>(fromElementsOp.elements().size()) <= flatIndex ||
394         flatIndex < 0)
395       return {};
396     return fromElementsOp.elements()[flatIndex];
397   }
398 
399   // If this is an elements attribute, query the value at the given indices.
400   if (Attribute tensor = operands.front()) {
401     auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
402     if (elementsAttr && elementsAttr.isValidIndex(indices))
403       return elementsAttr.getValues<Attribute>()[indices];
404   }
405 
406   return {};
407 }
408 
409 //===----------------------------------------------------------------------===//
410 // FromElementsOp
411 //===----------------------------------------------------------------------===//
412 
413 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
414                            Type resultType, ValueRange elements) {
415   result.addOperands(elements);
416   result.addTypes(resultType);
417 }
418 
419 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
420                            ValueRange elements) {
421   assert(!elements.empty() && "expected at least one element");
422   Type resultType = RankedTensorType::get(
423       {static_cast<int64_t>(elements.size())}, elements.front().getType());
424   build(builder, result, resultType, elements);
425 }
426 
427 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
428   if (!llvm::is_contained(operands, nullptr))
429     return DenseElementsAttr::get(getType(), operands);
430   return {};
431 }
432 
433 namespace {
434 
435 // Pushes the index_casts that occur before extractions to after the extract.
436 // This minimizes type conversion in some cases and enables the extract
437 // canonicalizer. This changes:
438 //
439 // %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
440 // %extract = tensor.extract %cast[%index] : tensor<1xindex>
441 //
442 // to the following:
443 //
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template that handles all tensor cast
// operations.
450 struct ExtractElementFromIndexCast
451     : public OpRewritePattern<tensor::ExtractOp> {
452   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
453 
454   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
455                                 PatternRewriter &rewriter) const final {
456     Location loc = extract.getLoc();
457     auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
458     if (!indexCast)
459       return failure();
460 
461     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
462 
463     auto newExtract = rewriter.create<tensor::ExtractOp>(
464         loc, elementTy, indexCast.getIn(), extract.indices());
465 
466     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
467                                                     newExtract);
468 
469     return success();
470   }
471 };
472 
473 } // namespace
474 
475 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
476                                                  MLIRContext *context) {
477   results.add<ExtractElementFromIndexCast>(context);
478 }
479 
480 //===----------------------------------------------------------------------===//
481 // InsertOp
482 //===----------------------------------------------------------------------===//
483 
484 LogicalResult InsertOp::verify() {
485   // Verify the # indices match if we have a ranked type.
486   if (auto destType = dest().getType().dyn_cast<RankedTensorType>())
487     if (destType.getRank() != static_cast<int64_t>(indices().size()))
488       return emitOpError("incorrect number of indices");
489   return success();
490 }
491 
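/// Folds an insert of a scalar into a splat constant destination when the
/// scalar equals the splat value; the destination is returned unchanged.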
492 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
493   Attribute scalar = operands[0];
494   Attribute dest = operands[1];
495   if (scalar && dest)
496     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
497       if (scalar == splatDest.getSplatValue<Attribute>())
498         return dest;
499   return {};
500 }
501 
502 //===----------------------------------------------------------------------===//
503 // GenerateOp
504 //===----------------------------------------------------------------------===//
505 
506 LogicalResult GenerateOp::reifyResultShapes(
507     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
508   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
509   int idx = 0;
510   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
511     if (getType().isDynamicDim(dim)) {
512       reifiedReturnShapes[0][dim] = getOperand(idx++);
513     } else {
514       reifiedReturnShapes[0][dim] = builder.create<arith::ConstantIndexOp>(
515           getLoc(), getType().getDimSize(dim));
516     }
517   }
518   return success();
519 }
520 
521 LogicalResult GenerateOp::verify() {
522   // Ensure that the tensor type has as many dynamic dimensions as are specified
523   // by the operands.
524   RankedTensorType resultTy = getType().cast<RankedTensorType>();
525   if (getNumOperands() != resultTy.getNumDynamicDims())
526     return emitError("must have as many index operands as dynamic extents "
527                      "in the result type");
528 
529   return success();
530 }
531 
532 LogicalResult GenerateOp::verifyRegions() {
533   RankedTensorType resultTy = getType().cast<RankedTensorType>();
534   // Ensure that region arguments span the index space.
535   if (!llvm::all_of(body().getArgumentTypes(),
536                     [](Type ty) { return ty.isIndex(); }))
537     return emitError("all body arguments must be index");
538   if (body().getNumArguments() != resultTy.getRank())
539     return emitError("must have one body argument per input dimension");
540 
541   // Ensure that the region yields an element of the right type.
542   auto yieldOp = cast<YieldOp>(body().getBlocks().front().getTerminator());
543 
544   if (yieldOp.value().getType() != resultTy.getElementType())
545     return emitOpError(
546         "body must be terminated with a `yield` operation of the tensor "
547         "element type");
548 
549   return success();
550 }
551 
552 void GenerateOp::build(
553     OpBuilder &b, OperationState &result, Type resultTy,
554     ValueRange dynamicExtents,
555     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
556   build(b, result, resultTy, dynamicExtents);
557 
558   // Build and populate body.
559   OpBuilder::InsertionGuard guard(b);
560   Region *bodyRegion = result.regions.front().get();
561   auto rank = resultTy.cast<RankedTensorType>().getRank();
562   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
563   SmallVector<Location, 2> argumentLocs(rank, result.location);
564   Block *bodyBlock =
565       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
566   bodyBuilder(b, result.location, bodyBlock->getArguments());
567 }
568 
569 namespace {
570 
/// Canonicalizes tensor.generate operations whose dynamic extent operands are
/// constants by folding those extents into the result type. A tensor.cast
/// back to the original result type is inserted so that the resulting IR
/// stays well-typed.
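///
/// Example (generate body elided):
///
/// ```mlir
///   %c5 = arith.constant 5 : index
///   %0 = tensor.generate %arg0, %c5 { ... } : tensor<?x?xf32>
/// ```
///
/// is rewritten to:
///
/// ```mlir
///   %0 = tensor.generate %arg0 { ... } : tensor<?x5xf32>
///   %1 = tensor.cast %0 : tensor<?x5xf32> to tensor<?x?xf32>
/// ```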
575 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
576   using OpRewritePattern<GenerateOp>::OpRewritePattern;
577 
578   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
579                                 PatternRewriter &rewriter) const final {
580     auto resultType =
581         tensorFromElements.getResult().getType().cast<RankedTensorType>();
582 
583     if (resultType.hasStaticShape())
584       return failure();
585 
586     SmallVector<Value, 4> newOperands;
587     SmallVector<int64_t, 4> newShape;
588     auto operandsIt = tensorFromElements.dynamicExtents().begin();
589 
590     for (int64_t dim : resultType.getShape()) {
591       if (!ShapedType::isDynamic(dim)) {
592         newShape.push_back(dim);
593         continue;
594       }
595       APInt index;
596       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
597         newShape.push_back(ShapedType::kDynamicSize);
598         newOperands.push_back(*operandsIt++);
599         continue;
600       }
601       newShape.push_back(index.getSExtValue());
602       operandsIt++;
603     }
604 
605     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
606       return failure();
607 
608     auto loc = tensorFromElements.getLoc();
609     auto newOp = rewriter.create<GenerateOp>(
610         loc, RankedTensorType::get(newShape, resultType.getElementType()),
611         newOperands);
612     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
613                                 newOp.body().begin());
614     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
615                                                 newOp);
616     return success();
617   }
618 };
619 
620 /// Canonicalizes the pattern of the form
621 ///
622 /// %tensor = tensor.generate %x {
623 ///   ^bb0(%arg0: index):
624 ///   <computation>
///   tensor.yield %1 : index
/// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
628 ///
629 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
630 /// tensor.generate operation has no side-effects.
631 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
632   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
633 
634   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
635                                 PatternRewriter &rewriter) const final {
636     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
637     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
638       return failure();
639 
640     BlockAndValueMapping mapping;
641     Block *body = tensorFromElements.getBody();
642     mapping.map(body->getArguments(), extract.indices());
643     for (auto &op : body->without_terminator())
644       rewriter.clone(op, mapping);
645 
646     auto yield = cast<YieldOp>(body->getTerminator());
647 
648     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
649     return success();
650   }
651 };
652 
653 /// Canonicalizes the pattern of the form
654 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
656 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
657 ///
658 /// to
659 ///
660 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
661 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
662   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
663 
664   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
665                                 PatternRewriter &rewriter) const final {
666     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
667     if (!tensorCast)
668       return failure();
669 
670     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
671                                                    extract.indices());
672     return success();
673   }
674 };
675 
676 } // namespace
677 
678 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
679                                              MLIRContext *context) {
680   // TODO: Move extract patterns to tensor::ExtractOp.
681   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
682               StaticTensorGenerate>(context);
683 }
684 
685 //===----------------------------------------------------------------------===//
686 // RankOp
687 //===----------------------------------------------------------------------===//
688 
689 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
690   // Constant fold rank when the rank of the operand is known.
691   auto type = getOperand().getType();
692   auto shapedType = type.dyn_cast<ShapedType>();
693   if (shapedType && shapedType.hasRank())
694     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
695   return IntegerAttr();
696 }
697 
698 //===----------------------------------------------------------------------===//
699 // ReshapeOp
700 //===----------------------------------------------------------------------===//
701 
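/// Return the number of elements of `type`. Callers must ensure the shape is
/// fully static; a dynamic dimension would make the product meaningless.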
702 static int64_t getNumElements(ShapedType type) {
703   int64_t numElements = 1;
704   for (auto dim : type.getShape())
705     numElements *= dim;
706   return numElements;
707 }
708 
709 LogicalResult ReshapeOp::verify() {
710   TensorType operandType = source().getType().cast<TensorType>();
711   TensorType resultType = result().getType().cast<TensorType>();
712 
713   if (operandType.getElementType() != resultType.getElementType())
714     return emitOpError("element types of source and destination tensor "
715                        "types should be the same");
716 
717   int64_t shapeSize = shape().getType().cast<RankedTensorType>().getDimSize(0);
718   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
719   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
720 
721   if (resultRankedType) {
722     if (operandRankedType && resultRankedType.hasStaticShape() &&
723         operandRankedType.hasStaticShape()) {
724       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
725         return emitOpError("source and destination tensor should have the "
726                            "same number of elements");
727     }
728     if (ShapedType::isDynamic(shapeSize))
729       return emitOpError("cannot use shape operand with dynamic length to "
730                          "reshape to statically-ranked tensor type");
731     if (shapeSize != resultRankedType.getRank())
732       return emitOpError(
733           "length of shape operand differs from the result's tensor rank");
734   }
735   return success();
736 }
737 
738 //===----------------------------------------------------------------------===//
739 // Reassociative reshape ops
740 //===----------------------------------------------------------------------===//
741 
742 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
743   return getSymbolLessAffineMaps(getReassociationExprs());
744 }
745 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
746   return convertReassociationIndicesToExprs(getContext(),
747                                             getReassociationIndices());
748 }
749 
750 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
751   return getSymbolLessAffineMaps(getReassociationExprs());
752 }
753 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
754   return convertReassociationIndicesToExprs(getContext(),
755                                             getReassociationIndices());
756 }
757 
758 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
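/// For example, the reassociation [[0, 1], [2]] applied to tensor<4x?x8xf32>
/// collapses it to tensor<?x8xf32>, and [[0, 1]] applied to tensor<4x2xf32>
/// yields tensor<8xf32>.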
759 static RankedTensorType
760 computeTensorReshapeCollapsedType(RankedTensorType type,
761                                   ArrayRef<AffineMap> reassociation) {
762   auto shape = type.getShape();
763   SmallVector<int64_t, 4> newShape;
764   newShape.reserve(reassociation.size());
765 
766   // Use the fact that reassociation is valid to simplify the logic: only use
767   // each map's rank.
768   assert(isReassociationValid(reassociation) && "invalid reassociation");
769   unsigned currentDim = 0;
770   for (AffineMap m : reassociation) {
771     unsigned dim = m.getNumResults();
772     auto band = shape.slice(currentDim, dim);
773     int64_t size = 1;
774     if (llvm::is_contained(band, ShapedType::kDynamicSize))
775       size = ShapedType::kDynamicSize;
776     else
777       for (unsigned d = 0; d < dim; ++d)
778         size *= shape[currentDim + d];
779     newShape.push_back(size);
780     currentDim += dim;
781   }
782 
783   return RankedTensorType::get(newShape, type.getElementType());
784 }
785 
786 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
787                             ArrayRef<ReassociationIndices> reassociation,
788                             ArrayRef<NamedAttribute> attrs) {
789   auto resultType = computeTensorReshapeCollapsedType(
790       src.getType().cast<RankedTensorType>(),
791       getSymbolLessAffineMaps(
792           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
793   build(b, result, resultType, src, attrs);
794   result.addAttribute(getReassociationAttrName(),
795                       getReassociationIndicesAttribute(b, reassociation));
796 }
797 
798 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
799                                         TensorReshapeOp, ExpandShapeOp>::value>
800 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
801                                            RankedTensorType expandedType,
802                                            RankedTensorType collapsedType) {
803   if (failed(
804           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
805     return failure();
806 
807   auto maps = op.getReassociationMaps();
808   RankedTensorType expectedType =
809       computeTensorReshapeCollapsedType(expandedType, maps);
810   if (collapsedType != expectedType)
811     return op.emitOpError("expected collapsed type to be ")
812            << expectedType << ", but got " << collapsedType;
813   return success();
814 }
815 
816 LogicalResult ExpandShapeOp::verify() {
817   return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
818 }
819 
820 LogicalResult CollapseShapeOp::verify() {
821   return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
822 }
823 
824 namespace {
825 /// Reshape of a splat constant can be replaced with a constant of the result
826 /// type.
827 template <typename TensorReshapeOp>
828 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
829   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
830   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
831                                 PatternRewriter &rewriter) const override {
832     DenseElementsAttr attr;
833     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
834       return failure();
835     if (!attr || !attr.isSplat())
836       return failure();
837     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
838         reshapeOp.getResultType(), attr.getRawData(), true);
839     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
840     return success();
841   }
842 };
843 
/// Reshape of a FromElements can be replaced with a FromElements of the
/// result type.
846 template <typename TensorReshapeOp>
847 struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
848   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
849   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
850                                 PatternRewriter &rewriter) const override {
851     auto fromElements =
852         reshapeOp.src().template getDefiningOp<FromElementsOp>();
853     if (!fromElements)
854       return failure();
855 
856     auto shapedTy = reshapeOp.getType().template cast<ShapedType>();
857 
858     if (!shapedTy.hasStaticShape())
859       return failure();
860 
861     rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
862                                                 fromElements.elements());
863     return success();
864   }
865 };
866 
867 } // namespace
868 
869 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
870                                                 MLIRContext *context) {
871   results.add<ComposeReassociativeReshapeOps<ExpandShapeOp>,
872               ComposeExpandOfCollapseOp<ExpandShapeOp, CollapseShapeOp>,
873               FoldReshapeWithConstant<ExpandShapeOp>,
874               FoldReshapeWithFromElements<ExpandShapeOp>>(context);
875 }
876 
877 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
878                                                   MLIRContext *context) {
879   results.add<ComposeReassociativeReshapeOps<CollapseShapeOp>,
880               ComposeCollapseOfExpandOp<CollapseShapeOp, ExpandShapeOp>,
881               FoldReshapeWithConstant<CollapseShapeOp>,
882               FoldReshapeWithFromElements<CollapseShapeOp>>(context);
883 }
884 
885 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
886   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
887 }
888 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
889   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
890 }
891 
892 //===----------------------------------------------------------------------===//
893 // ExtractSliceOp
894 //===----------------------------------------------------------------------===//
895 
896 /// An extract_slice op result type can be fully inferred from the source type
897 /// and the static representation of offsets, sizes and strides. Special
898 /// sentinels encode the dynamic case.
899 RankedTensorType ExtractSliceOp::inferResultType(
900     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
901     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offset/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type and strides=1.
905   unsigned rank = sourceRankedTensorType.getRank();
906   (void)rank;
907   assert(staticSizes.size() == rank &&
908          "unexpected staticSizes not equal to rank of source");
909   return RankedTensorType::get(staticSizes,
910                                sourceRankedTensorType.getElementType());
911 }
912 
913 RankedTensorType ExtractSliceOp::inferResultType(
914     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
915     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
916   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
917   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
918   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
919                              ShapedType::kDynamicStrideOrOffset);
920   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
921                              ShapedType::kDynamicSize);
922   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
923                              ShapedType::kDynamicStrideOrOffset);
924   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
925                                          staticSizes, staticStrides);
926 }
927 
/// Infer the result type of extract_slice like `inferResultType` above, but
/// additionally rank-reduced to `resultRank` by dropping unit dimensions.
/// Special sentinels encode the dynamic case.
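///
/// For example, if the non-rank-reduced inferred type is tensor<1x4x1xf32>
/// and `resultRank` is 1, the two unit dimensions (positions 0 and 2) are
/// dropped and the inferred type becomes tensor<4xf32>.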
931 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
932     unsigned resultRank, RankedTensorType sourceRankedTensorType,
933     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
934     ArrayRef<int64_t> strides) {
935   auto inferredType =
936       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
937           .cast<RankedTensorType>();
938   int rankDiff = inferredType.getRank() - resultRank;
939   if (rankDiff > 0) {
940     auto shape = inferredType.getShape();
941     llvm::SmallBitVector dimsToProject =
942         getPositionsOfShapeOne(rankDiff, shape);
943     SmallVector<int64_t> projectedShape;
944     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
945       if (!dimsToProject.test(pos))
946         projectedShape.push_back(shape[pos]);
947     inferredType =
948         RankedTensorType::get(projectedShape, inferredType.getElementType());
949   }
950   return inferredType;
951 }
952 
953 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
954     unsigned resultRank, RankedTensorType sourceRankedTensorType,
955     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
956     ArrayRef<OpFoldResult> strides) {
957   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
958   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
959   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
960                              ShapedType::kDynamicStrideOrOffset);
961   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
962                              ShapedType::kDynamicSize);
963   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
964                              ShapedType::kDynamicStrideOrOffset);
965   return ExtractSliceOp::inferRankReducedResultType(
966       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
967       staticStrides);
968 }
969 
970 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
971 /// result type. If the type passed is nullptr, it is inferred.
972 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
973                            RankedTensorType resultType, Value source,
974                            ArrayRef<OpFoldResult> offsets,
975                            ArrayRef<OpFoldResult> sizes,
976                            ArrayRef<OpFoldResult> strides,
977                            ArrayRef<NamedAttribute> attrs) {
978   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
979   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
980   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
981                              ShapedType::kDynamicStrideOrOffset);
982   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
983                              ShapedType::kDynamicSize);
984   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
985                              ShapedType::kDynamicStrideOrOffset);
986   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
987   // Structuring implementation this way avoids duplication between builders.
988   if (!resultType) {
989     resultType =
990         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
991                                         staticSizes, staticStrides)
992             .cast<RankedTensorType>();
993   }
994   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
995         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
996         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
997   result.addAttributes(attrs);
998 }
999 
1000 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
1001 /// result type.
1002 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1003                            ArrayRef<OpFoldResult> offsets,
1004                            ArrayRef<OpFoldResult> sizes,
1005                            ArrayRef<OpFoldResult> strides,
1006                            ArrayRef<NamedAttribute> attrs) {
1007   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1008 }
1009 
1010 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
1011 /// type passed is nullptr, it is inferred.
1012 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1013                            RankedTensorType resultType, Value source,
1014                            ValueRange offsets, ValueRange sizes,
1015                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1016   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1017       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1018   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1019       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1020   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1021       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1022   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1023 }
1024 
1025 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
1026 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1027                            ValueRange offsets, ValueRange sizes,
1028                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1029   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1030 }
1031 
1032 template <typename OpTy>
1033 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
1034                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
1036   switch (result) {
1037   case SliceVerificationResult::Success:
1038     return success();
1039   case SliceVerificationResult::RankTooLarge:
1040     return op.emitError("expected rank to be smaller or equal to ")
1041            << "the other rank. ";
1042   case SliceVerificationResult::SizeMismatch:
1043     return op.emitError("expected type to be ")
1044            << expectedType << " or a rank-reduced version. (size mismatch) ";
1045   case SliceVerificationResult::ElemTypeMismatch:
1046     return op.emitError("expected element type to be ")
1047            << memrefType.getElementType();
1048   default:
1049     llvm_unreachable("unexpected extract_slice op verification result");
1050   }
1051 }
1052 
1053 /// Verifier for ExtractSliceOp.
1054 LogicalResult ExtractSliceOp::verify() {
1055   // Verify result type against inferred type.
1056   auto expectedType = ExtractSliceOp::inferResultType(
1057       getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
1058   auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
1059   return produceSliceErrorMsg(result, *this, expectedType);
1060 }
1061 
/// Infer the canonical type of the result of an extract_slice op. Returns the
/// rank-reduced type when its rank matches `resultRank`, and the
/// non-rank-reduced inferred type otherwise.
1065 static RankedTensorType
1066 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1067                             ArrayRef<OpFoldResult> mixedOffsets,
1068                             ArrayRef<OpFoldResult> mixedSizes,
1069                             ArrayRef<OpFoldResult> mixedStrides) {
1070   auto resultType =
1071       ExtractSliceOp::inferRankReducedResultType(
1072           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1073           .cast<RankedTensorType>();
1074   if (resultType.getRank() != resultRank) {
1075     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1076                                                  mixedSizes, mixedStrides)
1077                      .cast<RankedTensorType>();
1078   }
1079   return resultType;
1080 }
1081 
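/// Return a bit vector marking, for each slice size, whether the dimension is
/// dropped by rank reduction: a static unit size whose corresponding result
/// dimension is not itself a static 1. For example, sizes [1, 4, 1] with
/// result type tensor<4xf32> drop dimensions 0 and 2.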
1082 llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
1083   ArrayRef<int64_t> resultShape = getType().getShape();
1084   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1085   llvm::SmallBitVector droppedDims(mixedSizes.size());
1086   unsigned shapePos = 0;
1087   for (const auto &size : enumerate(mixedSizes)) {
1088     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if its slice size is not a static 1, or if
    // the corresponding result dimension is itself a static 1 (i.e., the unit
    // dimension was kept rather than rank-reduced away).
1092     if (!sizeVal || sizeVal.getValue() != 1 ||
1093         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1094       shapePos++;
1095       continue;
1096     }
1097     droppedDims.set(size.index());
1098   }
1099   return droppedDims;
1100 }
1101 
1102 LogicalResult ExtractSliceOp::reifyResultShapes(
1103     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1104   reifiedReturnShapes.resize(1);
1105   reifiedReturnShapes[0].reserve(getType().getRank());
1106   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1107   llvm::SmallBitVector droppedDims = getDroppedDims();
1108   Location loc = getLoc();
1109   for (const auto &size : enumerate(mixedSizes)) {
1110     if (droppedDims.test(size.index()))
1111       continue;
1112     if (auto attr = size.value().dyn_cast<Attribute>()) {
1113       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1114           loc, attr.cast<IntegerAttr>().getInt()));
1115       continue;
1116     }
1117     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1118   }
1119   return success();
1120 }
1121 
1122 namespace {
/// Pattern to rewrite an extract_slice op with tensor::CastOp arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
/// `canFoldIntoConsumerOp` is true.
1126 ///
1127 /// Example:
1128 /// ```
1129 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1130 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1131 ///   tensor<3x4xf32>
1132 /// ```
1133 /// is rewritten into:
1134 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///                                                    tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1137 /// ```
1138 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1139 public:
1140   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1141 
1142   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1143                                 PatternRewriter &rewriter) const override {
    // If any operand is constant, bail out and let the constant argument
    // folder (OpWithOffsetSizesAndStridesConstantArgumentFolder) kick in.
1145     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1146           return matchPattern(operand, matchConstantIndex());
1147         }))
1148       return failure();
1149 
1150     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1151     if (!castOp)
1152       return failure();
1153 
1154     if (!canFoldIntoConsumerOp(castOp))
1155       return failure();
1156 
1157     /// Deduce the type of the result to use for the canonicalized operation.
1158     RankedTensorType resultType = getCanonicalSliceResultType(
1159         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1160         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1161         sliceOp.getMixedStrides());
1162     Value newSlice = rewriter.create<ExtractSliceOp>(
1163         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1164         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1165         sliceOp.static_sizes(), sliceOp.static_strides());
1166     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1167                                                 newSlice);
1168     return success();
1169   }
1170 };
1171 
/// Slice elements from `values` into `outValues`. For each dimension, `counts`
/// holds the number of elements in the original values spanned by one step
/// along that dimension (i.e., the linearized stride). The output values can
/// be used to construct a DenseElementsAttr.
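///
/// For example, for a 2x3 source (counts = [3, 1]), offsets [0, 1], sizes
/// [2, 2] and strides [1, 1] select the flattened positions 1, 2, 4 and 5.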
1175 template <typename IterTy, typename ElemTy>
1176 static void sliceElements(IterTy values, ArrayRef<int64_t> counts,
1177                           ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1178                           ArrayRef<int64_t> strides,
1179                           llvm::SmallVectorImpl<ElemTy> *outValues) {
1180   assert(offsets.size() == sizes.size());
1181   assert(offsets.size() == strides.size());
1182   if (offsets.empty())
1183     return;
1184 
1185   int64_t offset = offsets.front();
1186   int64_t size = sizes.front();
1187   int64_t stride = strides.front();
1188   if (offsets.size() == 1) {
1189     for (int64_t i = 0; i < size; ++i, offset += stride)
1190       outValues->push_back(*(values + offset));
1191 
1192     return;
1193   }
1194 
1195   for (int64_t i = 0; i < size; ++i, offset += stride) {
1196     auto begin = values + offset * counts.front();
1197     sliceElements<IterTy, ElemTy>(begin, counts.drop_front(),
1198                                   offsets.drop_front(), sizes.drop_front(),
1199                                   strides.drop_front(), outValues);
1200   }
1201 }
1202 
/// Fold a tensor.extract_slice of an arith.constant into a new
/// arith.constant. The folded operation might introduce more constant data;
/// users can control this trade-off via the control function.
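///
/// For example, extracting [0, 0][1, 2][1, 1] from a constant
/// dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32> produces a new
/// arith.constant dense<[[1, 2]]> : tensor<1x2xi32>.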
1206 class ConstantOpExtractSliceFolder final
1207     : public OpRewritePattern<ExtractSliceOp> {
1208 public:
1209   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1210 
1211   ConstantOpExtractSliceFolder(MLIRContext *context,
1212                                ControlConstantExtractSliceFusionFn controlFn)
1213       : OpRewritePattern<ExtractSliceOp>(context),
1214         controlFn(std::move(controlFn)) {}
1215 
1216   LogicalResult matchAndRewrite(ExtractSliceOp op,
1217                                 PatternRewriter &rewriter) const override {
1218     DenseElementsAttr attr;
1219     if (!matchPattern(op.source(), m_Constant(&attr)))
1220       return failure();
1221 
1222     // A constant splat is handled by fold().
1223     if (attr.isSplat())
1224       return failure();
1225 
1226     // Dynamic result shape is not supported.
1227     auto sourceType = op.source().getType().cast<ShapedType>();
1228     auto resultType = op.result().getType().cast<ShapedType>();
1229     if (!sourceType.hasStaticShape() || !resultType.hasStaticShape())
1230       return failure();
1231 
1232     // Customized control over the folding.
1233     if (!controlFn(op))
1234       return failure();
1235 
1236     int64_t count = sourceType.getNumElements();
1237     if (count == 0)
1238       return failure();
1239 
1240     // Check if there are any dynamic parts, which are not supported.
1241     auto offsets = extractFromI64ArrayAttr(op.static_offsets());
1242     if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
1243       return failure();
1244     auto sizes = extractFromI64ArrayAttr(op.static_sizes());
1245     if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
1246       return failure();
1247     auto strides = extractFromI64ArrayAttr(op.static_strides());
1248     if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
1249       return failure();
1250 
    // Compute, for each dimension, the number of elements spanned by one step
    // along that dimension (the suffix product of the shape).
1252     SmallVector<int64_t> counts;
1253     ArrayRef<int64_t> shape = sourceType.getShape();
1254     counts.reserve(shape.size());
1255     for (int64_t v : shape) {
1256       count = count / v;
1257       counts.push_back(count);
1258     }
1259 
1260     // New attribute constructed by the sliced values.
1261     DenseElementsAttr newAttr;
1262 
1263     if (auto elems = attr.dyn_cast<DenseIntElementsAttr>()) {
1264       SmallVector<APInt> outValues;
1265       outValues.reserve(sourceType.getNumElements());
1266       sliceElements<DenseElementsAttr::IntElementIterator, APInt>(
1267           elems.begin(), counts, offsets, sizes, strides, &outValues);
1268       newAttr = DenseElementsAttr::get(resultType, outValues);
1269     } else if (auto elems = attr.dyn_cast<DenseFPElementsAttr>()) {
1270       SmallVector<APFloat> outValues;
1271       outValues.reserve(sourceType.getNumElements());
1272       sliceElements<DenseElementsAttr::FloatElementIterator, APFloat>(
1273           elems.begin(), counts, offsets, sizes, strides, &outValues);
1274       newAttr = DenseElementsAttr::get(resultType, outValues);
1275     }
1276 
1277     if (newAttr) {
1278       rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, resultType, newAttr);
1279       return success();
1280     }
1281 
1282     return failure();
1283   }
1284 
1285 private:
1286   /// This additionally controls whether the fold happens or not. Users can
1287   /// impose their heuristics in the function.
1288   ControlConstantExtractSliceFusionFn controlFn;
1289 };
1290 
1291 } // namespace
1292 
1293 void mlir::tensor::populateFoldConstantExtractSlicePatterns(
1294     RewritePatternSet &patterns,
1295     const ControlConstantExtractSliceFusionFn &controlFn) {
1296   patterns.add<ConstantOpExtractSliceFolder>(patterns.getContext(), controlFn);
1297 }
1298 
1299 /// Return the canonical type of the result of an extract_slice op.
1300 struct SliceReturnTypeCanonicalizer {
1301   RankedTensorType operator()(ExtractSliceOp op,
1302                               ArrayRef<OpFoldResult> mixedOffsets,
1303                               ArrayRef<OpFoldResult> mixedSizes,
1304                               ArrayRef<OpFoldResult> mixedStrides) {
1305     return getCanonicalSliceResultType(op.getType().getRank(),
1306                                        op.getSourceType(), mixedOffsets,
1307                                        mixedSizes, mixedStrides);
1308   }
1309 };
1310 
1311 /// A canonicalizer wrapper to replace ExtractSliceOps.
1312 struct SliceCanonicalizer {
1313   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1314                   ExtractSliceOp newOp) {
1315     Value replacement = newOp.getResult();
1316     if (replacement.getType() != op.getType())
1317       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1318                                                     replacement);
1319     rewriter.replaceOp(op, replacement);
1320   }
1321 };
1322 
1323 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1324                                                  MLIRContext *context) {
1325   results.add<
1326       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1327           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1328       ExtractSliceOpCastFolder>(context);
1329 }
1330 
/// Return success if `op` is an identity slice of `shapedType`, i.e. all
/// offsets are constant 0, all sizes equal the corresponding dimension of
/// `shapedType`, and all strides are constant 1.
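///
/// For illustration (a hypothetical example), the following identity slice
/// satisfies all three conditions and folds to %0:
///
/// ```mlir
///   %1 = tensor.extract_slice %0[0, 0] [8, 16] [1, 1]
///       : tensor<8x16xf32> to tensor<8x16xf32>
/// ```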
1332 static LogicalResult
1333 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1334                                            ShapedType shapedType) {
1336   for (OpFoldResult ofr : op.getMixedOffsets())
1337     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1338       return failure();
  // llvm::zip stops at the shorter of the two ranges, which is what we want
  // here: rank-reducing no-ops only need their leading dimensions inspected.
1341   auto shape = shapedType.getShape();
1342   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1343     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1344       return failure();
1345   for (OpFoldResult ofr : op.getMixedStrides())
1346     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1347       return failure();
1348   return success();
1349 }
1350 
1351 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1352 /// we can return the InsertSliceOp's source directly.
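///
/// Example (illustrative):
///
/// ```mlir
///   %0 = tensor.insert_slice %slice into %dest[0, 1] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
///   %1 = tensor.extract_slice %0[0, 1] [4, 4] [1, 1]
///       : tensor<8x8xf32> to tensor<4x4xf32>
/// ```
///
/// Here %1 folds to %slice.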
1353 // TODO: This only checks the immediate producer; extend to go up the
1354 // insert/extract chain if the slices are disjoint.
1355 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1356   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1357 
1358   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1359   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1360       insertOp.isSameAs(extractOp, isSame))
1361     return insertOp.source();
1362 
1363   return {};
1364 }
1365 
1366 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute> operands) {
1367   if (auto splat = operands[0].dyn_cast_or_null<SplatElementsAttr>()) {
1368     auto resultType = result().getType().cast<ShapedType>();
1369     if (resultType.hasStaticShape())
1370       return splat.resizeSplat(resultType);
1371   }
1372   if (getSourceType() == getType() &&
1373       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1374     return this->source();
1375   if (Value slice = foldExtractAfterInsertSlice(*this))
1376     return slice;
1377 
1378   return OpFoldResult();
1379 }
1380 
1381 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1382     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1383   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1384   unsigned rank = rankedTensorType.getRank();
1385   auto shape = rankedTensorType.getShape();
1386   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1387   SmallVector<OpFoldResult> sizes;
1388   for (unsigned i = 0, e = rank; i < e; ++i) {
1389     OpFoldResult dim;
1390     if (rankedTensorType.isDynamicDim(i))
1391       dim = b.createOrFold<tensor::DimOp>(
1392           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1393     else
1394       dim = b.getIndexAttr(shape[i]);
1395     sizes.push_back(dim);
1396   }
1397   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1398   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1399                                                 offsets, sizes, strides);
1400 }
1401 
1402 //===----------------------------------------------------------------------===//
1403 // InsertSliceOp
1404 //===----------------------------------------------------------------------===//
1405 
// Build an InsertSliceOp with mixed static and dynamic entries.
1407 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1408                           Value dest, ArrayRef<OpFoldResult> offsets,
1409                           ArrayRef<OpFoldResult> sizes,
1410                           ArrayRef<OpFoldResult> strides,
1411                           ArrayRef<NamedAttribute> attrs) {
1412   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1413   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1414   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1415                              ShapedType::kDynamicStrideOrOffset);
1416   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1417                              ShapedType::kDynamicSize);
1418   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1419                              ShapedType::kDynamicStrideOrOffset);
1420   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1421         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1422         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1423   result.addAttributes(attrs);
1424 }
1425 
// Build an InsertSliceOp with dynamic entries.
1427 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1428                           Value dest, ValueRange offsets, ValueRange sizes,
1429                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1430   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1431       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1432   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1433       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1434   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1435       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1436   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1437 }
1438 
1439 static SliceVerificationResult
1440 verifyInsertSliceOp(ShapedType srcType, ShapedType dstType,
1441                     ArrayAttr staticOffsets, ArrayAttr staticSizes,
1442                     ArrayAttr staticStrides,
1443                     ShapedType *expectedType = nullptr) {
  // insert_slice is the inverse of extract_slice; use the same type inference.
1445   auto expected = ExtractSliceOp::inferRankReducedResultType(
1446                       srcType.getRank(), dstType.cast<RankedTensorType>(),
1447                       extractFromI64ArrayAttr(staticOffsets),
1448                       extractFromI64ArrayAttr(staticSizes),
1449                       extractFromI64ArrayAttr(staticStrides))
1450                       .cast<ShapedType>();
1451   if (expectedType)
1452     *expectedType = expected;
1453   return isRankReducedType(expected, srcType);
1454 }
1455 
1456 /// Verifier for InsertSliceOp.
1457 LogicalResult InsertSliceOp::verify() {
1458   ShapedType expectedType;
1459   auto result =
1460       verifyInsertSliceOp(getSourceType(), getType(), static_offsets(),
1461                           static_sizes(), static_strides(), &expectedType);
1462   return produceSliceErrorMsg(result, *this, expectedType);
1463 }
1464 
/// If we have two consecutive InsertSliceOps writing to the same slice, we
1466 /// can mutate the second InsertSliceOp's destination to the first one's.
1467 ///
1468 /// Example:
1469 ///
1470 /// ```mlir
1471 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1472 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1473 /// ```
1474 ///
1475 /// folds into:
1476 ///
1477 /// ```mlir
1478 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1479 /// ```
1480 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1481   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1482 
1483   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1484   if (!prevInsertOp ||
1485       prevInsertOp.source().getType() != insertOp.source().getType() ||
1486       !prevInsertOp.isSameAs(insertOp, isSame))
1487     return failure();
1488 
1489   insertOp.destMutable().assign(prevInsertOp.dest());
1490   return success();
1491 }
1492 
1493 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
1494   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1495       getSourceType() == getType() &&
1496       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1497     return this->source();
1498   if (succeeded(foldInsertAfterInsertSlice(*this)))
1499     return getResult();
1500   return OpFoldResult();
1501 }
1502 
1503 LogicalResult InsertSliceOp::reifyResultShapes(
1504     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1505   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1506   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1507     reifiedReturnShapes[0][dim] =
1508         builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
1509   }
1510   return success();
1511 }
1512 
1513 namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
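///
/// For example (illustrative), a dynamic size defined by a constant is folded
/// into the static sizes; a cast of the source is inserted when the inferred
/// source type becomes more static:
///
/// ```mlir
///   %c64 = arith.constant 64 : index
///   %r = tensor.insert_slice %0 into %1[0, 0] [%c64, 64] [1, 1]
///       : tensor<?x64xf32> into tensor<128x128xf32>
/// ```
///
/// becomes an insert_slice with static sizes [64, 64] whose source is
/// tensor.cast %0 : tensor<?x64xf32> to tensor<64x64xf32>.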
1515 class InsertSliceOpConstantArgumentFolder final
1516     : public OpRewritePattern<InsertSliceOp> {
1517 public:
1518   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1519 
1520   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1521                                 PatternRewriter &rewriter) const override {
1522     // No constant operand, just return.
1523     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1524           return matchPattern(operand, matchConstantIndex());
1525         }))
1526       return failure();
1527 
1528     // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
1531     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1532     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1533     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1534     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1535     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1536     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1537 
1538     // Create the new op in canonical form.
1539     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1540         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1541         mixedOffsets, mixedSizes, mixedStrides);
1542     Value toInsert = insertSliceOp.source();
1543     if (sourceType != insertSliceOp.getSourceType())
1544       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1545                                                  sourceType, toInsert);
1546     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1547         insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
1548         mixedStrides);
1549     return success();
1550   }
1551 };
1552 
1553 /// Fold tensor_casts with insert_slice operations. If the source or destination
1554 /// tensor is a tensor_cast that removes static type information, the cast is
1555 /// folded into the insert_slice operation. E.g.:
1556 ///
1557 /// ```mlir
1558 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1559 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1560 /// ```
1561 ///
1562 /// folds into:
1563 ///
1564 /// ```mlir
1565 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1566 /// ```
1567 ///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back so that the type of the result does
/// not change.
1571 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1572   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1573 
1574   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1575                                 PatternRewriter &rewriter) const override {
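    // Note: if any operand is still a constant index, bail out; the
    // expectation (an assumption here) is that such operands are first folded
    // into static attributes by InsertSliceOpConstantArgumentFolder.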
1576     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1577           return matchPattern(operand, matchConstantIndex());
1578         }))
1579       return failure();
1580 
1581     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1582       auto castOp = v.getDefiningOp<tensor::CastOp>();
1583       if (!castOp || !canFoldIntoConsumerOp(castOp))
1584         return llvm::None;
1585       return castOp.source();
1586     };
1587     Optional<Value> sourceCastSource =
1588         getSourceOfCastOp(insertSliceOp.source());
1589     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
1590     if (!sourceCastSource && !destCastSource)
1591       return failure();
1592 
1593     auto src = (sourceCastSource ? *sourceCastSource : insertSliceOp.source());
1594     auto dst = (destCastSource ? *destCastSource : insertSliceOp.dest());
1595 
1596     auto srcType = src.getType().cast<ShapedType>();
1597     auto dstType = dst.getType().cast<ShapedType>();
1598     if (verifyInsertSliceOp(srcType, dstType, insertSliceOp.static_offsets(),
1599                             insertSliceOp.static_sizes(),
1600                             insertSliceOp.static_strides()) !=
1601         SliceVerificationResult::Success)
1602       return failure();
1603 
1604     Value replacement = rewriter.create<InsertSliceOp>(
1605         insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
1606         insertSliceOp.getMixedSizes(), insertSliceOp.getMixedStrides());
1607 
1608     if (replacement.getType() != insertSliceOp.getType()) {
1609       replacement = rewriter.create<tensor::CastOp>(
1610           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1611     }
1612     rewriter.replaceOp(insertSliceOp, replacement);
1613     return success();
1614   }
1615 };
1616 
/// If additional static type information can be deduced from an insert_slice's
1618 /// size operands, insert an explicit cast of the op's source operand. This
1619 /// enables other canonicalization patterns that are matching for tensor_cast
1620 /// ops such as `ForOpTensorCastFolder` in SCF.
1621 ///
1622 /// Example:
1623 ///
1624 /// ```mlir
1625 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1626 ///       : tensor<?x?xf32> into ...
1627 /// ```
1628 ///
1629 /// folds into:
1630 ///
1631 /// ```mlir
1632 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1633 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1634 ///       : tensor<64x64xf32> into ...
1635 /// ```
1636 struct InsertSliceOpSourceCastInserter final
1637     : public OpRewritePattern<InsertSliceOp> {
1638   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1639 
1640   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1641                                 PatternRewriter &rewriter) const override {
1642     RankedTensorType srcType = insertSliceOp.getSourceType();
1643     if (srcType.getRank() != insertSliceOp.getType().getRank())
1644       return failure();
1645     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1646                                      srcType.getShape().end());
1647     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1648       if (Optional<int64_t> constInt =
1649               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1650         newSrcShape[i] = *constInt;
1651     }
1652 
1653     RankedTensorType newSrcType =
1654         RankedTensorType::get(newSrcShape, srcType.getElementType());
1655     if (srcType == newSrcType ||
1656         !preservesStaticInformation(srcType, newSrcType) ||
1657         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1658       return failure();
1659 
1660     // newSrcType is:
1661     //   1) Different from srcType.
1662     //   2) "More static" than srcType.
1663     //   3) Cast-compatible with srcType.
1664     // Insert the cast.
1665     Value cast = rewriter.create<tensor::CastOp>(
1666         insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
1667     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1668         insertSliceOp, cast, insertSliceOp.dest(),
1669         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1670         insertSliceOp.getMixedStrides());
1671     return success();
1672   }
1673 };
1674 } // namespace
1675 
1676 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1677                                                 MLIRContext *context) {
1678   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1679               InsertSliceOpSourceCastInserter>(context);
1680 }
1681 
1682 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1683                                                              Location loc,
1684                                                              Value tensor,
1685                                                              Value dest) {
1686   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1687   unsigned rank = rankedTensorType.getRank();
1688   auto shape = rankedTensorType.getShape();
1689   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1690   SmallVector<OpFoldResult> sizes;
1691   for (unsigned i = 0, e = rank; i < e; ++i) {
1692     OpFoldResult dim;
1693     if (rankedTensorType.isDynamicDim(i))
1694       dim = b.createOrFold<tensor::DimOp>(
1695           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1696     else
1697       dim = b.getIndexAttr(shape[i]);
1698     sizes.push_back(dim);
1699   }
1700   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1701   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1702                                                sizes, strides);
1703 }
1704 
1705 //===----------------------------------------------------------------------===//
1706 // PadOp
1707 //===----------------------------------------------------------------------===//
1708 
1709 // TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
1710 // supports optional types.
1711 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
1712                     Type typeToInfer, Type typeToInferFrom) {}
1713 
1714 ParseResult parseInferType(OpAsmParser &parser,
1715                            Optional<OpAsmParser::UnresolvedOperand> optOperand,
1716                            Type &typeToInfer, Type typeToInferFrom) {
1717   if (optOperand)
1718     typeToInfer = typeToInferFrom;
1719   return success();
1720 }
1721 
1722 LogicalResult PadOp::verify() {
1723   auto sourceType = source().getType().cast<RankedTensorType>();
1724   auto resultType = result().getType().cast<RankedTensorType>();
1725   auto expectedType =
1726       PadOp::inferResultType(sourceType, extractFromI64ArrayAttr(static_low()),
1727                              extractFromI64ArrayAttr(static_high()));
1728   for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
1729     if (resultType.getDimSize(i) == expectedType.getDimSize(i))
1730       continue;
1731     if (expectedType.isDynamicDim(i))
1732       continue;
1733     return emitError("specified type ")
1734            << resultType << " does not match the inferred type "
1735            << expectedType;
1736   }
1737 
1738   return success();
1739 }
1740 
1741 LogicalResult PadOp::verifyRegions() {
1742   auto &region = getRegion();
1743   unsigned rank = result().getType().cast<RankedTensorType>().getRank();
1744   Block &block = region.front();
1745   if (block.getNumArguments() != rank)
1746     return emitError("expected the block to have ") << rank << " arguments";
1747 
1748   // Note: the number and type of yield values are checked in the YieldOp.
1749   for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
1750     if (!en.value().isIndex())
1751       return emitOpError("expected block argument ")
1752              << (en.index() + 1) << " to be an index";
1753   }
1754 
1755   // Ensure that the region yields an element of the right type.
1756   auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
1757   if (yieldOp.value().getType() !=
1758       getType().cast<ShapedType>().getElementType())
1759     return emitOpError("expected yield type to match shape element type");
1760 
1761   return success();
1762 }
1763 
1764 RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
1765                                         ArrayRef<int64_t> staticLow,
1766                                         ArrayRef<int64_t> staticHigh,
1767                                         ArrayRef<int64_t> resultShape) {
1768   unsigned rank = sourceType.getRank();
1769   assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
1770   assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
1771   assert((resultShape.empty() || resultShape.size() == rank) &&
1772          "unexpected resultShape size mismatch");
1773 
1774   SmallVector<int64_t, 4> inferredShape;
1775   for (auto i : llvm::seq<unsigned>(0, rank)) {
1776     if (sourceType.isDynamicDim(i) ||
1777         staticLow[i] == ShapedType::kDynamicSize ||
1778         staticHigh[i] == ShapedType::kDynamicSize) {
1779       inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
1780                                                   : resultShape[i]);
1781     } else {
1782       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
1783       assert((resultShape.empty() || size == resultShape[i] ||
1784               resultShape[i] == ShapedType::kDynamicSize) &&
1785              "mismatch between inferred shape and result shape");
1786       inferredShape.push_back(size);
1787     }
1788   }
1789 
1790   return RankedTensorType::get(inferredShape, sourceType.getElementType());
1791 }
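
// For example (illustrative): with staticLow = [1, 0] and staticHigh = [3, 0],
// a source of type tensor<4x?xf32> infers the result type tensor<8x?xf32>.
// Dynamic source dimensions (and dynamic padding amounts) stay dynamic unless
// `resultShape` supplies a static size for them.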
1792 
1793 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1794                   ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
1795                   ValueRange low, ValueRange high, bool nofold,
1796                   ArrayRef<NamedAttribute> attrs) {
1797   auto sourceType = source.getType().cast<RankedTensorType>();
1798   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
1799   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
1800         b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
1801   result.addAttributes(attrs);
1802 }
1803 
1804 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1805                   ValueRange low, ValueRange high, bool nofold,
1806                   ArrayRef<NamedAttribute> attrs) {
1807   auto sourceType = source.getType().cast<RankedTensorType>();
1808   unsigned rank = sourceType.getRank();
1809   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
1810   build(b, result, source, staticVector, staticVector, low, high, nofold,
1811         attrs);
1812 }
1813 
1814 void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
1815                   Value source, ArrayRef<OpFoldResult> low,
1816                   ArrayRef<OpFoldResult> high, bool nofold,
1817                   ArrayRef<NamedAttribute> attrs) {
1818   assert(resultType.isa<RankedTensorType>());
1819   auto sourceType = source.getType().cast<RankedTensorType>();
1820   SmallVector<Value, 4> dynamicLow, dynamicHigh;
1821   SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh carry the complete padding configuration. Each
  // entry of `low`/`high` appends one value to staticLow/staticHigh; if the
  // entry is dynamic (i.e., not a constant), one value is appended to
  // dynamicLow/dynamicHigh as well.
1826   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
1827                              ShapedType::kDynamicSize);
1828   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
1829                              ShapedType::kDynamicSize);
1830   if (!resultType) {
1831     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
1832   }
1833   build(b, result, resultType, source, dynamicLow, dynamicHigh,
1834         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
1835         nofold ? b.getUnitAttr() : UnitAttr());
1836   result.addAttributes(attrs);
1837 }
1838 
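/// A dimension counts as padded when its low or high padding amount is not a
/// constant zero (dynamic amounts count as padded). For example
/// (illustrative), low pad [0, 2] with high pad [0, 0] marks only dimension 1.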
1839 llvm::SmallBitVector PadOp::getPaddedDims() {
1840   llvm::SmallBitVector paddedDims(getSourceType().getRank());
1841   auto extractPaddedDims = [&](ArrayRef<OpFoldResult> paddingWidths) {
1842     for (const auto &en : enumerate(paddingWidths))
1843       if (getConstantIntValue(en.value()) != static_cast<int64_t>(0))
1844         paddedDims.set(en.index());
1845   };
1846   extractPaddedDims(getMixedLowPad());
1847   extractPaddedDims(getMixedHighPad());
1848   return paddedDims;
1849 }
1850 
1851 namespace {
// Folds tensor.pad when the padding amounts are all static zeros and the
// `nofold` attribute doesn't request otherwise.
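//
// Example (illustrative):
//
//   %0 = tensor.pad %t low[0, 0] high[0, 0] { ... }
//       : tensor<4x4xf32> to tensor<4x4xf32>
//
// is rewritten to a tensor.cast of %t to the pad's result type (here an
// identity cast, which subsequently folds away).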
1854 struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
1855   using OpRewritePattern<PadOp>::OpRewritePattern;
1856 
1857   LogicalResult matchAndRewrite(PadOp padTensorOp,
1858                                 PatternRewriter &rewriter) const override {
1859     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
1860       return failure();
1861     if (padTensorOp.nofold())
1862       return failure();
1863     rewriter.replaceOpWithNewOp<tensor::CastOp>(
1864         padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
1865     return success();
1866   }
1867 };
1868 
1869 // Fold CastOp into PadOp when adding static information.
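//
// Example (illustrative):
//
//   %0 = tensor.cast %t : tensor<8x?xf32> to tensor<?x?xf32>
//   %1 = tensor.pad %0 low[0, 0] high[0, %h] { ... }
//       : tensor<?x?xf32> to tensor<?x?xf32>
//
// pads %t directly and re-infers the result type as tensor<8x?xf32>, casting
// back to tensor<?x?xf32> since the result type changed.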
1870 struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
1871   using OpRewritePattern<PadOp>::OpRewritePattern;
1872 
1873   LogicalResult matchAndRewrite(PadOp padTensorOp,
1874                                 PatternRewriter &rewriter) const override {
1875     auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
1876     if (!tensor::canFoldIntoConsumerOp(castOp))
1877       return failure();
1878 
1879     auto newResultType = PadOp::inferResultType(
1880         castOp.source().getType().cast<RankedTensorType>(),
1881         extractFromI64ArrayAttr(padTensorOp.static_low()),
1882         extractFromI64ArrayAttr(padTensorOp.static_high()),
1883         padTensorOp.getResultType().getShape());
1884 
1885     if (newResultType == padTensorOp.getResultType()) {
1886       rewriter.updateRootInPlace(padTensorOp, [&]() {
1887         padTensorOp.sourceMutable().assign(castOp.source());
1888       });
1889     } else {
1890       auto newOp = rewriter.create<PadOp>(
1891           padTensorOp->getLoc(), newResultType, padTensorOp.source(),
1892           padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
1893           padTensorOp.static_high(), padTensorOp.nofold());
1894       BlockAndValueMapping mapper;
1895       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
1896 
1897       rewriter.replaceOpWithNewOp<tensor::CastOp>(
1898           padTensorOp, padTensorOp.getResultType(), newOp);
1899     }
1900     return success();
1901   }
1902 };
1903 
// Fold a CastOp consuming the result of a PadOp back into the PadOp if the
// cast adds static information.
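//
// Example (illustrative):
//
//   %0 = tensor.pad %t low[0, 0] high[%h0, %h1] { ... }
//       : tensor<8x8xf32> to tensor<?x?xf32>
//   %1 = tensor.cast %0 : tensor<?x?xf32> to tensor<10x12xf32>
//
// is rewritten to a single tensor.pad that produces tensor<10x12xf32>
// directly (assuming the cast is the pad's only user).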
1906 struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
1907   using OpRewritePattern<PadOp>::OpRewritePattern;
1908 
1909   LogicalResult matchAndRewrite(PadOp padTensorOp,
1910                                 PatternRewriter &rewriter) const override {
1911     if (!padTensorOp.result().hasOneUse())
1912       return failure();
1913     auto tensorCastOp =
1914         dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
1915     if (!tensorCastOp)
1916       return failure();
1917     if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
1918                                             tensorCastOp.dest().getType()))
1919       return failure();
1920 
1921     auto replacementOp = rewriter.create<PadOp>(
1922         padTensorOp.getLoc(), tensorCastOp.dest().getType(),
1923         padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
1924         padTensorOp.static_low(), padTensorOp.static_high(),
1925         padTensorOp.nofold());
1926     replacementOp.region().takeBody(padTensorOp.region());
1927 
1928     rewriter.replaceOp(padTensorOp, replacementOp.result());
1929     rewriter.replaceOp(tensorCastOp, replacementOp.result());
1930     return success();
1931   }
1932 };
1933 
1934 /// Fold chains of tensor::ExtractSliceOp, tensor::PadOp pairs that pad
1935 /// different dimensions. The pattern applies if the following preconditions
1936 /// hold:
1937 ///   1) the tensor::ExtractSliceOps are not rank-reducing,
1938 ///   2) the tensor::ExtractSliceOps have only unit-strides,
1939 ///   3) the tensor::PadOps perform only high-padding,
1940 ///   4) the tensor::PadOps have the same constant padding value,
1941 ///   5) the tensor::PadOps do not have common padding dimensions,
///   6) one tensor::ExtractSliceOp, tensor::PadOp pair has zero-padding and
///      zero-offset for every dimension,
1944 ///   7) the tensor::ExtractSliceOp sizes match the source tensor sizes for the
1945 ///      padded source dimensions.
1946 ///
1947 /// Example:
1948 ///
1949 /// ```mlir
1950 ///   %0 = tensor.extract_slice %input[16, 0] [%sz0, 64] [1, 1]
1951 ///       : tensor<64x64xf32> to tensor<?x64xf32>
1952 ///   %1 = tensor.pad %0 low[0, 0] high[%pw0, 0] { ...
1953 ///     } : tensor<?x64xf32> to tensor<8x64xf32>
1954 ///   %2 = tensor.extract_slice %1[0, 4] [8, %sz1] [1, 1]
1955 ///        : tensor<8x64xf32> to tensor<8x?xf32>
1956 ///   %res = tensor.pad %2 nofold low[0, 0] high[0, %pw1] { ...
1957 ///     } : tensor<8x?xf32> to tensor<8x4xf32>
1958 /// ```
1959 ///
1960 /// folds into:
1961 ///
1962 /// ```mlir
1963 ///   %0 = tensor.extract_slice %input[16, 4] [%sz0, %sz1] [1, 1]
1964 ///        : tensor<64x64xf32> to tensor<?x?xf32>
1965 ///   %res = tensor.pad %0 nofold low[0, 0] high[%pw0, %pw1] { ...
1966 ///     } : tensor<?x?xf32> to tensor<8x4xf32>
1967 /// ```
1968 struct FoldOrthogonalPaddings : public OpRewritePattern<PadOp> {
1969   using OpRewritePattern<PadOp>::OpRewritePattern;
1970 
1971   LogicalResult matchAndRewrite(PadOp padOp,
1972                                 PatternRewriter &rewriter) const override {
1973     auto innerSliceOp = padOp.source().getDefiningOp<ExtractSliceOp>();
1974     if (!innerSliceOp)
1975       return failure();
1976     auto outerPadOp = innerSliceOp.source().getDefiningOp<PadOp>();
1977     if (!outerPadOp || outerPadOp.nofold())
1978       return failure();
1979     auto outerSliceOp = outerPadOp.source().getDefiningOp<ExtractSliceOp>();
1980     if (!outerSliceOp)
1981       return failure();
1982 
1983     // 1) Fail if the chain is rank-reducing.
1984     int64_t rank = padOp.getSourceType().getRank();
1985     if (outerSliceOp.getSourceType().getRank() != rank) {
1986       return rewriter.notifyMatchFailure(padOp,
1987                                          "cannot fold rank-reducing chain");
1988     }
1989 
1990     // 2) Fail if the tensor::ExtractSliceOps have non-unit strides.
1991     if (!innerSliceOp.hasUnitStride() || !outerSliceOp.hasUnitStride()) {
1992       return rewriter.notifyMatchFailure(
1993           padOp, "cannot fold non-unit stride ExtractSliceOps");
1994     }
1995 
1996     // 3) Fail if the tensor::PadOps have non-zero low padding.
1997     if (!padOp.hasZeroLowPad() || !outerPadOp.hasZeroLowPad()) {
1998       return rewriter.notifyMatchFailure(padOp,
1999                                          "cannot fold PadOps with low padding");
2000     }
2001 
2002     // 4) Fail if the tensor::PadOps padding values do not match.
2003     Attribute innerAttr, outerAttr;
2004     Value innerValue = padOp.getConstantPaddingValue();
2005     Value outerValue = outerPadOp.getConstantPaddingValue();
2006     if (!innerValue || !outerValue ||
2007         !matchPattern(innerValue, m_Constant(&innerAttr)) ||
2008         !matchPattern(outerValue, m_Constant(&outerAttr)) ||
2009         innerAttr != outerAttr) {
2010       return rewriter.notifyMatchFailure(
2011           padOp, "cannot fold PadOps with different padding values");
2012     }
2013 
2014     // 5) Fail if a dimension is padded by both tensor::PadOps.
2015     llvm::SmallBitVector innerDims = padOp.getPaddedDims();
2016     llvm::SmallBitVector outerDims = outerPadOp.getPaddedDims();
2017     if (innerDims.anyCommon(outerDims)) {
2018       return rewriter.notifyMatchFailure(
2019           padOp, "cannot fold PadOps with common padding dimensions");
2020     }
2021 
    // 6) Combine the offsets of the two tensor::ExtractSliceOps. Find the
    // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
    // for every dimension, and use the offset of the other pair. Fail if no
    // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
    // exists.
2027     SmallVector<OpFoldResult> newOffsets(rank, rewriter.getIndexAttr(0));
2028     for (auto &en : enumerate(newOffsets)) {
2029       OpFoldResult innerOffset = innerSliceOp.getMixedOffsets()[en.index()];
2030       OpFoldResult outerOffset = outerSliceOp.getMixedOffsets()[en.index()];
2031       if (!innerDims.test(en.index()) &&
2032           (getConstantIntValue(innerOffset) == static_cast<int64_t>(0))) {
2033         en.value() = outerOffset;
2034         continue;
2035       }
2036       if (!outerDims.test(en.index()) &&
2037           (getConstantIntValue(outerOffset) == static_cast<int64_t>(0))) {
2038         en.value() = innerOffset;
2039         continue;
2040       }
2041       return rewriter.notifyMatchFailure(
2042           padOp, "cannot find zero-offset and zero-padding pair");
2043     }
2044 
2045     // 7) Combine the sizes of the two tensor::ExtractSliceOps. Take the size of
2046     // the outer tensor::ExtractSliceOp for the dimensions padded by the outer
2047     // tensor::PadOp and fail if the size of the inner tensor::ExtractSliceOp
2048     // does not match the size of the padded dimension. Otherwise, take the size
2049     // of the inner tensor::ExtractSliceOp.
2050     SmallVector<OpFoldResult> newSizes = innerSliceOp.getMixedSizes();
2051     for (auto &en : enumerate(newSizes)) {
2052       if (!outerDims.test(en.index()))
2053         continue;
2054       OpFoldResult sliceSize = innerSliceOp.getMixedSizes()[en.index()];
2055       int64_t sourceSize = innerSliceOp.getSourceType().getShape()[en.index()];
2056       assert(!ShapedType::isDynamic(sourceSize) &&
2057              "expected padded dimension to have a static size");
2058       if (getConstantIntValue(sliceSize) != sourceSize) {
2059         return rewriter.notifyMatchFailure(
2060             padOp, "cannot fold since the inner ExtractSliceOp size does not "
2061                    "match the size of the outer padding");
2062       }
2063       en.value() = outerSliceOp.getMixedSizes()[en.index()];
2064     }
2065 
2066     // Combine the high paddings of the two tensor::PadOps.
2067     SmallVector<OpFoldResult> newHighPad(rank, rewriter.getIndexAttr(0));
2068     for (auto &en : enumerate(newHighPad)) {
2069       if (innerDims.test(en.index()))
2070         newHighPad[en.index()] = padOp.getMixedHighPad()[en.index()];
2071       if (outerDims.test(en.index()))
2072         newHighPad[en.index()] = outerPadOp.getMixedHighPad()[en.index()];
2073     }
2074 
2075     // Create a new tensor::ExtractSliceOp, tensor::PadOp pair that performs the
2076     // two paddings in one step.
2077     auto newSliceOp = rewriter.create<ExtractSliceOp>(
2078         padOp.getLoc(), outerSliceOp.source(), newOffsets, newSizes,
2079         innerSliceOp.getMixedStrides());
2080     auto newPadOp = rewriter.create<PadOp>(
2081         padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
2082         padOp.getMixedLowPad(), newHighPad, padOp.nofold());
2083     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
2084                                 newPadOp.getRegion().begin());
2085     rewriter.replaceOp(padOp, newPadOp.getResult());
2086     return success();
2087   }
2088 };
2089 
2090 } // namespace
2091 
2092 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
2093                                         MLIRContext *context) {
2094   results.add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast,
2095               FoldOrthogonalPaddings>(context);
2096 }
2097 
/// Return the padding value of the PadOp if it is constant. In this context,
2099 /// "constant" means an actual constant or "defined outside of the block".
2100 ///
2101 /// Values are considered constant in three cases:
2102 ///  - A ConstantLike value.
2103 ///  - A basic block argument from a different block.
2104 ///  - A value defined outside of the block.
2105 ///
2106 /// If the padding value is not constant, an empty Value is returned.
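///
/// For illustration (a hypothetical example), in
///
/// ```mlir
///   %0 = tensor.pad %t low[0, 0] high[1, 1] {
///   ^bb0(%i: index, %j: index):
///     tensor.yield %cst : f32
///   } : tensor<4x4xf32> to tensor<6x6xf32>
/// ```
///
/// %cst is returned if it is a constant or is defined above the tensor.pad.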
2107 Value PadOp::getConstantPaddingValue() {
2108   auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
2109   if (!yieldOp)
2110     return {};
2111   Value padValue = yieldOp.value();
2112   // Check if yield value is a constant.
2113   if (matchPattern(padValue, m_Constant()))
2114     return padValue;
2115   // Check if yield value is defined inside the PadOp block.
2116   if (padValue.getParentBlock() == &getRegion().front())
2117     return {};
2118   // Else: Yield value defined outside of the PadOp block.
2119   return padValue;
2120 }
2121 
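/// tensor.pad folds away to its source when the result type is static and
/// equal to the source type (i.e. no padding actually occurs), unless the
/// `nofold` attribute requests otherwise.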
2122 OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
2123   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
2124       !nofold())
2125     return source();
2126   return {};
2127 }
2128 
2129 //===----------------------------------------------------------------------===//
2130 // SplatOp
2131 //===----------------------------------------------------------------------===//
2132 
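/// Splatting a constant scalar folds to a splat elements attribute. For
/// example (illustrative):
///
/// ```mlir
///   %c = arith.constant 1.0 : f32
///   %0 = tensor.splat %c : tensor<4xf32>
/// ```
///
/// folds to dense<1.000000e+00> : tensor<4xf32>.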
2133 OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
2134   auto constOperand = operands.front();
2135   if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
2136     return {};
2137 
  // SplatElementsAttr::get treats a single value for the second arg as being
  // a splat.
2139   return SplatElementsAttr::get(getType(), {constOperand});
2140 }
2141 
2142 //===----------------------------------------------------------------------===//
2143 // TableGen'd op method definitions
2144 //===----------------------------------------------------------------------===//
2145 
2146 #define GET_OP_CLASSES
2147 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
2148