1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/Utils/Utils.h"
10 #include "mlir/Dialect/Tensor/IR/Tensor.h"
11 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
12 #include "mlir/Dialect/Utils/StaticValueUtils.h"
13 #include "mlir/IR/BlockAndValueMapping.h"
14 #include "mlir/IR/Builders.h"
15 #include "mlir/IR/BuiltinAttributeInterfaces.h"
16 #include "mlir/IR/Matchers.h"
17 #include "mlir/IR/TypeUtilities.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallBitVector.h"
20 
21 using namespace mlir;
22 using namespace mlir::tensor;
23 
24 /// Materialize a single constant operation from a given attribute value with
25 /// the desired resultant type.
26 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
27                                               Attribute value, Type type,
28                                               Location loc) {
29   if (arith::ConstantOp::isBuildableWith(value, type))
30     return builder.create<arith::ConstantOp>(loc, value, type);
31   if (complex::ConstantOp::isBuildableWith(value, type))
32     return builder.create<complex::ConstantOp>(loc, type,
33                                                value.cast<ArrayAttr>());
34   return nullptr;
35 }
36 
37 //===----------------------------------------------------------------------===//
38 // CastOp
39 //===----------------------------------------------------------------------===//
40 
41 /// Returns true if `target` is a ranked tensor type that preserves static
42 /// information available in the `source` ranked tensor type.
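///
/// For example (illustrative): a `target` of tensor<8x?xf32> preserves the
/// static information of a `source` of tensor<?x?xf32> (it only adds
/// information), whereas a `target` of tensor<?x?xf32> does not preserve the
/// static information of a `source` of tensor<8x?xf32>.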
43 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
44   auto sourceType = source.dyn_cast<RankedTensorType>();
45   auto targetType = target.dyn_cast<RankedTensorType>();
46 
47   // Requires RankedTensorType.
48   if (!sourceType || !targetType)
49     return false;
50 
51   // Requires same elemental type.
52   if (sourceType.getElementType() != targetType.getElementType())
53     return false;
54 
55   // Requires same rank.
56   if (sourceType.getRank() != targetType.getRank())
57     return false;
58 
  // If a dimension is static in `source` but dynamic in `target`, static
  // information is not preserved.
60   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
61     if (!ShapedType::isDynamic(std::get<0>(t)) &&
62         ShapedType::isDynamic(std::get<1>(t)))
63       return false;
64   }
65 
66   return true;
67 }
68 
69 /// Determines whether tensor::CastOp casts to a more dynamic version of the
70 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
71 /// implement canonicalization patterns for ops in different dialects that may
72 /// consume the results of tensor.cast operations. Such foldable tensor.cast
73 /// operations are typically inserted as `slice` ops and are canonicalized,
74 /// to preserve the type compatibility of their uses.
75 ///
/// Returns true when both conditions are met:
/// 1. source and result are ranked tensors with the same element type and
///    rank.
/// 2. the source tensor type of the cast has at least as much static
///    information as the result type.
79 ///
80 /// Example:
81 /// ```mlir
82 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
83 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
84 /// ```
85 ///
86 /// folds into:
87 ///
88 /// ```mlir
89 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
90 /// ```
91 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
92   if (!castOp)
93     return false;
94 
  // Can fold if the source of the cast has at least as much static information
  // as its result.
97   return preservesStaticInformation(castOp.getType(),
98                                     castOp.source().getType());
99 }
100 
101 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
102 /// that can be folded.
103 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
104   bool folded = false;
105   for (OpOperand &operand : op->getOpOperands()) {
106     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
107     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
108       operand.set(castOp.getOperand());
109       folded = true;
110     }
111   }
112   return success(folded);
113 }
114 
115 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
116   if (inputs.size() != 1 || outputs.size() != 1)
117     return false;
118   Type a = inputs.front(), b = outputs.front();
119   auto aT = a.dyn_cast<TensorType>();
120   auto bT = b.dyn_cast<TensorType>();
121   if (!aT || !bT)
122     return false;
123 
124   if (aT.getElementType() != bT.getElementType())
125     return false;
126 
127   return succeeded(verifyCompatibleShape(aT, bT));
128 }
129 
130 /// Compute a TensorType that has the joined shape knowledge of the two
131 /// given TensorTypes. The element types need to match.
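/// For example (illustrative), joining tensor<?x8xf32> with tensor<4x?xf32>
/// yields tensor<4x8xf32>; ranked types that disagree on a static dimension
/// or on rank have no join, and the null type is returned.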
132 static TensorType joinShapes(TensorType one, TensorType two) {
133   assert(one.getElementType() == two.getElementType());
134 
135   if (!one.hasRank())
136     return two;
137   if (!two.hasRank())
138     return one;
139 
140   int64_t rank = one.getRank();
141   if (rank != two.getRank())
142     return {};
143 
144   SmallVector<int64_t, 4> join;
145   join.reserve(rank);
146   for (int64_t i = 0; i < rank; ++i) {
147     if (one.isDynamicDim(i)) {
148       join.push_back(two.getDimSize(i));
149       continue;
150     }
151     if (two.isDynamicDim(i)) {
152       join.push_back(one.getDimSize(i));
153       continue;
154     }
155     if (one.getDimSize(i) != two.getDimSize(i))
156       return {};
157     join.push_back(one.getDimSize(i));
158   }
159   return RankedTensorType::get(join, one.getElementType());
160 }
161 
162 namespace {
163 
164 /// Replaces chains of two tensor.cast operations by a single tensor.cast
165 /// operation if doing so does not remove runtime constraints.
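///
/// For example (illustrative):
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4x4xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<4x?xf32>
/// ```
///
/// is rewritten into:
///
/// ```mlir
///   %2 = tensor.cast %0 : tensor<4x4xf32> to tensor<4x?xf32>
/// ```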
166 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
167   using OpRewritePattern<CastOp>::OpRewritePattern;
168 
169   LogicalResult matchAndRewrite(CastOp tensorCast,
170                                 PatternRewriter &rewriter) const final {
171     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
172 
173     if (!tensorCastOperand)
174       return failure();
175 
176     auto sourceType =
177         tensorCastOperand.getOperand().getType().cast<TensorType>();
178     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
179     auto resultType = tensorCast.getType().cast<TensorType>();
180 
181     // We can remove the intermediate cast if joining all three produces the
182     // same result as just joining the source and result shapes.
183     auto firstJoin =
184         joinShapes(joinShapes(sourceType, intermediateType), resultType);
185 
186     // The join might not exist if the cast sequence would fail at runtime.
187     if (!firstJoin)
188       return failure();
189 
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
193     auto newJoin = joinShapes(sourceType, resultType);
194     if (firstJoin != newJoin)
195       return failure();
196 
197     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
198                                         tensorCastOperand.getOperand());
199     return success();
200   }
201 };
202 
203 } // namespace
204 
205 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
206                                          MLIRContext *context) {
207   results.add<ChainedTensorCast>(context);
208 }
209 
210 //===----------------------------------------------------------------------===//
211 // DimOp
212 //===----------------------------------------------------------------------===//
213 
214 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
215                   int64_t index) {
216   auto loc = result.location;
217   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
218   build(builder, result, source, indexValue);
219 }
220 
221 Optional<int64_t> DimOp::getConstantIndex() {
222   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
223     return constantOp.getValue().cast<IntegerAttr>().getInt();
224   return {};
225 }
226 
227 LogicalResult DimOp::verify() {
228   // Assume unknown index to be in range.
229   Optional<int64_t> index = getConstantIndex();
230   if (!index.hasValue())
231     return success();
232 
233   // Check that constant index is not knowingly out of range.
234   auto type = source().getType();
235   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
236     if (index.getValue() >= tensorType.getRank())
237       return emitOpError("index is out of range");
238   } else if (type.isa<UnrankedTensorType>()) {
239     // Assume index to be in range.
240   } else {
241     llvm_unreachable("expected operand with tensor type");
242   }
243   return success();
244 }
245 
246 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
247   // All forms of folding require a known index.
248   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
249   if (!index)
250     return {};
251 
252   // Folding for unranked types (UnrankedTensorType) is not supported.
253   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
254   if (!tensorType)
255     return {};
256 
257   // Fold if the shape extent along the given index is known.
258   if (!tensorType.isDynamicDim(index.getInt())) {
259     Builder builder(getContext());
260     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
261   }
262 
263   Operation *definingOp = source().getDefiningOp();
264 
265   // Fold dim to the operand of tensor.generate.
266   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
267     auto resultType =
268         fromElements.getResult().getType().cast<RankedTensorType>();
269     // The case where the type encodes the size of the dimension is handled
270     // above.
271     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
272 
273     // Find the operand of the fromElements that corresponds to this index.
274     auto dynExtents = fromElements.dynamicExtents().begin();
275     for (auto dim : resultType.getShape().take_front(index.getInt()))
276       if (ShapedType::isDynamic(dim))
277         dynExtents++;
278 
279     return Value{*dynExtents};
280   }
281 
282   // The size at the given index is now known to be a dynamic size.
283   unsigned unsignedIndex = index.getValue().getZExtValue();
284 
285   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
286     // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
287     // `resolve-shaped-type-result-dims` pass.
288     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
289         sliceOp.isDynamicSize(unsignedIndex)) {
290       return {sliceOp.getDynamicSize(unsignedIndex)};
291     }
292   }
293 
294   // dim(cast) -> dim
295   if (succeeded(foldTensorCast(*this)))
296     return getResult();
297 
298   return {};
299 }
300 
301 namespace {
302 /// Fold dim of a cast into the dim of the source of the tensor cast.
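///
/// For example (illustrative):
///
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %idx : tensor<?x?xf32>
///
/// is rewritten into:
///
///   %d = tensor.dim %t, %idx : tensor<4x?xf32>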
303 struct DimOfCastOp : public OpRewritePattern<DimOp> {
304   using OpRewritePattern<DimOp>::OpRewritePattern;
305 
306   LogicalResult matchAndRewrite(DimOp dimOp,
307                                 PatternRewriter &rewriter) const override {
308     auto castOp = dimOp.source().getDefiningOp<CastOp>();
309     if (!castOp)
310       return failure();
311     Value newSource = castOp.getOperand();
312     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
313     return success();
314   }
315 };
316 } // namespace
317 
318 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
319                                         MLIRContext *context) {
320   results.add<DimOfCastOp>(context);
321 }
322 
323 //===----------------------------------------------------------------------===//
324 // ExtractOp
325 //===----------------------------------------------------------------------===//
326 
327 LogicalResult ExtractOp::verify() {
328   // Verify the # indices match if we have a ranked type.
329   if (auto tensorType = tensor().getType().dyn_cast<RankedTensorType>())
330     if (tensorType.getRank() != static_cast<int64_t>(indices().size()))
331       return emitOpError("incorrect number of indices for extract_element");
332 
333   return success();
334 }
335 
336 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
337   // The tensor operand must be a known constant.
338   Attribute tensor = operands.front();
339   if (!tensor)
340     return {};
341   // If this is a splat elements attribute, simply return the value. All of the
342   // elements of a splat attribute are the same.
343   if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
344     return splatTensor.getSplatValue<Attribute>();
345 
346   // Otherwise, collect the constant indices into the tensor.
347   SmallVector<uint64_t, 8> indices;
348   for (Attribute indice : llvm::drop_begin(operands, 1)) {
349     if (!indice || !indice.isa<IntegerAttr>())
350       return {};
351     indices.push_back(indice.cast<IntegerAttr>().getInt());
352   }
353 
354   // If this is an elements attribute, query the value at the given indices.
355   auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
356   if (elementsAttr && elementsAttr.isValidIndex(indices))
357     return elementsAttr.getValues<Attribute>()[indices];
358   return {};
359 }
360 
361 //===----------------------------------------------------------------------===//
362 // FromElementsOp
363 //===----------------------------------------------------------------------===//
364 
365 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
366                            Type resultType, ValueRange elements) {
367   result.addOperands(elements);
368   result.addTypes(resultType);
369 }
370 
371 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
372                            ValueRange elements) {
373   assert(!elements.empty() && "expected at least one element");
374   Type resultType = RankedTensorType::get(
375       {static_cast<int64_t>(elements.size())}, elements.front().getType());
376   build(builder, result, resultType, elements);
377 }
378 
379 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
380   if (!llvm::is_contained(operands, nullptr))
381     return DenseElementsAttr::get(getType(), operands);
382   return {};
383 }
384 
385 namespace {
386 
387 // Canonicalizes the pattern of the form
388 //
389 // %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
390 // %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
391 //
392 // to just %element.
393 struct ExtractElementFromTensorFromElements
394     : public OpRewritePattern<tensor::ExtractOp> {
395   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
396 
397   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
398                                 PatternRewriter &rewriter) const final {
399     auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
400     if (!tensorFromElements)
401       return failure();
402     auto tensorType = tensorFromElements.getType().cast<RankedTensorType>();
403     auto rank = tensorType.getRank();
404     if (rank == 0) {
405       rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
406       return success();
407     }
408     SmallVector<APInt, 3> indices(rank);
409     int64_t flatIndex = 0;
410     int64_t stride = 1;
411     for (int i = rank - 1; i >= 0; --i) {
412       APInt index;
413       if (!matchPattern(extract.indices()[i], m_ConstantInt(&index)))
414         return failure();
415       if (i < rank - 1)
416         stride *= tensorType.getDimSize(i);
417       flatIndex += index.getSExtValue() * stride;
418     }
419     // Prevent out of bounds accesses. This can happen in invalid code that will
420     // never execute.
421     if (tensorFromElements->getNumOperands() <= flatIndex || flatIndex < 0)
422       return failure();
423     rewriter.replaceOp(extract, tensorFromElements.getOperand(flatIndex));
424     return success();
425   }
426 };
427 
// Pushes the index_casts that occur before extractions to after the extract.
// This minimizes type conversion in some cases and enables the extract
// canonicalizer. This changes:
//
// %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
// %extract = tensor.extract %cast[%index] : tensor<1xindex>
//
// to the following:
//
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template and handling all tensor cast
// operations.
443 struct ExtractElementFromIndexCast
444     : public OpRewritePattern<tensor::ExtractOp> {
445   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
446 
447   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
448                                 PatternRewriter &rewriter) const final {
449     Location loc = extract.getLoc();
450     auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
451     if (!indexCast)
452       return failure();
453 
454     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
455 
456     auto newExtract = rewriter.create<tensor::ExtractOp>(
457         loc, elementTy, indexCast.getIn(), extract.indices());
458 
459     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
460                                                     newExtract);
461 
462     return success();
463   }
464 };
465 
466 } // namespace
467 
468 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
469                                                  MLIRContext *context) {
470   results
471       .add<ExtractElementFromIndexCast, ExtractElementFromTensorFromElements>(
472           context);
473 }
474 
475 //===----------------------------------------------------------------------===//
476 // InsertOp
477 //===----------------------------------------------------------------------===//
478 
479 LogicalResult InsertOp::verify() {
480   // Verify the # indices match if we have a ranked type.
481   if (auto destType = dest().getType().dyn_cast<RankedTensorType>())
482     if (destType.getRank() != static_cast<int64_t>(indices().size()))
483       return emitOpError("incorrect number of indices");
484   return success();
485 }
486 
487 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
488   Attribute scalar = operands[0];
489   Attribute dest = operands[1];
490   if (scalar && dest)
491     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
492       if (scalar == splatDest.getSplatValue<Attribute>())
493         return dest;
494   return {};
495 }
496 
497 //===----------------------------------------------------------------------===//
498 // GenerateOp
499 //===----------------------------------------------------------------------===//
500 
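// For reference, a well-formed tensor.generate looks like this (illustrative):
//
//   %t = tensor.generate %m, %n {
//   ^bb0(%i : index, %j : index):
//     %elem = arith.constant 0.0 : f32
//     tensor.yield %elem : f32
//   } : tensor<?x?xf32>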
501 LogicalResult GenerateOp::verify() {
502   // Ensure that the tensor type has as many dynamic dimensions as are specified
503   // by the operands.
504   RankedTensorType resultTy = getType().cast<RankedTensorType>();
505   if (getNumOperands() != resultTy.getNumDynamicDims())
506     return emitError("must have as many index operands as dynamic extents "
507                      "in the result type");
508 
509   // Ensure that region arguments span the index space.
510   if (!llvm::all_of(body().getArgumentTypes(),
511                     [](Type ty) { return ty.isIndex(); }))
512     return emitError("all body arguments must be index");
513   if (body().getNumArguments() != resultTy.getRank())
514     return emitError("must have one body argument per input dimension");
515 
516   // Ensure that the region yields an element of the right type.
517   auto yieldOp = cast<YieldOp>(body().getBlocks().front().getTerminator());
518 
519   if (yieldOp.value().getType() != resultTy.getElementType())
520     return emitOpError(
521         "body must be terminated with a `yield` operation of the tensor "
522         "element type");
523 
524   return success();
525 }
526 
527 void GenerateOp::build(
528     OpBuilder &b, OperationState &result, Type resultTy,
529     ValueRange dynamicExtents,
530     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
531   build(b, result, resultTy, dynamicExtents);
532 
533   // Build and populate body.
534   OpBuilder::InsertionGuard guard(b);
535   Region *bodyRegion = result.regions.front().get();
536   auto rank = resultTy.cast<RankedTensorType>().getRank();
537   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
538   SmallVector<Location, 2> argumentLocs(rank, result.location);
539   Block *bodyBlock =
540       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
541   bodyBuilder(b, result.location, bodyBlock->getArguments());
542 }
543 
544 namespace {
545 
/// Canonicalizes tensor.generate operations whose dynamic extent operands are
/// constants by folding those extents into the result type as static sizes.
/// A tensor.cast back to the original result type is inserted so that the
/// resulting IR is still well-typed.
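///
/// For example (illustrative), with %c16 a constant index:
///
/// ```mlir
///   %0 = tensor.generate %c16, %n { ... } : tensor<?x?xf32>
/// ```
///
/// becomes:
///
/// ```mlir
///   %gen = tensor.generate %n { ... } : tensor<16x?xf32>
///   %0 = tensor.cast %gen : tensor<16x?xf32> to tensor<?x?xf32>
/// ```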
550 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
551   using OpRewritePattern<GenerateOp>::OpRewritePattern;
552 
553   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
554                                 PatternRewriter &rewriter) const final {
555     auto resultType =
556         tensorFromElements.getResult().getType().cast<RankedTensorType>();
557 
558     if (resultType.hasStaticShape())
559       return failure();
560 
561     SmallVector<Value, 4> newOperands;
562     SmallVector<int64_t, 4> newShape;
563     auto operandsIt = tensorFromElements.dynamicExtents().begin();
564 
565     for (int64_t dim : resultType.getShape()) {
566       if (!ShapedType::isDynamic(dim)) {
567         newShape.push_back(dim);
568         continue;
569       }
570       APInt index;
571       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
572         newShape.push_back(ShapedType::kDynamicSize);
573         newOperands.push_back(*operandsIt++);
574         continue;
575       }
576       newShape.push_back(index.getSExtValue());
577       operandsIt++;
578     }
579 
580     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
581       return failure();
582 
583     auto loc = tensorFromElements.getLoc();
584     auto newOp = rewriter.create<GenerateOp>(
585         loc, RankedTensorType::get(newShape, resultType.getElementType()),
586         newOperands);
587     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
588                                 newOp.body().begin());
589     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
590                                                 newOp);
591     return success();
592   }
593 };
594 
595 /// Canonicalizes the pattern of the form
596 ///
597 /// %tensor = tensor.generate %x {
598 ///   ^bb0(%arg0: index):
599 ///   <computation>
600 ///   yield %1 : index
601 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
603 ///
604 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
605 /// tensor.generate operation has no side-effects.
606 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
607   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
608 
609   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
610                                 PatternRewriter &rewriter) const final {
611     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
612     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
613       return failure();
614 
615     BlockAndValueMapping mapping;
616     Block *body = tensorFromElements.getBody();
617     mapping.map(body->getArguments(), extract.indices());
618     for (auto &op : body->without_terminator())
619       rewriter.clone(op, mapping);
620 
621     auto yield = cast<YieldOp>(body->getTerminator());
622 
623     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
624     return success();
625   }
626 };
627 
628 /// Canonicalizes the pattern of the form
629 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
631 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
632 ///
633 /// to
634 ///
635 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
636 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
637   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
638 
639   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
640                                 PatternRewriter &rewriter) const final {
641     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
642     if (!tensorCast)
643       return failure();
644 
645     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
646                                                    extract.indices());
647     return success();
648   }
649 };
650 
651 } // namespace
652 
653 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
654                                              MLIRContext *context) {
655   // TODO: Move extract patterns to tensor::ExtractOp.
656   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
657               StaticTensorGenerate>(context);
658 }
659 
660 //===----------------------------------------------------------------------===//
661 // RankOp
662 //===----------------------------------------------------------------------===//
663 
664 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
665   // Constant fold rank when the rank of the operand is known.
666   auto type = getOperand().getType();
667   auto shapedType = type.dyn_cast<ShapedType>();
668   if (shapedType && shapedType.hasRank())
669     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
670   return IntegerAttr();
671 }
672 
673 //===----------------------------------------------------------------------===//
674 // ReshapeOp
675 //===----------------------------------------------------------------------===//
676 
677 static int64_t getNumElements(ShapedType type) {
678   int64_t numElements = 1;
679   for (auto dim : type.getShape())
680     numElements *= dim;
681   return numElements;
682 }
683 
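// For reference, a tensor.reshape has the following form (illustrative):
//
//   %dst = tensor.reshape %src(%shape)
//            : (tensor<4x1xf32>, tensor<1xi32>) -> tensor<4xf32>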
684 LogicalResult ReshapeOp::verify() {
685   TensorType operandType = source().getType().cast<TensorType>();
686   TensorType resultType = result().getType().cast<TensorType>();
687 
688   if (operandType.getElementType() != resultType.getElementType())
689     return emitOpError("element types of source and destination tensor "
690                        "types should be the same");
691 
692   int64_t shapeSize = shape().getType().cast<RankedTensorType>().getDimSize(0);
693   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
694   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
695 
696   if (resultRankedType) {
697     if (operandRankedType && resultRankedType.hasStaticShape() &&
698         operandRankedType.hasStaticShape()) {
699       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
700         return emitOpError("source and destination tensor should have the "
701                            "same number of elements");
702     }
703     if (ShapedType::isDynamic(shapeSize))
704       return emitOpError("cannot use shape operand with dynamic length to "
705                          "reshape to statically-ranked tensor type");
706     if (shapeSize != resultRankedType.getRank())
707       return emitOpError(
708           "length of shape operand differs from the result's tensor rank");
709   }
710   return success();
711 }
712 
713 //===----------------------------------------------------------------------===//
714 // Reassociative reshape ops
715 //===----------------------------------------------------------------------===//
716 
717 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
718   return getSymbolLessAffineMaps(getReassociationExprs());
719 }
720 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
721   return convertReassociationIndicesToExprs(getContext(),
722                                             getReassociationIndices());
723 }
724 
725 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
726   return getSymbolLessAffineMaps(getReassociationExprs());
727 }
728 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
729   return convertReassociationIndicesToExprs(getContext(),
730                                             getReassociationIndices());
731 }
732 
733 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
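/// For example (illustrative), the reassociation {[0, 1], [2]} applied to
/// tensor<4x5x6xf32> yields tensor<20x6xf32>; any dynamic size within a group
/// makes the collapsed size of that group dynamic.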
734 static RankedTensorType
735 computeTensorReshapeCollapsedType(RankedTensorType type,
736                                   ArrayRef<AffineMap> reassociation) {
737   auto shape = type.getShape();
738   SmallVector<int64_t, 4> newShape;
739   newShape.reserve(reassociation.size());
740 
741   // Use the fact that reassociation is valid to simplify the logic: only use
742   // each map's rank.
743   assert(isReassociationValid(reassociation) && "invalid reassociation");
744   unsigned currentDim = 0;
745   for (AffineMap m : reassociation) {
746     unsigned dim = m.getNumResults();
747     auto band = shape.slice(currentDim, dim);
748     int64_t size = 1;
749     if (llvm::is_contained(band, ShapedType::kDynamicSize))
750       size = ShapedType::kDynamicSize;
751     else
752       for (unsigned d = 0; d < dim; ++d)
753         size *= shape[currentDim + d];
754     newShape.push_back(size);
755     currentDim += dim;
756   }
757 
758   return RankedTensorType::get(newShape, type.getElementType());
759 }
760 
761 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
762                             ArrayRef<ReassociationIndices> reassociation,
763                             ArrayRef<NamedAttribute> attrs) {
764   auto resultType = computeTensorReshapeCollapsedType(
765       src.getType().cast<RankedTensorType>(),
766       getSymbolLessAffineMaps(
767           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
768   build(b, result, resultType, src, attrs);
769   result.addAttribute(getReassociationAttrName(),
770                       getReassociationIndicesAttribute(b, reassociation));
771 }
772 
773 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
774                           ArrayRef<ReassociationIndices> reassociation,
775                           ArrayRef<NamedAttribute> attrs) {
776   auto resultType = computeTensorReshapeCollapsedType(
777       src.getType().cast<RankedTensorType>(),
778       getSymbolLessAffineMaps(
779           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
780   build(b, result, resultType, src, attrs);
781   result.addAttribute(getReassociationAttrName(),
782                       getReassociationIndicesAttribute(b, reassociation));
783 }
784 
785 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
786                                         TensorReshapeOp, ExpandShapeOp>::value>
787 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
788                                            RankedTensorType expandedType,
789                                            RankedTensorType collapsedType) {
790   if (failed(
791           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
792     return failure();
793 
794   auto maps = op.getReassociationMaps();
795   RankedTensorType expectedType =
796       computeTensorReshapeCollapsedType(expandedType, maps);
797   if (collapsedType != expectedType)
798     return op.emitOpError("expected collapsed type to be ")
799            << expectedType << ", but got " << collapsedType;
800   return success();
801 }
802 
803 LogicalResult ExpandShapeOp::verify() {
804   return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
805 }
806 
807 LogicalResult CollapseShapeOp::verify() {
808   return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
809 }
810 
811 namespace {
812 /// Reshape of a splat constant can be replaced with a constant of the result
813 /// type.
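///
/// For example (illustrative):
///
/// ```mlir
///   %cst = arith.constant dense<1.0> : tensor<4x2xf32>
///   %0 = tensor.collapse_shape %cst [[0, 1]]
///       : tensor<4x2xf32> into tensor<8xf32>
/// ```
///
/// becomes:
///
/// ```mlir
///   %0 = arith.constant dense<1.0> : tensor<8xf32>
/// ```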
814 template <typename TensorReshapeOp>
815 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
816   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
817   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
818                                 PatternRewriter &rewriter) const override {
819     DenseElementsAttr attr;
820     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
821       return failure();
822     if (!attr || !attr.isSplat())
823       return failure();
824     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
825         reshapeOp.getResultType(), attr.getRawData(), true);
826     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
827     return success();
828   }
829 };
830 
831 /// Reshape of a FromElements can be replaced with a FromElements of the result
/// type.
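///
/// For example (illustrative):
///
/// ```mlir
///   %0 = tensor.from_elements %a, %b, %c, %d : tensor<4xf32>
///   %1 = tensor.expand_shape %0 [[0, 1]] : tensor<4xf32> into tensor<2x2xf32>
/// ```
///
/// becomes:
///
/// ```mlir
///   %1 = tensor.from_elements %a, %b, %c, %d : tensor<2x2xf32>
/// ```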
833 template <typename TensorReshapeOp>
834 struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
835   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
836   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
837                                 PatternRewriter &rewriter) const override {
838     auto fromElements =
839         reshapeOp.src().template getDefiningOp<FromElementsOp>();
840     if (!fromElements)
841       return failure();
842 
843     auto shapedTy = reshapeOp.getType().template cast<ShapedType>();
844 
845     if (!shapedTy.hasStaticShape())
846       return failure();
847 
848     rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
849                                                 fromElements.elements());
850     return success();
851   }
852 };
853 
854 } // namespace
855 
856 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
857                                                 MLIRContext *context) {
858   results.add<CollapseReshapeOps<ExpandShapeOp>,
859               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
860               FoldReshapeWithConstant<ExpandShapeOp>,
861               FoldReshapeWithFromElements<ExpandShapeOp>>(context);
862 }
863 
864 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
865                                                   MLIRContext *context) {
866   results.add<CollapseReshapeOps<CollapseShapeOp>,
867               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
868               FoldReshapeWithConstant<CollapseShapeOp>,
869               FoldReshapeWithFromElements<CollapseShapeOp>>(context);
870 }
871 
872 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
873   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
874 }
875 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
876   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
877 }
878 
879 //===----------------------------------------------------------------------===//
880 // ExtractSliceOp
881 //===----------------------------------------------------------------------===//
882 
883 /// An extract_slice op result type can be fully inferred from the source type
884 /// and the static representation of offsets, sizes and strides. Special
885 /// sentinels encode the dynamic case.
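///
/// For example (illustrative), extracting with sizes [4, ?, 1] from
/// tensor<8x16x4xf32> infers the result type tensor<4x?x1xf32>, independently
/// of the offsets and strides.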
886 RankedTensorType ExtractSliceOp::inferResultType(
887     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
888     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offsets/sizes/
  // strides, in which case we complete with offset=0, sizes from the source
  // tensor type, and strides=1.
892   unsigned rank = sourceRankedTensorType.getRank();
893   (void)rank;
894   assert(staticSizes.size() == rank &&
895          "unexpected staticSizes not equal to rank of source");
896   return RankedTensorType::get(staticSizes,
897                                sourceRankedTensorType.getElementType());
898 }
899 
900 RankedTensorType ExtractSliceOp::inferResultType(
901     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
902     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
903   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
904   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
905   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
906                              ShapedType::kDynamicStrideOrOffset);
907   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
908                              ShapedType::kDynamicSize);
909   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
910                              ShapedType::kDynamicStrideOrOffset);
911   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
912                                          staticSizes, staticStrides);
913 }
914 
/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. Special
/// sentinels encode the dynamic case. This rank-reduced variant additionally
/// drops unit dimensions until the inferred type reaches `resultRank`.
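/// For example (illustrative), with sizes [4, 1, ?] and `resultRank` 2, the
/// inferred type tensor<4x1x?xf32> is rank-reduced to tensor<4x?xf32>.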
918 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
919     unsigned resultRank, RankedTensorType sourceRankedTensorType,
920     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
921     ArrayRef<int64_t> strides) {
922   auto inferredType =
923       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
924           .cast<RankedTensorType>();
925   int rankDiff = inferredType.getRank() - resultRank;
926   if (rankDiff > 0) {
927     auto shape = inferredType.getShape();
928     llvm::SmallBitVector dimsToProject =
929         getPositionsOfShapeOne(rankDiff, shape);
930     SmallVector<int64_t> projectedShape;
931     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
932       if (!dimsToProject.test(pos))
933         projectedShape.push_back(shape[pos]);
934     inferredType =
935         RankedTensorType::get(projectedShape, inferredType.getElementType());
936   }
937   return inferredType;
938 }
939 
940 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
941     unsigned resultRank, RankedTensorType sourceRankedTensorType,
942     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
943     ArrayRef<OpFoldResult> strides) {
944   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
945   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
946   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
947                              ShapedType::kDynamicStrideOrOffset);
948   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
949                              ShapedType::kDynamicSize);
950   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
951                              ShapedType::kDynamicStrideOrOffset);
952   return ExtractSliceOp::inferRankReducedResultType(
953       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
954       staticStrides);
955 }
956 
957 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
958 /// result type. If the type passed is nullptr, it is inferred.
959 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
960                            RankedTensorType resultType, Value source,
961                            ArrayRef<OpFoldResult> offsets,
962                            ArrayRef<OpFoldResult> sizes,
963                            ArrayRef<OpFoldResult> strides,
964                            ArrayRef<NamedAttribute> attrs) {
965   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
966   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
967   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
968                              ShapedType::kDynamicStrideOrOffset);
969   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
970                              ShapedType::kDynamicSize);
971   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
972                              ShapedType::kDynamicStrideOrOffset);
973   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  // Structuring the implementation this way avoids duplication between
  // builders.
975   if (!resultType) {
976     resultType =
977         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
978                                         staticSizes, staticStrides)
979             .cast<RankedTensorType>();
980   }
981   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
982         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
983         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
984   result.addAttributes(attrs);
985 }
986 
987 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
988 /// result type.
989 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
990                            ArrayRef<OpFoldResult> offsets,
991                            ArrayRef<OpFoldResult> sizes,
992                            ArrayRef<OpFoldResult> strides,
993                            ArrayRef<NamedAttribute> attrs) {
994   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
995 }
996 
997 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
998 /// type passed is nullptr, it is inferred.
999 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1000                            RankedTensorType resultType, Value source,
1001                            ValueRange offsets, ValueRange sizes,
1002                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1003   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1004       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1005   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1006       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1007   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1008       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1009   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1010 }
1011 
1012 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
1013 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1014                            ValueRange offsets, ValueRange sizes,
1015                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1016   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1017 }
1018 
1019 template <typename OpTy>
1020 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
1021                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
1023   switch (result) {
1024   case SliceVerificationResult::Success:
1025     return success();
1026   case SliceVerificationResult::RankTooLarge:
1027     return op.emitError("expected rank to be smaller or equal to ")
1028            << "the other rank. ";
1029   case SliceVerificationResult::SizeMismatch:
1030     return op.emitError("expected type to be ")
1031            << expectedType << " or a rank-reduced version. (size mismatch) ";
1032   case SliceVerificationResult::ElemTypeMismatch:
1033     return op.emitError("expected element type to be ")
1034            << memrefType.getElementType();
1035   default:
1036     llvm_unreachable("unexpected extract_slice op verification result");
1037   }
1038 }
1039 
1040 /// Verifier for ExtractSliceOp.
1041 LogicalResult ExtractSliceOp::verify() {
1042   // Verify result type against inferred type.
1043   auto expectedType = ExtractSliceOp::inferResultType(
1044       getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
1045   auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
1046   return produceSliceErrorMsg(result, *this, expectedType);
1047 }
1048 
/// Infer the canonical type of the result of an extract_slice op. Returns the
/// rank-reduced inferred type if it already has rank `resultRank`, and the
/// non-rank-reduced inferred type otherwise.
1052 static RankedTensorType
1053 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1054                             ArrayRef<OpFoldResult> mixedOffsets,
1055                             ArrayRef<OpFoldResult> mixedSizes,
1056                             ArrayRef<OpFoldResult> mixedStrides) {
1057   auto resultType =
1058       ExtractSliceOp::inferRankReducedResultType(
1059           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1060           .cast<RankedTensorType>();
1061   if (resultType.getRank() != resultRank) {
1062     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1063                                                  mixedSizes, mixedStrides)
1064                      .cast<RankedTensorType>();
1065   }
1066   return resultType;
1067 }
1068 
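/// Return a bit vector, indexed by the positions of the mixed sizes, marking
/// the unit dimensions that are dropped from the result by rank reduction.
/// For example (illustrative), a slice with sizes [4, 1, ?] and result type
/// tensor<4x?xf32> drops dimension 1.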
1069 llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
1070   ArrayRef<int64_t> resultShape = getType().getShape();
1071   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1072   llvm::SmallBitVector droppedDims(mixedSizes.size());
1073   unsigned shapePos = 0;
1074   for (const auto &size : enumerate(mixedSizes)) {
1075     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // A dimension is preserved if its size is not statically 1, or if the
    // matched result dimension is itself statically 1 (i.e. the unit dimension
    // was kept rather than rank-reduced away).
1079     if (!sizeVal || sizeVal.getValue() != 1 ||
1080         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1081       shapePos++;
1082       continue;
1083     }
1084     droppedDims.set(size.index());
1085   }
1086   return droppedDims;
1087 }
1088 
1089 LogicalResult ExtractSliceOp::reifyResultShapes(
1090     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1091   reifiedReturnShapes.resize(1);
1092   reifiedReturnShapes[0].reserve(getType().getRank());
1093   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1094   llvm::SmallBitVector droppedDims = getDroppedDims();
1095   Location loc = getLoc();
1096   for (const auto &size : enumerate(mixedSizes)) {
1097     if (droppedDims.test(size.index()))
1098       continue;
1099     if (auto attr = size.value().dyn_cast<Attribute>()) {
1100       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1101           loc, attr.cast<IntegerAttr>().getInt()));
1102       continue;
1103     }
1104     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1105   }
1106   return success();
1107 }
1108 
1109 namespace {
/// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
/// `canFoldIntoConsumerOp` is true.
1113 ///
1114 /// Example:
1115 /// ```
1116 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1117 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1118 ///   tensor<3x4xf32>
1119 /// ```
1120 /// is rewritten into:
1121 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///   tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1124 /// ```
1125 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1126 public:
1127   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1128 
1129   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1130                                 PatternRewriter &rewriter) const override {
    // If any operand is a constant, bail out and let the
    // OpWithOffsetSizesAndStridesConstantArgumentFolder pattern kick in first.
1132     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1133           return matchPattern(operand, matchConstantIndex());
1134         }))
1135       return failure();
1136 
1137     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1138     if (!castOp)
1139       return failure();
1140 
1141     if (!canFoldIntoConsumerOp(castOp))
1142       return failure();
1143 
1144     /// Deduce the type of the result to use for the canonicalized operation.
1145     RankedTensorType resultType = getCanonicalSliceResultType(
1146         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1147         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1148         sliceOp.getMixedStrides());
1149     Value newSlice = rewriter.create<ExtractSliceOp>(
1150         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1151         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1152         sliceOp.static_sizes(), sliceOp.static_strides());
1153     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1154                                                 newSlice);
1155     return success();
1156   }
1157 };
1158 
/// Slice elements from `values` into `outValues`. For each dimension, `counts`
/// holds the number of elements covered by one step along that dimension of
/// the original values (i.e. the row-major strides). The output values can be
/// used to construct a DenseElementsAttr.
1162 template <typename IterTy, typename ElemTy>
1163 static void sliceElements(IterTy values, ArrayRef<int64_t> counts,
1164                           ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1165                           ArrayRef<int64_t> strides,
1166                           llvm::SmallVectorImpl<ElemTy> *outValues) {
1167   assert(offsets.size() == sizes.size());
1168   assert(offsets.size() == strides.size());
1169   if (offsets.empty())
1170     return;
1171 
1172   int64_t offset = offsets.front();
1173   int64_t size = sizes.front();
1174   int64_t stride = strides.front();
1175   if (offsets.size() == 1) {
1176     for (int64_t i = 0; i < size; ++i, offset += stride)
1177       outValues->push_back(*(values + offset));
1178 
1179     return;
1180   }
1181 
1182   for (int64_t i = 0; i < size; ++i, offset += stride) {
1183     auto begin = values + offset * counts.front();
1184     sliceElements<IterTy, ElemTy>(begin, counts.drop_front(),
1185                                   offsets.drop_front(), sizes.drop_front(),
1186                                   strides.drop_front(), outValues);
1187   }
1188 }
1189 
/// Fold arith.constant and tensor.extract_slice into arith.constant. The
/// folded operation might introduce more constant data; users can control
/// this behavior via the control function.
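///
/// For example (illustrative):
///
/// ```mlir
///   %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
///   %0 = tensor.extract_slice %cst[1] [2] [1] : tensor<4xi32> to tensor<2xi32>
/// ```
///
/// becomes:
///
/// ```mlir
///   %0 = arith.constant dense<[2, 3]> : tensor<2xi32>
/// ```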
1193 class ConstantOpExtractSliceFolder final
1194     : public OpRewritePattern<ExtractSliceOp> {
1195 public:
1196   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1197 
1198   ConstantOpExtractSliceFolder(MLIRContext *context,
1199                                ControlConstantExtractSliceFusionFn controlFn)
1200       : OpRewritePattern<ExtractSliceOp>(context),
1201         controlFn(std::move(controlFn)) {}
1202 
1203   LogicalResult matchAndRewrite(ExtractSliceOp op,
1204                                 PatternRewriter &rewriter) const override {
1205     DenseElementsAttr attr;
1206     if (!matchPattern(op.source(), m_Constant(&attr)))
1207       return failure();
1208 
1209     // A constant splat is handled by fold().
1210     if (attr.isSplat())
1211       return failure();
1212 
1213     // Dynamic result shape is not supported.
1214     auto sourceType = op.source().getType().cast<ShapedType>();
1215     auto resultType = op.result().getType().cast<ShapedType>();
1216     if (!sourceType.hasStaticShape() || !resultType.hasStaticShape())
1217       return failure();
1218 
1219     // Customized control over the folding.
1220     if (!controlFn(op))
1221       return failure();
1222 
1223     int64_t count = sourceType.getNumElements();
1224     if (count == 0)
1225       return failure();
1226 
1227     // Check if there are any dynamic parts, which are not supported.
1228     auto offsets = extractFromI64ArrayAttr(op.static_offsets());
1229     if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
1230       return failure();
1231     auto sizes = extractFromI64ArrayAttr(op.static_sizes());
1232     if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
1233       return failure();
1234     auto strides = extractFromI64ArrayAttr(op.static_strides());
1235     if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
1236       return failure();
1237 
1238     // Compute the stride for each dimension.
1239     SmallVector<int64_t> counts;
1240     ArrayRef<int64_t> shape = sourceType.getShape();
1241     counts.reserve(shape.size());
1242     for (int64_t v : shape) {
1243       count = count / v;
1244       counts.push_back(count);
1245     }
1246 
1247     // New attribute constructed by the sliced values.
1248     DenseElementsAttr newAttr;
1249 
1250     if (auto elems = attr.dyn_cast<DenseIntElementsAttr>()) {
1251       SmallVector<APInt> outValues;
1252       outValues.reserve(sourceType.getNumElements());
1253       sliceElements<DenseElementsAttr::IntElementIterator, APInt>(
1254           elems.begin(), counts, offsets, sizes, strides, &outValues);
1255       newAttr = DenseElementsAttr::get(resultType, outValues);
1256     } else if (auto elems = attr.dyn_cast<DenseFPElementsAttr>()) {
1257       SmallVector<APFloat> outValues;
1258       outValues.reserve(sourceType.getNumElements());
1259       sliceElements<DenseElementsAttr::FloatElementIterator, APFloat>(
1260           elems.begin(), counts, offsets, sizes, strides, &outValues);
1261       newAttr = DenseElementsAttr::get(resultType, outValues);
1262     }
1263 
1264     if (newAttr) {
1265       rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, resultType, newAttr);
1266       return success();
1267     }
1268 
1269     return failure();
1270   }
1271 
1272 private:
1273   /// This additionally controls whether the fold happens or not. Users can
1274   /// impose their heuristics in the function.
1275   ControlConstantExtractSliceFusionFn controlFn;
1276 };
1277 
1278 } // namespace
1279 
1280 void mlir::tensor::populateFoldConstantExtractSlicePatterns(
1281     RewritePatternSet &patterns,
1282     const ControlConstantExtractSliceFusionFn &controlFn) {
1283   patterns.add<ConstantOpExtractSliceFolder>(patterns.getContext(), controlFn);
1284 }
1285 
1286 /// Return the canonical type of the result of an extract_slice op.
1287 struct SliceReturnTypeCanonicalizer {
1288   RankedTensorType operator()(ExtractSliceOp op,
1289                               ArrayRef<OpFoldResult> mixedOffsets,
1290                               ArrayRef<OpFoldResult> mixedSizes,
1291                               ArrayRef<OpFoldResult> mixedStrides) {
1292     return getCanonicalSliceResultType(op.getType().getRank(),
1293                                        op.getSourceType(), mixedOffsets,
1294                                        mixedSizes, mixedStrides);
1295   }
1296 };
1297 
1298 /// A canonicalizer wrapper to replace ExtractSliceOps.
1299 struct SliceCanonicalizer {
1300   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1301                   ExtractSliceOp newOp) {
1302     Value replacement = newOp.getResult();
1303     if (replacement.getType() != op.getType())
1304       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1305                                                     replacement);
1306     rewriter.replaceOp(op, replacement);
1307   }
1308 };
1309 
1310 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1311                                                  MLIRContext *context) {
1312   results.add<
1313       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1314           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1315       ExtractSliceOpCastFolder>(context);
1316 }
1317 
/// Return success if `op` describes an identity slice of `shapedType`: all
/// offsets are 0, all sizes equal the corresponding dimensions of
/// `shapedType`, and all strides are 1.
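///
/// For instance (illustrative), the following extract_slice is an identity
/// slice of its source and can be folded away:
///
/// ```mlir
///   %1 = tensor.extract_slice %0[0, 0] [8, 16] [1, 1]
///       : tensor<8x16xf32> to tensor<8x16xf32>
/// ```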
1319 static LogicalResult
1320 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1321                                            ShapedType shapedType) {
1322   OpBuilder b(op.getContext());
1323   for (OpFoldResult ofr : op.getMixedOffsets())
1324     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1325       return failure();
  // Rank-reducing no-ops only need to inspect the leading dimensions;
  // llvm::zip, which stops at the shorter range, is appropriate here.
1328   auto shape = shapedType.getShape();
1329   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1330     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1331       return failure();
1332   for (OpFoldResult ofr : op.getMixedStrides())
1333     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1334       return failure();
1335   return success();
1336 }
1337 
1338 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1339 /// we can return the InsertSliceOp's source directly.
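///
/// For example (illustrative):
///
/// ```mlir
///   %0 = tensor.insert_slice %slice into %dest[0, 0] [16, 16] [1, 1]
///       : tensor<16x16xf32> into tensor<64x64xf32>
///   %1 = tensor.extract_slice %0[0, 0] [16, 16] [1, 1]
///       : tensor<64x64xf32> to tensor<16x16xf32>
/// ```
///
/// Here %1 folds to %slice.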
1340 // TODO: This only checks the immediate producer; extend to go up the
1341 // insert/extract chain if the slices are disjoint.
1342 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1343   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1344 
1345   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1346   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1347       insertOp.isSameAs(extractOp, isSame))
1348     return insertOp.source();
1349 
1350   return {};
1351 }
1352 
1353 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute> operands) {
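  // A slice of a splat constant is itself a splat of the (static) result type.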
1354   if (auto splat = operands[0].dyn_cast_or_null<SplatElementsAttr>()) {
1355     auto resultType = result().getType().cast<ShapedType>();
1356     if (resultType.hasStaticShape())
1357       return splat.resizeSplat(resultType);
1358   }
1359   if (getSourceType() == getType() &&
1360       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1361     return this->source();
1362   if (Value slice = foldExtractAfterInsertSlice(*this))
1363     return slice;
1364 
1365   return OpFoldResult();
1366 }
1367 
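/// Note: this helper builds an extract_slice with all-zero offsets, unit
/// strides, and sizes matching `tensor`, i.e. a slice that covers the entire
/// source and only drops unit dimensions to reach `targetType`. For instance
/// (illustrative), a source of type tensor<4x1x8xf32> with a target type of
/// tensor<4x8xf32> drops the unit dimension.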
1368 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1369     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1370   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1371   unsigned rank = rankedTensorType.getRank();
1372   auto shape = rankedTensorType.getShape();
1373   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1374   SmallVector<OpFoldResult> sizes;
1375   for (unsigned i = 0, e = rank; i < e; ++i) {
1376     OpFoldResult dim;
1377     if (rankedTensorType.isDynamicDim(i))
1378       dim = b.createOrFold<tensor::DimOp>(
1379           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1380     else
1381       dim = b.getIndexAttr(shape[i]);
1382     sizes.push_back(dim);
1383   }
1384   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1385   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1386                                                 offsets, sizes, strides);
1387 }
1388 
1389 //===----------------------------------------------------------------------===//
1390 // InsertSliceOp
1391 //===----------------------------------------------------------------------===//
1392 
// Build an InsertSliceOp with mixed static and dynamic entries.
1394 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1395                           Value dest, ArrayRef<OpFoldResult> offsets,
1396                           ArrayRef<OpFoldResult> sizes,
1397                           ArrayRef<OpFoldResult> strides,
1398                           ArrayRef<NamedAttribute> attrs) {
1399   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1400   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1401   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1402                              ShapedType::kDynamicStrideOrOffset);
1403   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1404                              ShapedType::kDynamicSize);
1405   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1406                              ShapedType::kDynamicStrideOrOffset);
1407   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1408         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1409         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1410   result.addAttributes(attrs);
1411 }
1412 
// Build an InsertSliceOp with dynamic entries.
1414 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1415                           Value dest, ValueRange offsets, ValueRange sizes,
1416                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1417   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1418       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1419   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1420       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1421   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1422       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1423   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1424 }
1425 
1426 static SliceVerificationResult
1427 verifyInsertSliceOp(ShapedType srcType, ShapedType dstType,
1428                     ArrayAttr staticOffsets, ArrayAttr staticSizes,
1429                     ArrayAttr staticStrides,
1430                     ShapedType *expectedType = nullptr) {
  // insert_slice is the inverse of extract_slice, so use the same type
  // inference.
1432   auto expected = ExtractSliceOp::inferRankReducedResultType(
1433                       srcType.getRank(), dstType.cast<RankedTensorType>(),
1434                       extractFromI64ArrayAttr(staticOffsets),
1435                       extractFromI64ArrayAttr(staticSizes),
1436                       extractFromI64ArrayAttr(staticStrides))
1437                       .cast<ShapedType>();
1438   if (expectedType)
1439     *expectedType = expected;
1440   return isRankReducedType(expected, srcType);
1441 }
1442 
1443 /// Verifier for InsertSliceOp.
1444 LogicalResult InsertSliceOp::verify() {
1445   ShapedType expectedType;
1446   auto result =
1447       verifyInsertSliceOp(getSourceType(), getType(), static_offsets(),
1448                           static_sizes(), static_strides(), &expectedType);
1449   return produceSliceErrorMsg(result, *this, expectedType);
1450 }
1451 
/// If we have two consecutive InsertSliceOps writing to the same slice, we can
/// rewrite the second InsertSliceOp's destination to point at the first one's
/// destination.
1454 ///
1455 /// Example:
1456 ///
1457 /// ```mlir
1458 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1459 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1460 /// ```
1461 ///
1462 /// folds into:
1463 ///
1464 /// ```mlir
1465 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1466 /// ```
1467 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1468   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1469 
1470   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1471   if (!prevInsertOp ||
1472       prevInsertOp.source().getType() != insertOp.source().getType() ||
1473       !prevInsertOp.isSameAs(insertOp, isSame))
1474     return failure();
1475 
1476   insertOp.destMutable().assign(prevInsertOp.dest());
1477   return success();
1478 }
1479 
1480 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
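  // Inserting the entire source over an identically-typed destination is a
  // no-op; the result is just the source.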
1481   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1482       getSourceType() == getType() &&
1483       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1484     return this->source();
1485   if (succeeded(foldInsertAfterInsertSlice(*this)))
1486     return getResult();
1487   return OpFoldResult();
1488 }
1489 
1490 LogicalResult InsertSliceOp::reifyResultShapes(
1491     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
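  // The result of insert_slice has the same shape as the destination tensor,
  // so each result dimension can be reified from `dest()`.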
1492   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1493   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1494     reifiedReturnShapes[0][dim] =
1495         builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
1496   }
1497   return success();
1498 }
1499 
1500 namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
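///
/// For example (illustrative), assuming %c0 is produced by arith.constant:
///
/// ```mlir
///   %r = tensor.insert_slice %s into %d[%c0, %c0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```
///
/// is rewritten so the constant offsets become static:
///
/// ```mlir
///   %r = tensor.insert_slice %s into %d[0, 0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```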
1502 class InsertSliceOpConstantArgumentFolder final
1503     : public OpRewritePattern<InsertSliceOp> {
1504 public:
1505   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1506 
1507   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1508                                 PatternRewriter &rewriter) const override {
    // No constant operands; nothing to fold.
1510     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1511           return matchPattern(operand, matchConstantIndex());
1512         }))
1513       return failure();
1514 
    // At least one of offsets/sizes/strides is a new constant.
    // Form the new lists of operands and constant attributes from the
    // existing ones.
1518     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1519     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1520     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1521     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1522     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1523     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1524 
1525     // Create the new op in canonical form.
1526     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1527         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1528         mixedOffsets, mixedSizes, mixedStrides);
1529     Value toInsert = insertSliceOp.source();
1530     if (sourceType != insertSliceOp.getSourceType())
1531       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1532                                                  sourceType, toInsert);
1533     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1534         insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
1535         mixedStrides);
1536     return success();
1537   }
1538 };
1539 
1540 /// Fold tensor_casts with insert_slice operations. If the source or destination
1541 /// tensor is a tensor_cast that removes static type information, the cast is
1542 /// folded into the insert_slice operation. E.g.:
1543 ///
1544 /// ```mlir
1545 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1546 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1547 /// ```
1548 ///
1549 /// folds into:
1550 ///
1551 /// ```mlir
1552 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1553 /// ```
1554 ///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back to ensure that the type of the result
/// does not change.
1558 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1559   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1560 
1561   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1562                                 PatternRewriter &rewriter) const override {
1563     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1564           return matchPattern(operand, matchConstantIndex());
1565         }))
1566       return failure();
1567 
1568     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1569       auto castOp = v.getDefiningOp<tensor::CastOp>();
1570       if (!castOp || !canFoldIntoConsumerOp(castOp))
1571         return llvm::None;
1572       return castOp.source();
1573     };
1574     Optional<Value> sourceCastSource =
1575         getSourceOfCastOp(insertSliceOp.source());
1576     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
1577     if (!sourceCastSource && !destCastSource)
1578       return failure();
1579 
1580     auto src = (sourceCastSource ? *sourceCastSource : insertSliceOp.source());
1581     auto dst = (destCastSource ? *destCastSource : insertSliceOp.dest());
1582 
1583     auto srcType = src.getType().cast<ShapedType>();
1584     auto dstType = dst.getType().cast<ShapedType>();
1585     if (verifyInsertSliceOp(srcType, dstType, insertSliceOp.static_offsets(),
1586                             insertSliceOp.static_sizes(),
1587                             insertSliceOp.static_strides()) !=
1588         SliceVerificationResult::Success)
1589       return failure();
1590 
1591     Value replacement = rewriter.create<InsertSliceOp>(
1592         insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
1593         insertSliceOp.getMixedSizes(), insertSliceOp.getMixedStrides());
1594 
1595     if (replacement.getType() != insertSliceOp.getType()) {
1596       replacement = rewriter.create<tensor::CastOp>(
1597           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1598     }
1599     rewriter.replaceOp(insertSliceOp, replacement);
1600     return success();
1601   }
1602 };
1603 
/// If additional static type information can be deduced from an insert_slice's
/// size operands, insert an explicit cast of the op's source operand. This
/// enables other canonicalization patterns that match tensor.cast ops, such as
/// `ForOpTensorCastFolder` in SCF.
1608 ///
1609 /// Example:
1610 ///
1611 /// ```mlir
1612 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1613 ///       : tensor<?x?xf32> into ...
1614 /// ```
1615 ///
1616 /// folds into:
1617 ///
1618 /// ```mlir
1619 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1620 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1621 ///       : tensor<64x64xf32> into ...
1622 /// ```
1623 struct InsertSliceOpSourceCastInserter final
1624     : public OpRewritePattern<InsertSliceOp> {
1625   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1626 
1627   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1628                                 PatternRewriter &rewriter) const override {
1629     RankedTensorType srcType = insertSliceOp.getSourceType();
1630     if (srcType.getRank() != insertSliceOp.getType().getRank())
1631       return failure();
1632     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1633                                      srcType.getShape().end());
1634     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1635       if (Optional<int64_t> constInt =
1636               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1637         newSrcShape[i] = *constInt;
1638     }
1639 
1640     RankedTensorType newSrcType =
1641         RankedTensorType::get(newSrcShape, srcType.getElementType());
1642     if (srcType == newSrcType ||
1643         !preservesStaticInformation(srcType, newSrcType) ||
1644         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1645       return failure();
1646 
1647     // newSrcType is:
1648     //   1) Different from srcType.
1649     //   2) "More static" than srcType.
1650     //   3) Cast-compatible with srcType.
1651     // Insert the cast.
1652     Value cast = rewriter.create<tensor::CastOp>(
1653         insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
1654     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1655         insertSliceOp, cast, insertSliceOp.dest(),
1656         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1657         insertSliceOp.getMixedStrides());
1658     return success();
1659   }
1660 };
1661 } // namespace
1662 
1663 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1664                                                 MLIRContext *context) {
1665   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1666               InsertSliceOpSourceCastInserter>(context);
1667 }
1668 
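/// Note: like the extract_slice helper above, this builds an insert_slice with
/// all-zero offsets, unit strides, and sizes taken from `dest`, so `tensor` is
/// written over the entire destination; `tensor` may have a lower
/// (rank-reduced) rank than `dest`.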
1669 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1670                                                              Location loc,
1671                                                              Value tensor,
1672                                                              Value dest) {
1673   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1674   unsigned rank = rankedTensorType.getRank();
1675   auto shape = rankedTensorType.getShape();
1676   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1677   SmallVector<OpFoldResult> sizes;
1678   for (unsigned i = 0, e = rank; i < e; ++i) {
1679     OpFoldResult dim;
1680     if (rankedTensorType.isDynamicDim(i))
1681       dim = b.createOrFold<tensor::DimOp>(
1682           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1683     else
1684       dim = b.getIndexAttr(shape[i]);
1685     sizes.push_back(dim);
1686   }
1687   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1688   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1689                                                sizes, strides);
1690 }
1691 
1692 //===----------------------------------------------------------------------===//
1693 // PadOp
1694 //===----------------------------------------------------------------------===//
1695 
1696 // TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
1697 // supports optional types.
1698 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
1699                     Type typeToInfer, Type typeToInferFrom) {}
1700 
1701 ParseResult parseInferType(OpAsmParser &parser,
1702                            Optional<OpAsmParser::OperandType> optOperand,
1703                            Type &typeToInfer, Type typeToInferFrom) {
1704   if (optOperand)
1705     typeToInfer = typeToInferFrom;
1706   return success();
1707 }
1708 
1709 LogicalResult PadOp::verify() {
1710   auto sourceType = source().getType().cast<RankedTensorType>();
1711   auto resultType = result().getType().cast<RankedTensorType>();
1712   auto expectedType =
1713       PadOp::inferResultType(sourceType, extractFromI64ArrayAttr(static_low()),
1714                              extractFromI64ArrayAttr(static_high()));
1715   for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
1716     if (resultType.getDimSize(i) == expectedType.getDimSize(i))
1717       continue;
1718     if (expectedType.isDynamicDim(i))
1719       continue;
1720     return emitError("specified type ")
1721            << resultType << " does not match the inferred type "
1722            << expectedType;
1723   }
1724 
1725   auto &region = getRegion();
1726   unsigned rank = resultType.getRank();
1727   Block &block = region.front();
1728   if (block.getNumArguments() != rank)
1729     return emitError("expected the block to have ") << rank << " arguments";
1730 
1731   // Note: the number and type of yield values are checked in the YieldOp.
1732   for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
1733     if (!en.value().isIndex())
1734       return emitOpError("expected block argument ")
1735              << (en.index() + 1) << " to be an index";
1736   }
1737 
1738   // Ensure that the region yields an element of the right type.
1739   auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
1740   if (yieldOp.value().getType() !=
1741       getType().cast<ShapedType>().getElementType())
1742     return emitOpError("expected yield type to match shape element type");
1743 
1744   return success();
1745 }
1746 
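/// Infer the result type from the source type and the static low/high padding
/// amounts. For instance (illustrative), padding tensor<4x?xf32> with static
/// low padding [1, 2] and static high padding [2, 3] yields tensor<7x?xf32>:
/// static dimensions grow by low + high, and dynamic dimensions stay dynamic.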
1747 RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
1748                                         ArrayRef<int64_t> staticLow,
1749                                         ArrayRef<int64_t> staticHigh,
1750                                         ArrayRef<int64_t> resultShape) {
1751   unsigned rank = sourceType.getRank();
1752   assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
1753   assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
1754   assert((resultShape.empty() || resultShape.size() == rank) &&
1755          "unexpected resultShape size mismatch");
1756 
1757   SmallVector<int64_t, 4> inferredShape;
1758   for (auto i : llvm::seq<unsigned>(0, rank)) {
1759     if (sourceType.isDynamicDim(i) ||
1760         staticLow[i] == ShapedType::kDynamicSize ||
1761         staticHigh[i] == ShapedType::kDynamicSize) {
1762       inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
1763                                                   : resultShape[i]);
1764     } else {
1765       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
1766       assert((resultShape.empty() || size == resultShape[i] ||
1767               resultShape[i] == ShapedType::kDynamicSize) &&
1768              "mismatch between inferred shape and result shape");
1769       inferredShape.push_back(size);
1770     }
1771   }
1772 
1773   return RankedTensorType::get(inferredShape, sourceType.getElementType());
1774 }
1775 
1776 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1777                   ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
1778                   ValueRange low, ValueRange high, bool nofold,
1779                   ArrayRef<NamedAttribute> attrs) {
1780   auto sourceType = source.getType().cast<RankedTensorType>();
1781   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
1782   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
1783         b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
1784   result.addAttributes(attrs);
1785 }
1786 
1787 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1788                   ValueRange low, ValueRange high, bool nofold,
1789                   ArrayRef<NamedAttribute> attrs) {
1790   auto sourceType = source.getType().cast<RankedTensorType>();
1791   unsigned rank = sourceType.getRank();
1792   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
1793   build(b, result, source, staticVector, staticVector, low, high, nofold,
1794         attrs);
1795 }
1796 
1797 void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
1798                   Value source, ArrayRef<OpFoldResult> low,
1799                   ArrayRef<OpFoldResult> high, bool nofold,
1800                   ArrayRef<NamedAttribute> attrs) {
1801   assert(resultType.isa<RankedTensorType>());
1802   auto sourceType = source.getType().cast<RankedTensorType>();
1803   SmallVector<Value, 4> dynamicLow, dynamicHigh;
1804   SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh carry the full padding configuration. Each entry
  // of `low`/`high` appends one value to staticLow/staticHigh; if that entry
  // is dynamic (i.e. not a constant), it also appends one value to
  // dynamicLow/dynamicHigh.
1809   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
1810                              ShapedType::kDynamicSize);
1811   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
1812                              ShapedType::kDynamicSize);
1813   if (!resultType) {
1814     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
1815   }
1816   build(b, result, resultType, source, dynamicLow, dynamicHigh,
1817         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
1818         nofold ? b.getUnitAttr() : UnitAttr());
1819   result.addAttributes(attrs);
1820 }
1821 
1822 namespace {
// Folds tensor.pad when the padding is statically zero on all dimensions and
// the `nofold` attribute doesn't request otherwise.
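//
// For example (illustrative, with %cst defined elsewhere):
//
// ```mlir
//   %0 = tensor.pad %src low[0, 0] high[0, 0] {
//   ^bb0(%i: index, %j: index):
//     tensor.yield %cst : f32
//   } : tensor<8x16xf32> to tensor<8x16xf32>
// ```
//
// is replaced by a tensor.cast of %src to the result type.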
1825 struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
1826   using OpRewritePattern<PadOp>::OpRewritePattern;
1827 
1828   LogicalResult matchAndRewrite(PadOp padTensorOp,
1829                                 PatternRewriter &rewriter) const override {
1830     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
1831       return failure();
1832     if (padTensorOp.nofold())
1833       return failure();
1834     rewriter.replaceOpWithNewOp<tensor::CastOp>(
1835         padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
1836     return success();
1837   }
1838 };
1839 
1840 // Fold CastOp into PadOp when adding static information.
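//
// For example (illustrative):
//
// ```mlir
//   %0 = tensor.cast %src : tensor<8x16xf32> to tensor<?x?xf32>
//   %1 = tensor.pad %0 low[1, 1] high[1, 1] {...}
//       : tensor<?x?xf32> to tensor<?x?xf32>
// ```
//
// is rewritten to pad %src directly, yielding a tensor<10x18xf32> result that
// is then cast back to the original tensor<?x?xf32> type.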
1841 struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
1842   using OpRewritePattern<PadOp>::OpRewritePattern;
1843 
1844   LogicalResult matchAndRewrite(PadOp padTensorOp,
1845                                 PatternRewriter &rewriter) const override {
1846     auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
1847     if (!tensor::canFoldIntoConsumerOp(castOp))
1848       return failure();
1849 
1850     auto newResultType = PadOp::inferResultType(
1851         castOp.source().getType().cast<RankedTensorType>(),
1852         extractFromI64ArrayAttr(padTensorOp.static_low()),
1853         extractFromI64ArrayAttr(padTensorOp.static_high()),
1854         padTensorOp.getResultType().getShape());
1855 
1856     if (newResultType == padTensorOp.getResultType()) {
1857       rewriter.updateRootInPlace(padTensorOp, [&]() {
1858         padTensorOp.sourceMutable().assign(castOp.source());
1859       });
1860     } else {
1861       auto newOp = rewriter.create<PadOp>(
1862           padTensorOp->getLoc(), newResultType, padTensorOp.source(),
1863           padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
1864           padTensorOp.static_high(), padTensorOp.nofold());
1865       BlockAndValueMapping mapper;
1866       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
1867 
1868       rewriter.replaceOpWithNewOp<tensor::CastOp>(
1869           padTensorOp, padTensorOp.getResultType(), newOp);
1870     }
1871     return success();
1872   }
1873 };
1874 
1875 // Fold CastOp using the result of PadOp back into the latter if it adds
1876 // static information.
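//
// For example (illustrative):
//
// ```mlir
//   %0 = tensor.pad %src low[1, 1] high[1, 1] {...}
//       : tensor<?x?xf32> to tensor<?x?xf32>
//   %1 = tensor.cast %0 : tensor<?x?xf32> to tensor<10x18xf32>
// ```
//
// is rewritten so the pad produces tensor<10x18xf32> directly, and both ops
// are replaced by its result.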
1877 struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
1878   using OpRewritePattern<PadOp>::OpRewritePattern;
1879 
1880   LogicalResult matchAndRewrite(PadOp padTensorOp,
1881                                 PatternRewriter &rewriter) const override {
1882     if (!padTensorOp.result().hasOneUse())
1883       return failure();
1884     auto tensorCastOp =
1885         dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
1886     if (!tensorCastOp)
1887       return failure();
1888     if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
1889                                             tensorCastOp.dest().getType()))
1890       return failure();
1891 
1892     auto replacementOp = rewriter.create<PadOp>(
1893         padTensorOp.getLoc(), tensorCastOp.dest().getType(),
1894         padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
1895         padTensorOp.static_low(), padTensorOp.static_high(),
1896         padTensorOp.nofold());
1897     replacementOp.region().takeBody(padTensorOp.region());
1898 
1899     rewriter.replaceOp(padTensorOp, replacementOp.result());
1900     rewriter.replaceOp(tensorCastOp, replacementOp.result());
1901     return success();
1902   }
1903 };
1904 } // namespace
1905 
1906 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
1907                                         MLIRContext *context) {
1908   results
1909       .add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast>(
1910           context);
1911 }
1912 
/// Return the padding value of the PadOp if it is constant. In this context,
1914 /// "constant" means an actual constant or "defined outside of the block".
1915 ///
1916 /// Values are considered constant in three cases:
1917 ///  - A ConstantLike value.
1918 ///  - A basic block argument from a different block.
1919 ///  - A value defined outside of the block.
1920 ///
1921 /// If the padding value is not constant, an empty Value is returned.
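///
/// For instance (illustrative), the padding value below is %cst because it is
/// defined outside of the pad's region:
///
/// ```mlir
///   %cst = arith.constant 0.0 : f32
///   %0 = tensor.pad %src low[1, 1] high[1, 1] {
///   ^bb0(%i: index, %j: index):
///     tensor.yield %cst : f32
///   } : tensor<8x16xf32> to tensor<10x18xf32>
/// ```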
1922 Value PadOp::getConstantPaddingValue() {
1923   auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
1924   if (!yieldOp)
1925     return {};
1926   Value padValue = yieldOp.value();
1927   // Check if yield value is a constant.
1928   if (matchPattern(padValue, m_Constant()))
1929     return padValue;
1930   // Check if yield value is defined inside the PadOp block.
1931   if (padValue.getParentBlock() == &getRegion().front())
1932     return {};
1933   // Else: Yield value defined outside of the PadOp block.
1934   return padValue;
1935 }
1936 
1937 OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
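  // Padding a statically-shaped tensor such that the result type equals the
  // source type is a no-op (unless `nofold` is set).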
1938   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
1939       !nofold())
1940     return source();
1941   return {};
1942 }
1943 
1944 //===----------------------------------------------------------------------===//
1945 // SplatOp
1946 //===----------------------------------------------------------------------===//
1947 
1948 OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
1949   auto constOperand = operands.front();
1950   if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
1951     return {};
1952 
  // SplatElementsAttr::get treats a single value for the second arg as being a
  // splat.
1954   return SplatElementsAttr::get(getType(), {constOperand});
1955 }
1956 
1957 //===----------------------------------------------------------------------===//
1958 // TableGen'd op method definitions
1959 //===----------------------------------------------------------------------===//
1960 
1961 #define GET_OP_CLASSES
1962 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
1963