//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Arithmetic/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "llvm/ADT/STLExtras.h"

using namespace mlir;
using namespace mlir::tensor;

/// Materialize a single constant operation from a given attribute value with
/// the desired resultant type.
Operation *TensorDialect::materializeConstant(OpBuilder &builder,
                                              Attribute value, Type type,
                                              Location loc) {
  if (arith::ConstantOp::isBuildableWith(value, type))
    return builder.create<arith::ConstantOp>(loc, value, type);
  if (complex::ConstantOp::isBuildableWith(value, type))
    return builder.create<complex::ConstantOp>(loc, type,
                                               value.cast<ArrayAttr>());
  return nullptr;
}

//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//

/// Returns true if `target` is a ranked tensor type that preserves static
/// information available in the `source` ranked tensor type.
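///
/// For example (illustrative):
///   preservesStaticInformation(tensor<?x16xf32>, tensor<8x16xf32>) is true,
///   since every dimension that is static in the source stays static in the
///   target, whereas
///   preservesStaticInformation(tensor<8x16xf32>, tensor<?x16xf32>) is false,
///   because the static extent 8 is lost.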
bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
  auto sourceType = source.dyn_cast<RankedTensorType>();
  auto targetType = target.dyn_cast<RankedTensorType>();

  // Requires RankedTensorType.
  if (!sourceType || !targetType)
    return false;

  // Requires same element type.
  if (sourceType.getElementType() != targetType.getElementType())
    return false;

  // Requires same rank.
  if (sourceType.getRank() != targetType.getRank())
    return false;

  // If cast is towards more static sizes along any dimension, don't fold.
  for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
    if (!ShapedType::isDynamic(std::get<0>(t)) &&
        ShapedType::isDynamic(std::get<1>(t)))
      return false;
  }

  return true;
}

/// Determines whether tensor::CastOp casts to a more dynamic version of the
/// source tensor. This is useful to fold a tensor.cast into a consuming op
/// and implement canonicalization patterns for ops in different dialects
/// that may consume the results of tensor.cast operations. Such foldable
/// tensor.cast operations are typically inserted as `slice` ops during
/// canonicalization, to preserve the type compatibility of their uses.
///
/// Returns true when all conditions are met:
/// 1. source and result are ranked tensors with same element type and rank.
/// 2. the source tensor type has more static information than the result
///    type.
///
/// Example:
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = consumer %1 ... : tensor<?x?xf32> ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = consumer %0 ... : tensor<8x16xf32> ...
/// ```
bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
  if (!castOp)
    return false;

  // Can fold if the source of cast has at least as much static information as
  // its results.
  return preservesStaticInformation(castOp.getType(),
                                    castOp.source().getType());
}

/// Performs folding of any operand of `op` if it comes from a tensor::CastOp
/// that can be folded.
LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
  bool folded = false;
  for (OpOperand &operand : op->getOpOperands()) {
    auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
    if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
      operand.set(castOp.getOperand());
      folded = true;
    }
  }
  return success(folded);
}

bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
  if (inputs.size() != 1 || outputs.size() != 1)
    return false;
  Type a = inputs.front(), b = outputs.front();
  auto aT = a.dyn_cast<TensorType>();
  auto bT = b.dyn_cast<TensorType>();
  if (!aT || !bT)
    return false;

  if (aT.getElementType() != bT.getElementType())
    return false;

  return succeeded(verifyCompatibleShape(aT, bT));
}

/// Compute a TensorType that has the joined shape knowledge of the two
/// given TensorTypes. The element types need to match.
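///
/// For example (illustrative), joining tensor<?x8xf32> with tensor<4x?xf32>
/// yields tensor<4x8xf32>; joining with an unranked tensor returns the other
/// operand; and joining tensor<4x8xf32> with tensor<5x8xf32> fails (returns
/// a null type).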
static TensorType joinShapes(TensorType one, TensorType two) {
  assert(one.getElementType() == two.getElementType());

  if (!one.hasRank())
    return two;
  if (!two.hasRank())
    return one;

  int64_t rank = one.getRank();
  if (rank != two.getRank())
    return {};

  SmallVector<int64_t, 4> join;
  join.reserve(rank);
  for (int64_t i = 0; i < rank; ++i) {
    if (one.isDynamicDim(i)) {
      join.push_back(two.getDimSize(i));
      continue;
    }
    if (two.isDynamicDim(i)) {
      join.push_back(one.getDimSize(i));
      continue;
    }
    if (one.getDimSize(i) != two.getDimSize(i))
      return {};
    join.push_back(one.getDimSize(i));
  }
  return RankedTensorType::get(join, one.getElementType());
}

namespace {

/// Replaces chains of two tensor.cast operations by a single tensor.cast
/// operation if doing so does not remove runtime constraints.
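///
/// For example (illustrative):
///   %1 = tensor.cast %0 : tensor<4x8xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<?x8xf32>
/// collapses to a single cast tensor<4x8xf32> -> tensor<?x8xf32>. In
/// contrast, casting tensor<?x?xf32> -> tensor<4x?xf32> -> tensor<?x8xf32>
/// cannot drop the intermediate cast, which enforces the runtime extent 4.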
struct ChainedTensorCast : public OpRewritePattern<CastOp> {
  using OpRewritePattern<CastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CastOp tensorCast,
                                PatternRewriter &rewriter) const final {
    auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();

    if (!tensorCastOperand)
      return failure();

    auto sourceType =
        tensorCastOperand.getOperand().getType().cast<TensorType>();
    auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
    auto resultType = tensorCast.getType().cast<TensorType>();

    // We can remove the intermediate cast if joining all three produces the
    // same result as just joining the source and result shapes.
    auto firstJoin =
        joinShapes(joinShapes(sourceType, intermediateType), resultType);

    // The join might not exist if the cast sequence would fail at runtime.
    if (!firstJoin)
      return failure();

    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
    auto newJoin = joinShapes(sourceType, resultType);
    if (firstJoin != newJoin)
      return failure();

    rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
                                        tensorCastOperand.getOperand());
    return success();
  }
};

} // namespace

void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
  results.add<ChainedTensorCast>(context);
}

//===----------------------------------------------------------------------===//
// DimOp
//===----------------------------------------------------------------------===//

void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                  int64_t index) {
  auto loc = result.location;
  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
  build(builder, result, source, indexValue);
}

Optional<int64_t> DimOp::getConstantIndex() {
  if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
    return constantOp.getValue().cast<IntegerAttr>().getInt();
  return {};
}

LogicalResult DimOp::verify() {
  // Assume unknown index to be in range.
  Optional<int64_t> index = getConstantIndex();
  if (!index.hasValue())
    return success();

  // Check that constant index is not knowingly out of range.
  auto type = source().getType();
  if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
    if (index.getValue() >= tensorType.getRank())
      return emitOpError("index is out of range");
  } else if (type.isa<UnrankedTensorType>()) {
    // Assume index to be in range.
  } else {
    llvm_unreachable("expected operand with tensor type");
  }
  return success();
}

OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  // All forms of folding require a known index.
  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
  if (!index)
    return {};

  // Folding for unranked types (UnrankedTensorType) is not supported.
  auto tensorType = source().getType().dyn_cast<RankedTensorType>();
  if (!tensorType)
    return {};

  // Fold if the shape extent along the given index is known.
  if (!tensorType.isDynamicDim(index.getInt())) {
    Builder builder(getContext());
    return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
  }

  Operation *definingOp = source().getDefiningOp();

  // Fold dim to the operand of tensor.generate.
  if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
    auto resultType =
        fromElements.getResult().getType().cast<RankedTensorType>();
    // The case where the type encodes the size of the dimension is handled
    // above.
    assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));

    // Find the operand of the fromElements that corresponds to this index.
    auto dynExtents = fromElements.dynamicExtents().begin();
    for (auto dim : resultType.getShape().take_front(index.getInt()))
      if (ShapedType::isDynamic(dim))
        dynExtents++;

    return Value{*dynExtents};
  }

  // The size at the given index is now known to be a dynamic size.
  unsigned unsignedIndex = index.getValue().getZExtValue();

  if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
    // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
    // `resolve-shaped-type-result-dims` pass.
    if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
        sliceOp.isDynamicSize(unsignedIndex)) {
      return {sliceOp.getDynamicSize(unsignedIndex)};
    }
  }

  // dim(cast) -> dim
  if (succeeded(foldTensorCast(*this)))
    return getResult();

  return {};
}

namespace {
/// Fold dim of a cast into the dim of the source of the tensor cast.
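///
/// For example (illustrative):
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %c0 : tensor<?x?xf32>
/// becomes
///   %d = tensor.dim %t, %c0 : tensor<4x?xf32>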
struct DimOfCastOp : public OpRewritePattern<DimOp> {
  using OpRewritePattern<DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = dimOp.source().getDefiningOp<CastOp>();
    if (!castOp)
      return failure();
    Value newSource = castOp.getOperand();
    rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
    return success();
  }
};
} // namespace

void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<DimOfCastOp>(context);
}

//===----------------------------------------------------------------------===//
// ExtractOp
//===----------------------------------------------------------------------===//

LogicalResult ExtractOp::verify() {
  // Verify the # indices match if we have a ranked type.
  if (auto tensorType = tensor().getType().dyn_cast<RankedTensorType>())
    if (tensorType.getRank() != static_cast<int64_t>(indices().size()))
      return emitOpError("incorrect number of indices for extract_element");

  return success();
}

OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
  // The tensor operand must be a known constant.
  Attribute tensor = operands.front();
  if (!tensor)
    return {};
  // If this is a splat elements attribute, simply return the value. All of the
  // elements of a splat attribute are the same.
  if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
    return splatTensor.getSplatValue<Attribute>();

  // Otherwise, collect the constant indices into the tensor.
  SmallVector<uint64_t, 8> indices;
  for (Attribute indexAttr : llvm::drop_begin(operands, 1)) {
    if (!indexAttr || !indexAttr.isa<IntegerAttr>())
      return {};
    indices.push_back(indexAttr.cast<IntegerAttr>().getInt());
  }

  // If this is an elements attribute, query the value at the given indices.
  auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
  if (elementsAttr && elementsAttr.isValidIndex(indices))
    return elementsAttr.getValues<Attribute>()[indices];
  return {};
}

//===----------------------------------------------------------------------===//
// FromElementsOp
//===----------------------------------------------------------------------===//

void FromElementsOp::build(OpBuilder &builder, OperationState &result,
                           Type resultType, ValueRange elements) {
  result.addOperands(elements);
  result.addTypes(resultType);
}

void FromElementsOp::build(OpBuilder &builder, OperationState &result,
                           ValueRange elements) {
  assert(!elements.empty() && "expected at least one element");
  Type resultType = RankedTensorType::get(
      {static_cast<int64_t>(elements.size())}, elements.front().getType());
  build(builder, result, resultType, elements);
}

OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
  if (!llvm::is_contained(operands, nullptr))
    return DenseElementsAttr::get(getType(), operands);
  return {};
}

namespace {

// Canonicalizes the pattern of the form
//
// %tensor = tensor.from_elements %element : tensor<1xi32>
// %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
//
// to just %element.
struct ExtractElementFromTensorFromElements
    : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
    if (!tensorFromElements)
      return failure();
    auto tensorType = tensorFromElements.getType().cast<RankedTensorType>();
    auto rank = tensorType.getRank();
    if (rank == 0) {
      rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
      return success();
    }
    SmallVector<APInt, 3> indices(rank);
    int64_t flatIndex = 0;
    int64_t stride = 1;
    // Accumulate the row-major flat index; `stride` is the product of the
    // dimension sizes trailing position `i`.
    for (int i = rank - 1; i >= 0; --i) {
      APInt index;
      if (!matchPattern(extract.indices()[i], m_ConstantInt(&index)))
        return failure();
      if (i < rank - 1)
        stride *= tensorType.getDimSize(i + 1);
      flatIndex += index.getSExtValue() * stride;
    }
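    // E.g. (illustrative), for tensor<2x3xi32> and indices [1, 2], the
    // row-major flat index is 1 * 3 + 2 = 5.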
    // Prevent out of bounds accesses. This can happen in invalid code that will
    // never execute.
    if (tensorFromElements->getNumOperands() <= flatIndex || flatIndex < 0)
      return failure();
    rewriter.replaceOp(extract, tensorFromElements.getOperand(flatIndex));
    return success();
  }
};

// Pushes the index_casts that occur before extractions to after the extract.
// This minimizes type conversion in some cases and enables the extract
// canonicalizer. This changes:
//
// %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
// %extract = tensor.extract %cast[%index] : tensor<1xindex>
//
// to the following:
//
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template and handling all tensor cast
// operations.
struct ExtractElementFromIndexCast
    : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    Location loc = extract.getLoc();
    auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
    if (!indexCast)
      return failure();

    Type elementTy = getElementTypeOrSelf(indexCast.getIn());

    auto newExtract = rewriter.create<tensor::ExtractOp>(
        loc, elementTy, indexCast.getIn(), extract.indices());

    rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
                                                    newExtract);

    return success();
  }
};

} // namespace

void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                 MLIRContext *context) {
  results
      .add<ExtractElementFromIndexCast, ExtractElementFromTensorFromElements>(
          context);
}

//===----------------------------------------------------------------------===//
// InsertOp
//===----------------------------------------------------------------------===//

LogicalResult InsertOp::verify() {
  // Verify the # indices match if we have a ranked type.
  if (auto destType = dest().getType().dyn_cast<RankedTensorType>())
    if (destType.getRank() != static_cast<int64_t>(indices().size()))
      return emitOpError("incorrect number of indices");
  return success();
}

OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
  Attribute scalar = operands[0];
  Attribute dest = operands[1];
  if (scalar && dest)
    if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
      if (scalar == splatDest.getSplatValue<Attribute>())
        return dest;
  return {};
}

//===----------------------------------------------------------------------===//
// GenerateOp
//===----------------------------------------------------------------------===//

LogicalResult GenerateOp::verify() {
  // Ensure that the tensor type has as many dynamic dimensions as are specified
  // by the operands.
  RankedTensorType resultTy = getType().cast<RankedTensorType>();
  if (getNumOperands() != resultTy.getNumDynamicDims())
    return emitError("must have as many index operands as dynamic extents "
                     "in the result type");

  // Ensure that region arguments span the index space.
  if (!llvm::all_of(body().getArgumentTypes(),
                    [](Type ty) { return ty.isIndex(); }))
    return emitError("all body arguments must be index");
  if (body().getNumArguments() != resultTy.getRank())
    return emitError("must have one body argument per input dimension");

  // Ensure that the region yields an element of the right type.
  auto yieldOp = cast<YieldOp>(body().getBlocks().front().getTerminator());

  if (yieldOp.value().getType() != resultTy.getElementType())
    return emitOpError(
        "body must be terminated with a `yield` operation of the tensor "
        "element type");

  return success();
}

void GenerateOp::build(
    OpBuilder &b, OperationState &result, Type resultTy,
    ValueRange dynamicExtents,
    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
  build(b, result, resultTy, dynamicExtents);

  // Build and populate body.
  OpBuilder::InsertionGuard guard(b);
  Region *bodyRegion = result.regions.front().get();
  auto rank = resultTy.cast<RankedTensorType>().getRank();
  SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
  SmallVector<Location, 2> argumentLocs(rank, result.location);
  Block *bodyBlock =
      b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
  bodyBuilder(b, result.location, bodyBlock->getArguments());
}

namespace {

/// Canonicalizes tensor.generate operations with constant dynamic extent
/// operands into the equivalent operation, with those extents expressed in
/// the result type instead. A tensor.cast is inserted to make sure that the
/// resulting IR is still well-typed.
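///
/// For example (illustrative), given a constant %c8 = 8:
///   %0 = tensor.generate %size, %c8 { ... } : tensor<?x?xf32>
/// becomes
///   %1 = tensor.generate %size { ... } : tensor<?x8xf32>
///   %0 = tensor.cast %1 : tensor<?x8xf32> to tensor<?x?xf32>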
struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
  using OpRewritePattern<GenerateOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
                                PatternRewriter &rewriter) const final {
    auto resultType =
        tensorFromElements.getResult().getType().cast<RankedTensorType>();

    if (resultType.hasStaticShape())
      return failure();

    SmallVector<Value, 4> newOperands;
    SmallVector<int64_t, 4> newShape;
    auto operandsIt = tensorFromElements.dynamicExtents().begin();

    for (int64_t dim : resultType.getShape()) {
      if (!ShapedType::isDynamic(dim)) {
        newShape.push_back(dim);
        continue;
      }
      APInt index;
      if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
        newShape.push_back(ShapedType::kDynamicSize);
        newOperands.push_back(*operandsIt++);
        continue;
      }
      newShape.push_back(index.getSExtValue());
      operandsIt++;
    }

    if (newOperands.size() == tensorFromElements.dynamicExtents().size())
      return failure();

    auto loc = tensorFromElements.getLoc();
    auto newOp = rewriter.create<GenerateOp>(
        loc, RankedTensorType::get(newShape, resultType.getElementType()),
        newOperands);
    rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
                                newOp.body().begin());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
                                                newOp);
    return success();
  }
};

/// Canonicalizes the pattern of the form
///
/// %tensor = tensor.generate %x {
///   ^bb0(%arg0: index):
///   <computation>
///   yield %1 : index
/// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
///
/// to just <computation> with %arg0 replaced by %c0. We only do this if the
/// tensor.generate operation has no side-effects.
struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
    if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
      return failure();

    BlockAndValueMapping mapping;
    Block *body = tensorFromElements.getBody();
    mapping.map(body->getArguments(), extract.indices());
    for (auto &op : body->without_terminator())
      rewriter.clone(op, mapping);

    auto yield = cast<YieldOp>(body->getTerminator());

    rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
    return success();
  }
};

/// Canonicalizes the pattern of the form
///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
/// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
///
/// to
///
/// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
    if (!tensorCast)
      return failure();

    rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
                                                   extract.indices());
    return success();
  }
};

} // namespace

void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
  // TODO: Move extract patterns to tensor::ExtractOp.
  results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
              StaticTensorGenerate>(context);
}

//===----------------------------------------------------------------------===//
// RankOp
//===----------------------------------------------------------------------===//

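/// Fold tensor.rank to a constant when the operand has a known rank. For
/// example (illustrative), the rank of a tensor<4x?xf32> folds to the index
/// constant 2 regardless of the dynamic extent.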
OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
  // Constant fold rank when the rank of the operand is known.
  auto type = getOperand().getType();
  auto shapedType = type.dyn_cast<ShapedType>();
  if (shapedType && shapedType.hasRank())
    return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
  return IntegerAttr();
}

//===----------------------------------------------------------------------===//
// ReshapeOp
//===----------------------------------------------------------------------===//

static int64_t getNumElements(ShapedType type) {
  int64_t numElements = 1;
  for (auto dim : type.getShape())
    numElements *= dim;
  return numElements;
}

LogicalResult ReshapeOp::verify() {
  TensorType operandType = source().getType().cast<TensorType>();
  TensorType resultType = result().getType().cast<TensorType>();

  if (operandType.getElementType() != resultType.getElementType())
    return emitOpError("element types of source and destination tensor "
                       "types should be the same");

  int64_t shapeSize = shape().getType().cast<RankedTensorType>().getDimSize(0);
  auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
  auto operandRankedType = operandType.dyn_cast<RankedTensorType>();

  if (resultRankedType) {
    if (operandRankedType && resultRankedType.hasStaticShape() &&
        operandRankedType.hasStaticShape()) {
      if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
        return emitOpError("source and destination tensor should have the "
                           "same number of elements");
    }
    if (ShapedType::isDynamic(shapeSize))
      return emitOpError("cannot use shape operand with dynamic length to "
                         "reshape to statically-ranked tensor type");
    if (shapeSize != resultRankedType.getRank())
      return emitOpError(
          "length of shape operand differs from the result's tensor rank");
  }
  return success();
}

//===----------------------------------------------------------------------===//
// Reassociative reshape ops
//===----------------------------------------------------------------------===//

SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

static void print(OpAsmPrinter &p, ExpandShapeOp op) {
  ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
}

static void print(OpAsmPrinter &p, CollapseShapeOp op) {
  ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
}

/// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
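/// For example (illustrative), applying the reassociation [[0, 1], [2]] to
/// tensor<4x5x6xf32> yields tensor<20x6xf32>; if any dimension within a
/// group is dynamic, the collapsed dimension is dynamic as well.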
static RankedTensorType
computeTensorReshapeCollapsedType(RankedTensorType type,
                                  ArrayRef<AffineMap> reassociation) {
  auto shape = type.getShape();
  SmallVector<int64_t, 4> newShape;
  newShape.reserve(reassociation.size());

  // Use the fact that reassociation is valid to simplify the logic: only use
  // each map's rank.
  assert(isReassociationValid(reassociation) && "invalid reassociation");
  unsigned currentDim = 0;
  for (AffineMap m : reassociation) {
    unsigned dim = m.getNumResults();
    auto band = shape.slice(currentDim, dim);
    int64_t size = 1;
    if (llvm::is_contained(band, ShapedType::kDynamicSize))
      size = ShapedType::kDynamicSize;
    else
      for (unsigned d = 0; d < dim; ++d)
        size *= shape[currentDim + d];
    newShape.push_back(size);
    currentDim += dim;
  }

  return RankedTensorType::get(newShape, type.getElementType());
}

void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
                            ArrayRef<ReassociationIndices> reassociation,
                            ArrayRef<NamedAttribute> attrs) {
  auto resultType = computeTensorReshapeCollapsedType(
      src.getType().cast<RankedTensorType>(),
      getSymbolLessAffineMaps(
          convertReassociationIndicesToExprs(b.getContext(), reassociation)));
  build(b, result, resultType, src, attrs);
  result.addAttribute(getReassociationAttrName(),
                      getReassociationIndicesAttribute(b, reassociation));
}

void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
                          ArrayRef<ReassociationIndices> reassociation,
                          ArrayRef<NamedAttribute> attrs) {
  auto resultType = computeTensorReshapeCollapsedType(
      src.getType().cast<RankedTensorType>(),
      getSymbolLessAffineMaps(
          convertReassociationIndicesToExprs(b.getContext(), reassociation)));
  build(b, result, resultType, src, attrs);
  result.addAttribute(getReassociationAttrName(),
                      getReassociationIndicesAttribute(b, reassociation));
}

template <typename TensorReshapeOp, bool isExpansion = std::is_same<
                                        TensorReshapeOp, ExpandShapeOp>::value>
static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
                                           RankedTensorType expandedType,
                                           RankedTensorType collapsedType) {
  if (failed(
          verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
    return failure();

  auto maps = op.getReassociationMaps();
  RankedTensorType expectedType =
      computeTensorReshapeCollapsedType(expandedType, maps);
  if (collapsedType != expectedType)
    return op.emitOpError("expected collapsed type to be ")
           << expectedType << ", but got " << collapsedType;
  return success();
}

LogicalResult ExpandShapeOp::verify() {
  return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
}

LogicalResult CollapseShapeOp::verify() {
  return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
}

namespace {
/// Reshape of a splat constant can be replaced with a constant of the result
/// type.
template <typename TensorReshapeOp>
struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
  using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                PatternRewriter &rewriter) const override {
    DenseElementsAttr attr;
    if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
      return failure();
    if (!attr || !attr.isSplat())
      return failure();
    DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
        reshapeOp.getResultType(), attr.getRawData(), true);
    rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
    return success();
  }
};

/// Reshape of a FromElements can be replaced with a FromElements of the result
/// type.
template <typename TensorReshapeOp>
struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
  using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                PatternRewriter &rewriter) const override {
    auto fromElements =
        reshapeOp.src().template getDefiningOp<FromElementsOp>();
    if (!fromElements)
      return failure();

    auto shapedTy = reshapeOp.getType().template cast<ShapedType>();

    if (!shapedTy.hasStaticShape())
      return failure();

    rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
                                                fromElements.elements());
    return success();
  }
};

} // namespace

void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<CollapseReshapeOps<ExpandShapeOp>,
              CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
              FoldReshapeWithConstant<ExpandShapeOp>,
              FoldReshapeWithFromElements<ExpandShapeOp>>(context);
}

void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                  MLIRContext *context) {
  results.add<CollapseReshapeOps<CollapseShapeOp>,
              CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
              FoldReshapeWithConstant<CollapseShapeOp>,
              FoldReshapeWithFromElements<CollapseShapeOp>>(context);
}

OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
}
OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
}

//===----------------------------------------------------------------------===//
// ExtractSliceOp
//===----------------------------------------------------------------------===//

/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. Special
/// sentinels encode the dynamic case.
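///
/// For example (illustrative), extracting from tensor<8x16x4xf32> with
/// static sizes [4, 4, 4] infers tensor<4x4x4xf32>; a dynamic size becomes a
/// `?` in the inferred type.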
RankedTensorType ExtractSliceOp::inferResultType(
    RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
    ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offsets/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type, and strides=1.
  unsigned rank = sourceRankedTensorType.getRank();
  (void)rank;
  assert(staticSizes.size() == rank &&
         "unexpected staticSizes not equal to rank of source");
  return RankedTensorType::get(staticSizes,
                               sourceRankedTensorType.getElementType());
}

RankedTensorType ExtractSliceOp::inferResultType(
    RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
    ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
                                         staticSizes, staticStrides);
}

/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides; unit
/// dimensions are then dropped to match the requested `resultRank`.
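///
/// For example (illustrative), with sizes [1, 16, 1] on tensor<8x16x4xf32>
/// and resultRank 1, the inferred tensor<1x16x1xf32> is rank-reduced to
/// tensor<16xf32> by dropping the unit dimensions.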
RankedTensorType ExtractSliceOp::inferRankReducedResultType(
    unsigned resultRank, RankedTensorType sourceRankedTensorType,
    ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
    ArrayRef<int64_t> strides) {
  auto inferredType =
      inferResultType(sourceRankedTensorType, offsets, sizes, strides)
          .cast<RankedTensorType>();
  int rankDiff = inferredType.getRank() - resultRank;
  if (rankDiff > 0) {
    auto shape = inferredType.getShape();
    llvm::SmallDenseSet<unsigned> dimsToProject;
    mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
    SmallVector<int64_t> projectedShape;
    for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
      if (!dimsToProject.contains(pos))
        projectedShape.push_back(shape[pos]);
    inferredType =
        RankedTensorType::get(projectedShape, inferredType.getElementType());
  }
  return inferredType;
}

RankedTensorType ExtractSliceOp::inferRankReducedResultType(
    unsigned resultRank, RankedTensorType sourceRankedTensorType,
    ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
    ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return ExtractSliceOp::inferRankReducedResultType(
      resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
      staticStrides);
}

/// Build an ExtractSliceOp with mixed static and dynamic entries and custom
/// result type. If the type passed is nullptr, it is inferred.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
                           RankedTensorType resultType, Value source,
                           ArrayRef<OpFoldResult> offsets,
                           ArrayRef<OpFoldResult> sizes,
                           ArrayRef<OpFoldResult> strides,
                           ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  // Structuring implementation this way avoids duplication between builders.
  if (!resultType) {
    resultType =
        ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
                                        staticSizes, staticStrides)
            .cast<RankedTensorType>();
  }
  build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

/// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
/// result type.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                           ArrayRef<OpFoldResult> offsets,
                           ArrayRef<OpFoldResult> sizes,
                           ArrayRef<OpFoldResult> strides,
                           ArrayRef<NamedAttribute> attrs) {
  build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
}

/// Build an ExtractSliceOp with dynamic entries and custom result type. If the
/// type passed is nullptr, it is inferred.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
                           RankedTensorType resultType, Value source,
                           ValueRange offsets, ValueRange sizes,
                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
}

/// Build an ExtractSliceOp with dynamic entries and inferred result type.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                           ValueRange offsets, ValueRange sizes,
                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
}

template <typename OpTy>
static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
                                          OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
  switch (result) {
  case SliceVerificationResult::Success:
    return success();
  case SliceVerificationResult::RankTooLarge:
    return op.emitError("expected rank to be smaller or equal to ")
           << "the other rank. ";
  case SliceVerificationResult::SizeMismatch:
    return op.emitError("expected type to be ")
           << expectedType << " or a rank-reduced version. (size mismatch) ";
  case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected element type to be ")
           << shapedType.getElementType();
  default:
    llvm_unreachable("unexpected extract_slice op verification result");
  }
}

/// Verifier for ExtractSliceOp.
LogicalResult ExtractSliceOp::verify() {
  // Verify result type against inferred type.
  auto expectedType = ExtractSliceOp::inferResultType(
      getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
  auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
  return produceSliceErrorMsg(result, *this, expectedType);
}

/// Infer the canonical type of the result of an extract_slice op. Returns a
/// type with rank `resultRank` that is either the rank of the rank-reduced
/// type, or the non-rank-reduced type.
static RankedTensorType
getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
                            ArrayRef<OpFoldResult> mixedOffsets,
                            ArrayRef<OpFoldResult> mixedSizes,
                            ArrayRef<OpFoldResult> mixedStrides) {
  auto resultType =
      ExtractSliceOp::inferRankReducedResultType(
          resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
          .cast<RankedTensorType>();
  if (resultType.getRank() != resultRank) {
    resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
                                                 mixedSizes, mixedStrides)
                     .cast<RankedTensorType>();
  }
  return resultType;
}

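/// Return the set of source dimension positions that are rank-reduced away
/// by this extract_slice op, i.e. unit-size slice dimensions that do not
/// appear in the result type.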
llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
  llvm::SmallDenseSet<unsigned> droppedDims;
  ArrayRef<int64_t> resultShape = getType().getShape();
  SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
  unsigned shapePos = 0;
  for (const auto &size : enumerate(mixedSizes)) {
    Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // A dimension is preserved if its size is not a static 1, or if the
    // corresponding result dimension is itself a static 1 (i.e. the unit
    // dimension was kept rather than rank-reduced away).
    if (!sizeVal || sizeVal.getValue() != 1 ||
        (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
      shapePos++;
      continue;
    }
    droppedDims.insert(size.index());
  }
  return droppedDims;
}

LogicalResult ExtractSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1);
  reifiedReturnShapes[0].reserve(getType().getRank());
  SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
  llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
  Location loc = getLoc();
  for (const auto &size : enumerate(mixedSizes)) {
    if (droppedDims.count(size.index()))
      continue;
    if (auto attr = size.value().dyn_cast<Attribute>()) {
      reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
          loc, attr.cast<IntegerAttr>().getInt()));
      continue;
    }
    reifiedReturnShapes[0].push_back(size.value().get<Value>());
  }
  return success();
}

namespace {
/// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
/// `canFoldIntoConsumerOp` is true.
///
/// Example:
/// ```
///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
///   tensor<3x4xf32>
/// ```
/// is rewritten into:
/// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///   tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
/// ```
class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
public:
  using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
                                PatternRewriter &rewriter) const override {
    // If any operand is constant, bail out and let the
    // OpWithOffsetSizesAndStridesConstantArgumentFolder pattern kick in.
    if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
    if (!castOp)
      return failure();

    if (!canFoldIntoConsumerOp(castOp))
      return failure();

    /// Deduce the type of the result to use for the canonicalized operation.
    RankedTensorType resultType = getCanonicalSliceResultType(
        sliceOp.getType().getRank(), sliceOp.getSourceType(),
        sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
        sliceOp.getMixedStrides());
    Value newSlice = rewriter.create<ExtractSliceOp>(
        sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
        sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
        sliceOp.static_sizes(), sliceOp.static_strides());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
                                                newSlice);
    return success();
  }
};
} // namespace

/// Return the canonical type of the result of an extract_slice op.
struct SliceReturnTypeCanonicalizer {
  RankedTensorType operator()(ExtractSliceOp op,
                              ArrayRef<OpFoldResult> mixedOffsets,
                              ArrayRef<OpFoldResult> mixedSizes,
                              ArrayRef<OpFoldResult> mixedStrides) {
    return getCanonicalSliceResultType(op.getType().getRank(),
                                       op.getSourceType(), mixedOffsets,
                                       mixedSizes, mixedStrides);
  }
};

/// A canonicalizer wrapper to replace ExtractSliceOps.
struct SliceCanonicalizer {
  void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
                  ExtractSliceOp newOp) {
    Value replacement = newOp.getResult();
    if (replacement.getType() != op.getType())
      replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
                                                    replacement);
    rewriter.replaceOp(op, replacement);
  }
};

void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                 MLIRContext *context) {
  results.add<
      OpWithOffsetSizesAndStridesConstantArgumentFolder<
          ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
      ExtractSliceOpCastFolder>(context);
}

/// Detects an identity (no-op) slice: all offsets are 0, all sizes match the
/// shape of `shapedType`, and all strides are 1.
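///
/// For example (illustrative), the following extract_slice is an identity
/// slice and can fold away:
///   %0 = tensor.extract_slice %t[0, 0] [4, 8] [1, 1]
///          : tensor<4x8xf32> to tensor<4x8xf32>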
static LogicalResult
foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
                                           ShapedType shapedType) {
  OpBuilder b(op.getContext());
  for (OpFoldResult ofr : op.getMixedOffsets())
    if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
      return failure();
  // Rank-reducing no-ops only need to inspect the leading dimensions:
  // llvm::zip is appropriate.
  auto shape = shapedType.getShape();
  for (auto it : llvm::zip(op.getMixedSizes(), shape))
    if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
      return failure();
  for (OpFoldResult ofr : op.getMixedStrides())
    if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
      return failure();
  return success();
}

/// If we have an ExtractSliceOp consuming an InsertSliceOp with the same
/// slice, we can return the InsertSliceOp's source directly.
// TODO: This only checks the immediate producer; extend to go up the
// insert/extract chain if the slices are disjoint.
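//
// For example (illustrative):
//   %0 = tensor.insert_slice %src into %dest[0, 1] [2, 3] [1, 1]
//   %1 = tensor.extract_slice %0[0, 1] [2, 3] [1, 1]
// folds %1 to %src directly, since both ops describe the same slice and
// %src already has the extracted type.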
static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
  auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();

  auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
  if (insertOp && insertOp.source().getType() == extractOp.getType() &&
      insertOp.isSameAs(extractOp, isSame))
    return insertOp.source();

  return {};
}

OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (Value slice = foldExtractAfterInsertSlice(*this))
    return slice;
  return OpFoldResult();
}

Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
    OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
  auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
                                                offsets, sizes, strides);
}

//===----------------------------------------------------------------------===//
// InsertSliceOp
//===----------------------------------------------------------------------===//

// Build an InsertSliceOp with mixed static and dynamic entries.
void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                          Value dest, ArrayRef<OpFoldResult> offsets,
                          ArrayRef<OpFoldResult> sizes,
                          ArrayRef<OpFoldResult> strides,
                          ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

1291 // Build a InsertSliceOp with dynamic entries.
1292 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1293                           Value dest, ValueRange offsets, ValueRange sizes,
1294                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1295   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1296       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1297   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1298       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1299   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1300       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1301   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1302 }
1303 
1304 /// Verifier for InsertSliceOp.
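///
/// For example, a rank-reducing insert (illustrative IR) verifies because the
/// source type matches the inferred rank-reduced slice type:
///
/// ```mlir
///   %r = tensor.insert_slice %src into %dest[0, 0] [1, 4] [1, 1]
///       : tensor<4xf32> into tensor<8x16xf32>
/// ```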
LogicalResult InsertSliceOp::verify() {
  // insert_slice is the inverse of extract_slice, so use the same type
  // inference.
  auto expectedType = ExtractSliceOp::inferRankReducedResultType(
      getSourceType().getRank(), getType(),
      extractFromI64ArrayAttr(static_offsets()),
      extractFromI64ArrayAttr(static_sizes()),
      extractFromI64ArrayAttr(static_strides()));
  auto result =
      isRankReducedType(expectedType.cast<ShapedType>(), getSourceType());
  return produceSliceErrorMsg(result, *this, expectedType);
}

/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can mutate the second InsertSliceOp's destination to point at the first
/// one's destination.
///
/// Example:
///
/// ```mlir
///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
/// ```
///
/// folds into:
///
/// ```mlir
///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
/// ```
static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
  auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();

  auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
  if (!prevInsertOp ||
      prevInsertOp.source().getType() != insertOp.source().getType() ||
      !prevInsertOp.isSameAs(insertOp, isSame))
    return failure();

  insertOp.destMutable().assign(prevInsertOp.dest());
  return success();
}

OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
      getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (succeeded(foldInsertAfterInsertSlice(*this)))
    return getResult();
  return OpFoldResult();
}

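/// The result of an insert_slice has the same shape as its destination, so
/// each result dimension is reified as a tensor.dim on the destination.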
LogicalResult InsertSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
  for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
    reifiedReturnShapes[0][dim] =
        builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
  }
  return success();
}

namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
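///
/// Example (illustrative IR; the value names are placeholders):
///
/// ```mlir
///   %c0 = arith.constant 0 : index
///   %r = tensor.insert_slice %src into %dest[%c0, %c0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```
///
/// is rewritten into the canonical form with static offsets:
///
/// ```mlir
///   %r = tensor.insert_slice %src into %dest[0, 0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```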
class InsertSliceOpConstantArgumentFolder final
    : public OpRewritePattern<InsertSliceOp> {
public:
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    // No constant operand, just return failure.
    if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
    SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
    SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
    SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
    canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);

    // Create the new op in canonical form.
    auto sourceType = ExtractSliceOp::inferRankReducedResultType(
        insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
        mixedOffsets, mixedSizes, mixedStrides);
    Value toInsert = insertSliceOp.source();
    if (sourceType != insertSliceOp.getSourceType())
      toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
                                                 sourceType, toInsert);
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
        mixedStrides);
    return success();
  }
};

/// Fold tensor.cast ops into insert_slice operations. If the source or
/// destination tensor is a tensor.cast that removes static type information,
/// the cast is folded into the insert_slice operation. E.g.:
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
/// ```
///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back to ensure that the result type does
/// not change.
struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    // If any operand is a constant index, let
    // InsertSliceOpConstantArgumentFolder canonicalize the op first.
    if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
      auto castOp = v.getDefiningOp<tensor::CastOp>();
      if (!castOp || !canFoldIntoConsumerOp(castOp))
        return llvm::None;
      return castOp.source();
    };
    Optional<Value> sourceCastSource =
        getSourceOfCastOp(insertSliceOp.source());
    Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
    if (!sourceCastSource && !destCastSource)
      return failure();

    Value replacement = rewriter.create<InsertSliceOp>(
        insertSliceOp.getLoc(),
        (sourceCastSource ? *sourceCastSource : insertSliceOp.source()),
        (destCastSource ? *destCastSource : insertSliceOp.dest()),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());

    if (replacement.getType() != insertSliceOp.getType()) {
      replacement = rewriter.create<tensor::CastOp>(
          insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
    }
    rewriter.replaceOp(insertSliceOp, replacement);
    return success();
  }
};

/// If additional static type information can be deduced from an
/// insert_slice's size operands, insert an explicit cast of the op's source
/// operand. This enables other canonicalization patterns that match
/// tensor.cast ops, such as `ForOpTensorCastFolder` in SCF.
///
/// Example:
///
/// ```mlir
///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
///       : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
///       : tensor<64x64xf32> into ...
/// ```
struct InsertSliceOpSourceCastInserter final
    : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType srcType = insertSliceOp.getSourceType();
    if (srcType.getRank() != insertSliceOp.getType().getRank())
      return failure();
    SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
                                     srcType.getShape().end());
    for (int64_t i = 0; i < srcType.getRank(); ++i) {
      if (Optional<int64_t> constInt =
              getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
        newSrcShape[i] = *constInt;
    }

    RankedTensorType newSrcType =
        RankedTensorType::get(newSrcShape, srcType.getElementType());
    if (srcType == newSrcType ||
        !preservesStaticInformation(srcType, newSrcType) ||
        !tensor::CastOp::areCastCompatible(srcType, newSrcType))
      return failure();

    // newSrcType is:
    //   1) Different from srcType.
    //   2) "More static" than srcType.
    //   3) Cast-compatible with srcType.
    // Insert the cast.
    Value cast = rewriter.create<tensor::CastOp>(
        insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, cast, insertSliceOp.dest(),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());
    return success();
  }
};
} // namespace

void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
              InsertSliceOpSourceCastInserter>(context);
}

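/// Create an InsertSliceOp writing `tensor` into all of `dest` with zero
/// offsets and unit strides; sizes are taken from `dest`'s type, reifying
/// dynamic dimensions with tensor.dim.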
Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
                                                             Location loc,
                                                             Value tensor,
                                                             Value dest) {
  auto rankedTensorType = dest.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
                                               sizes, strides);
}

//===----------------------------------------------------------------------===//
// PadOp
//===----------------------------------------------------------------------===//

// TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
// supports optional types.
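// Nothing is printed for the custom<InferType> directive; at parse time, the
// type is inferred from `typeToInferFrom` when the optional operand is
// present.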
void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
                    Type typeToInfer, Type typeToInferFrom) {}

ParseResult parseInferType(OpAsmParser &parser,
                           Optional<OpAsmParser::OperandType> optOperand,
                           Type &typeToInfer, Type typeToInferFrom) {
  if (optOperand)
    typeToInfer = typeToInferFrom;
  return success();
}

LogicalResult PadOp::verify() {
  auto sourceType = source().getType().cast<RankedTensorType>();
  auto resultType = result().getType().cast<RankedTensorType>();
  auto expectedType =
      PadOp::inferResultType(sourceType, extractFromI64ArrayAttr(static_low()),
                             extractFromI64ArrayAttr(static_high()));
  for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
    if (resultType.getDimSize(i) == expectedType.getDimSize(i))
      continue;
    if (expectedType.isDynamicDim(i))
      continue;
    return emitError("specified type ")
           << resultType << " does not match the inferred type "
           << expectedType;
  }

  auto &region = getRegion();
  unsigned rank = resultType.getRank();
  Block &block = region.front();
  if (block.getNumArguments() != rank)
    return emitError("expected the block to have ") << rank << " arguments";

  // Note: the number and type of yield values are checked in the YieldOp.
  for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
    if (!en.value().isIndex())
      return emitOpError("expected block argument ")
             << (en.index() + 1) << " to be an index";
  }

  // Ensure that the region yields an element of the right type.
  auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
  if (yieldOp.value().getType() !=
      getType().cast<ShapedType>().getElementType())
    return emitOpError("expected yield type to match shape element type");

  return success();
}

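/// Infer the result type of a pad from the source type and static paddings.
/// For example (illustrative), a tensor<4x?xf32> source with staticLow =
/// [1, 1] and staticHigh = [2, 1] infers tensor<7x?xf32>; a dynamic padding
/// amount makes the corresponding result dimension dynamic.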
RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
                                        ArrayRef<int64_t> staticLow,
                                        ArrayRef<int64_t> staticHigh,
                                        ArrayRef<int64_t> resultShape) {
  unsigned rank = sourceType.getRank();
  assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
  assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
  assert((resultShape.empty() || resultShape.size() == rank) &&
         "unexpected resultShape size mismatch");

  SmallVector<int64_t, 4> inferredShape;
  for (auto i : llvm::seq<unsigned>(0, rank)) {
    if (sourceType.isDynamicDim(i) ||
        staticLow[i] == ShapedType::kDynamicSize ||
        staticHigh[i] == ShapedType::kDynamicSize) {
      inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
                                                  : resultShape[i]);
    } else {
      int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
      assert((resultShape.empty() || size == resultShape[i] ||
              resultShape[i] == ShapedType::kDynamicSize) &&
             "mismatch between inferred shape and result shape");
      inferredShape.push_back(size);
    }
  }

  return RankedTensorType::get(inferredShape, sourceType.getElementType());
}

void PadOp::build(OpBuilder &b, OperationState &result, Value source,
                  ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
                  ValueRange low, ValueRange high, bool nofold,
                  ArrayRef<NamedAttribute> attrs) {
  auto sourceType = source.getType().cast<RankedTensorType>();
  auto resultType = inferResultType(sourceType, staticLow, staticHigh);
  build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
        b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
  result.addAttributes(attrs);
}

void PadOp::build(OpBuilder &b, OperationState &result, Value source,
                  ValueRange low, ValueRange high, bool nofold,
                  ArrayRef<NamedAttribute> attrs) {
  auto sourceType = source.getType().cast<RankedTensorType>();
  unsigned rank = sourceType.getRank();
  SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
  build(b, result, source, staticVector, staticVector, low, high, nofold,
        attrs);
}

void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
                  Value source, ArrayRef<OpFoldResult> low,
                  ArrayRef<OpFoldResult> high, bool nofold,
                  ArrayRef<NamedAttribute> attrs) {
  assert(resultType.isa<RankedTensorType>());
  auto sourceType = source.getType().cast<RankedTensorType>();
  SmallVector<Value, 4> dynamicLow, dynamicHigh;
  SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh have full information of the padding config.
  // Each of them grows by one value per padding entry. If an entry is dynamic
  // (i.e., not a constant), dynamicLow or dynamicHigh grows by one value as
  // well.
  dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
                             ShapedType::kDynamicSize);
  if (!resultType) {
    resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
  }
  build(b, result, resultType, source, dynamicLow, dynamicHigh,
        b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
        nofold ? b.getUnitAttr() : UnitAttr());
  result.addAttributes(attrs);
}

namespace {
// Folds tensor.pad when the low and high paddings are all static zeros and
// the nofold attribute doesn't request otherwise.
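//
// Example (illustrative IR; the value names are placeholders):
//
// ```mlir
//   %0 = tensor.pad %input low[0, 0] high[0, 0] {
//   ^bb0(%i: index, %j: index):
//     tensor.yield %cst : f32
//   } : tensor<4x4xf32> to tensor<4x4xf32>
// ```
//
// is replaced by a (here trivial) tensor.cast of %input.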
struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
  using OpRewritePattern<PadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadOp padTensorOp,
                                PatternRewriter &rewriter) const override {
    if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
      return failure();
    if (padTensorOp.nofold())
      return failure();
    rewriter.replaceOpWithNewOp<tensor::CastOp>(
        padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
    return success();
  }
};

// Fold a source tensor.cast into tensor.pad when the cast adds static
// information.
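//
// Example (illustrative IR; the value names are placeholders):
//
// ```mlir
//   %0 = tensor.cast %a : tensor<4x4xf32> to tensor<?x?xf32>
//   %1 = tensor.pad %0 low[1, 1] high[1, 1] { ... }
//       : tensor<?x?xf32> to tensor<?x?xf32>
// ```
//
// becomes a pad of %a with inferred type tensor<6x6xf32>, followed by a
// tensor.cast back to tensor<?x?xf32>.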
struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
  using OpRewritePattern<PadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadOp padTensorOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
    if (!tensor::canFoldIntoConsumerOp(castOp))
      return failure();

    auto newResultType = PadOp::inferResultType(
        castOp.source().getType().cast<RankedTensorType>(),
        extractFromI64ArrayAttr(padTensorOp.static_low()),
        extractFromI64ArrayAttr(padTensorOp.static_high()),
        padTensorOp.getResultType().getShape());

    if (newResultType == padTensorOp.getResultType()) {
      rewriter.updateRootInPlace(padTensorOp, [&]() {
        padTensorOp.sourceMutable().assign(castOp.source());
      });
    } else {
      auto newOp = rewriter.create<PadOp>(
          padTensorOp->getLoc(), newResultType, padTensorOp.source(),
          padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
          padTensorOp.static_high(), padTensorOp.nofold());
      BlockAndValueMapping mapper;
      padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);

      rewriter.replaceOpWithNewOp<tensor::CastOp>(
          padTensorOp, padTensorOp.getResultType(), newOp);
    }
    return success();
  }
};

// Fold a tensor.cast consuming the result of a tensor.pad back into the pad
// op if the cast adds static information.
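//
// Example (illustrative IR; the value names are placeholders):
//
// ```mlir
//   %0 = tensor.pad %src low[1, 1] high[1, 1] { ... }
//       : tensor<?x?xf32> to tensor<?x?xf32>
//   %1 = tensor.cast %0 : tensor<?x?xf32> to tensor<8x8xf32>
// ```
//
// becomes a single tensor.pad producing tensor<8x8xf32> directly.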
struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
  using OpRewritePattern<PadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadOp padTensorOp,
                                PatternRewriter &rewriter) const override {
    if (!padTensorOp.result().hasOneUse())
      return failure();
    auto tensorCastOp =
        dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
    if (!tensorCastOp)
      return failure();
    if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
                                            tensorCastOp.dest().getType()))
      return failure();

    auto replacementOp = rewriter.create<PadOp>(
        padTensorOp.getLoc(), tensorCastOp.dest().getType(),
        padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
        padTensorOp.static_low(), padTensorOp.static_high(),
        padTensorOp.nofold());
    replacementOp.region().takeBody(padTensorOp.region());

    rewriter.replaceOp(padTensorOp, replacementOp.result());
    rewriter.replaceOp(tensorCastOp, replacementOp.result());
    return success();
  }
};
} // namespace

void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results
      .add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast>(
          context);
}

/// Return the padding value of the PadOp if it is constant. In this context,
/// "constant" means an actual constant or "defined outside of the block".
///
/// Values are considered constant in three cases:
///  - A ConstantLike value.
///  - A basic block argument from a different block.
///  - A value defined outside of the block.
///
/// If the padding value is not constant, an empty Value is returned.
Value PadOp::getConstantPaddingValue() {
  auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
  if (!yieldOp)
    return {};
  Value padValue = yieldOp.value();
  // Check if yield value is a constant.
  if (matchPattern(padValue, m_Constant()))
    return padValue;
  // Check if yield value is defined inside the PadOp block.
  if (padValue.getParentBlock() == &getRegion().front())
    return {};
  // Else: Yield value defined outside of the PadOp block.
  return padValue;
}

OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
  if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
      !nofold())
    return source();
  return {};
}

//===----------------------------------------------------------------------===//
// SplatOp
//===----------------------------------------------------------------------===//

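/// Fold a splat of a constant scalar into a splat elements attribute; e.g.
/// (illustrative) splatting a constant 1.0 : f32 into tensor<4xf32> yields
/// the attribute dense<1.0> : tensor<4xf32>.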
OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
  auto constOperand = operands.front();
  if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
    return {};

  // SplatElementsAttr::get treats a single value for the second arg as a
  // splat.
  return SplatElementsAttr::get(getType(), {constOperand});
}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"