//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Arithmetic/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
using namespace mlir;
using namespace mlir::tensor;

/// Materialize a single constant operation from a given attribute value with
/// the desired resultant type.
Operation *TensorDialect::materializeConstant(OpBuilder &builder,
                                              Attribute value, Type type,
                                              Location loc) {
  if (arith::ConstantOp::isBuildableWith(value, type))
    return builder.create<arith::ConstantOp>(loc, value, type);
  if (complex::ConstantOp::isBuildableWith(value, type))
    return builder.create<complex::ConstantOp>(loc, type,
                                               value.cast<ArrayAttr>());
  return nullptr;
}

//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//
/// Returns true if `target` is a ranked tensor type that preserves static
/// information available in the `source` ranked tensor type.
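///
/// For example (a sketch of the semantics; both types must also share rank
/// and element type):
///   preservesStaticInformation(tensor<?xf32>, tensor<8xf32>) // true
///   preservesStaticInformation(tensor<8xf32>, tensor<?xf32>) // false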
bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
  auto sourceType = source.dyn_cast<RankedTensorType>();
  auto targetType = target.dyn_cast<RankedTensorType>();

  // Requires RankedTensorType.
  if (!sourceType || !targetType)
    return false;
  // Requires the same element type.
  if (sourceType.getElementType() != targetType.getElementType())
    return false;

  // Requires same rank.
  if (sourceType.getRank() != targetType.getRank())
    return false;
  // A dimension that is static in `source` must remain static in `target`;
  // otherwise static information is lost.
  for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
    if (!ShapedType::isDynamic(std::get<0>(t)) &&
        ShapedType::isDynamic(std::get<1>(t)))
      return false;
  }

  return true;
}

/// Determines whether tensor::CastOp casts to a more dynamic version of the
/// source tensor. This is useful to fold a tensor.cast into a consuming op
/// and implement canonicalization patterns for ops in different dialects that
/// may consume the results of tensor.cast operations. Such foldable
/// tensor.cast operations are typically inserted when canonicalizing `slice`
/// ops, to preserve the type compatibility of their uses.
///
/// Returns true when all of the following conditions are met:
/// 1. source and result are ranked tensors with the same element type and
///    rank;
/// 2. the source type has at least as much static information as the result
///    type.
///
/// Example:
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = consumer %1 ... : tensor<?x?xf32> ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = consumer %0 ... : tensor<8x16xf32> ...
/// ```
bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
  if (!castOp)
    return false;

  // Can fold if the source of cast has at least as much static information as
  // its results.
  return preservesStaticInformation(castOp.getType(),
                                    castOp.source().getType());
}
/// Performs folding of any operand of `op` if it comes from a tensor::CastOp
/// that can be folded.
LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
  bool folded = false;
  for (OpOperand &operand : op->getOpOperands()) {
    auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
    if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
      operand.set(castOp.getOperand());
      folded = true;
    }
  }
  return success(folded);
}

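/// Two tensor types are cast-compatible when they have the same element type
/// and compatible shapes, i.e. they agree on every statically known
/// dimension. For example, tensor<8x?xf32> and tensor<?x4xf32> are
/// cast-compatible, while tensor<8xf32> and tensor<4xf32> are not.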
bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
  if (inputs.size() != 1 || outputs.size() != 1)
    return false;
  Type a = inputs.front(), b = outputs.front();
  auto aT = a.dyn_cast<TensorType>();
  auto bT = b.dyn_cast<TensorType>();
  if (!aT || !bT)
    return false;

  if (aT.getElementType() != bT.getElementType())
    return false;

  return succeeded(verifyCompatibleShape(aT, bT));
}

/// Compute a TensorType that has the joined shape knowledge of the two
/// given TensorTypes. The element types need to match.
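///
/// For example (a sketch, writing the null type as `{}`):
///   joinShapes(tensor<?x8xf32>, tensor<4x?xf32>) == tensor<4x8xf32>
///   joinShapes(tensor<4xf32>, tensor<8xf32>) == {} // conflicting dims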
static TensorType joinShapes(TensorType one, TensorType two) {
  assert(one.getElementType() == two.getElementType());

  if (!one.hasRank())
    return two;
  if (!two.hasRank())
    return one;

  int64_t rank = one.getRank();
  if (rank != two.getRank())
    return {};

  SmallVector<int64_t, 4> join;
  join.reserve(rank);
  for (int64_t i = 0; i < rank; ++i) {
    if (one.isDynamicDim(i)) {
      join.push_back(two.getDimSize(i));
      continue;
    }
    if (two.isDynamicDim(i)) {
      join.push_back(one.getDimSize(i));
      continue;
    }
    if (one.getDimSize(i) != two.getDimSize(i))
      return {};
    join.push_back(one.getDimSize(i));
  }
  return RankedTensorType::get(join, one.getElementType());
}

namespace {

/// Replaces chains of two tensor.cast operations by a single tensor.cast
/// operation if doing so does not remove runtime constraints.
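///
/// Example:
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4x8xf32> to tensor<?x8xf32>
///   %2 = tensor.cast %1 : tensor<?x8xf32> to tensor<?x?xf32>
/// ```
/// folds to a single `tensor.cast %0 : tensor<4x8xf32> to tensor<?x?xf32>`,
/// since dropping the intermediate type loses no runtime check.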
struct ChainedTensorCast : public OpRewritePattern<CastOp> {
  using OpRewritePattern<CastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CastOp tensorCast,
                                PatternRewriter &rewriter) const final {
    auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();

    if (!tensorCastOperand)
      return failure();

    auto sourceType =
        tensorCastOperand.getOperand().getType().cast<TensorType>();
    auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
    auto resultType = tensorCast.getType().cast<TensorType>();

    // We can remove the intermediate cast if joining all three produces the
    // same result as just joining the source and result shapes.
    auto firstJoin =
        joinShapes(joinShapes(sourceType, intermediateType), resultType);

    // The join might not exist if the cast sequence would fail at runtime.
    if (!firstJoin)
      return failure();
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
    auto newJoin = joinShapes(sourceType, resultType);
    if (firstJoin != newJoin)
      return failure();

    rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
                                        tensorCastOperand.getOperand());
    return success();
  }
};

} // namespace

void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
  results.add<ChainedTensorCast>(context);
}

//===----------------------------------------------------------------------===//
// DimOp
//===----------------------------------------------------------------------===//

void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                  int64_t index) {
  auto loc = result.location;
  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
  build(builder, result, source, indexValue);
}

Optional<int64_t> DimOp::getConstantIndex() {
  if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
    return constantOp.getValue().cast<IntegerAttr>().getInt();
  return {};
}

LogicalResult DimOp::verify() {
  // Assume unknown index to be in range.
  Optional<int64_t> index = getConstantIndex();
  if (!index.hasValue())
    return success();

  // Check that the constant index is not known to be out of range.
  auto type = source().getType();
  if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
    if (index.getValue() >= tensorType.getRank())
      return emitOpError("index is out of range");
  } else if (type.isa<UnrankedTensorType>()) {
    // Assume index to be in range.
  } else {
    llvm_unreachable("expected operand with tensor type");
  }
  return success();
}

OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  // All forms of folding require a known index.
  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
  if (!index)
    return {};

  // Folding for unranked types (UnrankedTensorType) is not supported.
  auto tensorType = source().getType().dyn_cast<RankedTensorType>();
  if (!tensorType)
    return {};

  // Fold if the shape extent along the given index is known.
  if (!tensorType.isDynamicDim(index.getInt())) {
    Builder builder(getContext());
    return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
  }

  Operation *definingOp = source().getDefiningOp();

  // Fold dim to the operand of tensor.generate.
  if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
    auto resultType =
        fromElements.getResult().getType().cast<RankedTensorType>();
    // The case where the type encodes the size of the dimension is handled
    // above.
    assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));

    // Find the operand of the fromElements that corresponds to this index.
    auto dynExtents = fromElements.dynamicExtents().begin();
    for (auto dim : resultType.getShape().take_front(index.getInt()))
      if (ShapedType::isDynamic(dim))
        dynExtents++;

    return Value{*dynExtents};
  }

  // The size at the given index is now known to be a dynamic size.
  unsigned unsignedIndex = index.getValue().getZExtValue();

  if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
    // Fold only for non-rank-reduced ops. For the rank-reduced version, rely
    // on the `resolve-shaped-type-result-dims` pass.
    if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
        sliceOp.isDynamicSize(unsignedIndex)) {
      return {sliceOp.getDynamicSize(unsignedIndex)};
    }
  }

  // dim(cast) -> dim
  if (succeeded(foldTensorCast(*this)))
    return getResult();

  return {};
}

namespace {
/// Fold dim of a cast into the dim of the source of the tensor cast.
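///
/// Example:
/// ```mlir
///   %0 = tensor.cast %t : tensor<4x8xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %c0 : tensor<?x?xf32>
/// ```
/// becomes `%d = tensor.dim %t, %c0 : tensor<4x8xf32>`.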
struct DimOfCastOp : public OpRewritePattern<DimOp> {
  using OpRewritePattern<DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = dimOp.source().getDefiningOp<CastOp>();
    if (!castOp)
      return failure();
    Value newSource = castOp.getOperand();
    rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
    return success();
  }
};
} // namespace

void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<DimOfCastOp>(context);
}

//===----------------------------------------------------------------------===//
// ExtractOp
//===----------------------------------------------------------------------===//

LogicalResult ExtractOp::verify() {
  // Verify the # indices match if we have a ranked type.
  if (auto tensorType = tensor().getType().dyn_cast<RankedTensorType>())
    if (tensorType.getRank() != static_cast<int64_t>(indices().size()))
      return emitOpError("incorrect number of indices for extract_element");

  return success();
}

OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
  // The tensor operand must be a known constant.
  Attribute tensor = operands.front();
  if (!tensor)
    return {};
  // If this is a splat elements attribute, simply return the value. All of the
  // elements of a splat attribute are the same.
  if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
    return splatTensor.getSplatValue<Attribute>();

  // Otherwise, collect the constant indices into the tensor.
  SmallVector<uint64_t, 8> indices;
  for (Attribute indice : llvm::drop_begin(operands, 1)) {
    if (!indice || !indice.isa<IntegerAttr>())
      return {};
    indices.push_back(indice.cast<IntegerAttr>().getInt());
  }

  // If this is an elements attribute, query the value at the given indices.
  auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
  if (elementsAttr && elementsAttr.isValidIndex(indices))
    return elementsAttr.getValues<Attribute>()[indices];
  return {};
}

//===----------------------------------------------------------------------===//
// FromElementsOp
//===----------------------------------------------------------------------===//

void FromElementsOp::build(OpBuilder &builder, OperationState &result,
                           Type resultType, ValueRange elements) {
  result.addOperands(elements);
  result.addTypes(resultType);
}

void FromElementsOp::build(OpBuilder &builder, OperationState &result,
                           ValueRange elements) {
  assert(!elements.empty() && "expected at least one element");
  Type resultType = RankedTensorType::get(
      {static_cast<int64_t>(elements.size())}, elements.front().getType());
  build(builder, result, resultType, elements);
}

OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
  if (!llvm::is_contained(operands, nullptr))
    return DenseElementsAttr::get(getType(), operands);
  return {};
}

namespace {

// Canonicalizes the pattern of the form
//
// %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
// %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
//
// to just %element.
struct ExtractElementFromTensorFromElements
    : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
    if (!tensorFromElements)
      return failure();
    auto tensorType = tensorFromElements.getType().cast<RankedTensorType>();
    auto rank = tensorType.getRank();
    if (rank == 0) {
      rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
      return success();
    }
    SmallVector<APInt, 3> indices(rank);
    int64_t flatIndex = 0;
    int64_t stride = 1;
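    // from_elements stores its operands in row-major order, so the flattened
    // index is sum_i(index_i * prod_{j>i} dim_j). For example (assuming all
    // indices are constant), tensor<2x3xi32> at [1, 2] addresses operand
    // 1 * 3 + 2 == 5.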
    for (int i = rank - 1; i >= 0; --i) {
      APInt index;
      if (!matchPattern(extract.indices()[i], m_ConstantInt(&index)))
        return failure();
      if (i < rank - 1)
        stride *= tensorType.getDimSize(i + 1);
      flatIndex += index.getSExtValue() * stride;
    }
    // Prevent out of bounds accesses. This can happen in invalid code that
    // will never execute.
    if (tensorFromElements->getNumOperands() <= flatIndex || flatIndex < 0)
      return failure();
    rewriter.replaceOp(extract, tensorFromElements.getOperand(flatIndex));
    return success();
  }
};

// Pushes the index_casts that occur before extractions to after the extract.
// This minimizes type conversion in some cases and enables the extract
// canonicalizer. This changes:
//
// %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
// %extract = tensor.extract %cast[%index] : tensor<1xindex>
//
// to the following:
//
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template and handle all tensor cast operations.
struct ExtractElementFromIndexCast
    : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    Location loc = extract.getLoc();
    auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
    if (!indexCast)
      return failure();

    Type elementTy = getElementTypeOrSelf(indexCast.getIn());

    auto newExtract = rewriter.create<tensor::ExtractOp>(
        loc, elementTy, indexCast.getIn(), extract.indices());

    rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
                                                    newExtract);

    return success();
  }
};

} // namespace

void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                 MLIRContext *context) {
  results
      .add<ExtractElementFromIndexCast, ExtractElementFromTensorFromElements>(
          context);
}

//===----------------------------------------------------------------------===//
// InsertOp
//===----------------------------------------------------------------------===//

LogicalResult InsertOp::verify() {
  // Verify the # indices match if we have a ranked type.
  if (auto destType = dest().getType().dyn_cast<RankedTensorType>())
    if (destType.getRank() != static_cast<int64_t>(indices().size()))
      return emitOpError("incorrect number of indices");
  return success();
}

OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
  Attribute scalar = operands[0];
  Attribute dest = operands[1];
  if (scalar && dest)
    if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
      if (scalar == splatDest.getSplatValue<Attribute>())
        return dest;
  return {};
}

//===----------------------------------------------------------------------===//
// GenerateOp
//===----------------------------------------------------------------------===//

LogicalResult GenerateOp::verify() {
  // Ensure that the tensor type has as many dynamic dimensions as are
  // specified by the operands.
  RankedTensorType resultTy = getType().cast<RankedTensorType>();
  if (getNumOperands() != resultTy.getNumDynamicDims())
    return emitError("must have as many index operands as dynamic extents "
                     "in the result type");

  // Ensure that region arguments span the index space.
  if (!llvm::all_of(body().getArgumentTypes(),
                    [](Type ty) { return ty.isIndex(); }))
    return emitError("all body arguments must be index");
  if (body().getNumArguments() != resultTy.getRank())
    return emitError("must have one body argument per input dimension");

  // Ensure that the region yields an element of the right type.
  auto yieldOp = cast<YieldOp>(body().getBlocks().front().getTerminator());

  if (yieldOp.value().getType() != resultTy.getElementType())
    return emitOpError(
        "body must be terminated with a `yield` operation of the tensor "
        "element type");

  return success();
}

void GenerateOp::build(
    OpBuilder &b, OperationState &result, Type resultTy,
    ValueRange dynamicExtents,
    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
  build(b, result, resultTy, dynamicExtents);

  // Build and populate body.
  OpBuilder::InsertionGuard guard(b);
  Region *bodyRegion = result.regions.front().get();
  auto rank = resultTy.cast<RankedTensorType>().getRank();
  SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
  SmallVector<Location, 2> argumentLocs(rank, result.location);
  Block *bodyBlock =
      b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
  bodyBuilder(b, result.location, bodyBlock->getArguments());
}

namespace {

/// Canonicalizes tensor.generate operations whose dynamic extents are
/// constants into the equivalent operation with those extents folded into the
/// result type. A tensor.cast back to the original type is inserted so that
/// the resulting IR remains well-typed.
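///
/// Example:
/// ```mlir
///   %c5 = arith.constant 5 : index
///   %0 = tensor.generate %c5 { ... } : tensor<?xindex>
/// ```
/// becomes a tensor.generate with result type tensor<5xindex>, followed by a
/// tensor.cast back to tensor<?xindex>.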
struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
  using OpRewritePattern<GenerateOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
                                PatternRewriter &rewriter) const final {
    auto resultType =
        tensorFromElements.getResult().getType().cast<RankedTensorType>();

    if (resultType.hasStaticShape())
      return failure();

    SmallVector<Value, 4> newOperands;
    SmallVector<int64_t, 4> newShape;
    auto operandsIt = tensorFromElements.dynamicExtents().begin();

    for (int64_t dim : resultType.getShape()) {
      if (!ShapedType::isDynamic(dim)) {
        newShape.push_back(dim);
        continue;
      }
      APInt index;
      if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
        newShape.push_back(ShapedType::kDynamicSize);
        newOperands.push_back(*operandsIt++);
        continue;
      }
      newShape.push_back(index.getSExtValue());
      operandsIt++;
    }

    if (newOperands.size() == tensorFromElements.dynamicExtents().size())
      return failure();

    auto loc = tensorFromElements.getLoc();
    auto newOp = rewriter.create<GenerateOp>(
        loc, RankedTensorType::get(newShape, resultType.getElementType()),
        newOperands);
    rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
                                newOp.body().begin());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
                                                newOp);
    return success();
  }
};

/// Canonicalizes the pattern of the form
///
/// %tensor = tensor.generate %x {
///   ^bb0(%arg0: index):
///   <computation>
///   yield %1 : index
/// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
///
/// to just <computation> with %arg0 replaced by %c0. We only do this if the
/// tensor.generate operation has no side-effects.
struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
    if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
      return failure();

    BlockAndValueMapping mapping;
    Block *body = tensorFromElements.getBody();
    mapping.map(body->getArguments(), extract.indices());
    for (auto &op : body->without_terminator())
      rewriter.clone(op, mapping);

    auto yield = cast<YieldOp>(body->getTerminator());

    rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
    return success();
  }
};

/// Canonicalizes the pattern of the form
///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
/// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
///
/// to
///
/// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
    if (!tensorCast)
      return failure();

    rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
                                                   extract.indices());
    return success();
  }
};

} // namespace

void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
  // TODO: Move extract patterns to tensor::ExtractOp.
  results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
              StaticTensorGenerate>(context);
}

//===----------------------------------------------------------------------===//
// RankOp
//===----------------------------------------------------------------------===//

OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
  // Constant fold rank when the rank of the operand is known.
  auto type = getOperand().getType();
  auto shapedType = type.dyn_cast<ShapedType>();
  if (shapedType && shapedType.hasRank())
    return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
  return IntegerAttr();
}

//===----------------------------------------------------------------------===//
// ReshapeOp
//===----------------------------------------------------------------------===//

static int64_t getNumElements(ShapedType type) {
  int64_t numElements = 1;
  for (auto dim : type.getShape())
    numElements *= dim;
  return numElements;
}

LogicalResult ReshapeOp::verify() {
  TensorType operandType = source().getType().cast<TensorType>();
  TensorType resultType = result().getType().cast<TensorType>();

  if (operandType.getElementType() != resultType.getElementType())
    return emitOpError("element types of source and destination tensor "
                       "types should be the same");

  int64_t shapeSize = shape().getType().cast<RankedTensorType>().getDimSize(0);
  auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
  auto operandRankedType = operandType.dyn_cast<RankedTensorType>();

  if (resultRankedType) {
    if (operandRankedType && resultRankedType.hasStaticShape() &&
        operandRankedType.hasStaticShape()) {
      if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
        return emitOpError("source and destination tensor should have the "
                           "same number of elements");
    }
    if (ShapedType::isDynamic(shapeSize))
      return emitOpError("cannot use shape operand with dynamic length to "
                         "reshape to statically-ranked tensor type");
    if (shapeSize != resultRankedType.getRank())
      return emitOpError(
          "length of shape operand differs from the result's tensor rank");
  }
  return success();
}

//===----------------------------------------------------------------------===//
// Reassociative reshape ops
//===----------------------------------------------------------------------===//

SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

ParseResult ExpandShapeOp::parse(OpAsmParser &parser, OperationState &result) {
  return parseReshapeLikeOp(parser, result);
}
void ExpandShapeOp::print(OpAsmPrinter &p) { printReshapeOp(p, *this); }

ParseResult CollapseShapeOp::parse(OpAsmParser &parser,
                                   OperationState &result) {
  return parseReshapeLikeOp(parser, result);
}
void CollapseShapeOp::print(OpAsmPrinter &p) { printReshapeOp(p, *this); }

/// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
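///
/// For example, the reassociation [[0, 1], [2]] applied to tensor<4x5x6xf32>
/// yields tensor<20x6xf32>; any dynamic size within a group makes the whole
/// collapsed size dynamic, e.g. tensor<4x?x6xf32> yields tensor<?x6xf32>.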
static RankedTensorType
computeTensorReshapeCollapsedType(RankedTensorType type,
                                  ArrayRef<AffineMap> reassociation) {
  auto shape = type.getShape();
  SmallVector<int64_t, 4> newShape;
  newShape.reserve(reassociation.size());

  // Use the fact that reassociation is valid to simplify the logic: only use
  // each map's rank.
  assert(isReassociationValid(reassociation) && "invalid reassociation");
  unsigned currentDim = 0;
  for (AffineMap m : reassociation) {
    unsigned dim = m.getNumResults();
    auto band = shape.slice(currentDim, dim);
    int64_t size = 1;
    if (llvm::is_contained(band, ShapedType::kDynamicSize))
      size = ShapedType::kDynamicSize;
    else
      for (unsigned d = 0; d < dim; ++d)
        size *= shape[currentDim + d];
    newShape.push_back(size);
    currentDim += dim;
  }

  return RankedTensorType::get(newShape, type.getElementType());
}

void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
                            ArrayRef<ReassociationIndices> reassociation,
                            ArrayRef<NamedAttribute> attrs) {
  auto resultType = computeTensorReshapeCollapsedType(
      src.getType().cast<RankedTensorType>(),
      getSymbolLessAffineMaps(
          convertReassociationIndicesToExprs(b.getContext(), reassociation)));
  build(b, result, resultType, src, attrs);
  result.addAttribute(getReassociationAttrName(),
                      getReassociationIndicesAttribute(b, reassociation));
}

void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
                          ArrayRef<ReassociationIndices> reassociation,
                          ArrayRef<NamedAttribute> attrs) {
  auto resultType = computeTensorReshapeCollapsedType(
      src.getType().cast<RankedTensorType>(),
      getSymbolLessAffineMaps(
          convertReassociationIndicesToExprs(b.getContext(), reassociation)));
  build(b, result, resultType, src, attrs);
  result.addAttribute(getReassociationAttrName(),
                      getReassociationIndicesAttribute(b, reassociation));
}

template <typename TensorReshapeOp, bool isExpansion = std::is_same<
                                        TensorReshapeOp, ExpandShapeOp>::value>
static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
                                           RankedTensorType expandedType,
                                           RankedTensorType collapsedType) {
  if (failed(
          verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
    return failure();

  auto maps = op.getReassociationMaps();
  RankedTensorType expectedType =
      computeTensorReshapeCollapsedType(expandedType, maps);
  if (collapsedType != expectedType)
    return op.emitOpError("expected collapsed type to be ")
           << expectedType << ", but got " << collapsedType;
  return success();
}

LogicalResult ExpandShapeOp::verify() {
  return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
}

LogicalResult CollapseShapeOp::verify() {
  return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
}

namespace {
/// Reshape of a splat constant can be replaced with a constant of the result
/// type.
template <typename TensorReshapeOp>
struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
  using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                PatternRewriter &rewriter) const override {
    DenseElementsAttr attr;
    if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
      return failure();
    if (!attr || !attr.isSplat())
      return failure();
    DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
        reshapeOp.getResultType(), attr.getRawData(), true);
    rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
    return success();
  }
};

/// Reshape of a FromElements can be replaced with a FromElements of the
/// result type.
template <typename TensorReshapeOp>
struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
  using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                PatternRewriter &rewriter) const override {
    auto fromElements =
        reshapeOp.src().template getDefiningOp<FromElementsOp>();
    if (!fromElements)
      return failure();

    auto shapedTy = reshapeOp.getType().template cast<ShapedType>();

    if (!shapedTy.hasStaticShape())
      return failure();

    rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
                                                fromElements.elements());
    return success();
  }
};

} // namespace

void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<CollapseReshapeOps<ExpandShapeOp>,
              CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
              FoldReshapeWithConstant<ExpandShapeOp>,
              FoldReshapeWithFromElements<ExpandShapeOp>>(context);
}

void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                  MLIRContext *context) {
  results.add<CollapseReshapeOps<CollapseShapeOp>,
              CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
              FoldReshapeWithConstant<CollapseShapeOp>,
              FoldReshapeWithFromElements<CollapseShapeOp>>(context);
}

OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
}
OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
}

//===----------------------------------------------------------------------===//
// ExtractSliceOp
//===----------------------------------------------------------------------===//

/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. Special
/// sentinels encode the dynamic case.
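///
/// For example (dynamic entries use the ShapedType::kDynamicSize sentinel),
/// sizes [4, kDynamicSize] on a tensor<8x16xf32> source infer the type
/// tensor<4x?xf32>, regardless of the offsets and strides.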
RankedTensorType ExtractSliceOp::inferResultType(
    RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
    ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offset/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type and strides=1.
  unsigned rank = sourceRankedTensorType.getRank();
  (void)rank;
  assert(staticSizes.size() == rank &&
         "unexpected staticSizes not equal to rank of source");
  return RankedTensorType::get(staticSizes,
                               sourceRankedTensorType.getElementType());
}

RankedTensorType ExtractSliceOp::inferResultType(
    RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
    ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
                                         staticSizes, staticStrides);
}

/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. Special
/// sentinels encode the dynamic case. This rank-reduced variant additionally
/// drops unit dimensions until the inferred type reaches `resultRank`.
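///
/// For example, with resultRank = 1, sizes [1, 4] on a tensor<8x16xf32>
/// source infer tensor<4xf32>: the inferred tensor<1x4xf32> has its leading
/// unit dimension projected away.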
RankedTensorType ExtractSliceOp::inferRankReducedResultType(
    unsigned resultRank, RankedTensorType sourceRankedTensorType,
    ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
    ArrayRef<int64_t> strides) {
  auto inferredType =
      inferResultType(sourceRankedTensorType, offsets, sizes, strides)
          .cast<RankedTensorType>();
  int rankDiff = inferredType.getRank() - resultRank;
  if (rankDiff > 0) {
    auto shape = inferredType.getShape();
    llvm::SmallBitVector dimsToProject =
        getPositionsOfShapeOne(rankDiff, shape);
    SmallVector<int64_t> projectedShape;
    for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
      if (!dimsToProject.test(pos))
        projectedShape.push_back(shape[pos]);
    inferredType =
        RankedTensorType::get(projectedShape, inferredType.getElementType());
  }
  return inferredType;
}

RankedTensorType ExtractSliceOp::inferRankReducedResultType(
    unsigned resultRank, RankedTensorType sourceRankedTensorType,
    ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
    ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return ExtractSliceOp::inferRankReducedResultType(
      resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
      staticStrides);
}

/// Build an ExtractSliceOp with mixed static and dynamic entries and custom
/// result type. If the type passed is nullptr, it is inferred.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
                           RankedTensorType resultType, Value source,
                           ArrayRef<OpFoldResult> offsets,
                           ArrayRef<OpFoldResult> sizes,
                           ArrayRef<OpFoldResult> strides,
                           ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  // Structuring implementation this way avoids duplication between builders.
  if (!resultType) {
    resultType =
        ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
                                        staticSizes, staticStrides)
            .cast<RankedTensorType>();
  }
  build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

/// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
/// result type.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                           ArrayRef<OpFoldResult> offsets,
                           ArrayRef<OpFoldResult> sizes,
                           ArrayRef<OpFoldResult> strides,
                           ArrayRef<NamedAttribute> attrs) {
  build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
}

/// Build an ExtractSliceOp with dynamic entries and custom result type. If the
/// type passed is nullptr, it is inferred.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
                           RankedTensorType resultType, Value source,
                           ValueRange offsets, ValueRange sizes,
                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
}

/// Build an ExtractSliceOp with dynamic entries and inferred result type.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                           ValueRange offsets, ValueRange sizes,
                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
}

template <typename OpTy>
static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
                                          OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
  switch (result) {
  case SliceVerificationResult::Success:
    return success();
  case SliceVerificationResult::RankTooLarge:
    return op.emitError("expected rank to be smaller or equal to ")
           << "the other rank. ";
  case SliceVerificationResult::SizeMismatch:
    return op.emitError("expected type to be ")
           << expectedType << " or a rank-reduced version. (size mismatch) ";
  case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected element type to be ")
           << shapedType.getElementType();
  default:
    llvm_unreachable("unexpected extract_slice op verification result");
  }
}

/// Verifier for ExtractSliceOp.
LogicalResult ExtractSliceOp::verify() {
  // Verify result type against inferred type.
  auto expectedType = ExtractSliceOp::inferResultType(
      getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
  auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
  return produceSliceErrorMsg(result, *this, expectedType);
}

/// Infer the canonical type of the result of an extract_slice op. Returns the
/// rank-reduced type when it reaches rank `resultRank`, and the
/// non-rank-reduced type otherwise.
static RankedTensorType
getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
                            ArrayRef<OpFoldResult> mixedOffsets,
                            ArrayRef<OpFoldResult> mixedSizes,
                            ArrayRef<OpFoldResult> mixedStrides) {
  auto resultType =
      ExtractSliceOp::inferRankReducedResultType(
          resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
          .cast<RankedTensorType>();
  if (resultType.getRank() != resultRank) {
    resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
                                                 mixedSizes, mixedStrides)
                     .cast<RankedTensorType>();
  }
  return resultType;
}

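/// Return a bit vector of the rank-reducing unit dimensions dropped by this
/// extract_slice's result type. For example, sizes [1, 4] producing a
/// tensor<4xf32> result drop dimension 0.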
llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
  ArrayRef<int64_t> resultShape = getType().getShape();
  SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
  llvm::SmallBitVector droppedDims(mixedSizes.size());
  unsigned shapePos = 0;
  for (const auto &size : enumerate(mixedSizes)) {
    Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if its size is not statically 1, or if the
    // current result dimension is itself statically 1 (in which case the unit
    // dimension was kept in the result rather than dropped).
    if (!sizeVal || sizeVal.getValue() != 1 ||
        (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
      shapePos++;
      continue;
    }
    droppedDims.set(size.index());
  }
  return droppedDims;
}

LogicalResult ExtractSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1);
  reifiedReturnShapes[0].reserve(getType().getRank());
  SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
  llvm::SmallBitVector droppedDims = getDroppedDims();
  Location loc = getLoc();
  for (const auto &size : enumerate(mixedSizes)) {
    if (droppedDims.test(size.index()))
      continue;
    if (auto attr = size.value().dyn_cast<Attribute>()) {
      reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
          loc, attr.cast<IntegerAttr>().getInt()));
      continue;
    }
    reifiedReturnShapes[0].push_back(size.value().get<Value>());
  }
  return success();
}

namespace {
/// Pattern to rewrite an extract_slice op with tensor::CastOp arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
/// `canFoldIntoConsumerOp` is true.
///
/// Example:
/// ```
///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
///   tensor<3x4xf32>
/// ```
/// is rewritten into:
/// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///   tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
/// ```
class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
public:
  using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
                                PatternRewriter &rewriter) const override {
    // If any operand is a constant index, return and let the
    // OpWithOffsetSizesAndStridesConstantArgumentFolder pattern kick in.
    if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
    if (!castOp)
      return failure();

    if (!canFoldIntoConsumerOp(castOp))
      return failure();

    /// Deduce the type of the result to use for the canonicalized operation.
    RankedTensorType resultType = getCanonicalSliceResultType(
        sliceOp.getType().getRank(), sliceOp.getSourceType(),
        sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
        sliceOp.getMixedStrides());
    Value newSlice = rewriter.create<ExtractSliceOp>(
        sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
        sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
        sliceOp.static_sizes(), sliceOp.static_strides());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
                                                newSlice);
    return success();
  }
};
} // namespace

/// Return the canonical type of the result of an extract_slice op.
struct SliceReturnTypeCanonicalizer {
  RankedTensorType operator()(ExtractSliceOp op,
                              ArrayRef<OpFoldResult> mixedOffsets,
                              ArrayRef<OpFoldResult> mixedSizes,
                              ArrayRef<OpFoldResult> mixedStrides) {
    return getCanonicalSliceResultType(op.getType().getRank(),
                                       op.getSourceType(), mixedOffsets,
                                       mixedSizes, mixedStrides);
  }
};

/// A canonicalizer wrapper to replace ExtractSliceOps.
struct SliceCanonicalizer {
  void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
                  ExtractSliceOp newOp) {
    Value replacement = newOp.getResult();
    if (replacement.getType() != op.getType())
      replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
                                                    replacement);
    rewriter.replaceOp(op, replacement);
  }
};

void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                 MLIRContext *context) {
  results.add<
      OpWithOffsetSizesAndStridesConstantArgumentFolder<
          ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
      ExtractSliceOpCastFolder>(context);
}

/// Return success when `op` is an identity slice: all offsets are 0, all
/// sizes match the corresponding dimensions of `shapedType`, and all strides
/// are 1.
static LogicalResult
foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
                                           ShapedType shapedType) {
  OpBuilder b(op.getContext());
  for (OpFoldResult ofr : op.getMixedOffsets())
    if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
      return failure();
  // Rank-reducing noops only need to inspect the leading dimensions: llvm::zip
  // is appropriate.
  auto shape = shapedType.getShape();
  for (auto it : llvm::zip(op.getMixedSizes(), shape))
    if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
      return failure();
  for (OpFoldResult ofr : op.getMixedStrides())
    if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
      return failure();
  return success();
}

/// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
/// we can return the InsertSliceOp's source directly.
// TODO: This only checks the immediate producer; extend to go up the
// insert/extract chain if the slices are disjoint.
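///
/// Example:
/// ```mlir
///   %0 = tensor.insert_slice %slice into %t[0, 0] [16, 16] [1, 1]
///   %1 = tensor.extract_slice %0[0, 0] [16, 16] [1, 1]
/// ```
/// folds %1 to %slice, provided both ops agree on offsets, sizes, strides and
/// the slice type.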
static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
  auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();

  auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
  if (insertOp && insertOp.source().getType() == extractOp.getType() &&
      insertOp.isSameAs(extractOp, isSame))
    return insertOp.source();

  return {};
}

OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (Value slice = foldExtractAfterInsertSlice(*this))
    return slice;
  return OpFoldResult();
}

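/// Create an extract_slice that reads all of `tensor` (zero offsets, full
/// sizes, unit strides) and relies on `targetType` to drop the rank-reducing
/// unit dimensions.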
Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
    OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
  auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
                                                offsets, sizes, strides);
}

//===----------------------------------------------------------------------===//
// InsertSliceOp
//===----------------------------------------------------------------------===//

// Build an InsertSliceOp with mixed static and dynamic entries.
void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                          Value dest, ArrayRef<OpFoldResult> offsets,
                          ArrayRef<OpFoldResult> sizes,
                          ArrayRef<OpFoldResult> strides,
                          ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

// Build an InsertSliceOp with dynamic entries.
void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                          Value dest, ValueRange offsets, ValueRange sizes,
                          ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, source, dest, offsetValues, sizeValues, strideValues);
}

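/// Verify an insert_slice by inferring the expected (rank-reduced) source
/// type from the destination type and the static offsets/sizes/strides, then
/// checking the actual source type against it. If `expectedType` is non-null,
/// it receives the inferred type so callers can emit a precise error message.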
static SliceVerificationResult
verifyInsertSliceOp(ShapedType srcType, ShapedType dstType,
                    ArrayAttr staticOffsets, ArrayAttr staticSizes,
                    ArrayAttr staticStrides,
                    ShapedType *expectedType = nullptr) {
  // insert_slice is the inverse of extract_slice, so use the same type
  // inference.
  auto expected = ExtractSliceOp::inferRankReducedResultType(
                      srcType.getRank(), dstType.cast<RankedTensorType>(),
                      extractFromI64ArrayAttr(staticOffsets),
                      extractFromI64ArrayAttr(staticSizes),
                      extractFromI64ArrayAttr(staticStrides))
                      .cast<ShapedType>();
  if (expectedType)
    *expectedType = expected;
  return isRankReducedType(expected, srcType);
}

/// Verifier for InsertSliceOp.
LogicalResult InsertSliceOp::verify() {
  ShapedType expectedType;
  auto result =
      verifyInsertSliceOp(getSourceType(), getType(), static_offsets(),
                          static_sizes(), static_strides(), &expectedType);
  return produceSliceErrorMsg(result, *this, expectedType);
}

/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can replace the second InsertSliceOp's destination with the first one's
/// destination, making the first insert dead if it has no other uses.
///
/// Example:
///
/// ```mlir
///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
/// ```
///
/// folds into:
///
/// ```mlir
///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
/// ```
static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
  auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();

  auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
  if (!prevInsertOp ||
      prevInsertOp.source().getType() != insertOp.source().getType() ||
      !prevInsertOp.isSameAs(insertOp, isSame))
    return failure();

  insertOp.destMutable().assign(prevInsertOp.dest());
  return success();
}

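/// Fold a no-op insert_slice: when the source statically overwrites the
/// entire destination (identical static types and identity
/// offsets/sizes/strides), the result is just the source. Also fold away an
/// immediately preceding insert of the same slice by rewiring the
/// destination.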
OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
      getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (succeeded(foldInsertAfterInsertSlice(*this)))
    return getResult();
  return OpFoldResult();
}

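/// The result of an insert_slice always has the shape of its destination, so
/// the reified result shape is simply the dims of `dest`.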
LogicalResult InsertSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
  for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
    reifiedReturnShapes[0][dim] =
        builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
  }
  return success();
}

namespace {
/// Pattern to rewrite an insert_slice op whose offset/size/stride operands
/// are constants, folding those constants into the op's static attributes.
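///
/// A sketch of the rewrite (illustrative IR; values and types are
/// hypothetical):
///
/// ```mlir
///   %c0 = arith.constant 0 : index
///   %r = tensor.insert_slice %s into %d[%c0, %c0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```
///
/// becomes
///
/// ```mlir
///   %r = tensor.insert_slice %s into %d[0, 0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```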
class InsertSliceOpConstantArgumentFolder final
    : public OpRewritePattern<InsertSliceOp> {
public:
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    // No constant operand, just return.
    if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
    SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
    SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
    SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
    canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);

    // Create the new op in canonical form.
    auto sourceType = ExtractSliceOp::inferRankReducedResultType(
        insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
        mixedOffsets, mixedSizes, mixedStrides);
    Value toInsert = insertSliceOp.source();
    if (sourceType != insertSliceOp.getSourceType())
      toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
                                                 sourceType, toInsert);
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
        mixedStrides);
    return success();
  }
};

/// Fold tensor.cast ops with insert_slice operations. If the source or
/// destination tensor is a tensor.cast that removes static type information,
/// the cast is folded into the insert_slice operation. E.g.:
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
/// ```
///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back to ensure that the type of the result
/// does not change.
struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
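    // Bail out while any offset/size/stride operand is still a constant
    // index; let InsertSliceOpConstantArgumentFolder fold those into the
    // static attributes first so the two patterns do not fight each other.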
    if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
      auto castOp = v.getDefiningOp<tensor::CastOp>();
      if (!castOp || !canFoldIntoConsumerOp(castOp))
        return llvm::None;
      return castOp.source();
    };
    Optional<Value> sourceCastSource =
        getSourceOfCastOp(insertSliceOp.source());
    Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
    if (!sourceCastSource && !destCastSource)
      return failure();

    auto src = (sourceCastSource ? *sourceCastSource : insertSliceOp.source());
    auto dst = (destCastSource ? *destCastSource : insertSliceOp.dest());

    auto srcType = src.getType().cast<ShapedType>();
    auto dstType = dst.getType().cast<ShapedType>();
    if (verifyInsertSliceOp(srcType, dstType, insertSliceOp.static_offsets(),
                            insertSliceOp.static_sizes(),
                            insertSliceOp.static_strides()) !=
        SliceVerificationResult::Success)
      return failure();

    Value replacement = rewriter.create<InsertSliceOp>(
        insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
        insertSliceOp.getMixedSizes(), insertSliceOp.getMixedStrides());

    if (replacement.getType() != insertSliceOp.getType()) {
      replacement = rewriter.create<tensor::CastOp>(
          insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
    }
    rewriter.replaceOp(insertSliceOp, replacement);
    return success();
  }
};

/// If additional static type information can be deduced from an
/// insert_slice's size operands, insert an explicit cast of the op's source
/// operand. This enables other canonicalization patterns that match
/// tensor.cast ops, such as `ForOpTensorCastFolder` in SCF.
///
/// Example:
///
/// ```mlir
///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
///       : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
///       : tensor<64x64xf32> into ...
/// ```
struct InsertSliceOpSourceCastInserter final
    : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType srcType = insertSliceOp.getSourceType();
    if (srcType.getRank() != insertSliceOp.getType().getRank())
      return failure();
    SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
                                     srcType.getShape().end());
    for (int64_t i = 0; i < srcType.getRank(); ++i) {
      if (Optional<int64_t> constInt =
              getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
        newSrcShape[i] = *constInt;
    }

    RankedTensorType newSrcType =
        RankedTensorType::get(newSrcShape, srcType.getElementType());
    if (srcType == newSrcType ||
        !preservesStaticInformation(srcType, newSrcType) ||
        !tensor::CastOp::areCastCompatible(srcType, newSrcType))
      return failure();

    // newSrcType is:
    //   1) Different from srcType.
    //   2) "More static" than srcType.
    //   3) Cast-compatible with srcType.
    // Insert the cast.
    Value cast = rewriter.create<tensor::CastOp>(
        insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, cast, insertSliceOp.dest(),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());
    return success();
  }
};
} // namespace

void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
              InsertSliceOpSourceCastInserter>(context);
}

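/// Create an insert_slice that writes `tensor` over the full extent of
/// `dest`: all offsets are 0, the sizes are the (possibly dynamic)
/// dimensions of `dest`, and all strides are 1. If `tensor` has a lower rank
/// than `dest`, the resulting insert_slice is rank-extending (the inverse of
/// a rank-reducing extract).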
Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
                                                             Location loc,
                                                             Value tensor,
                                                             Value dest) {
  auto rankedTensorType = dest.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
                                               sizes, strides);
}

//===----------------------------------------------------------------------===//
// PadOp
//===----------------------------------------------------------------------===//

// TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
// supports optional types.
void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
                    Type typeToInfer, Type typeToInferFrom) {}

ParseResult parseInferType(OpAsmParser &parser,
                           Optional<OpAsmParser::OperandType> optOperand,
                           Type &typeToInfer, Type typeToInferFrom) {
  if (optOperand)
    typeToInfer = typeToInferFrom;
  return success();
}

LogicalResult PadOp::verify() {
  auto sourceType = source().getType().cast<RankedTensorType>();
  auto resultType = result().getType().cast<RankedTensorType>();
  auto expectedType =
      PadOp::inferResultType(sourceType, extractFromI64ArrayAttr(static_low()),
                             extractFromI64ArrayAttr(static_high()));
  for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
    if (resultType.getDimSize(i) == expectedType.getDimSize(i))
      continue;
    if (expectedType.isDynamicDim(i))
      continue;
    return emitError("specified type ")
           << resultType << " does not match the inferred type "
           << expectedType;
  }

  auto &region = getRegion();
  unsigned rank = resultType.getRank();
  Block &block = region.front();
  if (block.getNumArguments() != rank)
    return emitError("expected the block to have ") << rank << " arguments";

  // Note: the number and type of yield values are checked in the YieldOp.
  for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
    if (!en.value().isIndex())
      return emitOpError("expected block argument ")
             << (en.index() + 1) << " to be an index";
  }

  // Ensure that the region yields an element of the right type.
  auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
  if (yieldOp.value().getType() !=
      getType().cast<ShapedType>().getElementType())
    return emitOpError("expected yield type to match shape element type");

  return success();
}

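/// Infer the result type from the source type and the static low/high
/// padding: a result dimension is `source + low + high` when all three are
/// static, and dynamic otherwise (in which case it is taken from
/// `resultShape` if one is provided).
///
/// For example (illustrative), padding a `tensor<4x?xf32>` with static low
/// padding [1, 1] and static high padding [2, 1] infers `tensor<7x?xf32>`.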
RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
                                        ArrayRef<int64_t> staticLow,
                                        ArrayRef<int64_t> staticHigh,
                                        ArrayRef<int64_t> resultShape) {
  unsigned rank = sourceType.getRank();
  assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
  assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
  assert((resultShape.empty() || resultShape.size() == rank) &&
         "unexpected resultShape size mismatch");

  SmallVector<int64_t, 4> inferredShape;
  for (auto i : llvm::seq<unsigned>(0, rank)) {
    if (sourceType.isDynamicDim(i) ||
        staticLow[i] == ShapedType::kDynamicSize ||
        staticHigh[i] == ShapedType::kDynamicSize) {
      inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
                                                  : resultShape[i]);
    } else {
      int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
      assert((resultShape.empty() || size == resultShape[i] ||
              resultShape[i] == ShapedType::kDynamicSize) &&
             "mismatch between inferred shape and result shape");
      inferredShape.push_back(size);
    }
  }

  return RankedTensorType::get(inferredShape, sourceType.getElementType());
}

void PadOp::build(OpBuilder &b, OperationState &result, Value source,
                  ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
                  ValueRange low, ValueRange high, bool nofold,
                  ArrayRef<NamedAttribute> attrs) {
  auto sourceType = source.getType().cast<RankedTensorType>();
  auto resultType = inferResultType(sourceType, staticLow, staticHigh);
  build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
        b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
  result.addAttributes(attrs);
}

void PadOp::build(OpBuilder &b, OperationState &result, Value source,
                  ValueRange low, ValueRange high, bool nofold,
                  ArrayRef<NamedAttribute> attrs) {
  auto sourceType = source.getType().cast<RankedTensorType>();
  unsigned rank = sourceType.getRank();
  SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
  build(b, result, source, staticVector, staticVector, low, high, nofold,
        attrs);
}

void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
                  Value source, ArrayRef<OpFoldResult> low,
                  ArrayRef<OpFoldResult> high, bool nofold,
                  ArrayRef<NamedAttribute> attrs) {
  assert(resultType.isa<RankedTensorType>());
  auto sourceType = source.getType().cast<RankedTensorType>();
  SmallVector<Value, 4> dynamicLow, dynamicHigh;
  SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh carry the full padding configuration. Each
  // dispatch below grows staticLow/staticHigh by one value per padding entry;
  // if an entry is dynamic (i.e., not a constant), dynamicLow/dynamicHigh
  // grow by one value as well.
  dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
                             ShapedType::kDynamicSize);
  if (!resultType) {
    resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
  }
  build(b, result, resultType, source, dynamicLow, dynamicHigh,
        b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
        nofold ? b.getUnitAttr() : UnitAttr());
  result.addAttributes(attrs);
}

namespace {
// Folds tensor.pad when the low and high paddings are statically zero and the
// nofold attribute doesn't request otherwise.
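// A minimal illustrative fold (hypothetical values and types):
//
//   %0 = tensor.pad %t low[0, 0] high[0, 0] {
//   ^bb0(%i: index, %j: index):
//     tensor.yield %cst : f32
//   } : tensor<?x?xf32> to tensor<4x4xf32>
//
// becomes
//
//   %0 = tensor.cast %t : tensor<?x?xf32> to tensor<4x4xf32>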
struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
  using OpRewritePattern<PadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadOp padTensorOp,
                                PatternRewriter &rewriter) const override {
    if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
      return failure();
    if (padTensorOp.nofold())
      return failure();
    rewriter.replaceOpWithNewOp<tensor::CastOp>(
        padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
    return success();
  }
};

// Fold CastOp into PadOp when adding static information.
struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
  using OpRewritePattern<PadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadOp padTensorOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
    if (!tensor::canFoldIntoConsumerOp(castOp))
      return failure();

    auto newResultType = PadOp::inferResultType(
        castOp.source().getType().cast<RankedTensorType>(),
        extractFromI64ArrayAttr(padTensorOp.static_low()),
        extractFromI64ArrayAttr(padTensorOp.static_high()),
        padTensorOp.getResultType().getShape());

    if (newResultType == padTensorOp.getResultType()) {
      rewriter.updateRootInPlace(padTensorOp, [&]() {
        padTensorOp.sourceMutable().assign(castOp.source());
      });
    } else {
      auto newOp = rewriter.create<PadOp>(
          padTensorOp->getLoc(), newResultType, padTensorOp.source(),
          padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
          padTensorOp.static_high(), padTensorOp.nofold());
      BlockAndValueMapping mapper;
      padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);

      rewriter.replaceOpWithNewOp<tensor::CastOp>(
          padTensorOp, padTensorOp.getResultType(), newOp);
    }
    return success();
  }
};

// Fold CastOp using the result of PadOp back into the latter if it adds
// static information.
struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
  using OpRewritePattern<PadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadOp padTensorOp,
                                PatternRewriter &rewriter) const override {
    if (!padTensorOp.result().hasOneUse())
      return failure();
    auto tensorCastOp =
        dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
    if (!tensorCastOp)
      return failure();
    if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
                                            tensorCastOp.dest().getType()))
      return failure();

    auto replacementOp = rewriter.create<PadOp>(
        padTensorOp.getLoc(), tensorCastOp.dest().getType(),
        padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
        padTensorOp.static_low(), padTensorOp.static_high(),
        padTensorOp.nofold());
    replacementOp.region().takeBody(padTensorOp.region());

    rewriter.replaceOp(padTensorOp, replacementOp.result());
    rewriter.replaceOp(tensorCastOp, replacementOp.result());
    return success();
  }
};
} // namespace

void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results
      .add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast>(
          context);
}

/// Return the padding value of the PadOp if it is constant. In this context,
/// "constant" means an actual constant or "defined outside of the block".
///
/// Values are considered constant in three cases:
///  - A ConstantLike value.
///  - A basic block argument from a different block.
///  - A value defined outside of the block.
///
/// If the padding value is not constant, an empty Value is returned.
Value PadOp::getConstantPaddingValue() {
  auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
  if (!yieldOp)
    return {};
  Value padValue = yieldOp.value();
  // Check if yield value is a constant.
  if (matchPattern(padValue, m_Constant()))
    return padValue;
  // Check if yield value is defined inside the PadOp block.
  if (padValue.getParentBlock() == &getRegion().front())
    return {};
  // Else: Yield value defined outside of the PadOp block.
  return padValue;
}

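/// Fold away a no-op pad: if the result type is fully static and identical to
/// the source type, the pad adds nothing and the result is just the source
/// (unless nofold is set).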
OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
  if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
      !nofold())
    return source();
  return {};
}

//===----------------------------------------------------------------------===//
// SplatOp
//===----------------------------------------------------------------------===//

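/// Fold a splat of a constant scalar to a SplatElementsAttr. For example
/// (illustrative), splatting a constant `1.0 : f32` into `tensor<4xf32>`
/// folds to `dense<1.000000e+00> : tensor<4xf32>`.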
OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
  auto constOperand = operands.front();
  if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
    return {};

  // SplatElementsAttr::get treats a single value for the second arg as a
  // splat.
  return SplatElementsAttr::get(getType(), {constOperand});
}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"