1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/Complex/IR/Complex.h"
11 #include "mlir/Dialect/StandardOps/Utils/Utils.h"
12 #include "mlir/Dialect/Tensor/IR/Tensor.h"
13 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
14 #include "mlir/Dialect/Utils/StaticValueUtils.h"
15 #include "mlir/IR/BlockAndValueMapping.h"
16 #include "mlir/IR/Builders.h"
17 #include "mlir/IR/BuiltinAttributeInterfaces.h"
18 #include "mlir/IR/Matchers.h"
19 #include "mlir/IR/PatternMatch.h"
20 #include "mlir/IR/TypeUtilities.h"
21 #include "llvm/ADT/STLExtras.h"
22 
23 using namespace mlir;
24 using namespace mlir::tensor;
25 
26 /// Materialize a single constant operation from a given attribute value with
27 /// the desired resultant type.
28 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
29                                               Attribute value, Type type,
30                                               Location loc) {
31   if (arith::ConstantOp::isBuildableWith(value, type))
32     return builder.create<arith::ConstantOp>(loc, value, type);
33   if (complex::ConstantOp::isBuildableWith(value, type))
34     return builder.create<complex::ConstantOp>(loc, type,
35                                                value.cast<ArrayAttr>());
36   if (ConstantOp::isBuildableWith(value, type))
37     return builder.create<ConstantOp>(loc, value, type);
38   return nullptr;
39 }
40 
41 //===----------------------------------------------------------------------===//
42 // CastOp
43 //===----------------------------------------------------------------------===//
44 
45 /// Returns true if `target` is a ranked tensor type that preserves static
46 /// information available in the `source` ranked tensor type.
47 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
48   auto sourceType = source.dyn_cast<RankedTensorType>();
49   auto targetType = target.dyn_cast<RankedTensorType>();
50 
51   // Requires RankedTensorType.
52   if (!sourceType || !targetType)
53     return false;
54 
55   // Requires same elemental type.
56   if (sourceType.getElementType() != targetType.getElementType())
57     return false;
58 
59   // Requires same rank.
60   if (sourceType.getRank() != targetType.getRank())
61     return false;
62 
63   // If cast is towards more static sizes along any dimension, don't fold.
64   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
65     if (!ShapedType::isDynamic(std::get<0>(t)) &&
66         ShapedType::isDynamic(std::get<1>(t)))
67       return false;
68   }
69 
70   return true;
71 }
72 
73 /// Determines whether tensor::CastOp casts to a more dynamic version of the
74 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
75 /// implement canonicalization patterns for ops in different dialects that may
76 /// consume the results of tensor.cast operations. Such foldable tensor.cast
77 /// operations are typically inserted as `slice` ops and are canonicalized,
78 /// to preserve the type compatibility of their uses.
79 ///
80 /// Returns true when all conditions are met:
81 /// 1. source and result are ranked tensors with same element type and rank.
82 /// 2. the tensor type has more static information than the result
83 ///
84 /// Example:
85 /// ```mlir
86 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
87 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
88 /// ```
89 ///
90 /// folds into:
91 ///
92 /// ```mlir
93 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
94 /// ```
95 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
96   if (!castOp)
97     return false;
98 
99   // Can fold if the source of cast has at least as much static information as
100   // its results.
101   return preservesStaticInformation(castOp.getType(),
102                                     castOp.source().getType());
103 }
104 
105 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
106 /// that can be folded.
107 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
108   bool folded = false;
109   for (OpOperand &operand : op->getOpOperands()) {
110     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
111     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
112       operand.set(castOp.getOperand());
113       folded = true;
114     }
115   }
116   return success(folded);
117 }
118 
119 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
120   if (inputs.size() != 1 || outputs.size() != 1)
121     return false;
122   Type a = inputs.front(), b = outputs.front();
123   auto aT = a.dyn_cast<TensorType>();
124   auto bT = b.dyn_cast<TensorType>();
125   if (!aT || !bT)
126     return false;
127 
128   if (aT.getElementType() != bT.getElementType())
129     return false;
130 
131   return succeeded(verifyCompatibleShape(aT, bT));
132 }
133 
134 /// Compute a TensorType that has the joined shape knowledge of the two
135 /// given TensorTypes. The element types need to match.
136 static TensorType joinShapes(TensorType one, TensorType two) {
137   assert(one.getElementType() == two.getElementType());
138 
139   if (!one.hasRank())
140     return two;
141   if (!two.hasRank())
142     return one;
143 
144   int64_t rank = one.getRank();
145   if (rank != two.getRank())
146     return {};
147 
148   SmallVector<int64_t, 4> join;
149   join.reserve(rank);
150   for (int64_t i = 0; i < rank; ++i) {
151     if (one.isDynamicDim(i)) {
152       join.push_back(two.getDimSize(i));
153       continue;
154     }
155     if (two.isDynamicDim(i)) {
156       join.push_back(one.getDimSize(i));
157       continue;
158     }
159     if (one.getDimSize(i) != two.getDimSize(i))
160       return {};
161     join.push_back(one.getDimSize(i));
162   }
163   return RankedTensorType::get(join, one.getElementType());
164 }
165 
166 namespace {
167 
168 /// Replaces chains of two tensor.cast operations by a single tensor.cast
169 /// operation if doing so does not remove runtime constraints.
170 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
171   using OpRewritePattern<CastOp>::OpRewritePattern;
172 
173   LogicalResult matchAndRewrite(CastOp tensorCast,
174                                 PatternRewriter &rewriter) const final {
175     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
176 
177     if (!tensorCastOperand)
178       return failure();
179 
180     auto sourceType =
181         tensorCastOperand.getOperand().getType().cast<TensorType>();
182     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
183     auto resultType = tensorCast.getType().cast<TensorType>();
184 
185     // We can remove the intermediate cast if joining all three produces the
186     // same result as just joining the source and result shapes.
187     auto firstJoin =
188         joinShapes(joinShapes(sourceType, intermediateType), resultType);
189 
190     // The join might not exist if the cast sequence would fail at runtime.
191     if (!firstJoin)
192       return failure();
193 
194     // The newJoin always exists if the above join exists, it might just contain
195     // less information. If so, we cannot drop the intermediate cast, as doing
196     // so would remove runtime checks.
197     auto newJoin = joinShapes(sourceType, resultType);
198     if (firstJoin != newJoin)
199       return failure();
200 
201     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
202                                         tensorCastOperand.getOperand());
203     return success();
204   }
205 };
206 
207 } // namespace
208 
209 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
210                                          MLIRContext *context) {
211   results.add<ChainedTensorCast>(context);
212 }
213 
214 //===----------------------------------------------------------------------===//
215 // DimOp
216 //===----------------------------------------------------------------------===//
217 
218 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
219                   int64_t index) {
220   auto loc = result.location;
221   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
222   build(builder, result, source, indexValue);
223 }
224 
225 Optional<int64_t> DimOp::getConstantIndex() {
226   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
227     return constantOp.getValue().cast<IntegerAttr>().getInt();
228   return {};
229 }
230 
231 static LogicalResult verify(DimOp op) {
232   // Assume unknown index to be in range.
233   Optional<int64_t> index = op.getConstantIndex();
234   if (!index.hasValue())
235     return success();
236 
237   // Check that constant index is not knowingly out of range.
238   auto type = op.source().getType();
239   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
240     if (index.getValue() >= tensorType.getRank())
241       return op.emitOpError("index is out of range");
242   } else if (type.isa<UnrankedTensorType>()) {
243     // Assume index to be in range.
244   } else {
245     llvm_unreachable("expected operand with tensor type");
246   }
247   return success();
248 }
249 
250 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
251   // All forms of folding require a known index.
252   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
253   if (!index)
254     return {};
255 
256   // Folding for unranked types (UnrankedTensorType) is not supported.
257   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
258   if (!tensorType)
259     return {};
260 
261   // Fold if the shape extent along the given index is known.
262   if (!tensorType.isDynamicDim(index.getInt())) {
263     Builder builder(getContext());
264     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
265   }
266 
267   Operation *definingOp = source().getDefiningOp();
268 
269   // Fold dim to the operand of tensor.generate.
270   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
271     auto resultType =
272         fromElements.getResult().getType().cast<RankedTensorType>();
273     // The case where the type encodes the size of the dimension is handled
274     // above.
275     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
276 
277     // Find the operand of the fromElements that corresponds to this index.
278     auto dynExtents = fromElements.dynamicExtents().begin();
279     for (auto dim : resultType.getShape().take_front(index.getInt()))
280       if (ShapedType::isDynamic(dim))
281         dynExtents++;
282 
283     return Value{*dynExtents};
284   }
285 
286   // The size at the given index is now known to be a dynamic size.
287   unsigned unsignedIndex = index.getValue().getZExtValue();
288 
289   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
290     // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
291     // `resolve-shaped-type-result-dims` pass.
292     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
293         sliceOp.isDynamicSize(unsignedIndex)) {
294       return {sliceOp.getDynamicSize(unsignedIndex)};
295     }
296   }
297 
298   // dim(cast) -> dim
299   if (succeeded(foldTensorCast(*this)))
300     return getResult();
301 
302   return {};
303 }
304 
305 namespace {
306 /// Fold dim of a cast into the dim of the source of the tensor cast.
307 struct DimOfCastOp : public OpRewritePattern<DimOp> {
308   using OpRewritePattern<DimOp>::OpRewritePattern;
309 
310   LogicalResult matchAndRewrite(DimOp dimOp,
311                                 PatternRewriter &rewriter) const override {
312     auto castOp = dimOp.source().getDefiningOp<CastOp>();
313     if (!castOp)
314       return failure();
315     Value newSource = castOp.getOperand();
316     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
317     return success();
318   }
319 };
320 } // namespace
321 
322 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
323                                         MLIRContext *context) {
324   results.add<DimOfCastOp>(context);
325 }
326 
327 //===----------------------------------------------------------------------===//
328 // ExtractOp
329 //===----------------------------------------------------------------------===//
330 
331 static LogicalResult verify(ExtractOp op) {
332   // Verify the # indices match if we have a ranked type.
333   if (auto tensorType = op.tensor().getType().dyn_cast<RankedTensorType>())
334     if (tensorType.getRank() != static_cast<int64_t>(op.indices().size()))
335       return op.emitOpError("incorrect number of indices for extract_element");
336 
337   return success();
338 }
339 
340 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
341   // The tensor operand must be a known constant.
342   Attribute tensor = operands.front();
343   if (!tensor)
344     return {};
345   // If this is a splat elements attribute, simply return the value. All of the
346   // elements of a splat attribute are the same.
347   if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
348     return splatTensor.getSplatValue<Attribute>();
349 
350   // Otherwise, collect the constant indices into the tensor.
351   SmallVector<uint64_t, 8> indices;
352   for (Attribute indice : llvm::drop_begin(operands, 1)) {
353     if (!indice || !indice.isa<IntegerAttr>())
354       return {};
355     indices.push_back(indice.cast<IntegerAttr>().getInt());
356   }
357 
358   // If this is an elements attribute, query the value at the given indices.
359   auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
360   if (elementsAttr && elementsAttr.isValidIndex(indices))
361     return elementsAttr.getValues<Attribute>()[indices];
362   return {};
363 }
364 
365 //===----------------------------------------------------------------------===//
366 // FromElementsOp
367 //===----------------------------------------------------------------------===//
368 
369 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
370                            Type resultType, ValueRange elements) {
371   result.addOperands(elements);
372   result.addTypes(resultType);
373 }
374 
375 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
376                            ValueRange elements) {
377   assert(!elements.empty() && "expected at least one element");
378   Type resultType = RankedTensorType::get(
379       {static_cast<int64_t>(elements.size())}, elements.front().getType());
380   build(builder, result, resultType, elements);
381 }
382 
383 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
384   if (!llvm::is_contained(operands, nullptr))
385     return DenseElementsAttr::get(getType(), operands);
386   return {};
387 }
388 
389 namespace {
390 
391 // Canonicalizes the pattern of the form
392 //
393 // %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
394 // %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
395 //
396 // to just %element.
397 struct ExtractElementFromTensorFromElements
398     : public OpRewritePattern<tensor::ExtractOp> {
399   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
400 
401   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
402                                 PatternRewriter &rewriter) const final {
403     auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
404     if (!tensorFromElements)
405       return failure();
406     auto tensorType = tensorFromElements.getType().cast<RankedTensorType>();
407     auto rank = tensorType.getRank();
408     if (rank == 0) {
409       rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
410       return success();
411     }
412     SmallVector<APInt, 3> indices(rank);
413     int64_t flatIndex = 0;
414     int64_t stride = 1;
415     for (int i = rank - 1; i >= 0; --i) {
416       APInt index;
417       if (!matchPattern(extract.indices()[i], m_ConstantInt(&index)))
418         return failure();
419       if (i < rank - 1)
420         stride *= tensorType.getDimSize(i);
421       flatIndex += index.getSExtValue() * stride;
422     }
423     // Prevent out of bounds accesses. This can happen in invalid code that will
424     // never execute.
425     if (tensorFromElements->getNumOperands() <= flatIndex || flatIndex < 0)
426       return failure();
427     rewriter.replaceOp(extract, tensorFromElements.getOperand(flatIndex));
428     return success();
429   }
430 };
431 
432 // Pushes the index_casts that occur before extractions to after the extract.
433 // This minimizes type conversion in some cases and enables the extract
434 // canonicalizer. This changes:
435 //
436 // %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
437 // %extract = tensor.extract %cast[%index] : tensor<1xindex>
438 //
439 // to the following:
440 //
441 // %extract = tensor.extract %tensor[%index] : tensor<1xindex>
442 // %cast = arith.index_cast %extract : i32 to index
443 //
444 // to just %element.
445 //
446 // Consider expanding this to a template and handle all tensor cast operations.
447 struct ExtractElementFromIndexCast
448     : public OpRewritePattern<tensor::ExtractOp> {
449   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
450 
451   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
452                                 PatternRewriter &rewriter) const final {
453     Location loc = extract.getLoc();
454     auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
455     if (!indexCast)
456       return failure();
457 
458     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
459 
460     auto newExtract = rewriter.create<tensor::ExtractOp>(
461         loc, elementTy, indexCast.getIn(), extract.indices());
462 
463     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
464                                                     newExtract);
465 
466     return success();
467   }
468 };
469 
470 } // namespace
471 
472 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
473                                                  MLIRContext *context) {
474   results
475       .add<ExtractElementFromIndexCast, ExtractElementFromTensorFromElements>(
476           context);
477 }
478 
479 //===----------------------------------------------------------------------===//
480 // InsertOp
481 //===----------------------------------------------------------------------===//
482 
483 static LogicalResult verify(InsertOp op) {
484   // Verify the # indices match if we have a ranked type.
485   if (auto destType = op.dest().getType().dyn_cast<RankedTensorType>())
486     if (destType.getRank() != static_cast<int64_t>(op.indices().size()))
487       return op.emitOpError("incorrect number of indices");
488   return success();
489 }
490 
491 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
492   Attribute scalar = operands[0];
493   Attribute dest = operands[1];
494   if (scalar && dest)
495     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
496       if (scalar == splatDest.getSplatValue<Attribute>())
497         return dest;
498   return {};
499 }
500 
501 //===----------------------------------------------------------------------===//
502 // GenerateOp
503 //===----------------------------------------------------------------------===//
504 
505 static LogicalResult verify(GenerateOp op) {
506   // Ensure that the tensor type has as many dynamic dimensions as are specified
507   // by the operands.
508   RankedTensorType resultTy = op.getType().cast<RankedTensorType>();
509   if (op.getNumOperands() != resultTy.getNumDynamicDims())
510     return op.emitError("must have as many index operands as dynamic extents "
511                         "in the result type");
512 
513   // Ensure that region arguments span the index space.
514   if (!llvm::all_of(op.body().getArgumentTypes(),
515                     [](Type ty) { return ty.isIndex(); }))
516     return op.emitError("all body arguments must be index");
517   if (op.body().getNumArguments() != resultTy.getRank())
518     return op.emitError("must have one body argument per input dimension");
519 
520   // Ensure that the region yields an element of the right type.
521   auto yieldOp =
522       llvm::cast<YieldOp>(op.body().getBlocks().front().getTerminator());
523 
524   if (yieldOp.value().getType() != resultTy.getElementType())
525     return op.emitOpError(
526         "body must be terminated with a `yield` operation of the tensor "
527         "element type");
528 
529   return success();
530 }
531 
532 void GenerateOp::build(
533     OpBuilder &b, OperationState &result, Type resultTy,
534     ValueRange dynamicExtents,
535     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
536   build(b, result, resultTy, dynamicExtents);
537 
538   // Build and populate body.
539   OpBuilder::InsertionGuard guard(b);
540   Region *bodyRegion = result.regions.front().get();
541   auto rank = resultTy.cast<RankedTensorType>().getRank();
542   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
543   SmallVector<Location, 2> argumentLocs(rank, result.location);
544   Block *bodyBlock =
545       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
546   bodyBuilder(b, result.location, bodyBlock->getArguments());
547 }
548 
549 namespace {
550 
551 /// Canonicalizes tensor.generate operations with a constant
552 /// operand into the equivalent operation with the operand expressed in the
553 /// result type, instead. We also insert a type cast to make sure that the
554 /// resulting IR is still well-typed.
555 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
556   using OpRewritePattern<GenerateOp>::OpRewritePattern;
557 
558   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
559                                 PatternRewriter &rewriter) const final {
560     auto resultType =
561         tensorFromElements.getResult().getType().cast<RankedTensorType>();
562 
563     if (resultType.hasStaticShape())
564       return failure();
565 
566     SmallVector<Value, 4> newOperands;
567     SmallVector<int64_t, 4> newShape;
568     auto operandsIt = tensorFromElements.dynamicExtents().begin();
569 
570     for (int64_t dim : resultType.getShape()) {
571       if (!ShapedType::isDynamic(dim)) {
572         newShape.push_back(dim);
573         continue;
574       }
575       APInt index;
576       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
577         newShape.push_back(ShapedType::kDynamicSize);
578         newOperands.push_back(*operandsIt++);
579         continue;
580       }
581       newShape.push_back(index.getSExtValue());
582       operandsIt++;
583     }
584 
585     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
586       return failure();
587 
588     auto loc = tensorFromElements.getLoc();
589     auto newOp = rewriter.create<GenerateOp>(
590         loc, RankedTensorType::get(newShape, resultType.getElementType()),
591         newOperands);
592     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
593                                 newOp.body().begin());
594     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
595                                                 newOp);
596     return success();
597   }
598 };
599 
600 /// Canonicalizes the pattern of the form
601 ///
602 /// %tensor = tensor.generate %x {
603 ///   ^bb0(%arg0: index):
604 ///   <computation>
605 ///   yield %1 : index
606 /// } : tensor<?xindex>
607 /// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xi32>
608 ///
609 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
610 /// tensor.generate operation has no side-effects.
611 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
612   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
613 
614   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
615                                 PatternRewriter &rewriter) const final {
616     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
617     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
618       return failure();
619 
620     BlockAndValueMapping mapping;
621     Block *body = tensorFromElements.getBody();
622     mapping.map(body->getArguments(), extract.indices());
623     for (auto &op : body->without_terminator())
624       rewriter.clone(op, mapping);
625 
626     auto yield = cast<YieldOp>(body->getTerminator());
627 
628     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
629     return success();
630   }
631 };
632 
633 /// Canonicalizes the pattern of the form
634 ///
635 /// %val = tensor.cast %source : : tensor<?xi32> to tensor<2xi32>
636 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
637 ///
638 /// to
639 ///
640 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
641 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
642   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
643 
644   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
645                                 PatternRewriter &rewriter) const final {
646     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
647     if (!tensorCast)
648       return failure();
649 
650     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
651                                                    extract.indices());
652     return success();
653   }
654 };
655 
656 } // namespace
657 
658 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
659                                              MLIRContext *context) {
660   // TODO: Move extract patterns to tensor::ExtractOp.
661   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
662               StaticTensorGenerate>(context);
663 }
664 
665 //===----------------------------------------------------------------------===//
666 // RankOp
667 //===----------------------------------------------------------------------===//
668 
669 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
670   // Constant fold rank when the rank of the operand is known.
671   auto type = getOperand().getType();
672   auto shapedType = type.dyn_cast<ShapedType>();
673   if (shapedType && shapedType.hasRank())
674     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
675   return IntegerAttr();
676 }
677 
678 //===----------------------------------------------------------------------===//
679 // ReshapeOp
680 //===----------------------------------------------------------------------===//
681 
682 static int64_t getNumElements(ShapedType type) {
683   int64_t numElements = 1;
684   for (auto dim : type.getShape())
685     numElements *= dim;
686   return numElements;
687 }
688 
689 static LogicalResult verify(ReshapeOp op) {
690   TensorType operandType = op.source().getType().cast<TensorType>();
691   TensorType resultType = op.result().getType().cast<TensorType>();
692 
693   if (operandType.getElementType() != resultType.getElementType())
694     return op.emitOpError("element types of source and destination tensor "
695                           "types should be the same");
696 
697   int64_t shapeSize =
698       op.shape().getType().cast<RankedTensorType>().getDimSize(0);
699   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
700   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
701 
702   if (resultRankedType) {
703     if (operandRankedType && resultRankedType.hasStaticShape() &&
704         operandRankedType.hasStaticShape()) {
705       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
706         return op.emitOpError("source and destination tensor should have the "
707                               "same number of elements");
708     }
709     if (ShapedType::isDynamic(shapeSize))
710       return op.emitOpError("cannot use shape operand with dynamic length to "
711                             "reshape to statically-ranked tensor type");
712     if (shapeSize != resultRankedType.getRank())
713       return op.emitOpError(
714           "length of shape operand differs from the result's tensor rank");
715   }
716   return success();
717 }
718 
719 //===----------------------------------------------------------------------===//
720 // Reassociative reshape ops
721 //===----------------------------------------------------------------------===//
722 
723 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
724   return getSymbolLessAffineMaps(getReassociationExprs());
725 }
726 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
727   return convertReassociationIndicesToExprs(getContext(),
728                                             getReassociationIndices());
729 }
730 
731 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
732   return getSymbolLessAffineMaps(getReassociationExprs());
733 }
734 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
735   return convertReassociationIndicesToExprs(getContext(),
736                                             getReassociationIndices());
737 }
738 
739 static void print(OpAsmPrinter &p, ExpandShapeOp op) {
740   ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
741 }
742 
743 static void print(OpAsmPrinter &p, CollapseShapeOp op) {
744   ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
745 }
746 
747 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
748 static RankedTensorType
749 computeTensorReshapeCollapsedType(RankedTensorType type,
750                                   ArrayRef<AffineMap> reassociation) {
751   auto shape = type.getShape();
752   SmallVector<int64_t, 4> newShape;
753   newShape.reserve(reassociation.size());
754 
755   // Use the fact that reassociation is valid to simplify the logic: only use
756   // each map's rank.
757   assert(isReassociationValid(reassociation) && "invalid reassociation");
758   unsigned currentDim = 0;
759   for (AffineMap m : reassociation) {
760     unsigned dim = m.getNumResults();
761     auto band = shape.slice(currentDim, dim);
762     int64_t size = 1;
763     if (llvm::is_contained(band, ShapedType::kDynamicSize))
764       size = ShapedType::kDynamicSize;
765     else
766       for (unsigned d = 0; d < dim; ++d)
767         size *= shape[currentDim + d];
768     newShape.push_back(size);
769     currentDim += dim;
770   }
771 
772   return RankedTensorType::get(newShape, type.getElementType());
773 }
774 
775 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
776                             ArrayRef<ReassociationIndices> reassociation,
777                             ArrayRef<NamedAttribute> attrs) {
778   auto resultType = computeTensorReshapeCollapsedType(
779       src.getType().cast<RankedTensorType>(),
780       getSymbolLessAffineMaps(
781           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
782   build(b, result, resultType, src, attrs);
783   result.addAttribute(getReassociationAttrName(),
784                       getReassociationIndicesAttribute(b, reassociation));
785 }
786 
787 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
788                           ArrayRef<ReassociationIndices> reassociation,
789                           ArrayRef<NamedAttribute> attrs) {
790   auto resultType = computeTensorReshapeCollapsedType(
791       src.getType().cast<RankedTensorType>(),
792       getSymbolLessAffineMaps(
793           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
794   build(b, result, resultType, src, attrs);
795   result.addAttribute(getReassociationAttrName(),
796                       getReassociationIndicesAttribute(b, reassociation));
797 }
798 
799 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
800                                         TensorReshapeOp, ExpandShapeOp>::value>
801 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
802                                            RankedTensorType expandedType,
803                                            RankedTensorType collapsedType) {
804   if (failed(
805           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
806     return failure();
807 
808   auto maps = op.getReassociationMaps();
809   RankedTensorType expectedType =
810       computeTensorReshapeCollapsedType(expandedType, maps);
811   if (collapsedType != expectedType)
812     return op.emitOpError("expected collapsed type to be ")
813            << expectedType << ", but got " << collapsedType;
814   return success();
815 }
816 
817 static LogicalResult verify(ExpandShapeOp op) {
818   return verifyTensorReshapeOp(op, op.getResultType(), op.getSrcType());
819 }
820 
821 static LogicalResult verify(CollapseShapeOp op) {
822   return verifyTensorReshapeOp(op, op.getSrcType(), op.getResultType());
823 }
824 
825 namespace {
826 /// Reshape of a splat constant can be replaced with a constant of the result
827 /// type.
828 template <typename TensorReshapeOp>
829 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
830   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
831   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
832                                 PatternRewriter &rewriter) const override {
833     DenseElementsAttr attr;
834     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
835       return failure();
836     if (!attr || !attr.isSplat())
837       return failure();
838     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
839         reshapeOp.getResultType(), attr.getRawData(), true);
840     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
841     return success();
842   }
843 };
844 
845 /// Reshape of a FromElements can be replaced with a FromElements of the result
846 /// type
847 template <typename TensorReshapeOp>
848 struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
849   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
850   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
851                                 PatternRewriter &rewriter) const override {
852     auto fromElements =
853         reshapeOp.src().template getDefiningOp<FromElementsOp>();
854     if (!fromElements)
855       return failure();
856 
857     auto shapedTy = reshapeOp.getType().template cast<ShapedType>();
858 
859     if (!shapedTy.hasStaticShape())
860       return failure();
861 
862     rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
863                                                 fromElements.elements());
864     return success();
865   }
866 };
867 
868 } // namespace
869 
870 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
871                                                 MLIRContext *context) {
872   results.add<CollapseReshapeOps<ExpandShapeOp>,
873               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
874               FoldReshapeWithConstant<ExpandShapeOp>,
875               FoldReshapeWithFromElements<ExpandShapeOp>>(context);
876 }
877 
878 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
879                                                   MLIRContext *context) {
880   results.add<CollapseReshapeOps<CollapseShapeOp>,
881               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
882               FoldReshapeWithConstant<CollapseShapeOp>,
883               FoldReshapeWithFromElements<CollapseShapeOp>>(context);
884 }
885 
886 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
887   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
888 }
889 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
890   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
891 }
892 
893 //===----------------------------------------------------------------------===//
894 // ExtractSliceOp
895 //===----------------------------------------------------------------------===//
896 
897 /// An extract_slice op result type can be fully inferred from the source type
898 /// and the static representation of offsets, sizes and strides. Special
899 /// sentinels encode the dynamic case.
900 RankedTensorType ExtractSliceOp::inferResultType(
901     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
902     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
903   // An extract_slice op may specify only a leading subset of offset/sizes/
904   // strides in which case we complete with offset=0, sizes from memref type and
905   // strides=1.
906   unsigned rank = sourceRankedTensorType.getRank();
907   (void)rank;
908   assert(staticSizes.size() == rank &&
909          "unexpected staticSizes not equal to rank of source");
910   return RankedTensorType::get(staticSizes,
911                                sourceRankedTensorType.getElementType());
912 }
913 
914 RankedTensorType ExtractSliceOp::inferResultType(
915     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
916     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
917   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
918   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
919   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
920                              ShapedType::kDynamicStrideOrOffset);
921   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
922                              ShapedType::kDynamicSize);
923   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
924                              ShapedType::kDynamicStrideOrOffset);
925   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
926                                          staticSizes, staticStrides);
927 }
928 
929 /// An extract_slice op result type can be fully inferred from the source type
930 /// and the static representation of offsets, sizes and strides. Special
931 /// sentinels encode the dynamic case.
932 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
933     unsigned resultRank, RankedTensorType sourceRankedTensorType,
934     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
935     ArrayRef<int64_t> strides) {
936   auto inferredType =
937       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
938           .cast<RankedTensorType>();
939   int rankDiff = inferredType.getRank() - resultRank;
940   if (rankDiff > 0) {
941     auto shape = inferredType.getShape();
942     llvm::SmallDenseSet<unsigned> dimsToProject;
943     mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
944     SmallVector<int64_t> projectedShape;
945     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
946       if (!dimsToProject.contains(pos))
947         projectedShape.push_back(shape[pos]);
948     inferredType =
949         RankedTensorType::get(projectedShape, inferredType.getElementType());
950   }
951   return inferredType;
952 }
953 
954 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
955     unsigned resultRank, RankedTensorType sourceRankedTensorType,
956     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
957     ArrayRef<OpFoldResult> strides) {
958   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
959   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
960   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
961                              ShapedType::kDynamicStrideOrOffset);
962   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
963                              ShapedType::kDynamicSize);
964   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
965                              ShapedType::kDynamicStrideOrOffset);
966   return ExtractSliceOp::inferRankReducedResultType(
967       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
968       staticStrides);
969 }
970 
971 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
972 /// result type. If the type passed is nullptr, it is inferred.
973 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
974                            RankedTensorType resultType, Value source,
975                            ArrayRef<OpFoldResult> offsets,
976                            ArrayRef<OpFoldResult> sizes,
977                            ArrayRef<OpFoldResult> strides,
978                            ArrayRef<NamedAttribute> attrs) {
979   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
980   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
981   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
982                              ShapedType::kDynamicStrideOrOffset);
983   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
984                              ShapedType::kDynamicSize);
985   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
986                              ShapedType::kDynamicStrideOrOffset);
987   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
988   // Structuring implementation this way avoids duplication between builders.
989   if (!resultType) {
990     resultType =
991         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
992                                         staticSizes, staticStrides)
993             .cast<RankedTensorType>();
994   }
995   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
996         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
997         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
998   result.addAttributes(attrs);
999 }
1000 
1001 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
1002 /// result type.
1003 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1004                            ArrayRef<OpFoldResult> offsets,
1005                            ArrayRef<OpFoldResult> sizes,
1006                            ArrayRef<OpFoldResult> strides,
1007                            ArrayRef<NamedAttribute> attrs) {
1008   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1009 }
1010 
1011 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
1012 /// type passed is nullptr, it is inferred.
1013 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1014                            RankedTensorType resultType, Value source,
1015                            ValueRange offsets, ValueRange sizes,
1016                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1017   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1018       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1019   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1020       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1021   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1022       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1023   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1024 }
1025 
1026 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
1027 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1028                            ValueRange offsets, ValueRange sizes,
1029                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1030   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1031 }
1032 
1033 template <typename OpTy>
1034 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
1035                                           OpTy op, Type expectedType) {
1036   auto memrefType = expectedType.cast<ShapedType>();
1037   switch (result) {
1038   case SliceVerificationResult::Success:
1039     return success();
1040   case SliceVerificationResult::RankTooLarge:
1041     return op.emitError("expected rank to be smaller or equal to ")
1042            << "the other rank. ";
1043   case SliceVerificationResult::SizeMismatch:
1044     return op.emitError("expected type to be ")
1045            << expectedType << " or a rank-reduced version. (size mismatch) ";
1046   case SliceVerificationResult::ElemTypeMismatch:
1047     return op.emitError("expected element type to be ")
1048            << memrefType.getElementType();
1049   default:
1050     llvm_unreachable("unexpected extract_slice op verification result");
1051   }
1052 }
1053 
1054 /// Verifier for ExtractSliceOp.
1055 static LogicalResult verify(ExtractSliceOp op) {
1056   // Verify result type against inferred type.
1057   auto expectedType =
1058       ExtractSliceOp::inferResultType(op.getSourceType(), op.getMixedOffsets(),
1059                                       op.getMixedSizes(), op.getMixedStrides());
1060   auto result =
1061       isRankReducedType(expectedType.cast<ShapedType>(), op.getType());
1062   return produceSliceErrorMsg(result, op, expectedType);
1063 }
1064 
1065 /// Infer the canonical type of the result of an extract_slice op. Returns a
1066 /// type with rank `resultRank` that is either the rank of the rank-reduced
1067 /// type, or the non-rank-reduced type.
1068 static RankedTensorType
1069 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1070                             ArrayRef<OpFoldResult> mixedOffsets,
1071                             ArrayRef<OpFoldResult> mixedSizes,
1072                             ArrayRef<OpFoldResult> mixedStrides) {
1073   auto resultType =
1074       ExtractSliceOp::inferRankReducedResultType(
1075           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1076           .cast<RankedTensorType>();
1077   if (resultType.getRank() != resultRank) {
1078     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1079                                                  mixedSizes, mixedStrides)
1080                      .cast<RankedTensorType>();
1081   }
1082   return resultType;
1083 }
1084 
1085 llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
1086   llvm::SmallDenseSet<unsigned> droppedDims;
1087   ArrayRef<int64_t> resultShape = getType().getShape();
1088   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1089   unsigned shapePos = 0;
1090   for (const auto &size : enumerate(mixedSizes)) {
1091     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
1092     // If the size is not 1, or if the current matched dimension of the result
1093     // is the same static shape as the size value (which is 1), then the
1094     // dimension is preserved.
1095     if (!sizeVal || sizeVal.getValue() != 1 ||
1096         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1097       shapePos++;
1098       continue;
1099     }
1100     droppedDims.insert(size.index());
1101   }
1102   return droppedDims;
1103 }
1104 
1105 LogicalResult ExtractSliceOp::reifyResultShapes(
1106     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1107   reifiedReturnShapes.resize(1);
1108   reifiedReturnShapes[0].reserve(getType().getRank());
1109   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1110   llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
1111   Location loc = getLoc();
1112   for (const auto &size : enumerate(mixedSizes)) {
1113     if (droppedDims.count(size.index()))
1114       continue;
1115     if (auto attr = size.value().dyn_cast<Attribute>()) {
1116       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1117           loc, attr.cast<IntegerAttr>().getInt()));
1118       continue;
1119     }
1120     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1121   }
1122   return success();
1123 }
1124 
1125 namespace {
1126 /// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
1127 /// This essentially pushes memref_cast past its consuming slice when
1128 /// `canFoldIntoConsumerOp` is true.
1129 ///
1130 /// Example:
1131 /// ```
1132 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1133 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1134 ///   tensor<3x4xf32>
1135 /// ```
1136 /// is rewritten into:
1137 /// ```
1138 ///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
1139 ///   tensor<3x4xf32> %1 = tensor.cast %0: tensor<3x4xf32> to tensor<3x4xf32>
1140 /// ```
1141 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1142 public:
1143   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1144 
1145   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1146                                 PatternRewriter &rewriter) const override {
1147     // Any constant operand, just return to let SubViewOpConstantFolder kick in.
1148     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1149           return matchPattern(operand, matchConstantIndex());
1150         }))
1151       return failure();
1152 
1153     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1154     if (!castOp)
1155       return failure();
1156 
1157     if (!canFoldIntoConsumerOp(castOp))
1158       return failure();
1159 
1160     /// Deduce the type of the result to use for the canonicalized operation.
1161     RankedTensorType resultType = getCanonicalSliceResultType(
1162         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1163         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1164         sliceOp.getMixedStrides());
1165     Value newSlice = rewriter.create<ExtractSliceOp>(
1166         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1167         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1168         sliceOp.static_sizes(), sliceOp.static_strides());
1169     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1170                                                 newSlice);
1171     return success();
1172   }
1173 };
1174 } // namespace
1175 
1176 /// Return the canonical type of the result of an extract_slice op.
1177 struct SliceReturnTypeCanonicalizer {
1178   RankedTensorType operator()(ExtractSliceOp op,
1179                               ArrayRef<OpFoldResult> mixedOffsets,
1180                               ArrayRef<OpFoldResult> mixedSizes,
1181                               ArrayRef<OpFoldResult> mixedStrides) {
1182     return getCanonicalSliceResultType(op.getType().getRank(),
1183                                        op.getSourceType(), mixedOffsets,
1184                                        mixedSizes, mixedStrides);
1185   }
1186 };
1187 
1188 /// A canonicalizer wrapper to replace ExtractSliceOps.
1189 struct SliceCanonicalizer {
1190   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1191                   ExtractSliceOp newOp) {
1192     Value replacement = newOp.getResult();
1193     if (replacement.getType() != op.getType())
1194       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1195                                                     replacement);
1196     rewriter.replaceOp(op, replacement);
1197   }
1198 };
1199 
1200 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1201                                                  MLIRContext *context) {
1202   results.add<
1203       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1204           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1205       ExtractSliceOpCastFolder>(context);
1206 }
1207 
1208 //
1209 static LogicalResult
1210 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1211                                            ShapedType shapedType) {
1212   OpBuilder b(op.getContext());
1213   for (OpFoldResult ofr : op.getMixedOffsets())
1214     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1215       return failure();
1216   // Rank-reducing noops only need to inspect the leading dimensions: llvm::zip
1217   // is appropriate.
1218   auto shape = shapedType.getShape();
1219   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1220     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1221       return failure();
1222   for (OpFoldResult ofr : op.getMixedStrides())
1223     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1224       return failure();
1225   return success();
1226 }
1227 
1228 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1229 /// we can return the InsertSliceOp's source directly.
1230 // TODO: This only checks the immediate producer; extend to go up the
1231 // insert/extract chain if the slices are disjoint.
1232 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1233   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1234 
1235   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1236   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1237       insertOp.isSameAs(extractOp, isSame))
1238     return insertOp.source();
1239 
1240   return {};
1241 }
1242 
1243 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute>) {
1244   if (getSourceType() == getType() &&
1245       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1246     return this->source();
1247   if (Value slice = foldExtractAfterInsertSlice(*this))
1248     return slice;
1249   return OpFoldResult();
1250 }
1251 
1252 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1253     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1254   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1255   unsigned rank = rankedTensorType.getRank();
1256   auto shape = rankedTensorType.getShape();
1257   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1258   SmallVector<OpFoldResult> sizes;
1259   for (unsigned i = 0, e = rank; i < e; ++i) {
1260     OpFoldResult dim;
1261     if (rankedTensorType.isDynamicDim(i))
1262       dim = b.createOrFold<tensor::DimOp>(
1263           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1264     else
1265       dim = b.getIndexAttr(shape[i]);
1266     sizes.push_back(dim);
1267   }
1268   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1269   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1270                                                 offsets, sizes, strides);
1271 }
1272 
1273 //===----------------------------------------------------------------------===//
1274 // InsertSliceOp
1275 //===----------------------------------------------------------------------===//
1276 
1277 // Build a InsertSliceOp with mixed static and dynamic entries.
1278 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1279                           Value dest, ArrayRef<OpFoldResult> offsets,
1280                           ArrayRef<OpFoldResult> sizes,
1281                           ArrayRef<OpFoldResult> strides,
1282                           ArrayRef<NamedAttribute> attrs) {
1283   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1284   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1285   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1286                              ShapedType::kDynamicStrideOrOffset);
1287   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1288                              ShapedType::kDynamicSize);
1289   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1290                              ShapedType::kDynamicStrideOrOffset);
1291   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1292         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1293         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1294   result.addAttributes(attrs);
1295 }
1296 
1297 // Build a InsertSliceOp with dynamic entries.
1298 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1299                           Value dest, ValueRange offsets, ValueRange sizes,
1300                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1301   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1302       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1303   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1304       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1305   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1306       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1307   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1308 }
1309 
1310 /// Verifier for InsertSliceOp.
1311 static LogicalResult verify(InsertSliceOp op) {
1312   // insert_slice is the inverse of extract_slice, use the same type inference.
1313   auto expectedType = ExtractSliceOp::inferRankReducedResultType(
1314       op.getSourceType().getRank(), op.getType(),
1315       extractFromI64ArrayAttr(op.static_offsets()),
1316       extractFromI64ArrayAttr(op.static_sizes()),
1317       extractFromI64ArrayAttr(op.static_strides()));
1318   auto result =
1319       isRankReducedType(expectedType.cast<ShapedType>(), op.getSourceType());
1320   return produceSliceErrorMsg(result, op, expectedType);
1321 }
1322 
1323 /// If we have two consecutive InsertSliceOp writing to the same slice, we
1324 /// can mutate the second InsertSliceOp's destination to the first one's.
1325 ///
1326 /// Example:
1327 ///
1328 /// ```mlir
1329 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1330 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1331 /// ```
1332 ///
1333 /// folds into:
1334 ///
1335 /// ```mlir
1336 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1337 /// ```
1338 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1339   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1340 
1341   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1342   if (!prevInsertOp ||
1343       prevInsertOp.source().getType() != insertOp.source().getType() ||
1344       !prevInsertOp.isSameAs(insertOp, isSame))
1345     return failure();
1346 
1347   insertOp.destMutable().assign(prevInsertOp.dest());
1348   return success();
1349 }
1350 
1351 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
1352   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1353       getSourceType() == getType() &&
1354       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1355     return this->source();
1356   if (succeeded(foldInsertAfterInsertSlice(*this)))
1357     return getResult();
1358   return OpFoldResult();
1359 }
1360 
1361 LogicalResult InsertSliceOp::reifyResultShapes(
1362     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1363   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1364   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1365     reifiedReturnShapes[0][dim] =
1366         builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
1367   }
1368   return success();
1369 }
1370 
1371 namespace {
1372 /// Pattern to rewrite a insert_slice op with constant arguments.
1373 class InsertSliceOpConstantArgumentFolder final
1374     : public OpRewritePattern<InsertSliceOp> {
1375 public:
1376   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1377 
1378   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1379                                 PatternRewriter &rewriter) const override {
1380     // No constant operand, just return.
1381     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1382           return matchPattern(operand, matchConstantIndex());
1383         }))
1384       return failure();
1385 
1386     // At least one of offsets/sizes/strides is a new constant.
1387     // Form the new list of operands and constant attributes from the
1388     // existing.
1389     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1390     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1391     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1392     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1393     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1394     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1395 
1396     // Create the new op in canonical form.
1397     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1398         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1399         mixedOffsets, mixedSizes, mixedStrides);
1400     Value toInsert = insertSliceOp.source();
1401     if (sourceType != insertSliceOp.getSourceType())
1402       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1403                                                  sourceType, toInsert);
1404     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1405         insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
1406         mixedStrides);
1407     return success();
1408   }
1409 };
1410 
1411 /// Fold tensor_casts with insert_slice operations. If the source or destination
1412 /// tensor is a tensor_cast that removes static type information, the cast is
1413 /// folded into the insert_slice operation. E.g.:
1414 ///
1415 /// ```mlir
1416 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1417 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1418 /// ```
1419 ///
1420 /// folds into:
1421 ///
1422 /// ```mlir
1423 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1424 /// ```
1425 ///
1426 /// Note: When folding a cast on the destination tensor, the result of the
1427 /// insert_slice operation is casted to ensure that the type of the result did
1428 /// not change.
1429 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1430   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1431 
1432   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1433                                 PatternRewriter &rewriter) const override {
1434     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1435           return matchPattern(operand, matchConstantIndex());
1436         }))
1437       return failure();
1438 
1439     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1440       auto castOp = v.getDefiningOp<tensor::CastOp>();
1441       if (!castOp || !canFoldIntoConsumerOp(castOp))
1442         return llvm::None;
1443       return castOp.source();
1444     };
1445     Optional<Value> sourceCastSource =
1446         getSourceOfCastOp(insertSliceOp.source());
1447     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
1448     if (!sourceCastSource && !destCastSource)
1449       return failure();
1450 
1451     Value replacement = rewriter.create<InsertSliceOp>(
1452         insertSliceOp.getLoc(),
1453         (sourceCastSource ? *sourceCastSource : insertSliceOp.source()),
1454         (destCastSource ? *destCastSource : insertSliceOp.dest()),
1455         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1456         insertSliceOp.getMixedStrides());
1457 
1458     if (replacement.getType() != insertSliceOp.getType()) {
1459       replacement = rewriter.create<tensor::CastOp>(
1460           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1461     }
1462     rewriter.replaceOp(insertSliceOp, replacement);
1463     return success();
1464   }
1465 };
1466 
1467 /// If additional static type information can be deduced from a insert_slice's
1468 /// size operands, insert an explicit cast of the op's source operand. This
1469 /// enables other canonicalization patterns that are matching for tensor_cast
1470 /// ops such as `ForOpTensorCastFolder` in SCF.
1471 ///
1472 /// Example:
1473 ///
1474 /// ```mlir
1475 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1476 ///       : tensor<?x?xf32> into ...
1477 /// ```
1478 ///
1479 /// folds into:
1480 ///
1481 /// ```mlir
1482 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1483 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1484 ///       : tensor<64x64xf32> into ...
1485 /// ```
1486 struct InsertSliceOpSourceCastInserter final
1487     : public OpRewritePattern<InsertSliceOp> {
1488   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1489 
1490   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1491                                 PatternRewriter &rewriter) const override {
1492     RankedTensorType srcType = insertSliceOp.getSourceType();
1493     if (srcType.getRank() != insertSliceOp.getType().getRank())
1494       return failure();
1495     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1496                                      srcType.getShape().end());
1497     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1498       if (Optional<int64_t> constInt =
1499               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1500         newSrcShape[i] = *constInt;
1501     }
1502 
1503     RankedTensorType newSrcType =
1504         RankedTensorType::get(newSrcShape, srcType.getElementType());
1505     if (srcType == newSrcType ||
1506         !preservesStaticInformation(srcType, newSrcType) ||
1507         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1508       return failure();
1509 
1510     // newSrcType is:
1511     //   1) Different from srcType.
1512     //   2) "More static" than srcType.
1513     //   3) Cast-compatible with srcType.
1514     // Insert the cast.
1515     Value cast = rewriter.create<tensor::CastOp>(
1516         insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
1517     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1518         insertSliceOp, cast, insertSliceOp.dest(),
1519         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1520         insertSliceOp.getMixedStrides());
1521     return success();
1522   }
1523 };
1524 } // namespace
1525 
1526 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1527                                                 MLIRContext *context) {
1528   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1529               InsertSliceOpSourceCastInserter>(context);
1530 }
1531 
1532 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1533                                                              Location loc,
1534                                                              Value tensor,
1535                                                              Value dest) {
1536   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1537   unsigned rank = rankedTensorType.getRank();
1538   auto shape = rankedTensorType.getShape();
1539   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1540   SmallVector<OpFoldResult> sizes;
1541   for (unsigned i = 0, e = rank; i < e; ++i) {
1542     OpFoldResult dim;
1543     if (rankedTensorType.isDynamicDim(i))
1544       dim = b.createOrFold<tensor::DimOp>(
1545           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1546     else
1547       dim = b.getIndexAttr(shape[i]);
1548     sizes.push_back(dim);
1549   }
1550   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1551   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1552                                                sizes, strides);
1553 }
1554 
1555 //===----------------------------------------------------------------------===//
1556 // PadOp
1557 //===----------------------------------------------------------------------===//
1558 
1559 // TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
1560 // supports optional types.
1561 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
1562                     Type typeToInfer, Type typeToInferFrom) {}
1563 
1564 ParseResult parseInferType(OpAsmParser &parser,
1565                            Optional<OpAsmParser::OperandType> optOperand,
1566                            Type &typeToInfer, Type typeToInferFrom) {
1567   if (optOperand)
1568     typeToInfer = typeToInferFrom;
1569   return success();
1570 }
1571 
1572 static LogicalResult verify(PadOp op) {
1573   auto sourceType = op.source().getType().cast<RankedTensorType>();
1574   auto resultType = op.result().getType().cast<RankedTensorType>();
1575   auto expectedType = PadOp::inferResultType(
1576       sourceType, extractFromI64ArrayAttr(op.static_low()),
1577       extractFromI64ArrayAttr(op.static_high()));
1578   for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
1579     if (resultType.getDimSize(i) == expectedType.getDimSize(i))
1580       continue;
1581     if (expectedType.isDynamicDim(i))
1582       continue;
1583     return op.emitError("specified type ")
1584            << resultType << " does not match the inferred type "
1585            << expectedType;
1586   }
1587 
1588   auto &region = op.region();
1589   unsigned rank = resultType.getRank();
1590   Block &block = region.front();
1591   if (block.getNumArguments() != rank)
1592     return op.emitError("expected the block to have ") << rank << " arguments";
1593 
1594   // Note: the number and type of yield values are checked in the YieldOp.
1595   for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
1596     if (!en.value().isIndex())
1597       return op.emitOpError("expected block argument ")
1598              << (en.index() + 1) << " to be an index";
1599   }
1600 
1601   // Ensure that the region yields an element of the right type.
1602   auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
1603   if (yieldOp.value().getType() !=
1604       op.getType().cast<ShapedType>().getElementType())
1605     return op.emitOpError("expected yield type to match shape element type");
1606 
1607   return success();
1608 }
1609 
1610 RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
1611                                         ArrayRef<int64_t> staticLow,
1612                                         ArrayRef<int64_t> staticHigh,
1613                                         ArrayRef<int64_t> resultShape) {
1614   unsigned rank = sourceType.getRank();
1615   assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
1616   assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
1617   assert((resultShape.empty() || resultShape.size() == rank) &&
1618          "unexpected resultShape size mismatch");
1619 
1620   SmallVector<int64_t, 4> inferredShape;
1621   for (auto i : llvm::seq<unsigned>(0, rank)) {
1622     if (sourceType.isDynamicDim(i) ||
1623         staticLow[i] == ShapedType::kDynamicSize ||
1624         staticHigh[i] == ShapedType::kDynamicSize) {
1625       inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
1626                                                   : resultShape[i]);
1627     } else {
1628       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
1629       assert((resultShape.empty() || size == resultShape[i] ||
1630               resultShape[i] == ShapedType::kDynamicSize) &&
1631              "mismatch between inferred shape and result shape");
1632       inferredShape.push_back(size);
1633     }
1634   }
1635 
1636   return RankedTensorType::get(inferredShape, sourceType.getElementType());
1637 }
1638 
1639 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1640                   ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
1641                   ValueRange low, ValueRange high, bool nofold,
1642                   ArrayRef<NamedAttribute> attrs) {
1643   auto sourceType = source.getType().cast<RankedTensorType>();
1644   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
1645   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
1646         b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
1647   result.addAttributes(attrs);
1648 }
1649 
1650 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1651                   ValueRange low, ValueRange high, bool nofold,
1652                   ArrayRef<NamedAttribute> attrs) {
1653   auto sourceType = source.getType().cast<RankedTensorType>();
1654   unsigned rank = sourceType.getRank();
1655   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
1656   build(b, result, source, staticVector, staticVector, low, high, nofold,
1657         attrs);
1658 }
1659 
1660 void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
1661                   Value source, ArrayRef<OpFoldResult> low,
1662                   ArrayRef<OpFoldResult> high, bool nofold,
1663                   ArrayRef<NamedAttribute> attrs) {
1664   assert(resultType.isa<RankedTensorType>());
1665   auto sourceType = source.getType().cast<RankedTensorType>();
1666   SmallVector<Value, 4> dynamicLow, dynamicHigh;
1667   SmallVector<int64_t, 4> staticLow, staticHigh;
1668   // staticLow and staticHigh have full information of the padding config.
1669   // This will grow staticLow and staticHigh with 1 value. If the config is
1670   // dynamic (ie not a constant), dynamicLow and dynamicHigh will grow with 1
1671   // value as well.
1672   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
1673                              ShapedType::kDynamicSize);
1674   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
1675                              ShapedType::kDynamicSize);
1676   if (!resultType) {
1677     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
1678   }
1679   build(b, result, resultType, source, dynamicLow, dynamicHigh,
1680         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
1681         nofold ? b.getUnitAttr() : UnitAttr());
1682   result.addAttributes(attrs);
1683 }
1684 
1685 namespace {
1686 // Folds tensor.pad when padding is static zeros and the attribute
1687 // doesn't request otherwise.
1688 struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
1689   using OpRewritePattern<PadOp>::OpRewritePattern;
1690 
1691   LogicalResult matchAndRewrite(PadOp padTensorOp,
1692                                 PatternRewriter &rewriter) const override {
1693     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
1694       return failure();
1695     if (padTensorOp.nofold())
1696       return failure();
1697     rewriter.replaceOpWithNewOp<tensor::CastOp>(
1698         padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
1699     return success();
1700   }
1701 };
1702 
1703 // Fold CastOp into PadOp when adding static information.
1704 struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
1705   using OpRewritePattern<PadOp>::OpRewritePattern;
1706 
1707   LogicalResult matchAndRewrite(PadOp padTensorOp,
1708                                 PatternRewriter &rewriter) const override {
1709     auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
1710     if (!tensor::canFoldIntoConsumerOp(castOp))
1711       return failure();
1712 
1713     auto newResultType = PadOp::inferResultType(
1714         castOp.source().getType().cast<RankedTensorType>(),
1715         extractFromI64ArrayAttr(padTensorOp.static_low()),
1716         extractFromI64ArrayAttr(padTensorOp.static_high()),
1717         padTensorOp.getResultType().getShape());
1718 
1719     if (newResultType == padTensorOp.getResultType()) {
1720       rewriter.updateRootInPlace(padTensorOp, [&]() {
1721         padTensorOp.sourceMutable().assign(castOp.source());
1722       });
1723     } else {
1724       auto newOp = rewriter.create<PadOp>(
1725           padTensorOp->getLoc(), newResultType, padTensorOp.source(),
1726           padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
1727           padTensorOp.static_high(), padTensorOp.nofold());
1728       BlockAndValueMapping mapper;
1729       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
1730 
1731       rewriter.replaceOpWithNewOp<tensor::CastOp>(
1732           padTensorOp, padTensorOp.getResultType(), newOp);
1733     }
1734     return success();
1735   }
1736 };
1737 
1738 // Fold CastOp using the result of PadOp back into the latter if it adds
1739 // static information.
1740 struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
1741   using OpRewritePattern<PadOp>::OpRewritePattern;
1742 
1743   LogicalResult matchAndRewrite(PadOp padTensorOp,
1744                                 PatternRewriter &rewriter) const override {
1745     if (!padTensorOp.result().hasOneUse())
1746       return failure();
1747     auto tensorCastOp =
1748         dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
1749     if (!tensorCastOp)
1750       return failure();
1751     if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
1752                                             tensorCastOp.dest().getType()))
1753       return failure();
1754 
1755     auto replacementOp = rewriter.create<PadOp>(
1756         padTensorOp.getLoc(), tensorCastOp.dest().getType(),
1757         padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
1758         padTensorOp.static_low(), padTensorOp.static_high(),
1759         padTensorOp.nofold());
1760     replacementOp.region().takeBody(padTensorOp.region());
1761 
1762     rewriter.replaceOp(padTensorOp, replacementOp.result());
1763     rewriter.replaceOp(tensorCastOp, replacementOp.result());
1764     return success();
1765   }
1766 };
1767 } // namespace
1768 
1769 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
1770                                         MLIRContext *context) {
1771   results
1772       .add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast>(
1773           context);
1774 }
1775 
1776 /// Return the padding value of the PadOp if it constant. In this context,
1777 /// "constant" means an actual constant or "defined outside of the block".
1778 ///
1779 /// Values are considered constant in three cases:
1780 ///  - A ConstantLike value.
1781 ///  - A basic block argument from a different block.
1782 ///  - A value defined outside of the block.
1783 ///
1784 /// If the padding value is not constant, an empty Value is returned.
1785 Value PadOp::getConstantPaddingValue() {
1786   auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
1787   if (!yieldOp)
1788     return {};
1789   Value padValue = yieldOp.value();
1790   // Check if yield value is a constant.
1791   if (matchPattern(padValue, m_Constant()))
1792     return padValue;
1793   // Check if yield value is defined inside the PadOp block.
1794   if (padValue.getParentBlock() == &getRegion().front())
1795     return {};
1796   // Else: Yield value defined outside of the PadOp block.
1797   return padValue;
1798 }
1799 
1800 OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
1801   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
1802       !nofold())
1803     return source();
1804   return {};
1805 }
1806 
1807 //===----------------------------------------------------------------------===//
1808 // TableGen'd op method definitions
1809 //===----------------------------------------------------------------------===//
1810 
1811 #define GET_OP_CLASSES
1812 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
1813