//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/StandardOps/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "llvm/ADT/STLExtras.h"

using namespace mlir;
using namespace mlir::tensor;

/// Materialize a single constant operation from a given attribute value with
/// the desired resultant type.
Operation *TensorDialect::materializeConstant(OpBuilder &builder,
                                              Attribute value, Type type,
                                              Location loc) {
  if (arith::ConstantOp::isBuildableWith(value, type))
    return builder.create<arith::ConstantOp>(loc, value, type);
  if (ConstantOp::isBuildableWith(value, type))
    return builder.create<ConstantOp>(loc, value, type);
  return nullptr;
}

//===----------------------------------------------------------------------===//
// CastOp
//===----------------------------------------------------------------------===//

/// Returns true if `target` is a ranked tensor type that preserves static
/// information available in the `source` ranked tensor type.
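///
/// For illustration (shapes are hypothetical, f32 chosen arbitrarily):
///   - source tensor<?x?xf32>, target tensor<4x?xf32>: returns true, the
///     target only adds static information.
///   - source tensor<4x?xf32>, target tensor<?x?xf32>: returns false, the
///     static size 4 would be lost.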
bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
  auto sourceType = source.dyn_cast<RankedTensorType>();
  auto targetType = target.dyn_cast<RankedTensorType>();

  // Requires RankedTensorType.
  if (!sourceType || !targetType)
    return false;

  // Requires same element type.
  if (sourceType.getElementType() != targetType.getElementType())
    return false;

  // Requires same rank.
  if (sourceType.getRank() != targetType.getRank())
    return false;
  // If the target is dynamic along a dimension where the source is static,
  // static information is lost and the target does not preserve it.
  for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
    if (!ShapedType::isDynamic(std::get<0>(t)) &&
        ShapedType::isDynamic(std::get<1>(t)))
      return false;
  }

  return true;
}

/// Determines whether tensor::CastOp casts to a more dynamic version of the
/// source tensor. This is useful to fold a tensor.cast into a consuming op and
/// implement canonicalization patterns for ops in different dialects that may
/// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted as `slice` ops and are canonicalized to
/// preserve the type compatibility of their uses.
///
/// Returns true when all conditions are met:
/// 1. source and result are ranked tensors with the same element type and
///    rank.
/// 2. the source tensor type has more static information than the result
///    type.
///
/// Example:
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = consumer %1 ... : tensor<?x?xf32> ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = consumer %0 ... : tensor<8x16xf32> ...
/// ```
bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
  if (!castOp)
    return false;

  // Can fold if the source of the cast has at least as much static information
  // as its result.
  return preservesStaticInformation(castOp.getType(),
                                    castOp.source().getType());
}

/// Performs folding of any operand of `op` if it comes from a tensor::CastOp
/// that can be folded.
LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
  bool folded = false;
  for (OpOperand &operand : op->getOpOperands()) {
    auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
    if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
      operand.set(castOp.getOperand());
      folded = true;
    }
  }
  return success(folded);
}

bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
  if (inputs.size() != 1 || outputs.size() != 1)
    return false;
  Type a = inputs.front(), b = outputs.front();
  auto aT = a.dyn_cast<TensorType>();
  auto bT = b.dyn_cast<TensorType>();
  if (!aT || !bT)
    return false;

  if (aT.getElementType() != bT.getElementType())
    return false;

  return succeeded(verifyCompatibleShape(aT, bT));
}

/// Compute a TensorType that has the joined shape knowledge of the two
/// given TensorTypes. The element types need to match.
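///
/// For illustration (hypothetical shapes): joining tensor<?x8xf32> with
/// tensor<4x?xf32> yields tensor<4x8xf32>, while joining tensor<4xf32> with
/// tensor<5xf32> has no join and a null type is returned.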
static TensorType joinShapes(TensorType one, TensorType two) {
  assert(one.getElementType() == two.getElementType());

  if (!one.hasRank())
    return two;
  if (!two.hasRank())
    return one;

  int64_t rank = one.getRank();
  if (rank != two.getRank())
    return {};

  SmallVector<int64_t, 4> join;
  join.reserve(rank);
  for (int64_t i = 0; i < rank; ++i) {
    if (one.isDynamicDim(i)) {
      join.push_back(two.getDimSize(i));
      continue;
    }
    if (two.isDynamicDim(i)) {
      join.push_back(one.getDimSize(i));
      continue;
    }
    if (one.getDimSize(i) != two.getDimSize(i))
      return {};
    join.push_back(one.getDimSize(i));
  }
  return RankedTensorType::get(join, one.getElementType());
}

namespace {

/// Replaces chains of two tensor.cast operations by a single tensor.cast
/// operation if doing so does not remove runtime constraints.
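///
/// A sketch of the intended rewrite (shapes are illustrative):
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4x4xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<4x?xf32>
/// ```
///
/// becomes:
///
/// ```mlir
///   %2 = tensor.cast %0 : tensor<4x4xf32> to tensor<4x?xf32>
/// ```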
struct ChainedTensorCast : public OpRewritePattern<CastOp> {
  using OpRewritePattern<CastOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CastOp tensorCast,
                                PatternRewriter &rewriter) const final {
    auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();

    if (!tensorCastOperand)
      return failure();

    auto sourceType =
        tensorCastOperand.getOperand().getType().cast<TensorType>();
    auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
    auto resultType = tensorCast.getType().cast<TensorType>();

    // We can remove the intermediate cast if joining all three produces the
    // same result as just joining the source and result shapes.
    auto firstJoin =
        joinShapes(joinShapes(sourceType, intermediateType), resultType);

    // The join might not exist if the cast sequence would fail at runtime.
    if (!firstJoin)
      return failure();
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
    auto newJoin = joinShapes(sourceType, resultType);
    if (firstJoin != newJoin)
      return failure();

    rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
                                        tensorCastOperand.getOperand());
    return success();
  }
};

} // namespace

void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                         MLIRContext *context) {
  results.add<ChainedTensorCast>(context);
}

//===----------------------------------------------------------------------===//
// DimOp
//===----------------------------------------------------------------------===//

void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
                  int64_t index) {
  auto loc = result.location;
  Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
  build(builder, result, source, indexValue);
}

Optional<int64_t> DimOp::getConstantIndex() {
  if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
    return constantOp.getValue().cast<IntegerAttr>().getInt();
  return {};
}

static LogicalResult verify(DimOp op) {
  // Assume unknown index to be in range.
  Optional<int64_t> index = op.getConstantIndex();
  if (!index.hasValue())
    return success();

  // Check that constant index is not knowingly out of range.
  auto type = op.source().getType();
  if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
    if (index.getValue() >= tensorType.getRank())
      return op.emitOpError("index is out of range");
  } else if (type.isa<UnrankedTensorType>()) {
    // Assume index to be in range.
  } else {
    llvm_unreachable("expected operand with tensor type");
  }
  return success();
}

OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
  // All forms of folding require a known index.
  auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
  if (!index)
    return {};

  // Folding for unranked types (UnrankedTensorType) is not supported.
  auto tensorType = source().getType().dyn_cast<RankedTensorType>();
  if (!tensorType)
    return {};

  // Fold if the shape extent along the given index is known.
  if (!tensorType.isDynamicDim(index.getInt())) {
    Builder builder(getContext());
    return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
  }

  Operation *definingOp = source().getDefiningOp();

  // Fold dim to the operand of tensor.generate.
  if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
    auto resultType =
        fromElements.getResult().getType().cast<RankedTensorType>();
    // The case where the type encodes the size of the dimension is handled
    // above.
    assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));

    // Find the operand of the fromElements that corresponds to this index.
    auto dynExtents = fromElements.dynamicExtents().begin();
    for (auto dim : resultType.getShape().take_front(index.getInt()))
      if (ShapedType::isDynamic(dim))
        dynExtents++;

    return Value{*dynExtents};
  }

  // The size at the given index is now known to be a dynamic size.
  unsigned unsignedIndex = index.getValue().getZExtValue();

  if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
    // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
    // `resolve-shaped-type-result-dims` pass.
    if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
        sliceOp.isDynamicSize(unsignedIndex)) {
      return {sliceOp.getDynamicSize(unsignedIndex)};
    }
  }

  // dim(cast) -> dim
  if (succeeded(foldTensorCast(*this)))
    return getResult();

  return {};
}

namespace {
/// Fold dim of a cast into the dim of the source of the tensor cast.
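///
/// Sketch of the rewrite (illustrative types):
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %c1 : tensor<?x?xf32>
/// becomes:
///   %d = tensor.dim %t, %c1 : tensor<4x?xf32>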
struct DimOfCastOp : public OpRewritePattern<DimOp> {
  using OpRewritePattern<DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = dimOp.source().getDefiningOp<CastOp>();
    if (!castOp)
      return failure();
    Value newSource = castOp.getOperand();
    rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
    return success();
  }
};
} // namespace

void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<DimOfCastOp>(context);
}

//===----------------------------------------------------------------------===//
// ExtractOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(ExtractOp op) {
  // Verify the # indices match if we have a ranked type.
  if (auto tensorType = op.tensor().getType().dyn_cast<RankedTensorType>())
    if (tensorType.getRank() != static_cast<int64_t>(op.indices().size()))
      return op.emitOpError("incorrect number of indices for extract");

  return success();
}

OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
  // The tensor operand must be a known constant.
  Attribute tensor = operands.front();
  if (!tensor)
    return {};
  // If this is a splat elements attribute, simply return the value. All of the
  // elements of a splat attribute are the same.
  if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
    return splatTensor.getSplatValue<Attribute>();

  // Otherwise, collect the constant indices into the tensor.
  SmallVector<uint64_t, 8> indices;
  for (Attribute index : llvm::drop_begin(operands, 1)) {
    if (!index || !index.isa<IntegerAttr>())
      return {};
    indices.push_back(index.cast<IntegerAttr>().getInt());
  }

  // If this is an elements attribute, query the value at the given indices.
  auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
  if (elementsAttr && elementsAttr.isValidIndex(indices))
    return elementsAttr.getValues<Attribute>()[indices];
  return {};
}

//===----------------------------------------------------------------------===//
// FromElementsOp
//===----------------------------------------------------------------------===//

void FromElementsOp::build(OpBuilder &builder, OperationState &result,
                           Type resultType, ValueRange elements) {
  result.addOperands(elements);
  result.addTypes(resultType);
}

void FromElementsOp::build(OpBuilder &builder, OperationState &result,
                           ValueRange elements) {
  assert(!elements.empty() && "expected at least one element");
  Type resultType = RankedTensorType::get(
      {static_cast<int64_t>(elements.size())}, elements.front().getType());
  build(builder, result, resultType, elements);
}

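/// Folds tensor.from_elements to a constant DenseElementsAttr when every
/// element operand is itself a known constant attribute.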
OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
  if (!llvm::is_contained(operands, nullptr))
    return DenseElementsAttr::get(getType(), operands);
  return {};
}

namespace {

// Canonicalizes the pattern of the form
//
// %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
// %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
//
// to just %element.
struct ExtractElementFromTensorFromElements
    : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
    if (!tensorFromElements)
      return failure();
    auto tensorType = tensorFromElements.getType().cast<RankedTensorType>();
    auto rank = tensorType.getRank();
    if (rank == 0) {
      rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
      return success();
    }
    SmallVector<APInt, 3> indices(rank);
    int64_t flatIndex = 0;
    int64_t stride = 1;
    for (int i = rank - 1; i >= 0; --i) {
      APInt index;
      if (!matchPattern(extract.indices()[i], m_ConstantInt(&index)))
        return failure();
      // Compute the row-major flattened index: the stride of dimension i is
      // the product of the sizes of all trailing dimensions.
      if (i < rank - 1)
        stride *= tensorType.getDimSize(i + 1);
      flatIndex += index.getSExtValue() * stride;
    }
    // Prevent out of bounds accesses. This can happen in invalid code that will
    // never execute.
    if (tensorFromElements->getNumOperands() <= flatIndex || flatIndex < 0)
      return failure();
    rewriter.replaceOp(extract, tensorFromElements.getOperand(flatIndex));
    return success();
  }
};

} // namespace

void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                 MLIRContext *context) {
  results.add<ExtractElementFromTensorFromElements>(context);
}

//===----------------------------------------------------------------------===//
// InsertOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(InsertOp op) {
  // Verify the # indices match if we have a ranked type.
  if (auto destType = op.dest().getType().dyn_cast<RankedTensorType>())
    if (destType.getRank() != static_cast<int64_t>(op.indices().size()))
      return op.emitOpError("incorrect number of indices");
  return success();
}

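/// Folds tensor.insert when the destination is a splat constant and the
/// inserted scalar equals the splat value, in which case the destination is
/// unchanged.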
OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
  Attribute scalar = operands[0];
  Attribute dest = operands[1];
  if (scalar && dest)
    if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
      if (scalar == splatDest.getSplatValue<Attribute>())
        return dest;
  return {};
}

//===----------------------------------------------------------------------===//
// GenerateOp
//===----------------------------------------------------------------------===//

static LogicalResult verify(GenerateOp op) {
  // Ensure that the tensor type has as many dynamic dimensions as are
  // specified by the operands.
  RankedTensorType resultTy = op.getType().cast<RankedTensorType>();
  if (op.getNumOperands() != resultTy.getNumDynamicDims())
    return op.emitError("must have as many index operands as dynamic extents "
                        "in the result type");

  // Ensure that region arguments span the index space.
  if (!llvm::all_of(op.body().getArgumentTypes(),
                    [](Type ty) { return ty.isIndex(); }))
    return op.emitError("all body arguments must be index");
  if (op.body().getNumArguments() != resultTy.getRank())
    return op.emitError("must have one body argument per input dimension");

  // Ensure that the region yields an element of the right type.
  auto yieldOp =
      llvm::cast<YieldOp>(op.body().getBlocks().front().getTerminator());
  if (yieldOp.value().getType() != resultTy.getElementType())
    return op.emitOpError(
        "body must be terminated with a `yield` operation of the tensor "
        "element type");

  return success();
}

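/// Builds a tensor.generate op with the given dynamic extents, creating one
/// index block argument per result dimension and populating the body via
/// `bodyBuilder`.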
void GenerateOp::build(
    OpBuilder &b, OperationState &result, Type resultTy,
    ValueRange dynamicExtents,
    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
  build(b, result, resultTy, dynamicExtents);

  // Build and populate body.
  OpBuilder::InsertionGuard guard(b);
  Region *bodyRegion = result.regions.front().get();
  auto rank = resultTy.cast<RankedTensorType>().getRank();
  SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
  SmallVector<Location, 2> argumentLocs(rank, result.location);
  Block *bodyBlock =
      b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
  bodyBuilder(b, result.location, bodyBlock->getArguments());
}

namespace {

/// Canonicalizes tensor.generate operations with constant extent operands into
/// the equivalent operation with those extents folded into the result type. We
/// also insert a type cast to make sure that the resulting IR is still
/// well-typed.
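///
/// A sketch of the rewrite (illustrative IR):
///
/// ```mlir
///   %size = arith.constant 16 : index
///   %0 = tensor.generate %size { ... } : tensor<?xindex>
/// ```
///
/// becomes:
///
/// ```mlir
///   %0 = tensor.generate { ... } : tensor<16xindex>
///   %1 = tensor.cast %0 : tensor<16xindex> to tensor<?xindex>
/// ```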
struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
  using OpRewritePattern<GenerateOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
                                PatternRewriter &rewriter) const final {
    auto resultType =
        tensorFromElements.getResult().getType().cast<RankedTensorType>();

    if (resultType.hasStaticShape())
      return failure();

    SmallVector<Value, 4> newOperands;
    SmallVector<int64_t, 4> newShape;
    auto operandsIt = tensorFromElements.dynamicExtents().begin();

    for (int64_t dim : resultType.getShape()) {
      if (!ShapedType::isDynamic(dim)) {
        newShape.push_back(dim);
        continue;
      }
      APInt index;
      if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
        newShape.push_back(ShapedType::kDynamicSize);
        newOperands.push_back(*operandsIt++);
        continue;
      }
      newShape.push_back(index.getSExtValue());
      operandsIt++;
    }

    if (newOperands.size() == tensorFromElements.dynamicExtents().size())
      return failure();

    auto loc = tensorFromElements.getLoc();
    auto newOp = rewriter.create<GenerateOp>(
        loc, RankedTensorType::get(newShape, resultType.getElementType()),
        newOperands);
    rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
                                newOp.body().begin());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
                                                newOp);
    return success();
  }
};

/// Canonicalizes the pattern of the form
///
/// %tensor = tensor.generate %x {
///   ^bb0(%arg0: index):
///   <computation>
///   yield %1 : index
/// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
///
/// to just <computation> with %arg0 replaced by %c0. We only do this if the
/// tensor.generate operation has no side effects.
struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
    if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
      return failure();

    BlockAndValueMapping mapping;
    Block *body = tensorFromElements.getBody();
    mapping.map(body->getArguments(), extract.indices());
    for (auto &op : body->without_terminator())
      rewriter.clone(op, mapping);

    auto yield = cast<YieldOp>(body->getTerminator());

    rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
    return success();
  }
};

/// Canonicalizes the pattern of the form
///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
/// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
///
/// to
///
/// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
  using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractOp extract,
                                PatternRewriter &rewriter) const final {
    auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
    if (!tensorCast)
      return failure();

    rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
                                                   extract.indices());
    return success();
  }
};

} // namespace

void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
  // TODO: Move extract patterns to tensor::ExtractOp.
  results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
              StaticTensorGenerate>(context);
}

//===----------------------------------------------------------------------===//
// RankOp
//===----------------------------------------------------------------------===//

OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
  // Constant fold rank when the rank of the operand is known.
  auto type = getOperand().getType();
  auto shapedType = type.dyn_cast<ShapedType>();
  if (shapedType && shapedType.hasRank())
    return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
  return IntegerAttr();
}

//===----------------------------------------------------------------------===//
// ReshapeOp
//===----------------------------------------------------------------------===//

static int64_t getNumElements(ShapedType type) {
  int64_t numElements = 1;
  for (auto dim : type.getShape())
    numElements *= dim;
  return numElements;
}

static LogicalResult verify(ReshapeOp op) {
  TensorType operandType = op.source().getType().cast<TensorType>();
  TensorType resultType = op.result().getType().cast<TensorType>();

  if (operandType.getElementType() != resultType.getElementType())
    return op.emitOpError("element types of source and destination tensor "
                          "types should be the same");

  int64_t shapeSize =
      op.shape().getType().cast<RankedTensorType>().getDimSize(0);
  auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
  auto operandRankedType = operandType.dyn_cast<RankedTensorType>();

  if (resultRankedType) {
    if (operandRankedType && resultRankedType.hasStaticShape() &&
        operandRankedType.hasStaticShape()) {
      if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
        return op.emitOpError("source and destination tensor should have the "
                              "same number of elements");
    }
    if (ShapedType::isDynamic(shapeSize))
      return op.emitOpError("cannot use shape operand with dynamic length to "
                            "reshape to statically-ranked tensor type");
    if (shapeSize != resultRankedType.getRank())
      return op.emitOpError(
          "length of shape operand differs from the result's tensor rank");
  }
  return success();
}

//===----------------------------------------------------------------------===//
// Reassociative reshape ops
//===----------------------------------------------------------------------===//

SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
  return getSymbolLessAffineMaps(getReassociationExprs());
}
SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
  return convertReassociationIndicesToExprs(getContext(),
                                            getReassociationIndices());
}

static void print(OpAsmPrinter &p, ExpandShapeOp op) {
  ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
}

static void print(OpAsmPrinter &p, CollapseShapeOp op) {
  ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
}

/// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
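///
/// For illustration (hypothetical shapes): collapsing tensor<2x3x?xf32> with
/// reassociation [[0, 1], [2]] yields tensor<6x?xf32>; any group containing a
/// dynamic dimension collapses to a dynamic dimension.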
static RankedTensorType
computeTensorReshapeCollapsedType(RankedTensorType type,
                                  ArrayRef<AffineMap> reassociation) {
  auto shape = type.getShape();
  SmallVector<int64_t, 4> newShape;
  newShape.reserve(reassociation.size());

  // Use the fact that reassociation is valid to simplify the logic: only use
  // each map's rank.
  assert(isReassociationValid(reassociation) && "invalid reassociation");
  unsigned currentDim = 0;
  for (AffineMap m : reassociation) {
    unsigned dim = m.getNumResults();
    auto band = shape.slice(currentDim, dim);
    int64_t size = 1;
    if (llvm::is_contained(band, ShapedType::kDynamicSize))
      size = ShapedType::kDynamicSize;
    else
      for (unsigned d = 0; d < dim; ++d)
        size *= shape[currentDim + d];
    newShape.push_back(size);
    currentDim += dim;
  }

  return RankedTensorType::get(newShape, type.getElementType());
}

void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
                            ArrayRef<ReassociationIndices> reassociation,
                            ArrayRef<NamedAttribute> attrs) {
  auto resultType = computeTensorReshapeCollapsedType(
      src.getType().cast<RankedTensorType>(),
      getSymbolLessAffineMaps(
          convertReassociationIndicesToExprs(b.getContext(), reassociation)));
  build(b, result, resultType, src, attrs);
  result.addAttribute(getReassociationAttrName(),
                      getReassociationIndicesAttribute(b, reassociation));
}

void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
                          ArrayRef<ReassociationIndices> reassociation,
                          ArrayRef<NamedAttribute> attrs) {
  auto resultType = computeTensorReshapeCollapsedType(
      src.getType().cast<RankedTensorType>(),
      getSymbolLessAffineMaps(
          convertReassociationIndicesToExprs(b.getContext(), reassociation)));
  build(b, result, resultType, src, attrs);
  result.addAttribute(getReassociationAttrName(),
                      getReassociationIndicesAttribute(b, reassociation));
}

template <typename TensorReshapeOp, bool isExpansion = std::is_same<
                                        TensorReshapeOp, ExpandShapeOp>::value>
static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
                                           RankedTensorType expandedType,
                                           RankedTensorType collapsedType) {
  if (failed(
          verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
    return failure();

  auto maps = op.getReassociationMaps();
  RankedTensorType expectedType =
      computeTensorReshapeCollapsedType(expandedType, maps);
  if (collapsedType != expectedType)
    return op.emitOpError("expected collapsed type to be ")
           << expectedType << ", but got " << collapsedType;
  return success();
}

static LogicalResult verify(ExpandShapeOp op) {
  return verifyTensorReshapeOp(op, op.getResultType(), op.getSrcType());
}

static LogicalResult verify(CollapseShapeOp op) {
  return verifyTensorReshapeOp(op, op.getSrcType(), op.getResultType());
}

namespace {
/// Reshape of a splat constant can be replaced with a constant of the result
/// type.
template <typename TensorReshapeOp>
struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
  using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
                                PatternRewriter &rewriter) const override {
    DenseElementsAttr attr;
    if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
      return failure();
    if (!attr || !attr.isSplat())
      return failure();
    DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
        reshapeOp.getResultType(), attr.getRawData(), true);
    rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
    return success();
  }
};

} // namespace

void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<CollapseReshapeOps<ExpandShapeOp>,
              CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
              FoldReshapeWithConstant<ExpandShapeOp>>(context);
}

void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                  MLIRContext *context) {
  results.add<CollapseReshapeOps<CollapseShapeOp>,
              CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
              FoldReshapeWithConstant<CollapseShapeOp>>(context);
}

OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
}
OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
  return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
}

//===----------------------------------------------------------------------===//
// ExtractSliceOp
//===----------------------------------------------------------------------===//

/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. Special
/// sentinels encode the dynamic case.
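///
/// For illustration (hypothetical types): a slice with static sizes [4, ?]
/// taken from tensor<8x16xf32> infers the type tensor<4x?xf32>, where `?`
/// stands for the ShapedType::kDynamicSize sentinel.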
RankedTensorType ExtractSliceOp::inferResultType(
    RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
    ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offset/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type and strides=1.
  unsigned rank = sourceRankedTensorType.getRank();
  (void)rank;
  assert(staticSizes.size() == rank &&
         "unexpected staticSizes not equal to rank of source");
  return RankedTensorType::get(staticSizes,
                               sourceRankedTensorType.getElementType());
}

RankedTensorType ExtractSliceOp::inferResultType(
    RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
    ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
                                         staticSizes, staticStrides);
}

/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. This
/// rank-reducing variant additionally drops unit dimensions from the inferred
/// type until it has `resultRank` dimensions.
RankedTensorType ExtractSliceOp::inferRankReducedResultType(
    unsigned resultRank, RankedTensorType sourceRankedTensorType,
    ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
    ArrayRef<int64_t> strides) {
  auto inferredType =
      inferResultType(sourceRankedTensorType, offsets, sizes, strides)
          .cast<RankedTensorType>();
  int rankDiff = inferredType.getRank() - resultRank;
  if (rankDiff > 0) {
    auto shape = inferredType.getShape();
    llvm::SmallDenseSet<unsigned> dimsToProject;
    mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
    SmallVector<int64_t> projectedShape;
    for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
      if (!dimsToProject.contains(pos))
        projectedShape.push_back(shape[pos]);
    inferredType =
        RankedTensorType::get(projectedShape, inferredType.getElementType());
  }
  return inferredType;
}

RankedTensorType ExtractSliceOp::inferRankReducedResultType(
    unsigned resultRank, RankedTensorType sourceRankedTensorType,
    ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
    ArrayRef<OpFoldResult> strides) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  return ExtractSliceOp::inferRankReducedResultType(
      resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
      staticStrides);
}

/// Build an ExtractSliceOp with mixed static and dynamic entries and custom
/// result type. If the type passed is nullptr, it is inferred.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
                           RankedTensorType resultType, Value source,
                           ArrayRef<OpFoldResult> offsets,
                           ArrayRef<OpFoldResult> sizes,
                           ArrayRef<OpFoldResult> strides,
                           ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  // Structuring implementation this way avoids duplication between builders.
  if (!resultType) {
    resultType =
        ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
                                        staticSizes, staticStrides)
            .cast<RankedTensorType>();
  }
  build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

/// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
/// result type.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                           ArrayRef<OpFoldResult> offsets,
                           ArrayRef<OpFoldResult> sizes,
                           ArrayRef<OpFoldResult> strides,
                           ArrayRef<NamedAttribute> attrs) {
  build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
}

/// Build an ExtractSliceOp with dynamic entries and custom result type. If the
/// type passed is nullptr, it is inferred.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
                           RankedTensorType resultType, Value source,
                           ValueRange offsets, ValueRange sizes,
                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
}

/// Build an ExtractSliceOp with dynamic entries and inferred result type.
void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                           ValueRange offsets, ValueRange sizes,
                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
}

template <typename OpTy>
static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
                                          OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
  switch (result) {
  case SliceVerificationResult::Success:
    return success();
  case SliceVerificationResult::RankTooLarge:
    return op.emitError("expected rank to be smaller or equal to ")
           << "the other rank. ";
  case SliceVerificationResult::SizeMismatch:
    return op.emitError("expected type to be ")
           << expectedType << " or a rank-reduced version. (size mismatch) ";
  case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected element type to be ")
           << shapedType.getElementType();
  default:
    llvm_unreachable("unexpected extract_slice op verification result");
  }
}

/// Verifier for ExtractSliceOp.
static LogicalResult verify(ExtractSliceOp op) {
  // Verify result type against inferred type.
  auto expectedType =
      ExtractSliceOp::inferResultType(op.getSourceType(), op.getMixedOffsets(),
                                      op.getMixedSizes(), op.getMixedStrides());
  auto result =
      isRankReducedType(expectedType.cast<ShapedType>(), op.getType());
  return produceSliceErrorMsg(result, op, expectedType);
}

/// Infer the canonical type of the result of an extract_slice op. Returns the
/// rank-reduced type with rank `resultRank` if rank reduction applies cleanly,
/// and the non-rank-reduced inferred type otherwise.
static RankedTensorType
getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
                            ArrayRef<OpFoldResult> mixedOffsets,
                            ArrayRef<OpFoldResult> mixedSizes,
                            ArrayRef<OpFoldResult> mixedStrides) {
  auto resultType =
      ExtractSliceOp::inferRankReducedResultType(
          resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
          .cast<RankedTensorType>();
  if (resultType.getRank() != resultRank) {
    resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
                                                 mixedSizes, mixedStrides)
                     .cast<RankedTensorType>();
  }
  return resultType;
}

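/// Return the positions (indexing into this op's mixed sizes) of the
/// statically-known unit sizes that are dropped from the result type by rank
/// reduction.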
llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
  llvm::SmallDenseSet<unsigned> droppedDims;
  ArrayRef<int64_t> resultShape = getType().getShape();
  SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
  unsigned shapePos = 0;
  for (const auto &size : enumerate(mixedSizes)) {
    Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if the size is not statically 1, or if the
    // matching result dimension is itself statically 1 (in which case the unit
    // size was not rank-reduced away).
    if (!sizeVal || sizeVal.getValue() != 1 ||
        (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
      shapePos++;
      continue;
    }
    droppedDims.insert(size.index());
  }
  return droppedDims;
}

LogicalResult ExtractSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1);
  reifiedReturnShapes[0].reserve(getType().getRank());
  SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
  llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
  Location loc = getLoc();
  for (const auto &size : enumerate(mixedSizes)) {
    if (droppedDims.count(size.index()))
      continue;
    if (auto attr = size.value().dyn_cast<Attribute>()) {
      reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
          loc, attr.cast<IntegerAttr>().getInt()));
      continue;
    }
    reifiedReturnShapes[0].push_back(size.value().get<Value>());
  }
  return success();
}

namespace {
/// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
/// `canFoldIntoConsumerOp` is true.
///
/// Example:
/// ```
///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
///   tensor<3x4xf32>
/// ```
/// is rewritten into:
/// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///   tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
/// ```
class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
public:
  using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
                                PatternRewriter &rewriter) const override {
    // Any constant operand, just return to let the constant argument folder
    // kick in.
    if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
    if (!castOp)
      return failure();

    if (!canFoldIntoConsumerOp(castOp))
      return failure();

    // Deduce the type of the result to use for the canonicalized operation.
    RankedTensorType resultType = getCanonicalSliceResultType(
        sliceOp.getType().getRank(), sliceOp.getSourceType(),
        sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
        sliceOp.getMixedStrides());
    Value newSlice = rewriter.create<ExtractSliceOp>(
        sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
        sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
        sliceOp.static_sizes(), sliceOp.static_strides());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
                                                newSlice);
    return success();
  }
};
} // namespace

/// Return the canonical type of the result of an extract_slice op.
struct SliceReturnTypeCanonicalizer {
  RankedTensorType operator()(ExtractSliceOp op,
                              ArrayRef<OpFoldResult> mixedOffsets,
                              ArrayRef<OpFoldResult> mixedSizes,
                              ArrayRef<OpFoldResult> mixedStrides) {
    return getCanonicalSliceResultType(op.getType().getRank(),
                                       op.getSourceType(), mixedOffsets,
                                       mixedSizes, mixedStrides);
  }
};

/// A canonicalizer wrapper to replace ExtractSliceOps.
struct SliceCanonicalizer {
  void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
                  ExtractSliceOp newOp) {
    Value replacement = newOp.getResult();
    if (replacement.getType() != op.getType())
      replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
                                                    replacement);
    rewriter.replaceOp(op, replacement);
  }
};

void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                 MLIRContext *context) {
  results.add<
      OpWithOffsetSizesAndStridesConstantArgumentFolder<
          ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
      ExtractSliceOpCastFolder>(context);
}

/// Return success when `op` is a no-op slice of `shapedType`: all offsets are
/// 0, all sizes equal the corresponding dimensions of the shape, and all
/// strides are 1.
static LogicalResult
foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
                                           ShapedType shapedType) {
  OpBuilder b(op.getContext());
  for (OpFoldResult ofr : op.getMixedOffsets())
    if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
      return failure();
  // Rank-reducing noops only need to inspect the leading dimensions: llvm::zip
  // is appropriate.
  auto shape = shapedType.getShape();
  for (auto it : llvm::zip(op.getMixedSizes(), shape))
    if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
      return failure();
  for (OpFoldResult ofr : op.getMixedStrides())
    if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
      return failure();
  return success();
}

/// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
/// we can return the InsertSliceOp's source directly.
// TODO: This only checks the immediate producer; extend to go up the
// insert/extract chain if the slices are disjoint.
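///
/// Sketch of the fold (illustrative IR):
///   %0 = tensor.insert_slice %src into %dst[0, 1] [2, 2] [1, 1]
///   %1 = tensor.extract_slice %0[0, 1] [2, 2] [1, 1]
/// Here %1 folds to %src, since the same slice is written and then read back.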
static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
  auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();

  auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
  if (insertOp && insertOp.source().getType() == extractOp.getType() &&
      insertOp.isSameAs(extractOp, isSame))
    return insertOp.source();

  return {};
}

OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (Value slice = foldExtractAfterInsertSlice(*this))
    return slice;
  return OpFoldResult();
}

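/// Create an extract_slice that takes a full-sized slice of `tensor` (zero
/// offsets, unit strides, sizes equal to the dimensions of `tensor`) and
/// rank-reduces it to `targetType`.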
Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
    OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
  auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
                                                offsets, sizes, strides);
}

//===----------------------------------------------------------------------===//
// InsertSliceOp
//===----------------------------------------------------------------------===//

// Build an InsertSliceOp with mixed static and dynamic entries.
void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                          Value dest, ArrayRef<OpFoldResult> offsets,
                          ArrayRef<OpFoldResult> sizes,
                          ArrayRef<OpFoldResult> strides,
                          ArrayRef<NamedAttribute> attrs) {
  SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
  SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
  dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
                             ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
  build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
        dynamicStrides, b.getI64ArrayAttr(staticOffsets),
        b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
  result.addAttributes(attrs);
}

// Build an InsertSliceOp with dynamic entries.
void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
                          Value dest, ValueRange offsets, ValueRange sizes,
                          ValueRange strides, ArrayRef<NamedAttribute> attrs) {
  SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
      llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
      llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
  SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
      llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
  build(b, result, source, dest, offsetValues, sizeValues, strideValues);
}

/// Verifier for InsertSliceOp.
static LogicalResult verify(InsertSliceOp op) {
  // insert_slice is the inverse of extract_slice, use the same type inference.
  auto expectedType = ExtractSliceOp::inferRankReducedResultType(
      op.getSourceType().getRank(), op.getType(),
      extractFromI64ArrayAttr(op.static_offsets()),
      extractFromI64ArrayAttr(op.static_sizes()),
      extractFromI64ArrayAttr(op.static_strides()));
  auto result =
      isRankReducedType(expectedType.cast<ShapedType>(), op.getSourceType());
  return produceSliceErrorMsg(result, op, expectedType);
}

/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can mutate the second InsertSliceOp's destination to the first one's.
1255 ///
1256 /// Example:
1257 ///
1258 /// ```mlir
1259 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1260 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1261 /// ```
1262 ///
1263 /// folds into:
1264 ///
1265 /// ```mlir
1266 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1267 /// ```
1268 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1269   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1270 
1271   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1272   if (!prevInsertOp ||
1273       prevInsertOp.source().getType() != insertOp.source().getType() ||
1274       !prevInsertOp.isSameAs(insertOp, isSame))
1275     return failure();
1276 
1277   insertOp.destMutable().assign(prevInsertOp.dest());
1278   return success();
1279 }
1280 
OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
      getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (succeeded(foldInsertAfterInsertSlice(*this)))
    return getResult();
  return OpFoldResult();
}

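/// Reify the result shape: the result of an insert_slice always has the same
/// shape as its destination tensor, so each result dimension is simply the
/// corresponding tensor.dim of `dest`.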
LogicalResult InsertSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
  for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
    reifiedReturnShapes[0][dim] =
        builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
  }
  return success();
}

namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
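///
/// Offsets, sizes, and strides produced by constant index ops are promoted
/// into the op's static attribute form. A sketch (illustrative values,
/// assuming %c1 = arith.constant 1 : index):
///
/// ```mlir
///   %r = tensor.insert_slice %src into %dst[%c1, %c1] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```
///
/// is rewritten into:
///
/// ```mlir
///   %r = tensor.insert_slice %src into %dst[1, 1] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x8xf32>
/// ```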
class InsertSliceOpConstantArgumentFolder final
    : public OpRewritePattern<InsertSliceOp> {
public:
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    // No constant operands; nothing to canonicalize.
    if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    // At least one of offsets/sizes/strides is a new constant. Form the new
    // lists of operands and constant attributes from the existing ones.
    SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
    SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
    SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
    canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);

    // Create the new op in canonical form.
    auto sourceType = ExtractSliceOp::inferRankReducedResultType(
        insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
        mixedOffsets, mixedSizes, mixedStrides);
    Value toInsert = insertSliceOp.source();
    if (sourceType != insertSliceOp.getSourceType())
      toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
                                                 sourceType, toInsert);
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
        mixedStrides);
    return success();
  }
};

/// Fold tensor.cast ops into insert_slice operations. If the source or
/// destination tensor is a tensor.cast that removes static type information,
/// the cast is folded into the insert_slice operation. E.g.:
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
/// ```
///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back so that the type of the result does
/// not change.
struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
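    // Bail out while any offset/size/stride operand is still a constant
    // index; presumably this defers to InsertSliceOpConstantArgumentFolder so
    // the op reaches its canonical static form before casts are folded.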
    if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
      auto castOp = v.getDefiningOp<tensor::CastOp>();
      if (!castOp || !canFoldIntoConsumerOp(castOp))
        return llvm::None;
      return castOp.source();
    };
    Optional<Value> sourceCastSource =
        getSourceOfCastOp(insertSliceOp.source());
    Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
    if (!sourceCastSource && !destCastSource)
      return failure();

    Value replacement = rewriter.create<InsertSliceOp>(
        insertSliceOp.getLoc(),
        (sourceCastSource ? *sourceCastSource : insertSliceOp.source()),
        (destCastSource ? *destCastSource : insertSliceOp.dest()),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());

    if (replacement.getType() != insertSliceOp.getType()) {
      replacement = rewriter.create<tensor::CastOp>(
          insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
    }
    rewriter.replaceOp(insertSliceOp, replacement);
    return success();
  }
};

/// If additional static type information can be deduced from an
/// insert_slice's size operands, insert an explicit cast of the op's source
/// operand. This enables other canonicalization patterns that match
/// tensor.cast ops, such as `ForOpTensorCastFolder` in SCF.
///
/// Example:
///
/// ```mlir
///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
///       : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
///       : tensor<64x64xf32> into ...
/// ```
struct InsertSliceOpSourceCastInserter final
    : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType srcType = insertSliceOp.getSourceType();
    if (srcType.getRank() != insertSliceOp.getType().getRank())
      return failure();
    SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
                                     srcType.getShape().end());
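    // Tighten each dimension of the source shape with any size that is
    // statically known from the op's mixed sizes.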
    for (int64_t i = 0; i < srcType.getRank(); ++i) {
      if (Optional<int64_t> constInt =
              getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
        newSrcShape[i] = *constInt;
    }

    RankedTensorType newSrcType =
        RankedTensorType::get(newSrcShape, srcType.getElementType());
    if (srcType == newSrcType ||
        !preservesStaticInformation(srcType, newSrcType) ||
        !tensor::CastOp::areCastCompatible(srcType, newSrcType))
      return failure();

    // newSrcType is:
    //   1) Different from srcType.
    //   2) "More static" than srcType.
    //   3) Cast-compatible with srcType.
    // Insert the cast.
    Value cast = rewriter.create<tensor::CastOp>(
        insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, cast, insertSliceOp.dest(),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());
    return success();
  }
};
} // namespace

void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
              InsertSliceOpSourceCastInserter>(context);
}

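/// Create a canonical, potentially rank-reducing InsertSliceOp that inserts
/// `tensor` into `dest` at zero offsets, with unit strides, and with sizes
/// spanning the full shape of `dest` (tensor.dim ops are materialized for
/// dynamic dimensions). `tensor` may have a smaller rank than `dest`, in which
/// case its type must match the rank-reduced inferred type of the slice. A
/// sketch of the generated IR (illustrative types, not from the original
/// source):
///
/// ```mlir
///   // With %t : tensor<16xf32> and %d : tensor<1x16xf32>:
///   %r = tensor.insert_slice %t into %d[0, 0] [1, 16] [1, 1]
///       : tensor<16xf32> into tensor<1x16xf32>
/// ```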
Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
                                                             Location loc,
                                                             Value tensor,
                                                             Value dest) {
  auto rankedTensorType = dest.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
                                               sizes, strides);
}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"