1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/StandardOps/Utils/Utils.h"
11 #include "mlir/Dialect/Tensor/IR/Tensor.h"
12 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
13 #include "mlir/Dialect/Utils/StaticValueUtils.h"
14 #include "mlir/IR/BlockAndValueMapping.h"
15 #include "mlir/IR/Builders.h"
16 #include "mlir/IR/BuiltinAttributeInterfaces.h"
17 #include "mlir/IR/Matchers.h"
18 #include "mlir/IR/PatternMatch.h"
19 #include "mlir/IR/TypeUtilities.h"
20 #include "llvm/ADT/STLExtras.h"
21 
22 using namespace mlir;
23 using namespace mlir::tensor;
24 
25 /// Materialize a single constant operation from a given attribute value with
26 /// the desired resultant type.
27 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
28                                               Attribute value, Type type,
29                                               Location loc) {
30   if (arith::ConstantOp::isBuildableWith(value, type))
31     return builder.create<arith::ConstantOp>(loc, value, type);
32   if (ConstantOp::isBuildableWith(value, type))
33     return builder.create<ConstantOp>(loc, value, type);
34   return nullptr;
35 }
36 
37 //===----------------------------------------------------------------------===//
38 // CastOp
39 //===----------------------------------------------------------------------===//
40 
41 /// Returns true if `target` is a ranked tensor type that preserves static
42 /// information available in the `source` ranked tensor type.
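///
/// For example (illustrative), `tensor<4x8xf32>` preserves the static
/// information of `tensor<?x8xf32>`, while `tensor<?x8xf32>` does not preserve
/// the static information of `tensor<4x8xf32>`.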
43 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
44   auto sourceType = source.dyn_cast<RankedTensorType>();
45   auto targetType = target.dyn_cast<RankedTensorType>();
46 
47   // Requires RankedTensorType.
48   if (!sourceType || !targetType)
49     return false;
50 
51   // Requires same elemental type.
52   if (sourceType.getElementType() != targetType.getElementType())
53     return false;
54 
55   // Requires same rank.
56   if (sourceType.getRank() != targetType.getRank())
57     return false;
58 
  // If a dimension is static in `source` but dynamic in `target`, static
  // information is lost and `target` does not preserve it.
60   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
61     if (!ShapedType::isDynamic(std::get<0>(t)) &&
62         ShapedType::isDynamic(std::get<1>(t)))
63       return false;
64   }
65 
66   return true;
67 }
68 
69 /// Determines whether tensor::CastOp casts to a more dynamic version of the
70 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
71 /// implement canonicalization patterns for ops in different dialects that may
72 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted when `slice` ops are canonicalized, to
/// preserve the type compatibility of their uses.
75 ///
/// Returns true when all of the following conditions are met:
/// 1. the source and result are ranked tensors with the same element type and
///    rank.
/// 2. the source tensor type has at least as much static information as the
///    result type.
79 ///
80 /// Example:
81 /// ```mlir
82 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
83 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
84 /// ```
85 ///
86 /// folds into:
87 ///
88 /// ```mlir
89 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
90 /// ```
91 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
92   if (!castOp)
93     return false;
94 
95   // Can fold if the source of cast has at least as much static information as
96   // its results.
97   return preservesStaticInformation(castOp.getType(),
98                                     castOp.source().getType());
99 }
100 
101 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
102 /// that can be folded.
103 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
104   bool folded = false;
105   for (OpOperand &operand : op->getOpOperands()) {
106     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
107     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
108       operand.set(castOp.getOperand());
109       folded = true;
110     }
111   }
112   return success(folded);
113 }
114 
115 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
116   if (inputs.size() != 1 || outputs.size() != 1)
117     return false;
118   Type a = inputs.front(), b = outputs.front();
119   auto aT = a.dyn_cast<TensorType>();
120   auto bT = b.dyn_cast<TensorType>();
121   if (!aT || !bT)
122     return false;
123 
124   if (aT.getElementType() != bT.getElementType())
125     return false;
126 
127   return succeeded(verifyCompatibleShape(aT, bT));
128 }
129 
130 /// Compute a TensorType that has the joined shape knowledge of the two
131 /// given TensorTypes. The element types need to match.
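///
/// For example (illustrative), joining tensor<?x8xf32> and tensor<4x?xf32>
/// yields tensor<4x8xf32>; joining tensor<4xf32> and tensor<8xf32> has no join
/// and a null type is returned.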
132 static TensorType joinShapes(TensorType one, TensorType two) {
133   assert(one.getElementType() == two.getElementType());
134 
135   if (!one.hasRank())
136     return two;
137   if (!two.hasRank())
138     return one;
139 
140   int64_t rank = one.getRank();
141   if (rank != two.getRank())
142     return {};
143 
144   SmallVector<int64_t, 4> join;
145   join.reserve(rank);
146   for (int64_t i = 0; i < rank; ++i) {
147     if (one.isDynamicDim(i)) {
148       join.push_back(two.getDimSize(i));
149       continue;
150     }
151     if (two.isDynamicDim(i)) {
152       join.push_back(one.getDimSize(i));
153       continue;
154     }
155     if (one.getDimSize(i) != two.getDimSize(i))
156       return {};
157     join.push_back(one.getDimSize(i));
158   }
159   return RankedTensorType::get(join, one.getElementType());
160 }
161 
162 namespace {
163 
164 /// Replaces chains of two tensor.cast operations by a single tensor.cast
165 /// operation if doing so does not remove runtime constraints.
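///
/// Example (illustrative):
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4x?xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<?x8xf32>
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = tensor.cast %0 : tensor<4x?xf32> to tensor<?x8xf32>
/// ```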
166 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
167   using OpRewritePattern<CastOp>::OpRewritePattern;
168 
169   LogicalResult matchAndRewrite(CastOp tensorCast,
170                                 PatternRewriter &rewriter) const final {
171     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
172 
173     if (!tensorCastOperand)
174       return failure();
175 
176     auto sourceType =
177         tensorCastOperand.getOperand().getType().cast<TensorType>();
178     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
179     auto resultType = tensorCast.getType().cast<TensorType>();
180 
181     // We can remove the intermediate cast if joining all three produces the
182     // same result as just joining the source and result shapes.
183     auto firstJoin =
184         joinShapes(joinShapes(sourceType, intermediateType), resultType);
185 
186     // The join might not exist if the cast sequence would fail at runtime.
187     if (!firstJoin)
188       return failure();
189 
    // If the join above exists, then newJoin exists as well; it might just
    // contain less information. If so, we cannot drop the intermediate cast, as
    // doing so would remove runtime checks.
193     auto newJoin = joinShapes(sourceType, resultType);
194     if (firstJoin != newJoin)
195       return failure();
196 
197     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
198                                         tensorCastOperand.getOperand());
199     return success();
200   }
201 };
202 
203 } // namespace
204 
205 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
206                                          MLIRContext *context) {
207   results.add<ChainedTensorCast>(context);
208 }
209 
210 //===----------------------------------------------------------------------===//
211 // DimOp
212 //===----------------------------------------------------------------------===//
213 
214 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
215                   int64_t index) {
216   auto loc = result.location;
217   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
218   build(builder, result, source, indexValue);
219 }
220 
221 Optional<int64_t> DimOp::getConstantIndex() {
222   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
223     return constantOp.getValue().cast<IntegerAttr>().getInt();
224   return {};
225 }
226 
227 static LogicalResult verify(DimOp op) {
228   // Assume unknown index to be in range.
229   Optional<int64_t> index = op.getConstantIndex();
230   if (!index.hasValue())
231     return success();
232 
233   // Check that constant index is not knowingly out of range.
234   auto type = op.source().getType();
235   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
236     if (index.getValue() >= tensorType.getRank())
237       return op.emitOpError("index is out of range");
238   } else if (type.isa<UnrankedTensorType>()) {
239     // Assume index to be in range.
240   } else {
241     llvm_unreachable("expected operand with tensor type");
242   }
243   return success();
244 }
245 
246 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
247   // All forms of folding require a known index.
248   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
249   if (!index)
250     return {};
251 
252   // Folding for unranked types (UnrankedTensorType) is not supported.
253   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
254   if (!tensorType)
255     return {};
256 
257   // Fold if the shape extent along the given index is known.
258   if (!tensorType.isDynamicDim(index.getInt())) {
259     Builder builder(getContext());
260     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
261   }
262 
263   Operation *definingOp = source().getDefiningOp();
264 
265   // Fold dim to the operand of tensor.generate.
266   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
267     auto resultType =
268         fromElements.getResult().getType().cast<RankedTensorType>();
269     // The case where the type encodes the size of the dimension is handled
270     // above.
271     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
272 
273     // Find the operand of the fromElements that corresponds to this index.
274     auto dynExtents = fromElements.dynamicExtents().begin();
275     for (auto dim : resultType.getShape().take_front(index.getInt()))
276       if (ShapedType::isDynamic(dim))
277         dynExtents++;
278 
279     return Value{*dynExtents};
280   }
281 
282   // The size at the given index is now known to be a dynamic size.
283   unsigned unsignedIndex = index.getValue().getZExtValue();
284 
285   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
    // Fold only for non-rank-reduced ops. For the rank-reduced version, rely on
    // the `resolve-shaped-type-result-dims` pass.
288     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
289         sliceOp.isDynamicSize(unsignedIndex)) {
290       return {sliceOp.getDynamicSize(unsignedIndex)};
291     }
292   }
293 
294   // dim(cast) -> dim
295   if (succeeded(foldTensorCast(*this)))
296     return getResult();
297 
298   return {};
299 }
300 
301 namespace {
302 /// Fold dim of a cast into the dim of the source of the tensor cast.
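///
/// Example (illustrative):
///
/// ```mlir
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %idx : tensor<?x?xf32>
/// ```
///
/// is rewritten into:
///
/// ```mlir
///   %d = tensor.dim %t, %idx : tensor<4x?xf32>
/// ```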
303 struct DimOfCastOp : public OpRewritePattern<DimOp> {
304   using OpRewritePattern<DimOp>::OpRewritePattern;
305 
306   LogicalResult matchAndRewrite(DimOp dimOp,
307                                 PatternRewriter &rewriter) const override {
308     auto castOp = dimOp.source().getDefiningOp<CastOp>();
309     if (!castOp)
310       return failure();
311     Value newSource = castOp.getOperand();
312     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
313     return success();
314   }
315 };
316 } // namespace
317 
318 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
319                                         MLIRContext *context) {
320   results.add<DimOfCastOp>(context);
321 }
322 
323 //===----------------------------------------------------------------------===//
324 // ExtractOp
325 //===----------------------------------------------------------------------===//
326 
327 static LogicalResult verify(ExtractOp op) {
328   // Verify the # indices match if we have a ranked type.
329   if (auto tensorType = op.tensor().getType().dyn_cast<RankedTensorType>())
330     if (tensorType.getRank() != static_cast<int64_t>(op.indices().size()))
      return op.emitOpError("incorrect number of indices");
332 
333   return success();
334 }
335 
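/// Fold an extract from a constant tensor: for example (illustrative),
/// extracting element [1] from a constant dense<[5, 6, 7]> : tensor<3xi32>
/// folds to the attribute 6 : i32, and extracting from a splat constant folds
/// to the splat value.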
336 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
337   // The tensor operand must be a known constant.
338   Attribute tensor = operands.front();
339   if (!tensor)
340     return {};
341   // If this is a splat elements attribute, simply return the value. All of the
342   // elements of a splat attribute are the same.
343   if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
344     return splatTensor.getSplatValue<Attribute>();
345 
346   // Otherwise, collect the constant indices into the tensor.
347   SmallVector<uint64_t, 8> indices;
348   for (Attribute indice : llvm::drop_begin(operands, 1)) {
349     if (!indice || !indice.isa<IntegerAttr>())
350       return {};
351     indices.push_back(indice.cast<IntegerAttr>().getInt());
352   }
353 
354   // If this is an elements attribute, query the value at the given indices.
355   auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
356   if (elementsAttr && elementsAttr.isValidIndex(indices))
357     return elementsAttr.getValues<Attribute>()[indices];
358   return {};
359 }
360 
361 //===----------------------------------------------------------------------===//
362 // FromElementsOp
363 //===----------------------------------------------------------------------===//
364 
365 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
366                            Type resultType, ValueRange elements) {
367   result.addOperands(elements);
368   result.addTypes(resultType);
369 }
370 
371 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
372                            ValueRange elements) {
373   assert(!elements.empty() && "expected at least one element");
374   Type resultType = RankedTensorType::get(
375       {static_cast<int64_t>(elements.size())}, elements.front().getType());
376   build(builder, result, resultType, elements);
377 }
378 
379 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
380   if (!llvm::is_contained(operands, nullptr))
381     return DenseElementsAttr::get(getType(), operands);
382   return {};
383 }
384 
385 namespace {
386 
387 // Canonicalizes the pattern of the form
388 //
389 // %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
390 // %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
391 //
392 // to just %element.
393 struct ExtractElementFromTensorFromElements
394     : public OpRewritePattern<tensor::ExtractOp> {
395   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
396 
397   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
398                                 PatternRewriter &rewriter) const final {
399     auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
400     if (!tensorFromElements)
401       return failure();
402     auto tensorType = tensorFromElements.getType().cast<RankedTensorType>();
403     auto rank = tensorType.getRank();
404     if (rank == 0) {
405       rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
406       return success();
407     }
408     SmallVector<APInt, 3> indices(rank);
409     int64_t flatIndex = 0;
410     int64_t stride = 1;
411     for (int i = rank - 1; i >= 0; --i) {
412       APInt index;
413       if (!matchPattern(extract.indices()[i], m_ConstantInt(&index)))
414         return failure();
415       if (i < rank - 1)
416         stride *= tensorType.getDimSize(i);
417       flatIndex += index.getSExtValue() * stride;
418     }
419     // Prevent out of bounds accesses. This can happen in invalid code that will
420     // never execute.
421     if (tensorFromElements->getNumOperands() <= flatIndex || flatIndex < 0)
422       return failure();
423     rewriter.replaceOp(extract, tensorFromElements.getOperand(flatIndex));
424     return success();
425   }
426 };
427 
428 } // namespace
429 
430 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
431                                                  MLIRContext *context) {
432   results.add<ExtractElementFromTensorFromElements>(context);
433 }
434 
435 //===----------------------------------------------------------------------===//
436 // InsertOp
437 //===----------------------------------------------------------------------===//
438 
439 static LogicalResult verify(InsertOp op) {
440   // Verify the # indices match if we have a ranked type.
441   if (auto destType = op.dest().getType().dyn_cast<RankedTensorType>())
442     if (destType.getRank() != static_cast<int64_t>(op.indices().size()))
443       return op.emitOpError("incorrect number of indices");
444   return success();
445 }
446 
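/// Fold an insert into a constant destination: if both the scalar and the
/// destination are known constants and the destination is a splat whose value
/// equals the scalar, the insertion is a no-op and the destination attribute is
/// returned unchanged.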
447 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
448   Attribute scalar = operands[0];
449   Attribute dest = operands[1];
450   if (scalar && dest)
451     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
452       if (scalar == splatDest.getSplatValue<Attribute>())
453         return dest;
454   return {};
455 }
456 
457 //===----------------------------------------------------------------------===//
458 // GenerateOp
459 //===----------------------------------------------------------------------===//
460 
461 static LogicalResult verify(GenerateOp op) {
462   // Ensure that the tensor type has as many dynamic dimensions as are specified
463   // by the operands.
464   RankedTensorType resultTy = op.getType().cast<RankedTensorType>();
465   if (op.getNumOperands() != resultTy.getNumDynamicDims())
466     return op.emitError("must have as many index operands as dynamic extents "
467                         "in the result type");
468 
469   // Ensure that region arguments span the index space.
470   if (!llvm::all_of(op.body().getArgumentTypes(),
471                     [](Type ty) { return ty.isIndex(); }))
472     return op.emitError("all body arguments must be index");
473   if (op.body().getNumArguments() != resultTy.getRank())
474     return op.emitError("must have one body argument per input dimension");
475 
476   // Ensure that the region yields an element of the right type.
477   auto yieldOp =
478       llvm::cast<YieldOp>(op.body().getBlocks().front().getTerminator());
479   if (yieldOp.value().getType() != resultTy.getElementType())
480     return op.emitOpError(
481         "body must be terminated with a `yield` operation of the tensor "
482         "element type");
483 
484   return success();
485 }
486 
487 void GenerateOp::build(
488     OpBuilder &b, OperationState &result, Type resultTy,
489     ValueRange dynamicExtents,
490     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
491   build(b, result, resultTy, dynamicExtents);
492 
493   // Build and populate body.
494   OpBuilder::InsertionGuard guard(b);
495   Region *bodyRegion = result.regions.front().get();
496   auto rank = resultTy.cast<RankedTensorType>().getRank();
497   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
498   Block *bodyBlock =
499       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes);
500   bodyBuilder(b, result.location, bodyBlock->getArguments());
501 }
502 
503 namespace {
504 
/// Canonicalizes tensor.generate operations with constant extent operands by
/// folding those extents into the result type. A tensor.cast back to the
/// original result type is inserted so that the resulting IR stays well-typed.
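///
/// Example (illustrative):
///
/// ```mlir
///   %c5 = arith.constant 5 : index
///   %0 = tensor.generate %c5, %d {
///     ^bb0(%i: index, %j: index):
///     <computation>
///     yield %elem : f32
///   } : tensor<?x?xf32>
/// ```
///
/// becomes a tensor.generate of type tensor<5x?xf32> (keeping only %d as a
/// dynamic extent), followed by a tensor.cast back to tensor<?x?xf32>.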
509 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
510   using OpRewritePattern<GenerateOp>::OpRewritePattern;
511 
512   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
513                                 PatternRewriter &rewriter) const final {
514     auto resultType =
515         tensorFromElements.getResult().getType().cast<RankedTensorType>();
516 
517     if (resultType.hasStaticShape())
518       return failure();
519 
520     SmallVector<Value, 4> newOperands;
521     SmallVector<int64_t, 4> newShape;
522     auto operandsIt = tensorFromElements.dynamicExtents().begin();
523 
524     for (int64_t dim : resultType.getShape()) {
525       if (!ShapedType::isDynamic(dim)) {
526         newShape.push_back(dim);
527         continue;
528       }
529       APInt index;
530       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
531         newShape.push_back(ShapedType::kDynamicSize);
532         newOperands.push_back(*operandsIt++);
533         continue;
534       }
535       newShape.push_back(index.getSExtValue());
536       operandsIt++;
537     }
538 
539     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
540       return failure();
541 
542     auto loc = tensorFromElements.getLoc();
543     auto newOp = rewriter.create<GenerateOp>(
544         loc, RankedTensorType::get(newShape, resultType.getElementType()),
545         newOperands);
546     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
547                                 newOp.body().begin());
548     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
549                                                 newOp);
550     return success();
551   }
552 };
553 
554 /// Canonicalizes the pattern of the form
555 ///
556 /// %tensor = tensor.generate %x {
557 ///   ^bb0(%arg0: index):  // no predecessors
558 ///   <computation>
559 ///   yield %1 : index
560 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
562 ///
563 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
564 /// tensor.generate operation has no side-effects.
565 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
566   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
567 
568   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
569                                 PatternRewriter &rewriter) const final {
570     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
571     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
572       return failure();
573 
574     BlockAndValueMapping mapping;
575     Block *body = tensorFromElements.getBody();
576     mapping.map(body->getArguments(), extract.indices());
577     for (auto &op : body->without_terminator())
578       rewriter.clone(op, mapping);
579 
580     auto yield = cast<YieldOp>(body->getTerminator());
581 
582     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
583     return success();
584   }
585 };
586 
587 /// Canonicalizes the pattern of the form
588 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
590 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
591 ///
592 /// to
593 ///
594 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
595 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
596   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
597 
598   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
599                                 PatternRewriter &rewriter) const final {
600     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
601     if (!tensorCast)
602       return failure();
603 
604     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
605                                                    extract.indices());
606     return success();
607   }
608 };
609 
610 } // namespace
611 
612 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
613                                              MLIRContext *context) {
614   // TODO: Move extract patterns to tensor::ExtractOp.
615   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
616               StaticTensorGenerate>(context);
617 }
618 
619 //===----------------------------------------------------------------------===//
620 // RankOp
621 //===----------------------------------------------------------------------===//
622 
623 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
624   // Constant fold rank when the rank of the operand is known.
625   auto type = getOperand().getType();
626   auto shapedType = type.dyn_cast<ShapedType>();
627   if (shapedType && shapedType.hasRank())
628     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
629   return IntegerAttr();
630 }
631 
632 //===----------------------------------------------------------------------===//
633 // ReshapeOp
634 //===----------------------------------------------------------------------===//
635 
636 static int64_t getNumElements(ShapedType type) {
637   int64_t numElements = 1;
638   for (auto dim : type.getShape())
639     numElements *= dim;
640   return numElements;
641 }
642 
643 static LogicalResult verify(ReshapeOp op) {
644   TensorType operandType = op.source().getType().cast<TensorType>();
645   TensorType resultType = op.result().getType().cast<TensorType>();
646 
647   if (operandType.getElementType() != resultType.getElementType())
648     return op.emitOpError("element types of source and destination tensor "
649                           "types should be the same");
650 
651   int64_t shapeSize =
652       op.shape().getType().cast<RankedTensorType>().getDimSize(0);
653   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
654   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
655 
656   if (resultRankedType) {
657     if (operandRankedType && resultRankedType.hasStaticShape() &&
658         operandRankedType.hasStaticShape()) {
659       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
660         return op.emitOpError("source and destination tensor should have the "
661                               "same number of elements");
662     }
663     if (ShapedType::isDynamic(shapeSize))
664       return op.emitOpError("cannot use shape operand with dynamic length to "
665                             "reshape to statically-ranked tensor type");
666     if (shapeSize != resultRankedType.getRank())
667       return op.emitOpError(
668           "length of shape operand differs from the result's tensor rank");
669   }
670   return success();
671 }
672 
673 //===----------------------------------------------------------------------===//
674 // Reassociative reshape ops
675 //===----------------------------------------------------------------------===//
676 
677 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
678   return getSymbolLessAffineMaps(getReassociationExprs());
679 }
680 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
681   return convertReassociationIndicesToExprs(getContext(),
682                                             getReassociationIndices());
683 }
684 
685 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
686   return getSymbolLessAffineMaps(getReassociationExprs());
687 }
688 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
689   return convertReassociationIndicesToExprs(getContext(),
690                                             getReassociationIndices());
691 }
692 
693 static void print(OpAsmPrinter &p, ExpandShapeOp op) {
694   ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
695 }
696 
697 static void print(OpAsmPrinter &p, CollapseShapeOp op) {
698   ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
699 }
700 
701 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
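///
/// For example (illustrative), collapsing tensor<4x?x8xf32> with reassociation
/// [[0, 1], [2]] yields tensor<?x8xf32>, since any group containing a dynamic
/// dimension collapses to a dynamic dimension.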
702 static RankedTensorType
703 computeTensorReshapeCollapsedType(RankedTensorType type,
704                                   ArrayRef<AffineMap> reassociation) {
705   auto shape = type.getShape();
706   SmallVector<int64_t, 4> newShape;
707   newShape.reserve(reassociation.size());
708 
709   // Use the fact that reassociation is valid to simplify the logic: only use
710   // each map's rank.
711   assert(isReassociationValid(reassociation) && "invalid reassociation");
712   unsigned currentDim = 0;
713   for (AffineMap m : reassociation) {
714     unsigned dim = m.getNumResults();
715     auto band = shape.slice(currentDim, dim);
716     int64_t size = 1;
717     if (llvm::is_contained(band, ShapedType::kDynamicSize))
718       size = ShapedType::kDynamicSize;
719     else
720       for (unsigned d = 0; d < dim; ++d)
721         size *= shape[currentDim + d];
722     newShape.push_back(size);
723     currentDim += dim;
724   }
725 
726   return RankedTensorType::get(newShape, type.getElementType());
727 }
728 
729 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
730                             ArrayRef<ReassociationIndices> reassociation,
731                             ArrayRef<NamedAttribute> attrs) {
732   auto resultType = computeTensorReshapeCollapsedType(
733       src.getType().cast<RankedTensorType>(),
734       getSymbolLessAffineMaps(
735           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
736   build(b, result, resultType, src, attrs);
737   result.addAttribute(getReassociationAttrName(),
738                       getReassociationIndicesAttribute(b, reassociation));
739 }
740 
741 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
742                           ArrayRef<ReassociationIndices> reassociation,
743                           ArrayRef<NamedAttribute> attrs) {
744   auto resultType = computeTensorReshapeCollapsedType(
745       src.getType().cast<RankedTensorType>(),
746       getSymbolLessAffineMaps(
747           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
748   build(b, result, resultType, src, attrs);
749   result.addAttribute(getReassociationAttrName(),
750                       getReassociationIndicesAttribute(b, reassociation));
751 }
752 
753 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
754                                         TensorReshapeOp, ExpandShapeOp>::value>
755 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
756                                            RankedTensorType expandedType,
757                                            RankedTensorType collapsedType) {
758   if (failed(
759           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
760     return failure();
761 
762   auto maps = op.getReassociationMaps();
763   RankedTensorType expectedType =
764       computeTensorReshapeCollapsedType(expandedType, maps);
765   if (collapsedType != expectedType)
766     return op.emitOpError("expected collapsed type to be ")
767            << expectedType << ", but got " << collapsedType;
768   return success();
769 }
770 
771 static LogicalResult verify(ExpandShapeOp op) {
772   return verifyTensorReshapeOp(op, op.getResultType(), op.getSrcType());
773 }
774 
775 static LogicalResult verify(CollapseShapeOp op) {
776   return verifyTensorReshapeOp(op, op.getSrcType(), op.getResultType());
777 }
778 
779 namespace {
780 /// Reshape of a splat constant can be replaced with a constant of the result
781 /// type.
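///
/// For example (illustrative), expanding the splat constant
/// dense<1.0> : tensor<4xf32> to tensor<2x2xf32> is replaced by the constant
/// dense<1.0> : tensor<2x2xf32>.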
782 template <typename TensorReshapeOp>
783 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
784   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
785   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
786                                 PatternRewriter &rewriter) const override {
787     DenseElementsAttr attr;
788     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
789       return failure();
790     if (!attr || !attr.isSplat())
791       return failure();
792     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
793         reshapeOp.getResultType(), attr.getRawData(), true);
794     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
795     return success();
796   }
797 };
798 
799 } // namespace
800 
801 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
802                                                 MLIRContext *context) {
803   results.add<CollapseReshapeOps<ExpandShapeOp>,
804               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
805               FoldReshapeWithConstant<ExpandShapeOp>>(context);
806 }
807 
808 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
809                                                   MLIRContext *context) {
810   results.add<CollapseReshapeOps<CollapseShapeOp>,
811               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
812               FoldReshapeWithConstant<CollapseShapeOp>>(context);
813 }
814 
815 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
816   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
817 }
818 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
819   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
820 }
821 
822 //===----------------------------------------------------------------------===//
823 // ExtractSliceOp
824 //===----------------------------------------------------------------------===//
825 
826 /// An extract_slice op result type can be fully inferred from the source type
827 /// and the static representation of offsets, sizes and strides. Special
828 /// sentinels encode the dynamic case.
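///
/// For example (illustrative), with a source of type tensor<8x16x4xf32> and
/// static sizes [4, kDynamicSize, 4], the inferred result type is
/// tensor<4x?x4xf32>, regardless of the offsets and strides.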
829 RankedTensorType ExtractSliceOp::inferResultType(
830     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
831     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offsets/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type, and strides=1.
835   unsigned rank = sourceRankedTensorType.getRank();
836   (void)rank;
837   assert(staticSizes.size() == rank &&
838          "unexpected staticSizes not equal to rank of source");
839   return RankedTensorType::get(staticSizes,
840                                sourceRankedTensorType.getElementType());
841 }
842 
843 RankedTensorType ExtractSliceOp::inferResultType(
844     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
845     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
846   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
847   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
848   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
849                              ShapedType::kDynamicStrideOrOffset);
850   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
851                              ShapedType::kDynamicSize);
852   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
853                              ShapedType::kDynamicStrideOrOffset);
854   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
855                                          staticSizes, staticStrides);
856 }
857 
/// A rank-reduced extract_slice op result type can be inferred from the source
/// type and the static representation of offsets, sizes and strides: dimensions
/// of size 1 are dropped from the inferred type until it reaches `resultRank`.
/// Special sentinels encode the dynamic case.
861 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
862     unsigned resultRank, RankedTensorType sourceRankedTensorType,
863     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
864     ArrayRef<int64_t> strides) {
865   auto inferredType =
866       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
867           .cast<RankedTensorType>();
868   int rankDiff = inferredType.getRank() - resultRank;
869   if (rankDiff > 0) {
870     auto shape = inferredType.getShape();
871     llvm::SmallDenseSet<unsigned> dimsToProject;
872     mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
873     SmallVector<int64_t> projectedShape;
874     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
875       if (!dimsToProject.contains(pos))
876         projectedShape.push_back(shape[pos]);
877     inferredType =
878         RankedTensorType::get(projectedShape, inferredType.getElementType());
879   }
880   return inferredType;
881 }
882 
883 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
884     unsigned resultRank, RankedTensorType sourceRankedTensorType,
885     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
886     ArrayRef<OpFoldResult> strides) {
887   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
888   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
889   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
890                              ShapedType::kDynamicStrideOrOffset);
891   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
892                              ShapedType::kDynamicSize);
893   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
894                              ShapedType::kDynamicStrideOrOffset);
895   return ExtractSliceOp::inferRankReducedResultType(
896       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
897       staticStrides);
898 }
899 
900 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
901 /// result type. If the type passed is nullptr, it is inferred.
902 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
903                            RankedTensorType resultType, Value source,
904                            ArrayRef<OpFoldResult> offsets,
905                            ArrayRef<OpFoldResult> sizes,
906                            ArrayRef<OpFoldResult> strides,
907                            ArrayRef<NamedAttribute> attrs) {
908   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
909   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
910   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
911                              ShapedType::kDynamicStrideOrOffset);
912   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
913                              ShapedType::kDynamicSize);
914   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
915                              ShapedType::kDynamicStrideOrOffset);
916   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  // Structuring the implementation this way avoids duplication between builders.
918   if (!resultType) {
919     resultType =
920         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
921                                         staticSizes, staticStrides)
922             .cast<RankedTensorType>();
923   }
924   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
925         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
926         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
927   result.addAttributes(attrs);
928 }
929 
930 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
931 /// result type.
932 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
933                            ArrayRef<OpFoldResult> offsets,
934                            ArrayRef<OpFoldResult> sizes,
935                            ArrayRef<OpFoldResult> strides,
936                            ArrayRef<NamedAttribute> attrs) {
937   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
938 }
939 
940 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
941 /// type passed is nullptr, it is inferred.
942 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
943                            RankedTensorType resultType, Value source,
944                            ValueRange offsets, ValueRange sizes,
945                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
946   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
947       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
948   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
949       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
950   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
951       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
952   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
953 }
954 
955 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
956 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
957                            ValueRange offsets, ValueRange sizes,
958                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
959   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
960 }
961 
962 template <typename OpTy>
963 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
964                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
966   switch (result) {
967   case SliceVerificationResult::Success:
968     return success();
969   case SliceVerificationResult::RankTooLarge:
970     return op.emitError("expected rank to be smaller or equal to ")
971            << "the other rank. ";
972   case SliceVerificationResult::SizeMismatch:
973     return op.emitError("expected type to be ")
974            << expectedType << " or a rank-reduced version. (size mismatch) ";
975   case SliceVerificationResult::ElemTypeMismatch:
976     return op.emitError("expected element type to be ")
           << shapedType.getElementType();
978   default:
979     llvm_unreachable("unexpected extract_slice op verification result");
980   }
981 }
982 
983 /// Verifier for ExtractSliceOp.
984 static LogicalResult verify(ExtractSliceOp op) {
985   // Verify result type against inferred type.
986   auto expectedType =
987       ExtractSliceOp::inferResultType(op.getSourceType(), op.getMixedOffsets(),
988                                       op.getMixedSizes(), op.getMixedStrides());
989   auto result =
990       isRankReducedType(expectedType.cast<ShapedType>(), op.getType());
991   return produceSliceErrorMsg(result, op, expectedType);
992 }
993 
994 /// Infer the canonical type of the result of an extract_slice op. Returns a
995 /// type with rank `resultRank` that is either the rank of the rank-reduced
996 /// type, or the non-rank-reduced type.
997 static RankedTensorType
998 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
999                             ArrayRef<OpFoldResult> mixedOffsets,
1000                             ArrayRef<OpFoldResult> mixedSizes,
1001                             ArrayRef<OpFoldResult> mixedStrides) {
1002   auto resultType =
1003       ExtractSliceOp::inferRankReducedResultType(
1004           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1005           .cast<RankedTensorType>();
1006   if (resultType.getRank() != resultRank) {
1007     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1008                                                  mixedSizes, mixedStrides)
1009                      .cast<RankedTensorType>();
1010   }
1011   return resultType;
1012 }
1013 
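/// Return the positions of the source dimensions that are dropped
/// (rank-reduced away) by this extract_slice op: dimensions whose slice size is
/// statically 1 and that do not correspond to a unit dimension kept in the
/// result type.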
1014 llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
1015   llvm::SmallDenseSet<unsigned> droppedDims;
1016   ArrayRef<int64_t> resultShape = getType().getShape();
1017   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1018   unsigned shapePos = 0;
1019   for (const auto &size : enumerate(mixedSizes)) {
1020     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // A dimension is preserved if its slice size is not statically 1, or if
    // the currently matched result dimension is itself statically 1 (i.e. the
    // unit dimension was kept rather than rank-reduced away).
1024     if (!sizeVal || sizeVal.getValue() != 1 ||
1025         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1026       shapePos++;
1027       continue;
1028     }
1029     droppedDims.insert(size.index());
1030   }
1031   return droppedDims;
1032 }
1033 
1034 LogicalResult ExtractSliceOp::reifyResultShapes(
1035     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1036   reifiedReturnShapes.resize(1);
1037   reifiedReturnShapes[0].reserve(getType().getRank());
1038   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1039   llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
1040   Location loc = getLoc();
1041   for (const auto &size : enumerate(mixedSizes)) {
1042     if (droppedDims.count(size.index()))
1043       continue;
1044     if (auto attr = size.value().dyn_cast<Attribute>()) {
1045       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1046           loc, attr.cast<IntegerAttr>().getInt()));
1047       continue;
1048     }
1049     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1050   }
1051   return success();
1052 }
1053 
1054 namespace {
1055 /// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
/// `canFoldIntoConsumerOp` is true.
1058 ///
1059 /// Example:
1060 /// ```
1061 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1062 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1063 ///   tensor<3x4xf32>
1064 /// ```
1065 /// is rewritten into:
1066 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///        tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1069 /// ```
1070 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1071 public:
1072   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1073 
1074   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1075                                 PatternRewriter &rewriter) const override {
    // If any operand is already a constant index, return failure and let the
    // constant-argument folder for extract_slice kick in first.
1077     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1078           return matchPattern(operand, matchConstantIndex());
1079         }))
1080       return failure();
1081 
1082     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1083     if (!castOp)
1084       return failure();
1085 
1086     if (!canFoldIntoConsumerOp(castOp))
1087       return failure();
1088 
    // Deduce the type of the result to use for the canonicalized operation.
1090     RankedTensorType resultType = getCanonicalSliceResultType(
1091         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1092         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1093         sliceOp.getMixedStrides());
1094     Value newSlice = rewriter.create<ExtractSliceOp>(
1095         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1096         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1097         sliceOp.static_sizes(), sliceOp.static_strides());
1098     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1099                                                 newSlice);
1100     return success();
1101   }
1102 };
1103 } // namespace
1104 
1105 /// Return the canonical type of the result of an extract_slice op.
1106 struct SliceReturnTypeCanonicalizer {
1107   RankedTensorType operator()(ExtractSliceOp op,
1108                               ArrayRef<OpFoldResult> mixedOffsets,
1109                               ArrayRef<OpFoldResult> mixedSizes,
1110                               ArrayRef<OpFoldResult> mixedStrides) {
1111     return getCanonicalSliceResultType(op.getType().getRank(),
1112                                        op.getSourceType(), mixedOffsets,
1113                                        mixedSizes, mixedStrides);
1114   }
1115 };
1116 
1117 /// A canonicalizer wrapper to replace ExtractSliceOps.
1118 struct SliceCanonicalizer {
1119   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1120                   ExtractSliceOp newOp) {
1121     Value replacement = newOp.getResult();
1122     if (replacement.getType() != op.getType())
1123       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1124                                                     replacement);
1125     rewriter.replaceOp(op, replacement);
1126   }
1127 };
1128 
1129 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1130                                                  MLIRContext *context) {
1131   results.add<
1132       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1133           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1134       ExtractSliceOpCastFolder>(context);
1135 }
1136 
/// Return success when `op` is an identity slice of `shapedType`: all offsets
/// are 0, all strides are 1, and the sizes match the corresponding dimensions
/// of `shapedType`.
1138 static LogicalResult
1139 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1140                                            ShapedType shapedType) {
1141   OpBuilder b(op.getContext());
1142   for (OpFoldResult ofr : op.getMixedOffsets())
1143     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1144       return failure();
1145   // Rank-reducing noops only need to inspect the leading dimensions: llvm::zip
1146   // is appropriate.
1147   auto shape = shapedType.getShape();
1148   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1149     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1150       return failure();
1151   for (OpFoldResult ofr : op.getMixedStrides())
1152     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1153       return failure();
1154   return success();
1155 }
1156 
1157 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1158 /// we can return the InsertSliceOp's source directly.
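///
/// Example (illustrative):
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dst[0, 1] [2, 3] [1, 1]
///       : tensor<2x3xf32> into tensor<8x8xf32>
///   %1 = tensor.extract_slice %0[0, 1] [2, 3] [1, 1]
///       : tensor<8x8xf32> to tensor<2x3xf32>
/// ```
///
/// Here %1 folds to %src.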
1159 // TODO: This only checks the immediate producer; extend to go up the
1160 // insert/extract chain if the slices are disjoint.
1161 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1162   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1163 
1164   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1165   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1166       insertOp.isSameAs(extractOp, isSame))
1167     return insertOp.source();
1168 
1169   return {};
1170 }
1171 
1172 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute>) {
1173   if (getSourceType() == getType() &&
1174       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1175     return this->source();
1176   if (Value slice = foldExtractAfterInsertSlice(*this))
1177     return slice;
1178   return OpFoldResult();
1179 }
1180 
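/// Create an extract_slice op that yields `targetType` from the whole of
/// `tensor`: all offsets are 0, all strides are 1, and the sizes are taken from
/// the shape of `tensor` (using tensor.dim for dynamic dimensions). Rank
/// reduction, if any, is driven by the caller-provided `targetType`.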
1181 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1182     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1183   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1184   unsigned rank = rankedTensorType.getRank();
1185   auto shape = rankedTensorType.getShape();
1186   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1187   SmallVector<OpFoldResult> sizes;
1188   for (unsigned i = 0, e = rank; i < e; ++i) {
1189     OpFoldResult dim;
1190     if (rankedTensorType.isDynamicDim(i))
1191       dim = b.createOrFold<tensor::DimOp>(
1192           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1193     else
1194       dim = b.getIndexAttr(shape[i]);
1195     sizes.push_back(dim);
1196   }
1197   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1198   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1199                                                 offsets, sizes, strides);
1200 }
1201 
1202 //===----------------------------------------------------------------------===//
1203 // InsertSliceOp
1204 //===----------------------------------------------------------------------===//
1205 
1206 // Build a InsertSliceOp with mixed static and dynamic entries.
1207 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1208                           Value dest, ArrayRef<OpFoldResult> offsets,
1209                           ArrayRef<OpFoldResult> sizes,
1210                           ArrayRef<OpFoldResult> strides,
1211                           ArrayRef<NamedAttribute> attrs) {
1212   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1213   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1214   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1215                              ShapedType::kDynamicStrideOrOffset);
1216   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1217                              ShapedType::kDynamicSize);
1218   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1219                              ShapedType::kDynamicStrideOrOffset);
1220   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1221         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1222         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1223   result.addAttributes(attrs);
1224 }
1225 
1226 // Build a InsertSliceOp with dynamic entries.
1227 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1228                           Value dest, ValueRange offsets, ValueRange sizes,
1229                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1230   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1231       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1232   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1233       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1234   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1235       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1236   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1237 }
1238 
1239 /// Verifier for InsertSliceOp.
1240 static LogicalResult verify(InsertSliceOp op) {
1241   // insert_slice is the inverse of extract_slice, use the same type inference.
1242   auto expectedType = ExtractSliceOp::inferRankReducedResultType(
1243       op.getSourceType().getRank(), op.getType(),
1244       extractFromI64ArrayAttr(op.static_offsets()),
1245       extractFromI64ArrayAttr(op.static_sizes()),
1246       extractFromI64ArrayAttr(op.static_strides()));
1247   auto result =
1248       isRankReducedType(expectedType.cast<ShapedType>(), op.getSourceType());
1249   return produceSliceErrorMsg(result, op, expectedType);
1250 }
1251 
1252 /// If we have two consecutive InsertSliceOp writing to the same slice, we
1253 /// can mutate the second InsertSliceOp's destination to the first one's.
1254 ///
1255 /// Example:
1256 ///
1257 /// ```mlir
1258 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1259 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1260 /// ```
1261 ///
1262 /// folds into:
1263 ///
1264 /// ```mlir
1265 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1266 /// ```
1267 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1268   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1269 
1270   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1271   if (!prevInsertOp ||
1272       prevInsertOp.source().getType() != insertOp.source().getType() ||
1273       !prevInsertOp.isSameAs(insertOp, isSame))
1274     return failure();
1275 
1276   insertOp.destMutable().assign(prevInsertOp.dest());
1277   return success();
1278 }
1279 
OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
      getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (succeeded(foldInsertAfterInsertSlice(*this)))
    return getResult();
  return OpFoldResult();
}

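/// The result of an insert_slice has the same shape as its destination, so
/// each result dimension is reified as a tensor.dim of the destination (which
/// folds to a constant for statically known dimensions).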
LogicalResult InsertSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
  for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
    reifiedReturnShapes[0][dim] =
        builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
  }
  return success();
}

namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
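///
/// For illustration (names and shapes are hypothetical), offsets, sizes, and
/// strides whose SSA operands are constant are moved into the op's static
/// attributes:
///
/// ```mlir
///   %c64 = arith.constant 64 : index
///   %r = tensor.insert_slice %0 into %1[0, 0] [%c64, %c64] [1, 1]
///       : tensor<64x64xf32> into tensor<128x128xf32>
/// ```
///
/// becomes:
///
/// ```mlir
///   %r = tensor.insert_slice %0 into %1[0, 0] [64, 64] [1, 1]
///       : tensor<64x64xf32> into tensor<128x128xf32>
/// ```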
class InsertSliceOpConstantArgumentFolder final
    : public OpRewritePattern<InsertSliceOp> {
public:
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    // No constant operand, just return.
    if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
    SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
    SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
    SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
    canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);

    // Create the new op in canonical form.
    auto sourceType = ExtractSliceOp::inferRankReducedResultType(
        insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
        mixedOffsets, mixedSizes, mixedStrides);
    Value toInsert = insertSliceOp.source();
    if (sourceType != insertSliceOp.getSourceType())
      toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
                                                 sourceType, toInsert);
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
        mixedStrides);
    return success();
  }
};

/// Fold tensor.cast ops into insert_slice operations. If the source or
/// destination tensor is a tensor.cast that removes static type information,
/// the cast is folded into the insert_slice operation. E.g.:
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
/// ```
///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back to ensure that the type of the result
/// does not change.
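///
/// For illustration of the destination case (shapes chosen arbitrarily):
///
/// ```mlir
///   %1 = tensor.cast %d : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = tensor.insert_slice %0 into %1[0, 0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<?x?xf32>
/// ```
///
/// folds into:
///
/// ```mlir
///   %new = tensor.insert_slice %0 into %d[0, 0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x16xf32>
///   %2 = tensor.cast %new : tensor<8x16xf32> to tensor<?x?xf32>
/// ```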
struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
      auto castOp = v.getDefiningOp<tensor::CastOp>();
      if (!castOp || !canFoldIntoConsumerOp(castOp))
        return llvm::None;
      return castOp.source();
    };
    Optional<Value> sourceCastSource =
        getSourceOfCastOp(insertSliceOp.source());
    Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
    if (!sourceCastSource && !destCastSource)
      return failure();

    Value replacement = rewriter.create<InsertSliceOp>(
        insertSliceOp.getLoc(),
        (sourceCastSource ? *sourceCastSource : insertSliceOp.source()),
        (destCastSource ? *destCastSource : insertSliceOp.dest()),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());

    if (replacement.getType() != insertSliceOp.getType()) {
      replacement = rewriter.create<tensor::CastOp>(
          insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
    }
    rewriter.replaceOp(insertSliceOp, replacement);
    return success();
  }
};

/// If additional static type information can be deduced from an insert_slice's
/// size operands, insert an explicit cast of the op's source operand. This
/// enables other canonicalization patterns that match tensor.cast ops, such as
/// `ForOpTensorCastFolder` in SCF.
///
/// Example:
///
/// ```mlir
///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
///       : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
///       : tensor<64x64xf32> into ...
/// ```
struct InsertSliceOpSourceCastInserter final
    : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType srcType = insertSliceOp.getSourceType();
    if (srcType.getRank() != insertSliceOp.getType().getRank())
      return failure();
    SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
                                     srcType.getShape().end());
    for (int64_t i = 0; i < srcType.getRank(); ++i) {
      if (Optional<int64_t> constInt =
              getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
        newSrcShape[i] = *constInt;
    }

    RankedTensorType newSrcType =
        RankedTensorType::get(newSrcShape, srcType.getElementType());
    if (srcType == newSrcType ||
        !preservesStaticInformation(srcType, newSrcType) ||
        !tensor::CastOp::areCastCompatible(srcType, newSrcType))
      return failure();

    // newSrcType is:
    //   1) Different from srcType.
    //   2) "More static" than srcType.
    //   3) Cast-compatible with srcType.
    // Insert the cast.
    Value cast = rewriter.create<tensor::CastOp>(
        insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, cast, insertSliceOp.dest(),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());
    return success();
  }
};
} // namespace

void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
              InsertSliceOpSourceCastInserter>(context);
}

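/// Create an insert_slice that writes `tensor` over the whole of `dest` (the
/// source may be of lower rank, making it a rank-reducing insert): zero
/// offsets, unit strides, and sizes taken from the destination's shape, using
/// tensor.dim for dynamic dimensions. For illustration (names and shapes are
/// hypothetical):
///
/// ```mlir
///   %c0 = arith.constant 0 : index
///   %d0 = tensor.dim %dest, %c0 : tensor<?x16xf32>
///   %r = tensor.insert_slice %tensor into %dest[0, 0] [%d0, 16] [1, 1]
///       : tensor<?x16xf32> into tensor<?x16xf32>
/// ```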
Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
                                                             Location loc,
                                                             Value tensor,
                                                             Value dest) {
  auto rankedTensorType = dest.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
                                               sizes, strides);
}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"