1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/StandardOps/Utils/Utils.h"
11 #include "mlir/Dialect/Tensor/IR/Tensor.h"
12 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
13 #include "mlir/Dialect/Utils/StaticValueUtils.h"
14 #include "mlir/IR/BlockAndValueMapping.h"
15 #include "mlir/IR/Builders.h"
16 #include "mlir/IR/BuiltinAttributeInterfaces.h"
17 #include "mlir/IR/Matchers.h"
18 #include "mlir/IR/PatternMatch.h"
19 #include "mlir/IR/TypeUtilities.h"
20 #include "llvm/ADT/STLExtras.h"
21 
22 using namespace mlir;
23 using namespace mlir::tensor;
24 
25 /// Materialize a single constant operation from a given attribute value with
26 /// the desired resultant type.
27 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
28                                               Attribute value, Type type,
29                                               Location loc) {
30   if (arith::ConstantOp::isBuildableWith(value, type))
31     return builder.create<arith::ConstantOp>(loc, value, type);
32   if (ConstantOp::isBuildableWith(value, type))
33     return builder.create<ConstantOp>(loc, value, type);
34   return nullptr;
35 }
36 
37 //===----------------------------------------------------------------------===//
38 // CastOp
39 //===----------------------------------------------------------------------===//
40 
41 /// Returns true if `target` is a ranked tensor type that preserves static
42 /// information available in the `source` ranked tensor type.
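///
/// For example (illustrative types): with source = tensor<?x?xf32> and
/// target = tensor<8x16xf32> this returns true, since every dimension only
/// gains static information; with the arguments swapped it returns false,
/// because the static extents 8 and 16 would be lost.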
43 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
44   auto sourceType = source.dyn_cast<RankedTensorType>();
45   auto targetType = target.dyn_cast<RankedTensorType>();
46 
47   // Requires RankedTensorType.
48   if (!sourceType || !targetType)
49     return false;
50 
51   // Requires same elemental type.
52   if (sourceType.getElementType() != targetType.getElementType())
53     return false;
54 
55   // Requires same rank.
56   if (sourceType.getRank() != targetType.getRank())
57     return false;
58 
59   // If cast is towards more static sizes along any dimension, don't fold.
60   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
61     if (!ShapedType::isDynamic(std::get<0>(t)) &&
62         ShapedType::isDynamic(std::get<1>(t)))
63       return false;
64   }
65 
66   return true;
67 }
68 
69 /// Determines whether tensor::CastOp casts to a more dynamic version of the
70 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
71 /// implement canonicalization patterns for ops in different dialects that may
72 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted when `slice` ops are canonicalized, in
/// order to preserve the type compatibility of their uses.
75 ///
76 /// Returns true when all conditions are met:
/// 1. source and result are ranked tensors with the same element type and
///    rank.
/// 2. the cast source type has at least as much static information as the
///    result type.
79 ///
80 /// Example:
81 /// ```mlir
82 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
83 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
84 /// ```
85 ///
86 /// folds into:
87 ///
88 /// ```mlir
89 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
90 /// ```
91 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
92   if (!castOp)
93     return false;
94 
95   // Can fold if the source of cast has at least as much static information as
96   // its results.
97   return preservesStaticInformation(castOp.getType(),
98                                     castOp.source().getType());
99 }
100 
101 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
102 /// that can be folded.
103 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
104   bool folded = false;
105   for (OpOperand &operand : op->getOpOperands()) {
106     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
107     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
108       operand.set(castOp.getOperand());
109       folded = true;
110     }
111   }
112   return success(folded);
113 }
114 
115 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
116   if (inputs.size() != 1 || outputs.size() != 1)
117     return false;
118   Type a = inputs.front(), b = outputs.front();
119   auto aT = a.dyn_cast<TensorType>();
120   auto bT = b.dyn_cast<TensorType>();
121   if (!aT || !bT)
122     return false;
123 
124   if (aT.getElementType() != bT.getElementType())
125     return false;
126 
127   return succeeded(verifyCompatibleShape(aT, bT));
128 }
129 
130 /// Compute a TensorType that has the joined shape knowledge of the two
131 /// given TensorTypes. The element types need to match.
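///
/// For example (illustrative types), joining tensor<?x8xf32> with
/// tensor<4x?xf32> yields tensor<4x8xf32>, while joining tensor<4xf32> with
/// tensor<8xf32> yields a null type because the static extents conflict.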
132 static TensorType joinShapes(TensorType one, TensorType two) {
133   assert(one.getElementType() == two.getElementType());
134 
135   if (!one.hasRank())
136     return two;
137   if (!two.hasRank())
138     return one;
139 
140   int64_t rank = one.getRank();
141   if (rank != two.getRank())
142     return {};
143 
144   SmallVector<int64_t, 4> join;
145   join.reserve(rank);
146   for (int64_t i = 0; i < rank; ++i) {
147     if (one.isDynamicDim(i)) {
148       join.push_back(two.getDimSize(i));
149       continue;
150     }
151     if (two.isDynamicDim(i)) {
152       join.push_back(one.getDimSize(i));
153       continue;
154     }
155     if (one.getDimSize(i) != two.getDimSize(i))
156       return {};
157     join.push_back(one.getDimSize(i));
158   }
159   return RankedTensorType::get(join, one.getElementType());
160 }
161 
162 namespace {
163 
164 /// Replaces chains of two tensor.cast operations by a single tensor.cast
165 /// operation if doing so does not remove runtime constraints.
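///
/// For example (illustrative values):
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4xf32> to tensor<?xf32>
///   %2 = tensor.cast %1 : tensor<?xf32> to tensor<4xf32>
/// ```
///
/// is rewritten into a single cast of %0 to tensor<4xf32>. A chain going from
/// tensor<?xf32> through tensor<4xf32> back to tensor<?xf32>, however, is
/// left untouched because dropping the intermediate cast would remove the
/// runtime check that the extent is 4.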
166 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
167   using OpRewritePattern<CastOp>::OpRewritePattern;
168 
169   LogicalResult matchAndRewrite(CastOp tensorCast,
170                                 PatternRewriter &rewriter) const final {
171     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
172 
173     if (!tensorCastOperand)
174       return failure();
175 
176     auto sourceType =
177         tensorCastOperand.getOperand().getType().cast<TensorType>();
178     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
179     auto resultType = tensorCast.getType().cast<TensorType>();
180 
181     // We can remove the intermediate cast if joining all three produces the
182     // same result as just joining the source and result shapes.
183     auto firstJoin =
184         joinShapes(joinShapes(sourceType, intermediateType), resultType);
185 
186     // The join might not exist if the cast sequence would fail at runtime.
187     if (!firstJoin)
188       return failure();
189 
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
193     auto newJoin = joinShapes(sourceType, resultType);
194     if (firstJoin != newJoin)
195       return failure();
196 
197     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
198                                         tensorCastOperand.getOperand());
199     return success();
200   }
201 };
202 
203 } // namespace
204 
205 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
206                                          MLIRContext *context) {
207   results.add<ChainedTensorCast>(context);
208 }
209 
210 //===----------------------------------------------------------------------===//
211 // DimOp
212 //===----------------------------------------------------------------------===//
213 
214 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
215                   int64_t index) {
216   auto loc = result.location;
217   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
218   build(builder, result, source, indexValue);
219 }
220 
221 Optional<int64_t> DimOp::getConstantIndex() {
222   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
223     return constantOp.getValue().cast<IntegerAttr>().getInt();
224   return {};
225 }
226 
227 static LogicalResult verify(DimOp op) {
228   // Assume unknown index to be in range.
229   Optional<int64_t> index = op.getConstantIndex();
230   if (!index.hasValue())
231     return success();
232 
233   // Check that constant index is not knowingly out of range.
234   auto type = op.source().getType();
235   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
236     if (index.getValue() >= tensorType.getRank())
237       return op.emitOpError("index is out of range");
238   } else if (type.isa<UnrankedTensorType>()) {
239     // Assume index to be in range.
240   } else {
241     llvm_unreachable("expected operand with tensor type");
242   }
243   return success();
244 }
245 
246 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
247   // All forms of folding require a known index.
248   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
249   if (!index)
250     return {};
251 
252   // Folding for unranked types (UnrankedTensorType) is not supported.
253   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
254   if (!tensorType)
255     return {};
256 
257   // Fold if the shape extent along the given index is known.
258   if (!tensorType.isDynamicDim(index.getInt())) {
259     Builder builder(getContext());
260     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
261   }
262 
263   Operation *definingOp = source().getDefiningOp();
264 
265   // Fold dim to the operand of tensor.generate.
266   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
267     auto resultType =
268         fromElements.getResult().getType().cast<RankedTensorType>();
269     // The case where the type encodes the size of the dimension is handled
270     // above.
271     assert(resultType.getShape()[index.getInt()] ==
272            RankedTensorType::kDynamicSize);
273 
274     // Find the operand of the fromElements that corresponds to this index.
275     auto dynExtents = fromElements.dynamicExtents().begin();
276     for (auto dim : resultType.getShape().take_front(index.getInt()))
277       if (dim == RankedTensorType::kDynamicSize)
278         dynExtents++;
279 
280     return Value{*dynExtents};
281   }
282 
283   // The size at the given index is now known to be a dynamic size.
284   unsigned unsignedIndex = index.getValue().getZExtValue();
285 
286   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
287     // Fold only for non-rank reduced ops. For the rank-reduced version, rely on
288     // `resolve-shaped-type-result-dims` pass.
289     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
290         sliceOp.isDynamicSize(unsignedIndex)) {
291       return {sliceOp.getDynamicSize(unsignedIndex)};
292     }
293   }
294 
295   // dim(cast) -> dim
296   if (succeeded(foldTensorCast(*this)))
297     return getResult();
298 
299   return {};
300 }
301 
302 namespace {
303 /// Fold dim of a cast into the dim of the source of the tensor cast.
304 struct DimOfCastOp : public OpRewritePattern<DimOp> {
305   using OpRewritePattern<DimOp>::OpRewritePattern;
306 
307   LogicalResult matchAndRewrite(DimOp dimOp,
308                                 PatternRewriter &rewriter) const override {
309     auto castOp = dimOp.source().getDefiningOp<CastOp>();
310     if (!castOp)
311       return failure();
312     Value newSource = castOp.getOperand();
313     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
314     return success();
315   }
316 };
317 } // namespace
318 
319 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
320                                         MLIRContext *context) {
321   results.add<DimOfCastOp>(context);
322 }
323 
324 //===----------------------------------------------------------------------===//
325 // ExtractOp
326 //===----------------------------------------------------------------------===//
327 
328 static LogicalResult verify(ExtractOp op) {
329   // Verify the # indices match if we have a ranked type.
330   if (auto tensorType = op.tensor().getType().dyn_cast<RankedTensorType>())
331     if (tensorType.getRank() != static_cast<int64_t>(op.indices().size()))
332       return op.emitOpError("incorrect number of indices for extract_element");
333 
334   return success();
335 }
336 
337 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
338   // The tensor operand must be a known constant.
339   Attribute tensor = operands.front();
340   if (!tensor)
341     return {};
342   // If this is a splat elements attribute, simply return the value. All of the
343   // elements of a splat attribute are the same.
344   if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
345     return splatTensor.getSplatValue<Attribute>();
346 
347   // Otherwise, collect the constant indices into the tensor.
348   SmallVector<uint64_t, 8> indices;
  for (Attribute index : llvm::drop_begin(operands, 1)) {
    if (!index || !index.isa<IntegerAttr>())
      return {};
    indices.push_back(index.cast<IntegerAttr>().getInt());
353   }
354 
355   // If this is an elements attribute, query the value at the given indices.
356   auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
357   if (elementsAttr && elementsAttr.isValidIndex(indices))
358     return elementsAttr.getValues<Attribute>()[indices];
359   return {};
360 }
361 
362 //===----------------------------------------------------------------------===//
363 // FromElementsOp
364 //===----------------------------------------------------------------------===//
365 
366 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
367                            Type elementType, ValueRange elements) {
368   Type resultTy = RankedTensorType::get({static_cast<int64_t>(elements.size())},
369                                         elementType);
370   result.addOperands(elements);
371   result.addTypes(resultTy);
372 }
373 
374 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
375                            ValueRange elements) {
376   assert(!elements.empty() && "expected at least one element");
377   build(builder, result, elements.front().getType(), elements);
378 }
379 
380 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
381   if (!llvm::is_contained(operands, nullptr))
382     return DenseElementsAttr::get(getType(), operands);
383   return {};
384 }
385 
386 namespace {
387 
388 // Canonicalizes the pattern of the form
389 //
390 // %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
391 // %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
392 //
393 // to just %element.
394 struct ExtractElementFromTensorFromElements
395     : public OpRewritePattern<tensor::ExtractOp> {
396   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
397 
398   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
399                                 PatternRewriter &rewriter) const final {
400     if (extract.indices().size() != 1)
401       return failure();
402 
403     auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
404     if (tensorFromElements == nullptr)
405       return failure();
406 
407     APInt index;
408     if (!matchPattern(*extract.indices().begin(), m_ConstantInt(&index)))
409       return failure();
410     // Prevent out of bounds accesses. This can happen in invalid code that will
411     // never execute.
412     if (tensorFromElements->getNumOperands() <= index.getZExtValue() ||
413         index.getSExtValue() < 0)
414       return failure();
415     rewriter.replaceOp(extract,
416                        tensorFromElements.getOperand(index.getZExtValue()));
417     return success();
418   }
419 };
420 
421 } // namespace
422 
423 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
424                                                  MLIRContext *context) {
425   results.add<ExtractElementFromTensorFromElements>(context);
426 }
427 
428 //===----------------------------------------------------------------------===//
429 // InsertOp
430 //===----------------------------------------------------------------------===//
431 
432 static LogicalResult verify(InsertOp op) {
433   // Verify the # indices match if we have a ranked type.
434   if (auto destType = op.dest().getType().dyn_cast<RankedTensorType>())
435     if (destType.getRank() != static_cast<int64_t>(op.indices().size()))
436       return op.emitOpError("incorrect number of indices");
437   return success();
438 }
439 
440 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
441   Attribute scalar = operands[0];
442   Attribute dest = operands[1];
443   if (scalar && dest)
444     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
445       if (scalar == splatDest.getSplatValue<Attribute>())
446         return dest;
447   return {};
448 }
449 
450 //===----------------------------------------------------------------------===//
451 // GenerateOp
452 //===----------------------------------------------------------------------===//
453 
454 static LogicalResult verify(GenerateOp op) {
455   // Ensure that the tensor type has as many dynamic dimensions as are specified
456   // by the operands.
457   RankedTensorType resultTy = op.getType().cast<RankedTensorType>();
458   if (op.getNumOperands() != resultTy.getNumDynamicDims())
459     return op.emitError("must have as many index operands as dynamic extents "
460                         "in the result type");
461 
462   // Ensure that region arguments span the index space.
463   if (!llvm::all_of(op.body().getArgumentTypes(),
464                     [](Type ty) { return ty.isIndex(); }))
465     return op.emitError("all body arguments must be index");
466   if (op.body().getNumArguments() != resultTy.getRank())
467     return op.emitError("must have one body argument per input dimension");
468 
469   // Ensure that the region yields an element of the right type.
470   auto yieldOp =
471       llvm::cast<YieldOp>(op.body().getBlocks().front().getTerminator());
472   if (yieldOp.value().getType() != resultTy.getElementType())
473     return op.emitOpError(
474         "body must be terminated with a `yield` operation of the tensor "
475         "element type");
476 
477   return success();
478 }
479 
480 void GenerateOp::build(
481     OpBuilder &b, OperationState &result, Type resultTy,
482     ValueRange dynamicExtents,
483     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
484   build(b, result, resultTy, dynamicExtents);
485 
486   // Build and populate body.
487   OpBuilder::InsertionGuard guard(b);
488   Region *bodyRegion = result.regions.front().get();
489   auto rank = resultTy.cast<RankedTensorType>().getRank();
490   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
491   Block *bodyBlock =
492       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes);
493   bodyBuilder(b, result.location, bodyBlock->getArguments());
494 }
495 
496 namespace {
497 
/// Canonicalizes tensor.generate operations whose dynamic extent operands are
/// constants into the equivalent operation with those extents expressed in
/// the result type instead. We also insert a tensor.cast to make sure that
/// the resulting IR is still well-typed.
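///
/// For example (illustrative values):
///
/// ```mlir
///   %size = arith.constant 16 : index
///   %0 = tensor.generate %size { ... } : tensor<?xf32>
/// ```
///
/// is rewritten to generate a tensor<16xf32> directly, followed by a
/// tensor.cast back to tensor<?xf32> so that existing uses keep their type.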
502 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
503   using OpRewritePattern<GenerateOp>::OpRewritePattern;
504 
505   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
506                                 PatternRewriter &rewriter) const final {
507     auto resultType =
508         tensorFromElements.getResult().getType().cast<RankedTensorType>();
509 
510     if (resultType.hasStaticShape())
511       return failure();
512 
513     SmallVector<Value, 4> newOperands;
514     SmallVector<int64_t, 4> newShape;
515     auto operandsIt = tensorFromElements.dynamicExtents().begin();
516 
517     for (int64_t dim : resultType.getShape()) {
518       if (dim != RankedTensorType::kDynamicSize) {
519         newShape.push_back(dim);
520         continue;
521       }
522       APInt index;
523       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
524         newShape.push_back(RankedTensorType::kDynamicSize);
525         newOperands.push_back(*operandsIt++);
526         continue;
527       }
528       newShape.push_back(index.getSExtValue());
529       operandsIt++;
530     }
531 
532     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
533       return failure();
534 
535     auto loc = tensorFromElements.getLoc();
536     auto newOp = rewriter.create<GenerateOp>(
537         loc, RankedTensorType::get(newShape, resultType.getElementType()),
538         newOperands);
539     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
540                                 newOp.body().begin());
541     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
542                                                 newOp);
543     return success();
544   }
545 };
546 
547 /// Canonicalizes the pattern of the form
548 ///
549 /// %tensor = tensor.generate %x {
550 ///   ^bb0(%arg0: index):  // no predecessors
551 ///   <computation>
552 ///   yield %1 : index
553 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
555 ///
556 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
557 /// tensor.generate operation has no side-effects.
558 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
559   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
560 
561   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
562                                 PatternRewriter &rewriter) const final {
563     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
564     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
565       return failure();
566 
567     BlockAndValueMapping mapping;
568     Block *body = tensorFromElements.getBody();
569     mapping.map(body->getArguments(), extract.indices());
570     for (auto &op : body->without_terminator())
571       rewriter.clone(op, mapping);
572 
573     auto yield = cast<YieldOp>(body->getTerminator());
574 
575     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
576     return success();
577   }
578 };
579 
580 /// Canonicalizes the pattern of the form
581 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
583 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
584 ///
585 /// to
586 ///
587 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
588 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
589   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
590 
591   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
592                                 PatternRewriter &rewriter) const final {
593     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
594     if (!tensorCast)
595       return failure();
596 
597     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
598                                                    extract.indices());
599     return success();
600   }
601 };
602 
603 } // namespace
604 
605 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
606                                              MLIRContext *context) {
607   // TODO: Move extract patterns to tensor::ExtractOp.
608   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
609               StaticTensorGenerate>(context);
610 }
611 
612 //===----------------------------------------------------------------------===//
613 // RankOp
614 //===----------------------------------------------------------------------===//
615 
616 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
617   // Constant fold rank when the rank of the operand is known.
618   auto type = getOperand().getType();
619   auto shapedType = type.dyn_cast<ShapedType>();
620   if (shapedType && shapedType.hasRank())
621     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
622   return IntegerAttr();
623 }
624 
625 //===----------------------------------------------------------------------===//
626 // ReshapeOp
627 //===----------------------------------------------------------------------===//
628 
629 static int64_t GetNumElements(ShapedType type) {
630   int64_t numElements = 1;
631   for (auto dim : type.getShape())
632     numElements *= dim;
633   return numElements;
634 }
635 
636 static LogicalResult verify(ReshapeOp op) {
637   TensorType operandType = op.source().getType().cast<TensorType>();
638   TensorType resultType = op.result().getType().cast<TensorType>();
639 
640   if (operandType.getElementType() != resultType.getElementType())
641     return op.emitOpError("element types of source and destination tensor "
642                           "types should be the same");
643 
644   int64_t shapeSize =
645       op.shape().getType().cast<RankedTensorType>().getDimSize(0);
646   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
647   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
648 
649   if (resultRankedType) {
650     if (operandRankedType && resultRankedType.hasStaticShape() &&
651         operandRankedType.hasStaticShape()) {
652       if (GetNumElements(operandRankedType) != GetNumElements(resultRankedType))
653         return op.emitOpError("source and destination tensor should have the "
654                               "same number of elements");
655     }
656     if (shapeSize == TensorType::kDynamicSize)
657       return op.emitOpError("cannot use shape operand with dynamic length to "
658                             "reshape to statically-ranked tensor type");
659     if (shapeSize != resultRankedType.getRank())
660       return op.emitOpError(
661           "length of shape operand differs from the result's tensor rank");
662   }
663   return success();
664 }
665 
666 //===----------------------------------------------------------------------===//
667 // Reassociative reshape ops
668 //===----------------------------------------------------------------------===//
669 
670 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
671   return getSymbolLessAffineMaps(getReassociationExprs());
672 }
673 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
674   return convertReassociationIndicesToExprs(getContext(),
675                                             getReassociationIndices());
676 }
677 
678 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
679   return getSymbolLessAffineMaps(getReassociationExprs());
680 }
681 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
682   return convertReassociationIndicesToExprs(getContext(),
683                                             getReassociationIndices());
684 }
685 
686 static void print(OpAsmPrinter &p, ExpandShapeOp op) {
687   ::mlir::printReshapeOp<ExpandShapeOp>(p, op);
688 }
689 
690 static void print(OpAsmPrinter &p, CollapseShapeOp op) {
691   ::mlir::printReshapeOp<CollapseShapeOp>(p, op);
692 }
693 
694 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
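/// For example (illustrative), collapsing tensor<4x?x8xf32> with the
/// reassociation {[0, 1], [2]} yields tensor<?x8xf32>: a group containing a
/// dynamic extent collapses to a dynamic extent, otherwise the group
/// collapses to the product of its static extents.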
695 static RankedTensorType
696 computeTensorReshapeCollapsedType(RankedTensorType type,
697                                   ArrayRef<AffineMap> reassociation) {
698   auto shape = type.getShape();
699   SmallVector<int64_t, 4> newShape;
700   newShape.reserve(reassociation.size());
701 
702   // Use the fact that reassociation is valid to simplify the logic: only use
703   // each map's rank.
704   assert(isReassociationValid(reassociation) && "invalid reassociation");
705   unsigned currentDim = 0;
706   for (AffineMap m : reassociation) {
707     unsigned dim = m.getNumResults();
708     auto band = shape.slice(currentDim, dim);
709     int64_t size = 1;
710     if (llvm::is_contained(band, ShapedType::kDynamicSize))
711       size = ShapedType::kDynamicSize;
712     else
713       for (unsigned d = 0; d < dim; ++d)
714         size *= shape[currentDim + d];
715     newShape.push_back(size);
716     currentDim += dim;
717   }
718 
719   return RankedTensorType::get(newShape, type.getElementType());
720 }
721 
722 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
723                             ArrayRef<ReassociationIndices> reassociation,
724                             ArrayRef<NamedAttribute> attrs) {
725   auto resultType = computeTensorReshapeCollapsedType(
726       src.getType().cast<RankedTensorType>(),
727       getSymbolLessAffineMaps(
728           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
729   build(b, result, resultType, src, attrs);
730   result.addAttribute(getReassociationAttrName(),
731                       getReassociationIndicesAttribute(b, reassociation));
732 }
733 
734 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
735                           ArrayRef<ReassociationIndices> reassociation,
736                           ArrayRef<NamedAttribute> attrs) {
737   auto resultType = computeTensorReshapeCollapsedType(
738       src.getType().cast<RankedTensorType>(),
739       getSymbolLessAffineMaps(
740           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
741   build(b, result, resultType, src, attrs);
742   result.addAttribute(getReassociationAttrName(),
743                       getReassociationIndicesAttribute(b, reassociation));
744 }
745 
746 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
747                                         TensorReshapeOp, ExpandShapeOp>::value>
748 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
749                                            RankedTensorType expandedType,
750                                            RankedTensorType collapsedType) {
751   if (failed(
752           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
753     return failure();
754 
755   auto maps = op.getReassociationMaps();
756   RankedTensorType expectedType =
757       computeTensorReshapeCollapsedType(expandedType, maps);
758   if (collapsedType != expectedType)
759     return op.emitOpError("expected collapsed type to be ")
760            << expectedType << ", but got " << collapsedType;
761   return success();
762 }
763 
764 static LogicalResult verify(ExpandShapeOp op) {
765   return verifyTensorReshapeOp(op, op.getResultType(), op.getSrcType());
766 }
767 
768 static LogicalResult verify(CollapseShapeOp op) {
769   return verifyTensorReshapeOp(op, op.getSrcType(), op.getResultType());
770 }
771 
772 namespace {
773 /// Reshape of a splat constant can be replaced with a constant of the result
774 /// type.
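///
/// For example (illustrative values):
///
/// ```mlir
///   %cst = arith.constant dense<1.0> : tensor<4x2xf32>
///   %0 = tensor.collapse_shape %cst [[0, 1]] : tensor<4x2xf32> into tensor<8xf32>
/// ```
///
/// is replaced by a single splat constant of type tensor<8xf32>.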
775 template <typename TensorReshapeOp>
776 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
777   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
778   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
779                                 PatternRewriter &rewriter) const override {
780     DenseElementsAttr attr;
781     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
782       return failure();
783     if (!attr || !attr.isSplat())
784       return failure();
785     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
786         reshapeOp.getResultType(), attr.getRawData(), true);
787     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
788     return success();
789   }
790 };
791 
792 } // namespace
793 
794 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
795                                                 MLIRContext *context) {
796   results.add<CollapseReshapeOps<ExpandShapeOp>,
797               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
798               FoldReshapeWithConstant<ExpandShapeOp>>(context);
799 }
800 
801 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
802                                                   MLIRContext *context) {
803   results.add<CollapseReshapeOps<CollapseShapeOp>,
804               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
805               FoldReshapeWithConstant<CollapseShapeOp>>(context);
806 }
807 
808 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
809   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
810 }
811 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
812   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
813 }
814 
815 //===----------------------------------------------------------------------===//
816 // ExtractSliceOp
817 //===----------------------------------------------------------------------===//
818 
819 /// An extract_slice op result type can be fully inferred from the source type
820 /// and the static representation of offsets, sizes and strides. Special
821 /// sentinels encode the dynamic case.
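///
/// For example (illustrative values), a source type of tensor<8x16x4xf32>
/// with static sizes [4, 4, kDynamicSize] infers the result type
/// tensor<4x4x?xf32>; the offsets and strides do not affect the inferred
/// shape.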
822 RankedTensorType
823 ExtractSliceOp::inferResultType(RankedTensorType sourceRankedTensorType,
824                                 ArrayRef<int64_t> leadingStaticOffsets,
825                                 ArrayRef<int64_t> leadingStaticSizes,
826                                 ArrayRef<int64_t> leadingStaticStrides) {
  // An extract_slice op may specify only a leading subset of offset/sizes/
  // strides in which case we complete with offset=0, sizes from the source
  // tensor type and strides=1.
830   unsigned rank = sourceRankedTensorType.getRank();
831   assert(leadingStaticSizes.size() <= rank &&
832          "unexpected leadingStaticSizes overflow");
833   auto staticSizes = llvm::to_vector<4>(leadingStaticSizes);
834   unsigned numTrailingSizes = rank - staticSizes.size();
835   llvm::append_range(staticSizes, sourceRankedTensorType.getShape().take_back(
836                                       numTrailingSizes));
837   return RankedTensorType::get(staticSizes,
838                                sourceRankedTensorType.getElementType());
839 }
840 
841 RankedTensorType
842 ExtractSliceOp::inferResultType(RankedTensorType sourceRankedTensorType,
843                                 ArrayRef<OpFoldResult> leadingStaticOffsets,
844                                 ArrayRef<OpFoldResult> leadingStaticSizes,
845                                 ArrayRef<OpFoldResult> leadingStaticStrides) {
846   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
847   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
848   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
849                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
850   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
851                              ShapedType::kDynamicSize);
852   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
853                              staticStrides, ShapedType::kDynamicStrideOrOffset);
854   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
855                                          staticSizes, staticStrides);
856 }
857 
858 /// An extract_slice op result type can be fully inferred from the source type
859 /// and the static representation of offsets, sizes and strides. Special
860 /// sentinels encode the dynamic case.
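///
/// In the rank-reduced variant the result additionally drops unit dimensions.
/// For example (illustrative values), requesting `resultRank` = 2 for a
/// source of type tensor<8x16x4xf32> with static sizes [1, 16, 4] first
/// infers tensor<1x16x4xf32> and then drops the leading unit dimension to
/// return tensor<16x4xf32>.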
861 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
862     unsigned resultRank, RankedTensorType sourceRankedTensorType,
863     ArrayRef<int64_t> leadingStaticOffsets,
864     ArrayRef<int64_t> leadingStaticSizes,
865     ArrayRef<int64_t> leadingStaticStrides) {
866   auto inferredType =
867       inferResultType(sourceRankedTensorType, leadingStaticOffsets,
868                       leadingStaticSizes, leadingStaticStrides)
869           .cast<RankedTensorType>();
870   int rankDiff = inferredType.getRank() - resultRank;
871   if (rankDiff > 0) {
872     auto shape = inferredType.getShape();
873     llvm::SmallDenseSet<unsigned> dimsToProject;
874     mlir::getPositionsOfShapeOne(rankDiff, shape, dimsToProject);
875     SmallVector<int64_t> projectedShape;
876     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
877       if (!dimsToProject.contains(pos))
878         projectedShape.push_back(shape[pos]);
879     inferredType =
880         RankedTensorType::get(projectedShape, inferredType.getElementType());
881   }
882   return inferredType;
883 }
884 
885 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
886     unsigned resultRank, RankedTensorType sourceRankedTensorType,
887     ArrayRef<OpFoldResult> leadingStaticOffsets,
888     ArrayRef<OpFoldResult> leadingStaticSizes,
889     ArrayRef<OpFoldResult> leadingStaticStrides) {
890   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
891   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
892   dispatchIndexOpFoldResults(leadingStaticOffsets, dynamicOffsets,
893                              staticOffsets, ShapedType::kDynamicStrideOrOffset);
894   dispatchIndexOpFoldResults(leadingStaticSizes, dynamicSizes, staticSizes,
895                              ShapedType::kDynamicSize);
896   dispatchIndexOpFoldResults(leadingStaticStrides, dynamicStrides,
897                              staticStrides, ShapedType::kDynamicStrideOrOffset);
898   return ExtractSliceOp::inferRankReducedResultType(
899       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
900       staticStrides);
901 }
902 
903 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
904 /// result type. If the type passed is nullptr, it is inferred.
905 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
906                            RankedTensorType resultType, Value source,
907                            ArrayRef<OpFoldResult> offsets,
908                            ArrayRef<OpFoldResult> sizes,
909                            ArrayRef<OpFoldResult> strides,
910                            ArrayRef<NamedAttribute> attrs) {
911   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
912   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
916   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
917                              ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
921   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
922   // Structuring implementation this way avoids duplication between builders.
923   if (!resultType) {
924     resultType =
925         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
926                                         staticSizes, staticStrides)
927             .cast<RankedTensorType>();
928   }
929   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
930         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
931         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
932   result.addAttributes(attrs);
933 }
934 
935 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
936 /// result type.
937 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
938                            ArrayRef<OpFoldResult> offsets,
939                            ArrayRef<OpFoldResult> sizes,
940                            ArrayRef<OpFoldResult> strides,
941                            ArrayRef<NamedAttribute> attrs) {
942   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
943 }
944 
945 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
946 /// type passed is nullptr, it is inferred.
947 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
948                            RankedTensorType resultType, Value source,
949                            ValueRange offsets, ValueRange sizes,
950                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
951   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
952       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
953   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
954       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
955   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
956       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
957   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
958 }
959 
960 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
961 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
962                            ValueRange offsets, ValueRange sizes,
963                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
964   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
965 }
966 
967 template <typename OpTy>
968 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
969                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
971   switch (result) {
972   case SliceVerificationResult::Success:
973     return success();
974   case SliceVerificationResult::RankTooLarge:
975     return op.emitError("expected rank to be smaller or equal to ")
976            << "the other rank. ";
977   case SliceVerificationResult::SizeMismatch:
978     return op.emitError("expected type to be ")
979            << expectedType << " or a rank-reduced version. (size mismatch) ";
980   case SliceVerificationResult::ElemTypeMismatch:
981     return op.emitError("expected element type to be ")
982            << memrefType.getElementType();
983   default:
984     llvm_unreachable("unexpected extract_slice op verification result");
985   }
986 }
987 
988 /// Verifier for ExtractSliceOp.
989 static LogicalResult verify(ExtractSliceOp op) {
990   // Verify result type against inferred type.
991   auto expectedType =
992       ExtractSliceOp::inferResultType(op.getSourceType(), op.getMixedOffsets(),
993                                       op.getMixedSizes(), op.getMixedStrides());
994   auto result =
995       isRankReducedType(expectedType.cast<ShapedType>(), op.getType());
996   return produceSliceErrorMsg(result, op, expectedType);
997 }
998 
/// Infer the canonical type of the result of an extract_slice op. Returns the
/// rank-reduced inferred type if it has rank `resultRank`, and the
/// non-rank-reduced inferred type otherwise.
1002 static RankedTensorType
1003 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1004                             ArrayRef<OpFoldResult> mixedOffsets,
1005                             ArrayRef<OpFoldResult> mixedSizes,
1006                             ArrayRef<OpFoldResult> mixedStrides) {
1007   auto resultType =
1008       ExtractSliceOp::inferRankReducedResultType(
1009           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1010           .cast<RankedTensorType>();
1011   if (resultType.getRank() != resultRank) {
1012     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1013                                                  mixedSizes, mixedStrides)
1014                      .cast<RankedTensorType>();
1015   }
1016   return resultType;
1017 }
1018 
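/// Return the positions of the unit-sized slice dimensions that are dropped
/// by this rank-reducing extract_slice, i.e. dimensions whose slice size is
/// the constant 1 but which do not appear in the result type.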
1019 llvm::SmallDenseSet<unsigned> ExtractSliceOp::getDroppedDims() {
1020   llvm::SmallDenseSet<unsigned> droppedDims;
1021   ArrayRef<int64_t> resultShape = getType().getShape();
1022   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1023   unsigned shapePos = 0;
1024   for (auto size : enumerate(mixedSizes)) {
1025     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // A dimension is preserved if its slice size is not the constant 1, or if
    // the size is 1 but the corresponding result dimension is also statically
    // of size 1 (i.e. the unit dimension was kept rather than dropped).
1029     if (!sizeVal || sizeVal.getValue() != 1 ||
1030         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1031       shapePos++;
1032       continue;
1033     }
1034     droppedDims.insert(size.index());
1035   }
1036   return droppedDims;
1037 }
1038 
1039 LogicalResult ExtractSliceOp::reifyResultShapes(
1040     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1041   reifiedReturnShapes.resize(1);
1042   reifiedReturnShapes[0].reserve(getType().getRank());
1043   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1044   llvm::SmallDenseSet<unsigned> droppedDims = getDroppedDims();
1045   Location loc = getLoc();
1046   for (auto size : enumerate(mixedSizes)) {
1047     if (droppedDims.count(size.index()))
1048       continue;
1049     if (auto attr = size.value().dyn_cast<Attribute>()) {
1050       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1051           loc, attr.cast<IntegerAttr>().getInt()));
1052       continue;
1053     }
1054     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1055   }
1056   return success();
1057 }
1058 
1059 namespace {
1060 /// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
1062 /// `canFoldIntoConsumerOp` is true.
1063 ///
1064 /// Example:
1065 /// ```
1066 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1067 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1068 ///   tensor<3x4xf32>
1069 /// ```
1070 /// is rewritten into:
1071 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1]
///     : tensor<16x16xf32> to tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1074 /// ```
1075 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1076 public:
1077   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1078 
1079   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1080                                 PatternRewriter &rewriter) const override {
    // Bail out on any constant operand and let the constant-argument folder
    // pattern kick in first.
1082     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1083           return matchPattern(operand, matchConstantIndex());
1084         }))
1085       return failure();
1086 
1087     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1088     if (!castOp)
1089       return failure();
1090 
1091     if (!canFoldIntoConsumerOp(castOp))
1092       return failure();
1093 
1094     /// Deduce the type of the result to use for the canonicalized operation.
1095     RankedTensorType resultType = getCanonicalSliceResultType(
1096         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1097         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1098         sliceOp.getMixedStrides());
1099     Value newSlice = rewriter.create<ExtractSliceOp>(
1100         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1101         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1102         sliceOp.static_sizes(), sliceOp.static_strides());
1103     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1104                                                 newSlice);
1105     return success();
1106   }
1107 };
1108 } // namespace
1109 
1110 /// Return the canonical type of the result of an extract_slice op.
1111 struct SliceReturnTypeCanonicalizer {
1112   RankedTensorType operator()(ExtractSliceOp op,
1113                               ArrayRef<OpFoldResult> mixedOffsets,
1114                               ArrayRef<OpFoldResult> mixedSizes,
1115                               ArrayRef<OpFoldResult> mixedStrides) {
1116     return getCanonicalSliceResultType(op.getType().getRank(),
1117                                        op.getSourceType(), mixedOffsets,
1118                                        mixedSizes, mixedStrides);
1119   }
1120 };
1121 
1122 /// A canonicalizer wrapper to replace ExtractSliceOps.
1123 struct SliceCanonicalizer {
1124   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1125                   ExtractSliceOp newOp) {
1126     Value replacement = newOp.getResult();
1127     if (replacement.getType() != op.getType())
1128       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1129                                                     replacement);
1130     rewriter.replaceOp(op, replacement);
1131   }
1132 };
1133 
1134 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1135                                                  MLIRContext *context) {
1136   results.add<
1137       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1138           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1139       ExtractSliceOpCastFolder>(context);
1140 }
1141 
1142 //
1143 static LogicalResult
1144 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1145                                            ShapedType shapedType) {
1146   OpBuilder b(op.getContext());
1147   for (OpFoldResult ofr : op.getMixedOffsets())
1148     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1149       return failure();
1150   // Rank-reducing noops only need to inspect the leading dimensions: llvm::zip
1151   // is appropriate.
1152   auto shape = shapedType.getShape();
1153   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1154     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1155       return failure();
1156   for (OpFoldResult ofr : op.getMixedStrides())
1157     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1158       return failure();
1159   return success();
1160 }
1161 
1162 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1163 /// we can return the InsertSliceOp's source directly.
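///
/// For example (illustrative values):
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dest[0, 1] [2, 3] [1, 1]
///       : tensor<2x3xf32> into tensor<8x8xf32>
///   %1 = tensor.extract_slice %0[0, 1] [2, 3] [1, 1]
///       : tensor<8x8xf32> to tensor<2x3xf32>
/// ```
///
/// folds %1 directly to %src.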
1164 // TODO: This only checks the immediate producer; extend to go up the
1165 // insert/extract chain if the slices are disjoint.
1166 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1167   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1168 
1169   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1170   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1171       insertOp.isSameAs(extractOp, isSame))
1172     return insertOp.source();
1173 
1174   return {};
1175 }
1176 
1177 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute>) {
1178   if (getSourceType() == getType() &&
1179       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1180     return this->source();
1181   if (Value slice = foldExtractAfterInsertSlice(*this))
1182     return slice;
1183   return OpFoldResult();
1184 }
1185 
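/// Create a rank-reducing tensor.extract_slice that extracts the full source
/// `tensor` (all offsets 0, sizes equal to the source dimensions, strides 1)
/// with result type `targetType`.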
1186 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1187     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1188   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1189   unsigned rank = rankedTensorType.getRank();
1190   auto shape = rankedTensorType.getShape();
1191   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1192   SmallVector<OpFoldResult> sizes;
1193   for (unsigned i = 0, e = rank; i < e; ++i) {
1194     OpFoldResult dim;
1195     if (rankedTensorType.isDynamicDim(i))
1196       dim = b.createOrFold<tensor::DimOp>(
1197           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1198     else
1199       dim = b.getIndexAttr(shape[i]);
1200     sizes.push_back(dim);
1201   }
1202   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1203   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1204                                                 offsets, sizes, strides);
1205 }
1206 
1207 //===----------------------------------------------------------------------===//
1208 // InsertSliceOp
1209 //===----------------------------------------------------------------------===//
1210 
// Build an InsertSliceOp with mixed static and dynamic entries.
1212 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1213                           Value dest, ArrayRef<OpFoldResult> offsets,
1214                           ArrayRef<OpFoldResult> sizes,
1215                           ArrayRef<OpFoldResult> strides,
1216                           ArrayRef<NamedAttribute> attrs) {
1217   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1218   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
                             ShapedType::kDynamicStrideOrOffset);
1222   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1223                              ShapedType::kDynamicSize);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
                             ShapedType::kDynamicStrideOrOffset);
1227   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1228         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1229         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1230   result.addAttributes(attrs);
1231 }
1232 
// Build an InsertSliceOp with dynamic entries.
1234 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1235                           Value dest, ValueRange offsets, ValueRange sizes,
1236                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1237   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1238       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1239   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1240       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1241   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1242       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1243   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1244 }
1245 
1246 /// Verifier for InsertSliceOp.
1247 static LogicalResult verify(InsertSliceOp op) {
1248   // insert_slice is the inverse of extract_slice, use the same type inference.
1249   auto expectedType = ExtractSliceOp::inferRankReducedResultType(
1250       op.getSourceType().getRank(), op.getType(),
1251       extractFromI64ArrayAttr(op.static_offsets()),
1252       extractFromI64ArrayAttr(op.static_sizes()),
1253       extractFromI64ArrayAttr(op.static_strides()));
1254   auto result =
1255       isRankReducedType(expectedType.cast<ShapedType>(), op.getSourceType());
1256   return produceSliceErrorMsg(result, op, expectedType);
1257 }
1258 
/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can mutate the second InsertSliceOp's destination to the first one's.
1261 ///
1262 /// Example:
1263 ///
1264 /// ```mlir
1265 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1266 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1267 /// ```
1268 ///
1269 /// folds into:
1270 ///
1271 /// ```mlir
1272 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1273 /// ```
1274 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1275   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1276 
  auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
  if (!prevInsertOp ||
      prevInsertOp.source().getType() != insertOp.source().getType() ||
      !prevInsertOp.isSameAs(insertOp, isSame))
    return failure();

  insertOp.destMutable().assign(prevInsertOp.dest());
  return success();
}

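/// Fold an insert_slice that writes a source of identical static type over the
/// whole destination back to the source value; otherwise try the
/// insert-after-insert folding above. For example (an illustrative sketch,
/// shapes chosen arbitrarily), the following op folds to %src:
///
/// ```mlir
///   %r = tensor.insert_slice %src into %dst[0, 0] [8, 16] [1, 1]
///       : tensor<8x16xf32> into tensor<8x16xf32>
/// ```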
OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
  if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
      getSourceType() == getType() &&
      succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
    return this->source();
  if (succeeded(foldInsertAfterInsertSlice(*this)))
    return getResult();
  return OpFoldResult();
}

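/// The result of an insert_slice always has the shape of its destination, so
/// each result dimension is reified as a tensor.dim of the `dest` operand.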
LogicalResult InsertSliceOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
  for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
    reifiedReturnShapes[0][dim] =
        builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
  }
  return success();
}

namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
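///
/// Offsets, sizes, and strides produced by constant index ops are folded into
/// the op's static attributes. For example (illustrative shapes):
///
/// ```mlir
///   %c0 = arith.constant 0 : index
///   %r = tensor.insert_slice %src into %dst[%c0, %c0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x16xf32>
/// ```
///
/// becomes
///
/// ```mlir
///   %r = tensor.insert_slice %src into %dst[0, 0] [4, 4] [1, 1]
///       : tensor<4x4xf32> into tensor<8x16xf32>
/// ```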
class InsertSliceOpConstantArgumentFolder final
    : public OpRewritePattern<InsertSliceOp> {
public:
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    // No constant operand, just return.
    if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
    SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
    SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
    SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
    canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
    canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
    canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);

    // Create the new op in canonical form.
    auto sourceType = ExtractSliceOp::inferRankReducedResultType(
        insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
        mixedOffsets, mixedSizes, mixedStrides);
    Value toInsert = insertSliceOp.source();
    if (sourceType != insertSliceOp.getSourceType())
      toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
                                                 sourceType, toInsert);
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
        mixedStrides);
    return success();
  }
};

/// Fold tensor_casts with insert_slice operations. If the source or
/// destination tensor is a tensor_cast that removes static type information,
/// the cast is folded into the insert_slice operation. E.g.:
///
/// ```mlir
///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
/// ```
///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back to the original type to ensure that
/// the type of the result does not change.
struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
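    // If any offset/size/stride operand is a constant index, bail out and let
    // the constant argument folder above turn it into a static attribute
    // first.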
    if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
          return matchPattern(operand, matchConstantIndex());
        }))
      return failure();

    auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
      auto castOp = v.getDefiningOp<tensor::CastOp>();
      if (!castOp || !canFoldIntoConsumerOp(castOp))
        return llvm::None;
      return castOp.source();
    };
    Optional<Value> sourceCastSource =
        getSourceOfCastOp(insertSliceOp.source());
    Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
    if (!sourceCastSource && !destCastSource)
      return failure();

    Value replacement = rewriter.create<InsertSliceOp>(
        insertSliceOp.getLoc(),
        (sourceCastSource ? *sourceCastSource : insertSliceOp.source()),
        (destCastSource ? *destCastSource : insertSliceOp.dest()),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());

    if (replacement.getType() != insertSliceOp.getType()) {
      replacement = rewriter.create<tensor::CastOp>(
          insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
    }
    rewriter.replaceOp(insertSliceOp, replacement);
    return success();
  }
};

/// If additional static type information can be deduced from an
/// insert_slice's size operands, insert an explicit cast of the op's source
/// operand. This enables other canonicalization patterns that match
/// tensor_cast ops, such as `ForOpTensorCastFolder` in SCF.
///
/// Example:
///
/// ```mlir
///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
///       : tensor<?x?xf32> into ...
/// ```
///
/// folds into:
///
/// ```mlir
///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
///       : tensor<64x64xf32> into ...
/// ```
struct InsertSliceOpSourceCastInserter final
    : public OpRewritePattern<InsertSliceOp> {
  using OpRewritePattern<InsertSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType srcType = insertSliceOp.getSourceType();
    if (srcType.getRank() != insertSliceOp.getType().getRank())
      return failure();
    SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
                                     srcType.getShape().end());
    for (int64_t i = 0; i < srcType.getRank(); ++i) {
      if (Optional<int64_t> constInt =
              getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
        newSrcShape[i] = *constInt;
    }

    RankedTensorType newSrcType =
        RankedTensorType::get(newSrcShape, srcType.getElementType());
    if (srcType == newSrcType ||
        !preservesStaticInformation(srcType, newSrcType) ||
        !tensor::CastOp::areCastCompatible(srcType, newSrcType))
      return failure();

    // newSrcType is:
    //   1) Different from srcType.
    //   2) "More static" than srcType.
    //   3) Cast-compatible with srcType.
    // Insert the cast.
    Value cast = rewriter.create<tensor::CastOp>(
        insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
    rewriter.replaceOpWithNewOp<InsertSliceOp>(
        insertSliceOp, cast, insertSliceOp.dest(),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());
    return success();
  }
};
} // namespace

void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
              InsertSliceOpSourceCastInserter>(context);
}

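/// Create an insert_slice that writes `tensor` over all of `dest`: zero
/// offsets, unit strides, and sizes taken from the destination shape (with
/// tensor.dim ops reified for dynamic dimensions). For example (an
/// illustrative sketch, shapes chosen arbitrarily), inserting a 1-D source
/// into a 2-D destination with a leading unit dimension produces:
///
/// ```mlir
///   %c1 = arith.constant 1 : index
///   %d1 = tensor.dim %dest, %c1 : tensor<1x?xf32>
///   %r = tensor.insert_slice %src into %dest[0, 0] [1, %d1] [1, 1]
///       : tensor<?xf32> into tensor<1x?xf32>
/// ```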
Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
                                                             Location loc,
                                                             Value tensor,
                                                             Value dest) {
  auto rankedTensorType = dest.getType().cast<RankedTensorType>();
  unsigned rank = rankedTensorType.getRank();
  auto shape = rankedTensorType.getShape();
  SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
  SmallVector<OpFoldResult> sizes;
  for (unsigned i = 0, e = rank; i < e; ++i) {
    OpFoldResult dim;
    if (rankedTensorType.isDynamicDim(i))
      dim = b.createOrFold<tensor::DimOp>(
          loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
    else
      dim = b.getIndexAttr(shape[i]);
    sizes.push_back(dim);
  }
  SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
  return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
                                               sizes, strides);
}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"