1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/Utils/Utils.h"
10 #include "mlir/Dialect/Tensor/IR/Tensor.h"
11 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
12 #include "mlir/Dialect/Utils/StaticValueUtils.h"
13 #include "mlir/IR/BlockAndValueMapping.h"
14 #include "mlir/IR/Builders.h"
15 #include "mlir/IR/BuiltinAttributeInterfaces.h"
16 #include "mlir/IR/Matchers.h"
17 #include "mlir/IR/TypeUtilities.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallBitVector.h"
20 
21 using namespace mlir;
22 using namespace mlir::tensor;
23 
24 /// Materialize a single constant operation from a given attribute value with
25 /// the desired resultant type.
26 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
27                                               Attribute value, Type type,
28                                               Location loc) {
29   if (arith::ConstantOp::isBuildableWith(value, type))
30     return builder.create<arith::ConstantOp>(loc, value, type);
31   if (complex::ConstantOp::isBuildableWith(value, type))
32     return builder.create<complex::ConstantOp>(loc, type,
33                                                value.cast<ArrayAttr>());
34   return nullptr;
35 }
36 
37 //===----------------------------------------------------------------------===//
38 // CastOp
39 //===----------------------------------------------------------------------===//
40 
41 /// Returns true if `target` is a ranked tensor type that preserves static
42 /// information available in the `source` ranked tensor type.
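/// For illustration (types chosen arbitrarily):
///   preservesStaticInformation(tensor<?x?xf32>, tensor<8x?xf32>) is true,
///   since the target only adds static information;
///   preservesStaticInformation(tensor<8x?xf32>, tensor<?x?xf32>) is false,
///   since the target drops the static size 8 of the source.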
43 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
44   auto sourceType = source.dyn_cast<RankedTensorType>();
45   auto targetType = target.dyn_cast<RankedTensorType>();
46 
47   // Requires RankedTensorType.
48   if (!sourceType || !targetType)
49     return false;
50 
  // Requires same element type.
52   if (sourceType.getElementType() != targetType.getElementType())
53     return false;
54 
55   // Requires same rank.
56   if (sourceType.getRank() != targetType.getRank())
57     return false;
58 
  // If the source has a static size along a dimension where the target is
  // dynamic, the static information is not preserved.
60   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
61     if (!ShapedType::isDynamic(std::get<0>(t)) &&
62         ShapedType::isDynamic(std::get<1>(t)))
63       return false;
64   }
65 
66   return true;
67 }
68 
69 /// Determines whether tensor::CastOp casts to a more dynamic version of the
70 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
71 /// implement canonicalization patterns for ops in different dialects that may
72 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted when `slice` ops are canonicalized, in
/// order to preserve the type compatibility of their uses.
75 ///
76 /// Returns true when all conditions are met:
77 /// 1. source and result are ranked tensors with same element type and rank.
/// 2. the source type has at least as much static information as the result
///    type.
79 ///
80 /// Example:
81 /// ```mlir
82 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
83 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
84 /// ```
85 ///
86 /// folds into:
87 ///
88 /// ```mlir
89 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
90 /// ```
91 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
92   if (!castOp)
93     return false;
94 
95   // Can fold if the source of cast has at least as much static information as
  // its result.
97   return preservesStaticInformation(castOp.getType(),
98                                     castOp.source().getType());
99 }
100 
/// Determines whether the tensor::CastOp casts to a more static version of the
/// source tensor. This is useful to fold into a producing op and implement
/// canonicalization patterns with the `tensor.cast` op as the root, but with
/// the producer possibly coming from a different dialect. Returns true when
/// all conditions are met:
/// 1. source and result are ranked tensors with same element type and rank.
/// 2. the result type has more static information than the source.
107 ///
108 /// Example:
109 /// ```mlir
110 ///   %1 = producer ... : tensor<?x?xf32>
111 ///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<8x16xf32>
112 /// ```
113 ///
/// can be canonicalized to:
115 ///
116 /// ```mlir
117 ///   %2 = producer ... : tensor<8x16xf32>
118 /// ```
/// Not all ops can be canonicalized this way, but for those that can be, this
/// method provides a check that it is worth doing the canonicalization.
121 bool mlir::tensor::canFoldIntoProducerOp(CastOp castOp) {
122   if (!castOp)
123     return false;
124   return preservesStaticInformation(castOp.source().getType(),
125                                     castOp.getType());
126 }
127 
128 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
129 /// that can be folded.
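/// For illustration (hypothetical IR, with `some.op` standing in for any
/// consumer whose verifier accepts the more static operand type), given
///   %0 = tensor.cast %arg : tensor<4x4xf32> to tensor<?x?xf32>
///   %1 = "some.op"(%0) : (tensor<?x?xf32>) -> tensor<?x?xf32>
/// the use of %0 in `some.op` is replaced by %arg directly.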
130 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
131   bool folded = false;
132   for (OpOperand &operand : op->getOpOperands()) {
133     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
134     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
135       operand.set(castOp.getOperand());
136       folded = true;
137     }
138   }
139   return success(folded);
140 }
141 
142 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
143   if (inputs.size() != 1 || outputs.size() != 1)
144     return false;
145   Type a = inputs.front(), b = outputs.front();
146   auto aT = a.dyn_cast<TensorType>();
147   auto bT = b.dyn_cast<TensorType>();
148   if (!aT || !bT)
149     return false;
150 
151   if (aT.getElementType() != bT.getElementType())
152     return false;
153 
154   return succeeded(verifyCompatibleShape(aT, bT));
155 }
156 
/// Compute a TensorType that has the joined shape knowledge of the two
/// given TensorTypes. The element types need to match. Returns a null type if
/// the shapes cannot be joined (mismatching ranks or conflicting static sizes).
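/// For illustration: joining tensor<?x8xf32> with tensor<4x?xf32> yields
/// tensor<4x8xf32>; joining with an unranked tensor returns the other type;
/// and joining tensor<4xf32> with tensor<8xf32> fails and returns a null type.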
159 static TensorType joinShapes(TensorType one, TensorType two) {
160   assert(one.getElementType() == two.getElementType());
161 
162   if (!one.hasRank())
163     return two;
164   if (!two.hasRank())
165     return one;
166 
167   int64_t rank = one.getRank();
168   if (rank != two.getRank())
169     return {};
170 
171   SmallVector<int64_t, 4> join;
172   join.reserve(rank);
173   for (int64_t i = 0; i < rank; ++i) {
174     if (one.isDynamicDim(i)) {
175       join.push_back(two.getDimSize(i));
176       continue;
177     }
178     if (two.isDynamicDim(i)) {
179       join.push_back(one.getDimSize(i));
180       continue;
181     }
182     if (one.getDimSize(i) != two.getDimSize(i))
183       return {};
184     join.push_back(one.getDimSize(i));
185   }
186   return RankedTensorType::get(join, one.getElementType());
187 }
188 
189 namespace {
190 
191 /// Replaces chains of two tensor.cast operations by a single tensor.cast
192 /// operation if doing so does not remove runtime constraints.
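/// For illustration (hypothetical IR):
///   %1 = tensor.cast %0 : tensor<4x4xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<4x?xf32>
/// is rewritten into
///   %2 = tensor.cast %0 : tensor<4x4xf32> to tensor<4x?xf32>
/// because the single remaining cast carries all the shape information of the
/// chain and drops no runtime check.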
193 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
194   using OpRewritePattern<CastOp>::OpRewritePattern;
195 
196   LogicalResult matchAndRewrite(CastOp tensorCast,
197                                 PatternRewriter &rewriter) const final {
198     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
199 
200     if (!tensorCastOperand)
201       return failure();
202 
203     auto sourceType =
204         tensorCastOperand.getOperand().getType().cast<TensorType>();
205     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
206     auto resultType = tensorCast.getType().cast<TensorType>();
207 
208     // We can remove the intermediate cast if joining all three produces the
209     // same result as just joining the source and result shapes.
210     auto firstJoin =
211         joinShapes(joinShapes(sourceType, intermediateType), resultType);
212 
213     // The join might not exist if the cast sequence would fail at runtime.
214     if (!firstJoin)
215       return failure();
216 
    // If the above join exists, `newJoin` always exists too; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
220     auto newJoin = joinShapes(sourceType, resultType);
221     if (firstJoin != newJoin)
222       return failure();
223 
224     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
225                                         tensorCastOperand.getOperand());
226     return success();
227   }
228 };
229 
230 } // namespace
231 
232 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
233                                          MLIRContext *context) {
234   results.add<ChainedTensorCast>(context);
235 }
236 
237 //===----------------------------------------------------------------------===//
238 // DimOp
239 //===----------------------------------------------------------------------===//
240 
241 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
242                   int64_t index) {
243   auto loc = result.location;
244   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
245   build(builder, result, source, indexValue);
246 }
247 
248 Optional<int64_t> DimOp::getConstantIndex() {
249   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
250     return constantOp.getValue().cast<IntegerAttr>().getInt();
251   return {};
252 }
253 
254 LogicalResult DimOp::verify() {
255   // Assume unknown index to be in range.
256   Optional<int64_t> index = getConstantIndex();
257   if (!index.hasValue())
258     return success();
259 
260   // Check that constant index is not knowingly out of range.
261   auto type = source().getType();
262   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
263     if (index.getValue() >= tensorType.getRank())
264       return emitOpError("index is out of range");
265   } else if (type.isa<UnrankedTensorType>()) {
266     // Assume index to be in range.
267   } else {
268     llvm_unreachable("expected operand with tensor type");
269   }
270   return success();
271 }
272 
273 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
274   // All forms of folding require a known index.
275   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
276   if (!index)
277     return {};
278 
279   // Folding for unranked types (UnrankedTensorType) is not supported.
280   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
281   if (!tensorType)
282     return {};
283 
284   // Fold if the shape extent along the given index is known.
285   if (!tensorType.isDynamicDim(index.getInt())) {
286     Builder builder(getContext());
287     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
288   }
289 
290   Operation *definingOp = source().getDefiningOp();
291 
292   // Fold dim to the operand of tensor.generate.
293   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
294     auto resultType =
295         fromElements.getResult().getType().cast<RankedTensorType>();
296     // The case where the type encodes the size of the dimension is handled
297     // above.
298     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
299 
300     // Find the operand of the fromElements that corresponds to this index.
301     auto dynExtents = fromElements.dynamicExtents().begin();
302     for (auto dim : resultType.getShape().take_front(index.getInt()))
303       if (ShapedType::isDynamic(dim))
304         dynExtents++;
305 
306     return Value{*dynExtents};
307   }
308 
309   // The size at the given index is now known to be a dynamic size.
310   unsigned unsignedIndex = index.getValue().getZExtValue();
311 
312   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
    // Fold only for non-rank-reduced ops. For the rank-reduced version, rely
    // on the `resolve-shaped-type-result-dims` pass.
315     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
316         sliceOp.isDynamicSize(unsignedIndex)) {
317       return {sliceOp.getDynamicSize(unsignedIndex)};
318     }
319   }
320 
321   // dim(cast) -> dim
322   if (succeeded(foldTensorCast(*this)))
323     return getResult();
324 
325   return {};
326 }
327 
328 namespace {
329 /// Fold dim of a cast into the dim of the source of the tensor cast.
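/// For illustration (hypothetical IR, with %c1 an index constant):
///   %0 = tensor.cast %arg : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %c1 : tensor<?x?xf32>
/// becomes
///   %d = tensor.dim %arg, %c1 : tensor<4x?xf32>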
330 struct DimOfCastOp : public OpRewritePattern<DimOp> {
331   using OpRewritePattern<DimOp>::OpRewritePattern;
332 
333   LogicalResult matchAndRewrite(DimOp dimOp,
334                                 PatternRewriter &rewriter) const override {
335     auto castOp = dimOp.source().getDefiningOp<CastOp>();
336     if (!castOp)
337       return failure();
338     Value newSource = castOp.getOperand();
339     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
340     return success();
341   }
342 };
343 } // namespace
344 
345 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
346                                         MLIRContext *context) {
347   results.add<DimOfCastOp>(context);
348 }
349 
350 //===----------------------------------------------------------------------===//
351 // ExtractOp
352 //===----------------------------------------------------------------------===//
353 
354 LogicalResult ExtractOp::verify() {
355   // Verify the # indices match if we have a ranked type.
356   if (auto tensorType = tensor().getType().dyn_cast<RankedTensorType>())
357     if (tensorType.getRank() != static_cast<int64_t>(indices().size()))
358       return emitOpError("incorrect number of indices for extract_element");
359 
360   return success();
361 }
362 
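/// Fold extraction from a constant: a splat constant folds to its splat value,
/// and a general elements attribute is indexed directly when all indices are
/// known constants.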
363 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
364   // The tensor operand must be a known constant.
365   Attribute tensor = operands.front();
366   if (!tensor)
367     return {};
368   // If this is a splat elements attribute, simply return the value. All of the
369   // elements of a splat attribute are the same.
370   if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
371     return splatTensor.getSplatValue<Attribute>();
372 
373   // Otherwise, collect the constant indices into the tensor.
374   SmallVector<uint64_t, 8> indices;
  for (Attribute indexAttr : llvm::drop_begin(operands, 1)) {
    if (!indexAttr || !indexAttr.isa<IntegerAttr>())
      return {};
    indices.push_back(indexAttr.cast<IntegerAttr>().getInt());
379   }
380 
381   // If this is an elements attribute, query the value at the given indices.
382   auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
383   if (elementsAttr && elementsAttr.isValidIndex(indices))
384     return elementsAttr.getValues<Attribute>()[indices];
385   return {};
386 }
387 
388 //===----------------------------------------------------------------------===//
389 // FromElementsOp
390 //===----------------------------------------------------------------------===//
391 
392 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
393                            Type resultType, ValueRange elements) {
394   result.addOperands(elements);
395   result.addTypes(resultType);
396 }
397 
398 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
399                            ValueRange elements) {
400   assert(!elements.empty() && "expected at least one element");
401   Type resultType = RankedTensorType::get(
402       {static_cast<int64_t>(elements.size())}, elements.front().getType());
403   build(builder, result, resultType, elements);
404 }
405 
406 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
407   if (!llvm::is_contained(operands, nullptr))
408     return DenseElementsAttr::get(getType(), operands);
409   return {};
410 }
411 
412 namespace {
413 
414 // Canonicalizes the pattern of the form
415 //
416 // %tensor = tensor.from_elements(%element) : (i32) -> tensor<1xi32>
417 // %extracted_element = tensor.extract %tensor[%c0] : tensor<1xi32>
418 //
419 // to just %element.
420 struct ExtractElementFromTensorFromElements
421     : public OpRewritePattern<tensor::ExtractOp> {
422   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
423 
424   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
425                                 PatternRewriter &rewriter) const final {
426     auto tensorFromElements = extract.tensor().getDefiningOp<FromElementsOp>();
427     if (!tensorFromElements)
428       return failure();
429     auto tensorType = tensorFromElements.getType().cast<RankedTensorType>();
430     auto rank = tensorType.getRank();
431     if (rank == 0) {
432       rewriter.replaceOp(extract, tensorFromElements.getOperand(0));
433       return success();
434     }
435     SmallVector<APInt, 3> indices(rank);
436     int64_t flatIndex = 0;
437     int64_t stride = 1;
438     for (int i = rank - 1; i >= 0; --i) {
439       APInt index;
440       if (!matchPattern(extract.indices()[i], m_ConstantInt(&index)))
441         return failure();
442       if (i < rank - 1)
443         stride *= tensorType.getDimSize(i);
444       flatIndex += index.getSExtValue() * stride;
445     }
446     // Prevent out of bounds accesses. This can happen in invalid code that will
447     // never execute.
448     if (tensorFromElements->getNumOperands() <= flatIndex || flatIndex < 0)
449       return failure();
450     rewriter.replaceOp(extract, tensorFromElements.getOperand(flatIndex));
451     return success();
452   }
453 };
454 
// Pushes the index_casts that occur before extractions to after the extract.
// This minimizes type conversion in some cases and enables the extract
// canonicalizer. This changes:
//
// %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
// %extract = tensor.extract %cast[%index] : tensor<1xindex>
//
// to the following:
//
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template and handling all tensor cast
// operations.
470 struct ExtractElementFromIndexCast
471     : public OpRewritePattern<tensor::ExtractOp> {
472   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
473 
474   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
475                                 PatternRewriter &rewriter) const final {
476     Location loc = extract.getLoc();
477     auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
478     if (!indexCast)
479       return failure();
480 
481     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
482 
483     auto newExtract = rewriter.create<tensor::ExtractOp>(
484         loc, elementTy, indexCast.getIn(), extract.indices());
485 
486     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
487                                                     newExtract);
488 
489     return success();
490   }
491 };
492 
493 } // namespace
494 
495 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
496                                                  MLIRContext *context) {
497   results
498       .add<ExtractElementFromIndexCast, ExtractElementFromTensorFromElements>(
499           context);
500 }
501 
502 //===----------------------------------------------------------------------===//
503 // InsertOp
504 //===----------------------------------------------------------------------===//
505 
506 LogicalResult InsertOp::verify() {
507   // Verify the # indices match if we have a ranked type.
508   if (auto destType = dest().getType().dyn_cast<RankedTensorType>())
509     if (destType.getRank() != static_cast<int64_t>(indices().size()))
510       return emitOpError("incorrect number of indices");
511   return success();
512 }
513 
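/// Inserting a scalar into a splat constant is a no-op when the scalar equals
/// the splat value, in which case the destination attribute is returned as is.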
514 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
515   Attribute scalar = operands[0];
516   Attribute dest = operands[1];
517   if (scalar && dest)
518     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
519       if (scalar == splatDest.getSplatValue<Attribute>())
520         return dest;
521   return {};
522 }
523 
524 //===----------------------------------------------------------------------===//
525 // GenerateOp
526 //===----------------------------------------------------------------------===//
527 
528 LogicalResult GenerateOp::reifyResultShapes(
529     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
530   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
531   int idx = 0;
532   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
533     if (getType().isDynamicDim(dim)) {
534       reifiedReturnShapes[0][dim] = getOperand(idx++);
535     } else {
536       reifiedReturnShapes[0][dim] = builder.create<arith::ConstantIndexOp>(
537           getLoc(), getType().getDimSize(dim));
538     }
539   }
540   return success();
541 }
542 
543 LogicalResult GenerateOp::verify() {
544   // Ensure that the tensor type has as many dynamic dimensions as are specified
545   // by the operands.
546   RankedTensorType resultTy = getType().cast<RankedTensorType>();
547   if (getNumOperands() != resultTy.getNumDynamicDims())
548     return emitError("must have as many index operands as dynamic extents "
549                      "in the result type");
550 
551   return success();
552 }
553 
554 LogicalResult GenerateOp::verifyRegions() {
555   RankedTensorType resultTy = getType().cast<RankedTensorType>();
556   // Ensure that region arguments span the index space.
557   if (!llvm::all_of(body().getArgumentTypes(),
558                     [](Type ty) { return ty.isIndex(); }))
559     return emitError("all body arguments must be index");
560   if (body().getNumArguments() != resultTy.getRank())
561     return emitError("must have one body argument per input dimension");
562 
563   // Ensure that the region yields an element of the right type.
564   auto yieldOp = cast<YieldOp>(body().getBlocks().front().getTerminator());
565 
566   if (yieldOp.value().getType() != resultTy.getElementType())
567     return emitOpError(
568         "body must be terminated with a `yield` operation of the tensor "
569         "element type");
570 
571   return success();
572 }
573 
574 void GenerateOp::build(
575     OpBuilder &b, OperationState &result, Type resultTy,
576     ValueRange dynamicExtents,
577     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
578   build(b, result, resultTy, dynamicExtents);
579 
580   // Build and populate body.
581   OpBuilder::InsertionGuard guard(b);
582   Region *bodyRegion = result.regions.front().get();
583   auto rank = resultTy.cast<RankedTensorType>().getRank();
584   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
585   SmallVector<Location, 2> argumentLocs(rank, result.location);
586   Block *bodyBlock =
587       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
588   bodyBuilder(b, result.location, bodyBlock->getArguments());
589 }
590 
591 namespace {
592 
/// Canonicalizes tensor.generate operations with constant extent operands into
/// the equivalent operation with those extents expressed in the result type
/// instead. A tensor.cast is also inserted to make sure that the resulting IR
/// is still well-typed.
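/// For illustration (hypothetical IR, body elided), with %c5 a constant index:
///   %0 = tensor.generate %c5, %n { ... } : tensor<?x?xindex>
/// becomes
///   %1 = tensor.generate %n { ... } : tensor<5x?xindex>
///   %0 = tensor.cast %1 : tensor<5x?xindex> to tensor<?x?xindex>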
597 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
598   using OpRewritePattern<GenerateOp>::OpRewritePattern;
599 
600   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
601                                 PatternRewriter &rewriter) const final {
602     auto resultType =
603         tensorFromElements.getResult().getType().cast<RankedTensorType>();
604 
605     if (resultType.hasStaticShape())
606       return failure();
607 
608     SmallVector<Value, 4> newOperands;
609     SmallVector<int64_t, 4> newShape;
610     auto operandsIt = tensorFromElements.dynamicExtents().begin();
611 
612     for (int64_t dim : resultType.getShape()) {
613       if (!ShapedType::isDynamic(dim)) {
614         newShape.push_back(dim);
615         continue;
616       }
617       APInt index;
618       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
619         newShape.push_back(ShapedType::kDynamicSize);
620         newOperands.push_back(*operandsIt++);
621         continue;
622       }
623       newShape.push_back(index.getSExtValue());
624       operandsIt++;
625     }
626 
627     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
628       return failure();
629 
630     auto loc = tensorFromElements.getLoc();
631     auto newOp = rewriter.create<GenerateOp>(
632         loc, RankedTensorType::get(newShape, resultType.getElementType()),
633         newOperands);
634     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
635                                 newOp.body().begin());
636     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
637                                                 newOp);
638     return success();
639   }
640 };
641 
642 /// Canonicalizes the pattern of the form
643 ///
644 /// %tensor = tensor.generate %x {
645 ///   ^bb0(%arg0: index):
646 ///   <computation>
647 ///   yield %1 : index
648 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
650 ///
651 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
652 /// tensor.generate operation has no side-effects.
653 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
654   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
655 
656   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
657                                 PatternRewriter &rewriter) const final {
658     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
659     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
660       return failure();
661 
662     BlockAndValueMapping mapping;
663     Block *body = tensorFromElements.getBody();
664     mapping.map(body->getArguments(), extract.indices());
665     for (auto &op : body->without_terminator())
666       rewriter.clone(op, mapping);
667 
668     auto yield = cast<YieldOp>(body->getTerminator());
669 
670     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
671     return success();
672   }
673 };
674 
675 /// Canonicalizes the pattern of the form
676 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
678 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
679 ///
680 /// to
681 ///
682 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
683 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
684   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
685 
686   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
687                                 PatternRewriter &rewriter) const final {
688     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
689     if (!tensorCast)
690       return failure();
691 
692     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
693                                                    extract.indices());
694     return success();
695   }
696 };
697 
698 } // namespace
699 
700 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
701                                              MLIRContext *context) {
702   // TODO: Move extract patterns to tensor::ExtractOp.
703   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
704               StaticTensorGenerate>(context);
705 }
706 
707 //===----------------------------------------------------------------------===//
708 // RankOp
709 //===----------------------------------------------------------------------===//
710 
711 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
712   // Constant fold rank when the rank of the operand is known.
713   auto type = getOperand().getType();
714   auto shapedType = type.dyn_cast<ShapedType>();
715   if (shapedType && shapedType.hasRank())
716     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
717   return IntegerAttr();
718 }
719 
720 //===----------------------------------------------------------------------===//
721 // ReshapeOp
722 //===----------------------------------------------------------------------===//
723 
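/// Return the product of all dimension sizes of `type`. This is only
/// meaningful for statically shaped types; below it is used only after
/// checking hasStaticShape().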
724 static int64_t getNumElements(ShapedType type) {
725   int64_t numElements = 1;
726   for (auto dim : type.getShape())
727     numElements *= dim;
728   return numElements;
729 }
730 
731 LogicalResult ReshapeOp::verify() {
732   TensorType operandType = source().getType().cast<TensorType>();
733   TensorType resultType = result().getType().cast<TensorType>();
734 
735   if (operandType.getElementType() != resultType.getElementType())
736     return emitOpError("element types of source and destination tensor "
737                        "types should be the same");
738 
739   int64_t shapeSize = shape().getType().cast<RankedTensorType>().getDimSize(0);
740   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
741   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
742 
743   if (resultRankedType) {
744     if (operandRankedType && resultRankedType.hasStaticShape() &&
745         operandRankedType.hasStaticShape()) {
746       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
747         return emitOpError("source and destination tensor should have the "
748                            "same number of elements");
749     }
750     if (ShapedType::isDynamic(shapeSize))
751       return emitOpError("cannot use shape operand with dynamic length to "
752                          "reshape to statically-ranked tensor type");
753     if (shapeSize != resultRankedType.getRank())
754       return emitOpError(
755           "length of shape operand differs from the result's tensor rank");
756   }
757   return success();
758 }
759 
760 //===----------------------------------------------------------------------===//
761 // Reassociative reshape ops
762 //===----------------------------------------------------------------------===//
763 
764 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
765   return getSymbolLessAffineMaps(getReassociationExprs());
766 }
767 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
768   return convertReassociationIndicesToExprs(getContext(),
769                                             getReassociationIndices());
770 }
771 
772 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
773   return getSymbolLessAffineMaps(getReassociationExprs());
774 }
775 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
776   return convertReassociationIndicesToExprs(getContext(),
777                                             getReassociationIndices());
778 }
779 
780 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
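/// For illustration: collapsing tensor<2x3x?xf32> with the reassociation
/// [[0, 1], [2]] yields tensor<6x?xf32>, and any group containing a dynamic
/// dimension collapses to a dynamic size.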
781 static RankedTensorType
782 computeTensorReshapeCollapsedType(RankedTensorType type,
783                                   ArrayRef<AffineMap> reassociation) {
784   auto shape = type.getShape();
785   SmallVector<int64_t, 4> newShape;
786   newShape.reserve(reassociation.size());
787 
788   // Use the fact that reassociation is valid to simplify the logic: only use
789   // each map's rank.
790   assert(isReassociationValid(reassociation) && "invalid reassociation");
791   unsigned currentDim = 0;
792   for (AffineMap m : reassociation) {
793     unsigned dim = m.getNumResults();
794     auto band = shape.slice(currentDim, dim);
795     int64_t size = 1;
796     if (llvm::is_contained(band, ShapedType::kDynamicSize))
797       size = ShapedType::kDynamicSize;
798     else
799       for (unsigned d = 0; d < dim; ++d)
800         size *= shape[currentDim + d];
801     newShape.push_back(size);
802     currentDim += dim;
803   }
804 
805   return RankedTensorType::get(newShape, type.getElementType());
806 }
807 
808 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
809                             ArrayRef<ReassociationIndices> reassociation,
810                             ArrayRef<NamedAttribute> attrs) {
811   auto resultType = computeTensorReshapeCollapsedType(
812       src.getType().cast<RankedTensorType>(),
813       getSymbolLessAffineMaps(
814           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
815   build(b, result, resultType, src, attrs);
816   result.addAttribute(getReassociationAttrName(),
817                       getReassociationIndicesAttribute(b, reassociation));
818 }
819 
820 void ExpandShapeOp::build(OpBuilder &b, OperationState &result, Value src,
821                           ArrayRef<ReassociationIndices> reassociation,
822                           ArrayRef<NamedAttribute> attrs) {
823   auto resultType = computeTensorReshapeCollapsedType(
824       src.getType().cast<RankedTensorType>(),
825       getSymbolLessAffineMaps(
826           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
827   build(b, result, resultType, src, attrs);
828   result.addAttribute(getReassociationAttrName(),
829                       getReassociationIndicesAttribute(b, reassociation));
830 }
831 
832 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
833                                         TensorReshapeOp, ExpandShapeOp>::value>
834 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
835                                            RankedTensorType expandedType,
836                                            RankedTensorType collapsedType) {
837   if (failed(
838           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
839     return failure();
840 
841   auto maps = op.getReassociationMaps();
842   RankedTensorType expectedType =
843       computeTensorReshapeCollapsedType(expandedType, maps);
844   if (collapsedType != expectedType)
845     return op.emitOpError("expected collapsed type to be ")
846            << expectedType << ", but got " << collapsedType;
847   return success();
848 }
849 
850 LogicalResult ExpandShapeOp::verify() {
851   return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
852 }
853 
854 LogicalResult CollapseShapeOp::verify() {
855   return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
856 }
857 
858 namespace {
859 /// Reshape of a splat constant can be replaced with a constant of the result
860 /// type.
861 template <typename TensorReshapeOp>
862 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
863   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
864   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
865                                 PatternRewriter &rewriter) const override {
866     DenseElementsAttr attr;
867     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
868       return failure();
869     if (!attr || !attr.isSplat())
870       return failure();
871     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
872         reshapeOp.getResultType(), attr.getRawData(), true);
873     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
874     return success();
875   }
876 };
877 
/// Reshape of a FromElements can be replaced with a FromElements of the result
/// type.
880 template <typename TensorReshapeOp>
881 struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
882   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
883   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
884                                 PatternRewriter &rewriter) const override {
885     auto fromElements =
886         reshapeOp.src().template getDefiningOp<FromElementsOp>();
887     if (!fromElements)
888       return failure();
889 
890     auto shapedTy = reshapeOp.getType().template cast<ShapedType>();
891 
892     if (!shapedTy.hasStaticShape())
893       return failure();
894 
895     rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
896                                                 fromElements.elements());
897     return success();
898   }
899 };
900 
901 } // namespace
902 
903 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
904                                                 MLIRContext *context) {
905   results.add<CollapseReshapeOps<ExpandShapeOp>,
906               CollapseMixedReshapeOps<ExpandShapeOp, CollapseShapeOp>,
907               FoldReshapeWithConstant<ExpandShapeOp>,
908               FoldReshapeWithFromElements<ExpandShapeOp>>(context);
909 }
910 
911 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
912                                                   MLIRContext *context) {
913   results.add<CollapseReshapeOps<CollapseShapeOp>,
914               CollapseMixedReshapeOps<CollapseShapeOp, ExpandShapeOp>,
915               FoldReshapeWithConstant<CollapseShapeOp>,
916               FoldReshapeWithFromElements<CollapseShapeOp>>(context);
917 }
918 
919 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
920   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
921 }
922 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
923   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
924 }
925 
926 //===----------------------------------------------------------------------===//
927 // ExtractSliceOp
928 //===----------------------------------------------------------------------===//
929 
930 /// An extract_slice op result type can be fully inferred from the source type
931 /// and the static representation of offsets, sizes and strides. Special
932 /// sentinels encode the dynamic case.
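/// For illustration: with static sizes [4, kDynamicSize] the inferred result
/// type over a tensor<8x16xf32> source is tensor<4x?xf32>; offsets and strides
/// do not influence the inferred type.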
933 RankedTensorType ExtractSliceOp::inferResultType(
934     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
935     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offsets/sizes/
  // strides, in which case we complete with offset=0, sizes from the source
  // tensor type, and strides=1.
939   unsigned rank = sourceRankedTensorType.getRank();
940   (void)rank;
941   assert(staticSizes.size() == rank &&
942          "unexpected staticSizes not equal to rank of source");
943   return RankedTensorType::get(staticSizes,
944                                sourceRankedTensorType.getElementType());
945 }
946 
947 RankedTensorType ExtractSliceOp::inferResultType(
948     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
949     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
950   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
951   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
952   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
953                              ShapedType::kDynamicStrideOrOffset);
954   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
955                              ShapedType::kDynamicSize);
956   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
957                              ShapedType::kDynamicStrideOrOffset);
958   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
959                                          staticSizes, staticStrides);
960 }
961 
/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. Special
/// sentinels encode the dynamic case. This variant additionally drops unit
/// dimensions from the inferred type until it reaches rank `resultRank`.
965 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
966     unsigned resultRank, RankedTensorType sourceRankedTensorType,
967     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
968     ArrayRef<int64_t> strides) {
969   auto inferredType =
970       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
971           .cast<RankedTensorType>();
972   int rankDiff = inferredType.getRank() - resultRank;
973   if (rankDiff > 0) {
974     auto shape = inferredType.getShape();
975     llvm::SmallBitVector dimsToProject =
976         getPositionsOfShapeOne(rankDiff, shape);
977     SmallVector<int64_t> projectedShape;
978     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
979       if (!dimsToProject.test(pos))
980         projectedShape.push_back(shape[pos]);
981     inferredType =
982         RankedTensorType::get(projectedShape, inferredType.getElementType());
983   }
984   return inferredType;
985 }
986 
987 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
988     unsigned resultRank, RankedTensorType sourceRankedTensorType,
989     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
990     ArrayRef<OpFoldResult> strides) {
991   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
992   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
993   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
994                              ShapedType::kDynamicStrideOrOffset);
995   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
996                              ShapedType::kDynamicSize);
997   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
998                              ShapedType::kDynamicStrideOrOffset);
999   return ExtractSliceOp::inferRankReducedResultType(
1000       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
1001       staticStrides);
1002 }
1003 
1004 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
1005 /// result type. If the type passed is nullptr, it is inferred.
1006 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1007                            RankedTensorType resultType, Value source,
1008                            ArrayRef<OpFoldResult> offsets,
1009                            ArrayRef<OpFoldResult> sizes,
1010                            ArrayRef<OpFoldResult> strides,
1011                            ArrayRef<NamedAttribute> attrs) {
1012   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1013   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1014   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1015                              ShapedType::kDynamicStrideOrOffset);
1016   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1017                              ShapedType::kDynamicSize);
1018   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1019                              ShapedType::kDynamicStrideOrOffset);
1020   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
1021   // Structuring implementation this way avoids duplication between builders.
1022   if (!resultType) {
1023     resultType =
1024         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
1025                                         staticSizes, staticStrides)
1026             .cast<RankedTensorType>();
1027   }
1028   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
1029         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1030         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1031   result.addAttributes(attrs);
1032 }
1033 
1034 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
1035 /// result type.
1036 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1037                            ArrayRef<OpFoldResult> offsets,
1038                            ArrayRef<OpFoldResult> sizes,
1039                            ArrayRef<OpFoldResult> strides,
1040                            ArrayRef<NamedAttribute> attrs) {
1041   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1042 }
1043 
1044 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
1045 /// type passed is nullptr, it is inferred.
1046 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1047                            RankedTensorType resultType, Value source,
1048                            ValueRange offsets, ValueRange sizes,
1049                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1050   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1051       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1052   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1053       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1054   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1055       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1056   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1057 }
1058 
1059 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
1060 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1061                            ValueRange offsets, ValueRange sizes,
1062                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1063   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1064 }
1065 
1066 template <typename OpTy>
1067 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
1068                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
  switch (result) {
  case SliceVerificationResult::Success:
    return success();
  case SliceVerificationResult::RankTooLarge:
    return op.emitError("expected rank to be smaller or equal to ")
           << "the other rank. ";
  case SliceVerificationResult::SizeMismatch:
    return op.emitError("expected type to be ")
           << expectedType << " or a rank-reduced version. (size mismatch) ";
  case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected element type to be ")
           << shapedType.getElementType();
1082   default:
1083     llvm_unreachable("unexpected extract_slice op verification result");
1084   }
1085 }
1086 
1087 /// Verifier for ExtractSliceOp.
1088 LogicalResult ExtractSliceOp::verify() {
1089   // Verify result type against inferred type.
1090   auto expectedType = ExtractSliceOp::inferResultType(
1091       getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
1092   auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
1093   return produceSliceErrorMsg(result, *this, expectedType);
1094 }
1095 
/// Infer the canonical type of the result of an extract_slice op. Returns the
/// rank-reduced type if it has rank `resultRank`; otherwise returns the
/// non-rank-reduced type.
1099 static RankedTensorType
1100 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1101                             ArrayRef<OpFoldResult> mixedOffsets,
1102                             ArrayRef<OpFoldResult> mixedSizes,
1103                             ArrayRef<OpFoldResult> mixedStrides) {
1104   auto resultType =
1105       ExtractSliceOp::inferRankReducedResultType(
1106           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1107           .cast<RankedTensorType>();
1108   if (resultType.getRank() != resultRank) {
1109     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1110                                                  mixedSizes, mixedStrides)
1111                      .cast<RankedTensorType>();
1112   }
1113   return resultType;
1114 }
1115 
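/// Return a bit vector with one entry per slice size, marking the dimensions
/// that are dropped (rank-reduced away) in the result type.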
1116 llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
1117   ArrayRef<int64_t> resultShape = getType().getShape();
1118   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1119   llvm::SmallBitVector droppedDims(mixedSizes.size());
1120   unsigned shapePos = 0;
1121   for (const auto &size : enumerate(mixedSizes)) {
1122     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if its size is not statically 1, or if the
    // currently matched result dimension is itself a static 1 (i.e. the unit
    // dimension is kept in the result rather than rank-reduced away).
1126     if (!sizeVal || sizeVal.getValue() != 1 ||
1127         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1128       shapePos++;
1129       continue;
1130     }
1131     droppedDims.set(size.index());
1132   }
1133   return droppedDims;
1134 }
1135 
1136 LogicalResult ExtractSliceOp::reifyResultShapes(
1137     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1138   reifiedReturnShapes.resize(1);
1139   reifiedReturnShapes[0].reserve(getType().getRank());
1140   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1141   llvm::SmallBitVector droppedDims = getDroppedDims();
1142   Location loc = getLoc();
1143   for (const auto &size : enumerate(mixedSizes)) {
1144     if (droppedDims.test(size.index()))
1145       continue;
1146     if (auto attr = size.value().dyn_cast<Attribute>()) {
1147       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1148           loc, attr.cast<IntegerAttr>().getInt()));
1149       continue;
1150     }
1151     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1152   }
1153   return success();
1154 }
1155 
1156 namespace {
/// Pattern to rewrite an extract_slice op with a tensor::CastOp source.
/// This essentially pushes the tensor.cast past its consuming slice when
/// `canFoldIntoConsumerOp` is true.
1160 ///
1161 /// Example:
1162 /// ```
1163 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1164 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1165 ///   tensor<3x4xf32>
1166 /// ```
1167 /// is rewritten into:
1168 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///   tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1171 /// ```
1172 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1173 public:
1174   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1175 
1176   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1177                                 PatternRewriter &rewriter) const override {
    // If any offset/size/stride operand is constant, just return to let the
    // OpWithOffsetSizesAndStridesConstantArgumentFolder pattern kick in first.
1179     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1180           return matchPattern(operand, matchConstantIndex());
1181         }))
1182       return failure();
1183 
1184     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1185     if (!castOp)
1186       return failure();
1187 
1188     if (!canFoldIntoConsumerOp(castOp))
1189       return failure();
1190 
1191     /// Deduce the type of the result to use for the canonicalized operation.
1192     RankedTensorType resultType = getCanonicalSliceResultType(
1193         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1194         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1195         sliceOp.getMixedStrides());
1196     Value newSlice = rewriter.create<ExtractSliceOp>(
1197         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1198         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1199         sliceOp.static_sizes(), sliceOp.static_strides());
1200     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1201                                                 newSlice);
1202     return success();
1203   }
1204 };
1205 
/// Slice elements from `values` into `outValues`. `counts` holds, for each
/// dimension, the linearized stride of the original values, i.e. the number of
/// elements spanned by one step along that dimension. The output values can be
/// used to construct a DenseElementsAttr.
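/// For illustration (hypothetical values): with a 2x4 source holding
/// [0, 1, 2, 3, 4, 5, 6, 7] row-major, counts [4, 1], offsets [0, 1],
/// sizes [2, 2] and strides [1, 2], the sliced values are [1, 3, 5, 7].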
1209 template <typename IterTy, typename ElemTy>
1210 static void sliceElements(IterTy values, ArrayRef<int64_t> counts,
1211                           ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1212                           ArrayRef<int64_t> strides,
1213                           llvm::SmallVectorImpl<ElemTy> *outValues) {
1214   assert(offsets.size() == sizes.size());
1215   assert(offsets.size() == strides.size());
1216   if (offsets.empty())
1217     return;
1218 
1219   int64_t offset = offsets.front();
1220   int64_t size = sizes.front();
1221   int64_t stride = strides.front();
1222   if (offsets.size() == 1) {
1223     for (int64_t i = 0; i < size; ++i, offset += stride)
1224       outValues->push_back(*(values + offset));
1225 
1226     return;
1227   }
1228 
1229   for (int64_t i = 0; i < size; ++i, offset += stride) {
1230     auto begin = values + offset * counts.front();
1231     sliceElements<IterTy, ElemTy>(begin, counts.drop_front(),
1232                                   offsets.drop_front(), sizes.drop_front(),
1233                                   strides.drop_front(), outValues);
1234   }
1235 }
1236 
/// Fold a tensor.extract_slice of an arith.constant into a new arith.constant.
/// The folded operation may introduce additional constant data; users can
/// restrict when the fold applies via the control function.
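/// For illustration (hypothetical IR):
///   %cst = arith.constant dense<[1, 2, 3, 4]> : tensor<4xi32>
///   %0 = tensor.extract_slice %cst[1] [2] [1] : tensor<4xi32> to tensor<2xi32>
/// is folded into
///   %0 = arith.constant dense<[2, 3]> : tensor<2xi32>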
1240 class ConstantOpExtractSliceFolder final
1241     : public OpRewritePattern<ExtractSliceOp> {
1242 public:
1243   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1244 
1245   ConstantOpExtractSliceFolder(MLIRContext *context,
1246                                ControlConstantExtractSliceFusionFn controlFn)
1247       : OpRewritePattern<ExtractSliceOp>(context),
1248         controlFn(std::move(controlFn)) {}
1249 
1250   LogicalResult matchAndRewrite(ExtractSliceOp op,
1251                                 PatternRewriter &rewriter) const override {
1252     DenseElementsAttr attr;
1253     if (!matchPattern(op.source(), m_Constant(&attr)))
1254       return failure();
1255 
1256     // A constant splat is handled by fold().
1257     if (attr.isSplat())
1258       return failure();
1259 
1260     // Dynamic result shape is not supported.
1261     auto sourceType = op.source().getType().cast<ShapedType>();
1262     auto resultType = op.result().getType().cast<ShapedType>();
1263     if (!sourceType.hasStaticShape() || !resultType.hasStaticShape())
1264       return failure();
1265 
1266     // Customized control over the folding.
1267     if (!controlFn(op))
1268       return failure();
1269 
1270     int64_t count = sourceType.getNumElements();
1271     if (count == 0)
1272       return failure();
1273 
1274     // Check if there are any dynamic parts, which are not supported.
1275     auto offsets = extractFromI64ArrayAttr(op.static_offsets());
1276     if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
1277       return failure();
1278     auto sizes = extractFromI64ArrayAttr(op.static_sizes());
1279     if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
1280       return failure();
1281     auto strides = extractFromI64ArrayAttr(op.static_strides());
1282     if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
1283       return failure();
1284 
1285     // Compute the stride for each dimension.
1286     SmallVector<int64_t> counts;
1287     ArrayRef<int64_t> shape = sourceType.getShape();
1288     counts.reserve(shape.size());
1289     for (int64_t v : shape) {
1290       count = count / v;
1291       counts.push_back(count);
1292     }
1293 
1294     // New attribute constructed by the sliced values.
1295     DenseElementsAttr newAttr;
1296 
1297     if (auto elems = attr.dyn_cast<DenseIntElementsAttr>()) {
1298       SmallVector<APInt> outValues;
1299       outValues.reserve(sourceType.getNumElements());
1300       sliceElements<DenseElementsAttr::IntElementIterator, APInt>(
1301           elems.begin(), counts, offsets, sizes, strides, &outValues);
1302       newAttr = DenseElementsAttr::get(resultType, outValues);
1303     } else if (auto elems = attr.dyn_cast<DenseFPElementsAttr>()) {
1304       SmallVector<APFloat> outValues;
1305       outValues.reserve(sourceType.getNumElements());
1306       sliceElements<DenseElementsAttr::FloatElementIterator, APFloat>(
1307           elems.begin(), counts, offsets, sizes, strides, &outValues);
1308       newAttr = DenseElementsAttr::get(resultType, outValues);
1309     }
1310 
1311     if (newAttr) {
1312       rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, resultType, newAttr);
1313       return success();
1314     }
1315 
1316     return failure();
1317   }
1318 
1319 private:
  /// Additionally controls whether the fold happens. Users can impose their
  /// own heuristics via this function.
1322   ControlConstantExtractSliceFusionFn controlFn;
1323 };
1324 
1325 } // namespace
1326 
1327 void mlir::tensor::populateFoldConstantExtractSlicePatterns(
1328     RewritePatternSet &patterns,
1329     const ControlConstantExtractSliceFusionFn &controlFn) {
1330   patterns.add<ConstantOpExtractSliceFolder>(patterns.getContext(), controlFn);
1331 }
1332 
1333 /// Return the canonical type of the result of an extract_slice op.
1334 struct SliceReturnTypeCanonicalizer {
1335   RankedTensorType operator()(ExtractSliceOp op,
1336                               ArrayRef<OpFoldResult> mixedOffsets,
1337                               ArrayRef<OpFoldResult> mixedSizes,
1338                               ArrayRef<OpFoldResult> mixedStrides) {
1339     return getCanonicalSliceResultType(op.getType().getRank(),
1340                                        op.getSourceType(), mixedOffsets,
1341                                        mixedSizes, mixedStrides);
1342   }
1343 };
1344 
1345 /// A canonicalizer wrapper to replace ExtractSliceOps.
1346 struct SliceCanonicalizer {
1347   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1348                   ExtractSliceOp newOp) {
1349     Value replacement = newOp.getResult();
1350     if (replacement.getType() != op.getType())
1351       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1352                                                     replacement);
1353     rewriter.replaceOp(op, replacement);
1354   }
1355 };
1356 
1357 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1358                                                  MLIRContext *context) {
1359   results.add<
1360       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1361           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1362       ExtractSliceOpCastFolder>(context);
1363 }
1364 
1365 //
1366 static LogicalResult
1367 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1368                                            ShapedType shapedType) {
1369   OpBuilder b(op.getContext());
1370   for (OpFoldResult ofr : op.getMixedOffsets())
1371     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1372       return failure();
  // Rank-reducing noops only need to inspect the leading dimensions:
  // llvm::zip, which stops at the end of the shorter range, is appropriate.
1375   auto shape = shapedType.getShape();
1376   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1377     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1378       return failure();
1379   for (OpFoldResult ofr : op.getMixedStrides())
1380     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1381       return failure();
1382   return success();
1383 }
1384 
1385 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1386 /// we can return the InsertSliceOp's source directly.
1387 // TODO: This only checks the immediate producer; extend to go up the
1388 // insert/extract chain if the slices are disjoint.
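//
// A minimal sketch of the intended match (types and names are illustrative):
//
//   %0 = tensor.insert_slice %src into %dst[0, 0] [4, 4] [1, 1]
//       : tensor<4x4xf32> into tensor<8x8xf32>
//   %1 = tensor.extract_slice %0[0, 0] [4, 4] [1, 1]
//       : tensor<8x8xf32> to tensor<4x4xf32>
//
// Here %1 folds to %src directly.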
1389 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1390   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1391 
1392   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1393   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1394       insertOp.isSameAs(extractOp, isSame))
1395     return insertOp.source();
1396 
1397   return {};
1398 }
1399 
1400 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute> operands) {
1401   if (auto splat = operands[0].dyn_cast_or_null<SplatElementsAttr>()) {
1402     auto resultType = result().getType().cast<ShapedType>();
1403     if (resultType.hasStaticShape())
1404       return splat.resizeSplat(resultType);
1405   }
1406   if (getSourceType() == getType() &&
1407       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1408     return this->source();
1409   if (Value slice = foldExtractAfterInsertSlice(*this))
1410     return slice;
1411 
1412   return OpFoldResult();
1413 }
1414 
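// A hedged sketch of the IR the helper below is expected to produce for a
// rank-reducing target type (names and types are illustrative):
//
//   %c1 = arith.constant 1 : index
//   %d1 = tensor.dim %t, %c1 : tensor<1x?xf32>
//   %s = tensor.extract_slice %t[0, 0] [1, %d1] [1, 1]
//       : tensor<1x?xf32> to tensor<?xf32>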
1415 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1416     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1417   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1418   unsigned rank = rankedTensorType.getRank();
1419   auto shape = rankedTensorType.getShape();
1420   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1421   SmallVector<OpFoldResult> sizes;
1422   for (unsigned i = 0, e = rank; i < e; ++i) {
1423     OpFoldResult dim;
1424     if (rankedTensorType.isDynamicDim(i))
1425       dim = b.createOrFold<tensor::DimOp>(
1426           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1427     else
1428       dim = b.getIndexAttr(shape[i]);
1429     sizes.push_back(dim);
1430   }
1431   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1432   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1433                                                 offsets, sizes, strides);
1434 }
1435 
1436 //===----------------------------------------------------------------------===//
1437 // InsertSliceOp
1438 //===----------------------------------------------------------------------===//
1439 
// Build an InsertSliceOp with mixed static and dynamic entries.
1441 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1442                           Value dest, ArrayRef<OpFoldResult> offsets,
1443                           ArrayRef<OpFoldResult> sizes,
1444                           ArrayRef<OpFoldResult> strides,
1445                           ArrayRef<NamedAttribute> attrs) {
1446   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1447   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1448   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1449                              ShapedType::kDynamicStrideOrOffset);
1450   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1451                              ShapedType::kDynamicSize);
1452   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1453                              ShapedType::kDynamicStrideOrOffset);
1454   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1455         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1456         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1457   result.addAttributes(attrs);
1458 }
1459 
// Build an InsertSliceOp with dynamic entries.
1461 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1462                           Value dest, ValueRange offsets, ValueRange sizes,
1463                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1464   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1465       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1466   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1467       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1468   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1469       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1470   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1471 }
1472 
1473 static SliceVerificationResult
1474 verifyInsertSliceOp(ShapedType srcType, ShapedType dstType,
1475                     ArrayAttr staticOffsets, ArrayAttr staticSizes,
1476                     ArrayAttr staticStrides,
1477                     ShapedType *expectedType = nullptr) {
  // insert_slice is the inverse of extract_slice; reuse its type inference.
1479   auto expected = ExtractSliceOp::inferRankReducedResultType(
1480                       srcType.getRank(), dstType.cast<RankedTensorType>(),
1481                       extractFromI64ArrayAttr(staticOffsets),
1482                       extractFromI64ArrayAttr(staticSizes),
1483                       extractFromI64ArrayAttr(staticStrides))
1484                       .cast<ShapedType>();
1485   if (expectedType)
1486     *expectedType = expected;
1487   return isRankReducedType(expected, srcType);
1488 }
1489 
1490 /// Verifier for InsertSliceOp.
1491 LogicalResult InsertSliceOp::verify() {
1492   ShapedType expectedType;
1493   auto result =
1494       verifyInsertSliceOp(getSourceType(), getType(), static_offsets(),
1495                           static_sizes(), static_strides(), &expectedType);
1496   return produceSliceErrorMsg(result, *this, expectedType);
1497 }
1498 
/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can mutate the second InsertSliceOp's destination to the first one's
/// destination.
1501 ///
1502 /// Example:
1503 ///
1504 /// ```mlir
1505 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1506 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1507 /// ```
1508 ///
1509 /// folds into:
1510 ///
1511 /// ```mlir
1512 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1513 /// ```
1514 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1515   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1516 
1517   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1518   if (!prevInsertOp ||
1519       prevInsertOp.source().getType() != insertOp.source().getType() ||
1520       !prevInsertOp.isSameAs(insertOp, isSame))
1521     return failure();
1522 
1523   insertOp.destMutable().assign(prevInsertOp.dest());
1524   return success();
1525 }
1526 
1527 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
1528   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1529       getSourceType() == getType() &&
1530       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1531     return this->source();
1532   if (succeeded(foldInsertAfterInsertSlice(*this)))
1533     return getResult();
1534   return OpFoldResult();
1535 }
1536 
1537 LogicalResult InsertSliceOp::reifyResultShapes(
1538     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1539   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1540   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1541     reifiedReturnShapes[0][dim] =
1542         builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
1543   }
1544   return success();
1545 }
1546 
1547 namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
1549 class InsertSliceOpConstantArgumentFolder final
1550     : public OpRewritePattern<InsertSliceOp> {
1551 public:
1552   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1553 
1554   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1555                                 PatternRewriter &rewriter) const override {
1556     // No constant operand, just return.
1557     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1558           return matchPattern(operand, matchConstantIndex());
1559         }))
1560       return failure();
1561 
    // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
1565     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1566     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1567     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1568     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1569     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1570     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1571 
1572     // Create the new op in canonical form.
1573     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1574         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1575         mixedOffsets, mixedSizes, mixedStrides);
1576     Value toInsert = insertSliceOp.source();
1577     if (sourceType != insertSliceOp.getSourceType())
1578       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1579                                                  sourceType, toInsert);
1580     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1581         insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
1582         mixedStrides);
1583     return success();
1584   }
1585 };
1586 
1587 /// Fold tensor_casts with insert_slice operations. If the source or destination
1588 /// tensor is a tensor_cast that removes static type information, the cast is
1589 /// folded into the insert_slice operation. E.g.:
1590 ///
1591 /// ```mlir
1592 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1593 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1594 /// ```
1595 ///
1596 /// folds into:
1597 ///
1598 /// ```mlir
1599 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1600 /// ```
1601 ///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast to the original result type so that the
/// type of the result does not change.
1605 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1606   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1607 
1608   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1609                                 PatternRewriter &rewriter) const override {
1610     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1611           return matchPattern(operand, matchConstantIndex());
1612         }))
1613       return failure();
1614 
1615     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1616       auto castOp = v.getDefiningOp<tensor::CastOp>();
1617       if (!castOp || !canFoldIntoConsumerOp(castOp))
1618         return llvm::None;
1619       return castOp.source();
1620     };
1621     Optional<Value> sourceCastSource =
1622         getSourceOfCastOp(insertSliceOp.source());
1623     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
1624     if (!sourceCastSource && !destCastSource)
1625       return failure();
1626 
1627     auto src = (sourceCastSource ? *sourceCastSource : insertSliceOp.source());
1628     auto dst = (destCastSource ? *destCastSource : insertSliceOp.dest());
1629 
1630     auto srcType = src.getType().cast<ShapedType>();
1631     auto dstType = dst.getType().cast<ShapedType>();
1632     if (verifyInsertSliceOp(srcType, dstType, insertSliceOp.static_offsets(),
1633                             insertSliceOp.static_sizes(),
1634                             insertSliceOp.static_strides()) !=
1635         SliceVerificationResult::Success)
1636       return failure();
1637 
1638     Value replacement = rewriter.create<InsertSliceOp>(
1639         insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
1640         insertSliceOp.getMixedSizes(), insertSliceOp.getMixedStrides());
1641 
1642     if (replacement.getType() != insertSliceOp.getType()) {
1643       replacement = rewriter.create<tensor::CastOp>(
1644           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1645     }
1646     rewriter.replaceOp(insertSliceOp, replacement);
1647     return success();
1648   }
1649 };
1650 
/// If additional static type information can be deduced from an insert_slice's
1652 /// size operands, insert an explicit cast of the op's source operand. This
1653 /// enables other canonicalization patterns that are matching for tensor_cast
1654 /// ops such as `ForOpTensorCastFolder` in SCF.
1655 ///
1656 /// Example:
1657 ///
1658 /// ```mlir
1659 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1660 ///       : tensor<?x?xf32> into ...
1661 /// ```
1662 ///
1663 /// folds into:
1664 ///
1665 /// ```mlir
1666 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1667 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1668 ///       : tensor<64x64xf32> into ...
1669 /// ```
1670 struct InsertSliceOpSourceCastInserter final
1671     : public OpRewritePattern<InsertSliceOp> {
1672   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1673 
1674   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1675                                 PatternRewriter &rewriter) const override {
1676     RankedTensorType srcType = insertSliceOp.getSourceType();
1677     if (srcType.getRank() != insertSliceOp.getType().getRank())
1678       return failure();
1679     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1680                                      srcType.getShape().end());
1681     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1682       if (Optional<int64_t> constInt =
1683               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1684         newSrcShape[i] = *constInt;
1685     }
1686 
1687     RankedTensorType newSrcType =
1688         RankedTensorType::get(newSrcShape, srcType.getElementType());
1689     if (srcType == newSrcType ||
1690         !preservesStaticInformation(srcType, newSrcType) ||
1691         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1692       return failure();
1693 
1694     // newSrcType is:
1695     //   1) Different from srcType.
1696     //   2) "More static" than srcType.
1697     //   3) Cast-compatible with srcType.
1698     // Insert the cast.
1699     Value cast = rewriter.create<tensor::CastOp>(
1700         insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
1701     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1702         insertSliceOp, cast, insertSliceOp.dest(),
1703         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1704         insertSliceOp.getMixedStrides());
1705     return success();
1706   }
1707 };
1708 } // namespace
1709 
1710 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1711                                                 MLIRContext *context) {
1712   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1713               InsertSliceOpSourceCastInserter>(context);
1714 }
1715 
1716 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1717                                                              Location loc,
1718                                                              Value tensor,
1719                                                              Value dest) {
1720   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1721   unsigned rank = rankedTensorType.getRank();
1722   auto shape = rankedTensorType.getShape();
1723   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1724   SmallVector<OpFoldResult> sizes;
1725   for (unsigned i = 0, e = rank; i < e; ++i) {
1726     OpFoldResult dim;
1727     if (rankedTensorType.isDynamicDim(i))
1728       dim = b.createOrFold<tensor::DimOp>(
1729           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1730     else
1731       dim = b.getIndexAttr(shape[i]);
1732     sizes.push_back(dim);
1733   }
1734   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1735   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1736                                                sizes, strides);
1737 }
1738 
1739 //===----------------------------------------------------------------------===//
1740 // PadOp
1741 //===----------------------------------------------------------------------===//
1742 
1743 // TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
1744 // supports optional types.
1745 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
1746                     Type typeToInfer, Type typeToInferFrom) {}
1747 
1748 ParseResult parseInferType(OpAsmParser &parser,
1749                            Optional<OpAsmParser::OperandType> optOperand,
1750                            Type &typeToInfer, Type typeToInferFrom) {
1751   if (optOperand)
1752     typeToInfer = typeToInferFrom;
1753   return success();
1754 }
1755 
1756 LogicalResult PadOp::verify() {
1757   auto sourceType = source().getType().cast<RankedTensorType>();
1758   auto resultType = result().getType().cast<RankedTensorType>();
1759   auto expectedType =
1760       PadOp::inferResultType(sourceType, extractFromI64ArrayAttr(static_low()),
1761                              extractFromI64ArrayAttr(static_high()));
1762   for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
1763     if (resultType.getDimSize(i) == expectedType.getDimSize(i))
1764       continue;
1765     if (expectedType.isDynamicDim(i))
1766       continue;
1767     return emitError("specified type ")
1768            << resultType << " does not match the inferred type "
1769            << expectedType;
1770   }
1771 
1772   return success();
1773 }
1774 
1775 LogicalResult PadOp::verifyRegions() {
1776   auto &region = getRegion();
1777   unsigned rank = result().getType().cast<RankedTensorType>().getRank();
1778   Block &block = region.front();
1779   if (block.getNumArguments() != rank)
1780     return emitError("expected the block to have ") << rank << " arguments";
1781 
1782   // Note: the number and type of yield values are checked in the YieldOp.
1783   for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
1784     if (!en.value().isIndex())
1785       return emitOpError("expected block argument ")
1786              << (en.index() + 1) << " to be an index";
1787   }
1788 
1789   // Ensure that the region yields an element of the right type.
1790   auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
1791   if (yieldOp.value().getType() !=
1792       getType().cast<ShapedType>().getElementType())
1793     return emitOpError("expected yield type to match shape element type");
1794 
1795   return success();
1796 }
1797 
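// A small worked example of the shape inference below (a sketch): padding a
// tensor<4x?xf32> source with staticLow = [1, 2] and staticHigh = [2, 3]
// infers tensor<7x?xf32>, since 4 + 1 + 2 = 7 for the static dimension and a
// dynamic source dimension stays dynamic (unless `resultShape` provides it).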
1798 RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
1799                                         ArrayRef<int64_t> staticLow,
1800                                         ArrayRef<int64_t> staticHigh,
1801                                         ArrayRef<int64_t> resultShape) {
1802   unsigned rank = sourceType.getRank();
1803   assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
1804   assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
1805   assert((resultShape.empty() || resultShape.size() == rank) &&
1806          "unexpected resultShape size mismatch");
1807 
1808   SmallVector<int64_t, 4> inferredShape;
1809   for (auto i : llvm::seq<unsigned>(0, rank)) {
1810     if (sourceType.isDynamicDim(i) ||
1811         staticLow[i] == ShapedType::kDynamicSize ||
1812         staticHigh[i] == ShapedType::kDynamicSize) {
1813       inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
1814                                                   : resultShape[i]);
1815     } else {
1816       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
1817       assert((resultShape.empty() || size == resultShape[i] ||
1818               resultShape[i] == ShapedType::kDynamicSize) &&
1819              "mismatch between inferred shape and result shape");
1820       inferredShape.push_back(size);
1821     }
1822   }
1823 
1824   return RankedTensorType::get(inferredShape, sourceType.getElementType());
1825 }
1826 
1827 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1828                   ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
1829                   ValueRange low, ValueRange high, bool nofold,
1830                   ArrayRef<NamedAttribute> attrs) {
1831   auto sourceType = source.getType().cast<RankedTensorType>();
1832   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
1833   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
1834         b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
1835   result.addAttributes(attrs);
1836 }
1837 
1838 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1839                   ValueRange low, ValueRange high, bool nofold,
1840                   ArrayRef<NamedAttribute> attrs) {
1841   auto sourceType = source.getType().cast<RankedTensorType>();
1842   unsigned rank = sourceType.getRank();
1843   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
1844   build(b, result, source, staticVector, staticVector, low, high, nofold,
1845         attrs);
1846 }
1847 
1848 void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
1849                   Value source, ArrayRef<OpFoldResult> low,
1850                   ArrayRef<OpFoldResult> high, bool nofold,
1851                   ArrayRef<NamedAttribute> attrs) {
1852   assert(resultType.isa<RankedTensorType>());
1853   auto sourceType = source.getType().cast<RankedTensorType>();
1854   SmallVector<Value, 4> dynamicLow, dynamicHigh;
1855   SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh carry the full padding configuration. Each call
  // below grows staticLow/staticHigh by one value per padding entry; if an
  // entry is dynamic (i.e. not a constant), dynamicLow/dynamicHigh grow by
  // one value as well.
1860   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
1861                              ShapedType::kDynamicSize);
1862   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
1863                              ShapedType::kDynamicSize);
1864   if (!resultType) {
1865     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
1866   }
1867   build(b, result, resultType, source, dynamicLow, dynamicHigh,
1868         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
1869         nofold ? b.getUnitAttr() : UnitAttr());
1870   result.addAttributes(attrs);
1871 }
1872 
1873 namespace {
// Folds tensor.pad when the padding consists of static zeros and the `nofold`
// attribute is not set.
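//
// For instance (a sketch):
//
//   %0 = tensor.pad %t low[0, 0] high[0, 0] {
//   ^bb0(%i: index, %j: index):
//     tensor.yield %cst : f32
//   } : tensor<8x16xf32> to tensor<8x16xf32>
//
// is rewritten to a tensor.cast of %t to the pad's result type.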
1876 struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
1877   using OpRewritePattern<PadOp>::OpRewritePattern;
1878 
1879   LogicalResult matchAndRewrite(PadOp padTensorOp,
1880                                 PatternRewriter &rewriter) const override {
1881     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
1882       return failure();
1883     if (padTensorOp.nofold())
1884       return failure();
1885     rewriter.replaceOpWithNewOp<tensor::CastOp>(
1886         padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
1887     return success();
1888   }
1889 };
1890 
1891 // Fold CastOp into PadOp when adding static information.
1892 struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
1893   using OpRewritePattern<PadOp>::OpRewritePattern;
1894 
1895   LogicalResult matchAndRewrite(PadOp padTensorOp,
1896                                 PatternRewriter &rewriter) const override {
1897     auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
1898     if (!tensor::canFoldIntoConsumerOp(castOp))
1899       return failure();
1900 
1901     auto newResultType = PadOp::inferResultType(
1902         castOp.source().getType().cast<RankedTensorType>(),
1903         extractFromI64ArrayAttr(padTensorOp.static_low()),
1904         extractFromI64ArrayAttr(padTensorOp.static_high()),
1905         padTensorOp.getResultType().getShape());
1906 
1907     if (newResultType == padTensorOp.getResultType()) {
1908       rewriter.updateRootInPlace(padTensorOp, [&]() {
1909         padTensorOp.sourceMutable().assign(castOp.source());
1910       });
1911     } else {
1912       auto newOp = rewriter.create<PadOp>(
1913           padTensorOp->getLoc(), newResultType, padTensorOp.source(),
1914           padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
1915           padTensorOp.static_high(), padTensorOp.nofold());
1916       BlockAndValueMapping mapper;
1917       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
1918 
1919       rewriter.replaceOpWithNewOp<tensor::CastOp>(
1920           padTensorOp, padTensorOp.getResultType(), newOp);
1921     }
1922     return success();
1923   }
1924 };
1925 
1926 // Fold CastOp using the result of PadOp back into the latter if it adds
1927 // static information.
1928 struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
1929   using OpRewritePattern<PadOp>::OpRewritePattern;
1930 
1931   LogicalResult matchAndRewrite(PadOp padTensorOp,
1932                                 PatternRewriter &rewriter) const override {
1933     if (!padTensorOp.result().hasOneUse())
1934       return failure();
1935     auto tensorCastOp =
1936         dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
1937     if (!tensorCastOp)
1938       return failure();
1939     if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
1940                                             tensorCastOp.dest().getType()))
1941       return failure();
1942 
1943     auto replacementOp = rewriter.create<PadOp>(
1944         padTensorOp.getLoc(), tensorCastOp.dest().getType(),
1945         padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
1946         padTensorOp.static_low(), padTensorOp.static_high(),
1947         padTensorOp.nofold());
1948     replacementOp.region().takeBody(padTensorOp.region());
1949 
1950     rewriter.replaceOp(padTensorOp, replacementOp.result());
1951     rewriter.replaceOp(tensorCastOp, replacementOp.result());
1952     return success();
1953   }
1954 };
1955 } // namespace
1956 
1957 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
1958                                         MLIRContext *context) {
1959   results
1960       .add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast>(
1961           context);
1962 }
1963 
/// Return the padding value of the PadOp if it is constant. In this context,
1965 /// "constant" means an actual constant or "defined outside of the block".
1966 ///
1967 /// Values are considered constant in three cases:
1968 ///  - A ConstantLike value.
1969 ///  - A basic block argument from a different block.
1970 ///  - A value defined outside of the block.
1971 ///
1972 /// If the padding value is not constant, an empty Value is returned.
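///
/// A hedged example (names are illustrative): in
///
/// ```mlir
///   %cst = arith.constant 0.0 : f32
///   %0 = tensor.pad %t low[1, 1] high[1, 1] {
///   ^bb0(%i: index, %j: index):
///     tensor.yield %cst : f32
///   } : tensor<8x16xf32> to tensor<10x18xf32>
/// ```
///
/// the padding value %cst is an actual constant and is returned.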
1973 Value PadOp::getConstantPaddingValue() {
1974   auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
1975   if (!yieldOp)
1976     return {};
1977   Value padValue = yieldOp.value();
1978   // Check if yield value is a constant.
1979   if (matchPattern(padValue, m_Constant()))
1980     return padValue;
1981   // Check if yield value is defined inside the PadOp block.
1982   if (padValue.getParentBlock() == &getRegion().front())
1983     return {};
1984   // Else: Yield value defined outside of the PadOp block.
1985   return padValue;
1986 }
1987 
1988 OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
1989   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
1990       !nofold())
1991     return source();
1992   return {};
1993 }
1994 
1995 //===----------------------------------------------------------------------===//
1996 // SplatOp
1997 //===----------------------------------------------------------------------===//
1998 
1999 OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
2000   auto constOperand = operands.front();
2001   if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
2002     return {};
2003 
  // SplatElementsAttr::get treats a single value for the second arg as a
  // splat.
2005   return SplatElementsAttr::get(getType(), {constOperand});
2006 }
2007 
2008 //===----------------------------------------------------------------------===//
2009 // TableGen'd op method definitions
2010 //===----------------------------------------------------------------------===//
2011 
2012 #define GET_OP_CLASSES
2013 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
2014