1 //===----------------------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
10 #include "mlir/Dialect/Arithmetic/Utils/Utils.h"
11 #include "mlir/Dialect/Complex/IR/Complex.h"
12 #include "mlir/Dialect/Tensor/IR/Tensor.h"
13 #include "mlir/Dialect/Utils/ReshapeOpsUtils.h"
14 #include "mlir/Dialect/Utils/StaticValueUtils.h"
15 #include "mlir/IR/BlockAndValueMapping.h"
16 #include "mlir/IR/Builders.h"
17 #include "mlir/IR/BuiltinAttributeInterfaces.h"
18 #include "mlir/IR/Matchers.h"
19 #include "mlir/IR/TypeUtilities.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 
23 using namespace mlir;
24 using namespace mlir::tensor;
25 
26 /// Materialize a single constant operation from a given attribute value with
27 /// the desired resultant type.
28 Operation *TensorDialect::materializeConstant(OpBuilder &builder,
29                                               Attribute value, Type type,
30                                               Location loc) {
31   if (arith::ConstantOp::isBuildableWith(value, type))
32     return builder.create<arith::ConstantOp>(loc, value, type);
33   if (complex::ConstantOp::isBuildableWith(value, type))
34     return builder.create<complex::ConstantOp>(loc, type,
35                                                value.cast<ArrayAttr>());
36   return nullptr;
37 }
38 
39 //===----------------------------------------------------------------------===//
40 // CastOp
41 //===----------------------------------------------------------------------===//
42 
43 /// Returns true if `target` is a ranked tensor type that preserves static
44 /// information available in the `source` ranked tensor type.
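///
/// For example:
///   - source tensor<8x?xf32>, target tensor<8x16xf32>: true (the target
///     only refines the dynamic dimension).
///   - source tensor<8x16xf32>, target tensor<8x?xf32>: false (a static
///     size becomes dynamic).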
45 bool mlir::tensor::preservesStaticInformation(Type source, Type target) {
46   auto sourceType = source.dyn_cast<RankedTensorType>();
47   auto targetType = target.dyn_cast<RankedTensorType>();
48 
49   // Requires RankedTensorType.
50   if (!sourceType || !targetType)
51     return false;
52 
  // Requires same element type.
54   if (sourceType.getElementType() != targetType.getElementType())
55     return false;
56 
57   // Requires same rank.
58   if (sourceType.getRank() != targetType.getRank())
59     return false;
60 
  // If the target is more dynamic than the source along any dimension, i.e. a
  // static source size becomes dynamic, static information is not preserved.
62   for (auto t : llvm::zip(sourceType.getShape(), targetType.getShape())) {
63     if (!ShapedType::isDynamic(std::get<0>(t)) &&
64         ShapedType::isDynamic(std::get<1>(t)))
65       return false;
66   }
67 
68   return true;
69 }
70 
71 /// Determines whether tensor::CastOp casts to a more dynamic version of the
72 /// source tensor. This is useful to fold a tensor.cast into a consuming op and
73 /// implement canonicalization patterns for ops in different dialects that may
74 /// consume the results of tensor.cast operations. Such foldable tensor.cast
/// operations are typically inserted as `slice` ops and are canonicalized to
/// preserve the type compatibility of their uses.
77 ///
78 /// Returns true when all conditions are met:
/// 1. source and result are ranked tensors with the same element type and
///    rank.
/// 2. the source type has more static information than the result type.
81 ///
82 /// Example:
83 /// ```mlir
84 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
85 ///   %2 = consumer %1 ... : tensor<?x?xf32> ...
86 /// ```
87 ///
88 /// folds into:
89 ///
90 /// ```mlir
91 ///   %2 = consumer %0 ... : tensor<8x16xf32> ...
92 /// ```
93 bool mlir::tensor::canFoldIntoConsumerOp(CastOp castOp) {
94   if (!castOp)
95     return false;
96 
  // Can fold if the source of the cast has at least as much static
  // information as its result.
99   return preservesStaticInformation(castOp.getType(),
100                                     castOp.source().getType());
101 }
102 
103 /// Determines whether the tensor::CastOp casts to a more static version of the
/// source tensor. This is useful to fold into a producing op and implement
/// canonicalization patterns with the `tensor.cast` op as the root, but with
/// producers from different dialects. Returns true when all conditions are
/// met:
/// 1. source and result are ranked tensors with the same element type and
///    rank.
108 /// 2. the result type has more static information than the source.
109 ///
110 /// Example:
111 /// ```mlir
112 ///   %1 = producer ... : tensor<?x?xf32>
113 ///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<8x16xf32>
114 /// ```
115 ///
/// can be canonicalized to:
117 ///
118 /// ```mlir
119 ///   %2 = producer ... : tensor<8x16xf32>
120 /// ```
/// Not all ops can be canonicalized this way, but for those that can, this
/// method checks whether the canonicalization is worthwhile.
123 bool mlir::tensor::canFoldIntoProducerOp(CastOp castOp) {
124   if (!castOp)
125     return false;
126   return preservesStaticInformation(castOp.source().getType(),
127                                     castOp.getType());
128 }
129 
130 /// Performs folding of any operand of `op` if it comes from a tensor::CastOp
131 /// that can be folded.
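///
/// For example, if an operand is defined by
/// `%1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>`, the operand
/// is replaced by %0 directly.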
132 LogicalResult mlir::tensor::foldTensorCast(Operation *op) {
133   bool folded = false;
134   for (OpOperand &operand : op->getOpOperands()) {
135     auto castOp = operand.get().getDefiningOp<tensor::CastOp>();
136     if (castOp && tensor::canFoldIntoConsumerOp(castOp)) {
137       operand.set(castOp.getOperand());
138       folded = true;
139     }
140   }
141   return success(folded);
142 }
143 
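/// Two tensor types are cast-compatible if they have the same element type
/// and compatible shapes, e.g. tensor<?x4xf32> and tensor<8x?xf32>; an
/// unranked tensor is compatible with any shape.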
144 bool CastOp::areCastCompatible(TypeRange inputs, TypeRange outputs) {
145   if (inputs.size() != 1 || outputs.size() != 1)
146     return false;
147   Type a = inputs.front(), b = outputs.front();
148   auto aT = a.dyn_cast<TensorType>();
149   auto bT = b.dyn_cast<TensorType>();
150   if (!aT || !bT)
151     return false;
152 
153   if (aT.getElementType() != bT.getElementType())
154     return false;
155 
156   return succeeded(verifyCompatibleShape(aT, bT));
157 }
158 
159 /// Compute a TensorType that has the joined shape knowledge of the two
160 /// given TensorTypes. The element types need to match.
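///
/// For example, joining tensor<?x8xf32> with tensor<4x?xf32> yields
/// tensor<4x8xf32>; joining tensor<4x8xf32> with tensor<4x16xf32> fails and
/// returns a null type.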
161 static TensorType joinShapes(TensorType one, TensorType two) {
162   assert(one.getElementType() == two.getElementType());
163 
164   if (!one.hasRank())
165     return two;
166   if (!two.hasRank())
167     return one;
168 
169   int64_t rank = one.getRank();
170   if (rank != two.getRank())
171     return {};
172 
173   SmallVector<int64_t, 4> join;
174   join.reserve(rank);
175   for (int64_t i = 0; i < rank; ++i) {
176     if (one.isDynamicDim(i)) {
177       join.push_back(two.getDimSize(i));
178       continue;
179     }
180     if (two.isDynamicDim(i)) {
181       join.push_back(one.getDimSize(i));
182       continue;
183     }
184     if (one.getDimSize(i) != two.getDimSize(i))
185       return {};
186     join.push_back(one.getDimSize(i));
187   }
188   return RankedTensorType::get(join, one.getElementType());
189 }
190 
191 namespace {
192 
193 /// Replaces chains of two tensor.cast operations by a single tensor.cast
194 /// operation if doing so does not remove runtime constraints.
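///
/// For example:
/// ```mlir
///   %1 = tensor.cast %0 : tensor<4x4xf32> to tensor<?x?xf32>
///   %2 = tensor.cast %1 : tensor<?x?xf32> to tensor<4x?xf32>
/// ```
/// is replaced by a single cast:
/// ```mlir
///   %2 = tensor.cast %0 : tensor<4x4xf32> to tensor<4x?xf32>
/// ```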
195 struct ChainedTensorCast : public OpRewritePattern<CastOp> {
196   using OpRewritePattern<CastOp>::OpRewritePattern;
197 
198   LogicalResult matchAndRewrite(CastOp tensorCast,
199                                 PatternRewriter &rewriter) const final {
200     auto tensorCastOperand = tensorCast.getOperand().getDefiningOp<CastOp>();
201 
202     if (!tensorCastOperand)
203       return failure();
204 
205     auto sourceType =
206         tensorCastOperand.getOperand().getType().cast<TensorType>();
207     auto intermediateType = tensorCastOperand.getType().cast<TensorType>();
208     auto resultType = tensorCast.getType().cast<TensorType>();
209 
210     // We can remove the intermediate cast if joining all three produces the
211     // same result as just joining the source and result shapes.
212     auto firstJoin =
213         joinShapes(joinShapes(sourceType, intermediateType), resultType);
214 
215     // The join might not exist if the cast sequence would fail at runtime.
216     if (!firstJoin)
217       return failure();
218 
    // The newJoin always exists if the above join exists; it might just
    // contain less information. If so, we cannot drop the intermediate cast,
    // as doing so would remove runtime checks.
222     auto newJoin = joinShapes(sourceType, resultType);
223     if (firstJoin != newJoin)
224       return failure();
225 
226     rewriter.replaceOpWithNewOp<CastOp>(tensorCast, resultType,
227                                         tensorCastOperand.getOperand());
228     return success();
229   }
230 };
231 
232 } // namespace
233 
234 void CastOp::getCanonicalizationPatterns(RewritePatternSet &results,
235                                          MLIRContext *context) {
236   results.add<ChainedTensorCast>(context);
237 }
238 
239 //===----------------------------------------------------------------------===//
240 // DimOp
241 //===----------------------------------------------------------------------===//
242 
243 void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
244                   int64_t index) {
245   auto loc = result.location;
246   Value indexValue = builder.create<arith::ConstantIndexOp>(loc, index);
247   build(builder, result, source, indexValue);
248 }
249 
250 Optional<int64_t> DimOp::getConstantIndex() {
251   if (auto constantOp = index().getDefiningOp<arith::ConstantOp>())
252     return constantOp.getValue().cast<IntegerAttr>().getInt();
253   return {};
254 }
255 
256 LogicalResult DimOp::verify() {
257   // Assume unknown index to be in range.
258   Optional<int64_t> index = getConstantIndex();
259   if (!index.hasValue())
260     return success();
261 
  // Check that the constant index is not statically known to be out of range.
263   auto type = source().getType();
264   if (auto tensorType = type.dyn_cast<RankedTensorType>()) {
265     if (index.getValue() >= tensorType.getRank())
266       return emitOpError("index is out of range");
267   } else if (type.isa<UnrankedTensorType>()) {
268     // Assume index to be in range.
269   } else {
270     llvm_unreachable("expected operand with tensor type");
271   }
272   return success();
273 }
274 
275 OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
276   // All forms of folding require a known index.
277   auto index = operands[1].dyn_cast_or_null<IntegerAttr>();
278   if (!index)
279     return {};
280 
281   // Folding for unranked types (UnrankedTensorType) is not supported.
282   auto tensorType = source().getType().dyn_cast<RankedTensorType>();
283   if (!tensorType)
284     return {};
285 
286   // Fold if the shape extent along the given index is known.
287   if (!tensorType.isDynamicDim(index.getInt())) {
288     Builder builder(getContext());
289     return builder.getIndexAttr(tensorType.getShape()[index.getInt()]);
290   }
291 
292   Operation *definingOp = source().getDefiningOp();
293 
294   // Fold dim to the operand of tensor.generate.
295   if (auto fromElements = dyn_cast_or_null<tensor::GenerateOp>(definingOp)) {
296     auto resultType =
297         fromElements.getResult().getType().cast<RankedTensorType>();
298     // The case where the type encodes the size of the dimension is handled
299     // above.
300     assert(ShapedType::isDynamic(resultType.getShape()[index.getInt()]));
301 
302     // Find the operand of the fromElements that corresponds to this index.
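    // E.g. for tensor<?x4x?xf32> and index 2, the single dynamic extent
    // before dim 2 advances the iterator once, so it ends at the extent of
    // dim 2.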
303     auto dynExtents = fromElements.dynamicExtents().begin();
304     for (auto dim : resultType.getShape().take_front(index.getInt()))
305       if (ShapedType::isDynamic(dim))
306         dynExtents++;
307 
308     return Value{*dynExtents};
309   }
310 
311   // The size at the given index is now known to be a dynamic size.
312   unsigned unsignedIndex = index.getValue().getZExtValue();
313 
314   if (auto sliceOp = dyn_cast_or_null<tensor::ExtractSliceOp>(definingOp)) {
    // Fold only for non-rank-reduced ops. For the rank-reduced version, rely
    // on the `resolve-shaped-type-result-dims` pass.
317     if (sliceOp.getType().getRank() == sliceOp.getSourceType().getRank() &&
318         sliceOp.isDynamicSize(unsignedIndex)) {
319       return {sliceOp.getDynamicSize(unsignedIndex)};
320     }
321   }
322 
323   // dim(cast) -> dim
324   if (succeeded(foldTensorCast(*this)))
325     return getResult();
326 
327   return {};
328 }
329 
330 namespace {
331 /// Fold dim of a cast into the dim of the source of the tensor cast.
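///
/// For example:
/// ```mlir
///   %0 = tensor.cast %t : tensor<4x?xf32> to tensor<?x?xf32>
///   %d = tensor.dim %0, %c0 : tensor<?x?xf32>
/// ```
/// becomes:
/// ```mlir
///   %d = tensor.dim %t, %c0 : tensor<4x?xf32>
/// ```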
332 struct DimOfCastOp : public OpRewritePattern<DimOp> {
333   using OpRewritePattern<DimOp>::OpRewritePattern;
334 
335   LogicalResult matchAndRewrite(DimOp dimOp,
336                                 PatternRewriter &rewriter) const override {
337     auto castOp = dimOp.source().getDefiningOp<CastOp>();
338     if (!castOp)
339       return failure();
340     Value newSource = castOp.getOperand();
341     rewriter.replaceOpWithNewOp<DimOp>(dimOp, newSource, dimOp.index());
342     return success();
343   }
344 };
345 } // namespace
346 
347 void DimOp::getCanonicalizationPatterns(RewritePatternSet &results,
348                                         MLIRContext *context) {
349   results.add<DimOfCastOp>(context);
350 }
351 
352 //===----------------------------------------------------------------------===//
353 // ExtractOp
354 //===----------------------------------------------------------------------===//
355 
356 LogicalResult ExtractOp::verify() {
357   // Verify the # indices match if we have a ranked type.
358   if (auto tensorType = tensor().getType().dyn_cast<RankedTensorType>())
359     if (tensorType.getRank() != static_cast<int64_t>(indices().size()))
      return emitOpError("incorrect number of indices for extract");
361 
362   return success();
363 }
364 
365 OpFoldResult ExtractOp::fold(ArrayRef<Attribute> operands) {
366   // If this is a splat elements attribute, simply return the value. All of the
367   // elements of a splat attribute are the same.
368   if (Attribute tensor = operands.front())
369     if (auto splatTensor = tensor.dyn_cast<SplatElementsAttr>())
370       return splatTensor.getSplatValue<Attribute>();
371 
372   // Collect the constant indices into the tensor.
373   SmallVector<uint64_t, 8> indices;
  for (Attribute index : llvm::drop_begin(operands, 1)) {
    if (!index || !index.isa<IntegerAttr>())
      return {};
    indices.push_back(index.cast<IntegerAttr>().getInt());
  }
378   }
379 
380   // Fold extract(from_elements(...)).
381   if (auto fromElementsOp = tensor().getDefiningOp<FromElementsOp>()) {
382     auto tensorType = fromElementsOp.getType().cast<RankedTensorType>();
383     auto rank = tensorType.getRank();
384     assert(static_cast<int64_t>(indices.size()) == tensorType.getRank() &&
385            "rank mismatch");
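    // Compute the flattened row-major index; e.g. for tensor<2x3xf32> and
    // indices [1, 2], this yields 1 * 3 + 2 = 5.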
    int flatIndex = 0;
    int stride = 1;
    for (int i = rank - 1; i >= 0; --i) {
      flatIndex += indices[i] * stride;
      stride *= tensorType.getDimSize(i);
    }
393     // Prevent out of bounds accesses. This can happen in invalid code that will
394     // never execute.
395     if (static_cast<int>(fromElementsOp.elements().size()) <= flatIndex ||
396         flatIndex < 0)
397       return {};
398     return fromElementsOp.elements()[flatIndex];
399   }
400 
401   // If this is an elements attribute, query the value at the given indices.
402   if (Attribute tensor = operands.front()) {
403     auto elementsAttr = tensor.dyn_cast<ElementsAttr>();
404     if (elementsAttr && elementsAttr.isValidIndex(indices))
405       return elementsAttr.getValues<Attribute>()[indices];
406   }
407 
408   return {};
409 }
410 
411 //===----------------------------------------------------------------------===//
412 // FromElementsOp
413 //===----------------------------------------------------------------------===//
414 
415 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
416                            Type resultType, ValueRange elements) {
417   result.addOperands(elements);
418   result.addTypes(resultType);
419 }
420 
421 void FromElementsOp::build(OpBuilder &builder, OperationState &result,
422                            ValueRange elements) {
423   assert(!elements.empty() && "expected at least one element");
424   Type resultType = RankedTensorType::get(
425       {static_cast<int64_t>(elements.size())}, elements.front().getType());
426   build(builder, result, resultType, elements);
427 }
428 
429 OpFoldResult FromElementsOp::fold(ArrayRef<Attribute> operands) {
430   if (!llvm::is_contained(operands, nullptr))
431     return DenseElementsAttr::get(getType(), operands);
432   return {};
433 }
434 
435 namespace {
436 
437 // Pushes the index_casts that occur before extractions to after the extract.
438 // This minimizes type conversion in some cases and enables the extract
439 // canonicalizer. This changes:
440 //
// %cast = arith.index_cast %tensor : tensor<1xi32> to tensor<1xindex>
// %extract = tensor.extract %cast[%index] : tensor<1xindex>
//
// to the following:
//
// %extract = tensor.extract %tensor[%index] : tensor<1xi32>
// %cast = arith.index_cast %extract : i32 to index
//
// Consider expanding this to a template and handling all tensor cast
// operations.
452 struct ExtractElementFromIndexCast
453     : public OpRewritePattern<tensor::ExtractOp> {
454   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
455 
456   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
457                                 PatternRewriter &rewriter) const final {
458     Location loc = extract.getLoc();
459     auto indexCast = extract.tensor().getDefiningOp<arith::IndexCastOp>();
460     if (!indexCast)
461       return failure();
462 
463     Type elementTy = getElementTypeOrSelf(indexCast.getIn());
464 
465     auto newExtract = rewriter.create<tensor::ExtractOp>(
466         loc, elementTy, indexCast.getIn(), extract.indices());
467 
468     rewriter.replaceOpWithNewOp<arith::IndexCastOp>(extract, extract.getType(),
469                                                     newExtract);
470 
471     return success();
472   }
473 };
474 
475 } // namespace
476 
477 void FromElementsOp::getCanonicalizationPatterns(RewritePatternSet &results,
478                                                  MLIRContext *context) {
479   results.add<ExtractElementFromIndexCast>(context);
480 }
481 
482 //===----------------------------------------------------------------------===//
483 // InsertOp
484 //===----------------------------------------------------------------------===//
485 
486 LogicalResult InsertOp::verify() {
487   // Verify the # indices match if we have a ranked type.
488   if (auto destType = dest().getType().dyn_cast<RankedTensorType>())
489     if (destType.getRank() != static_cast<int64_t>(indices().size()))
490       return emitOpError("incorrect number of indices");
491   return success();
492 }
493 
494 OpFoldResult InsertOp::fold(ArrayRef<Attribute> operands) {
495   Attribute scalar = operands[0];
496   Attribute dest = operands[1];
497   if (scalar && dest)
498     if (auto splatDest = dest.dyn_cast<SplatElementsAttr>())
499       if (scalar == splatDest.getSplatValue<Attribute>())
500         return dest;
501   return {};
502 }
503 
504 //===----------------------------------------------------------------------===//
505 // GenerateOp
506 //===----------------------------------------------------------------------===//
507 
508 LogicalResult GenerateOp::reifyResultShapes(
509     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
510   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
511   int idx = 0;
512   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
513     if (getType().isDynamicDim(dim)) {
514       reifiedReturnShapes[0][dim] = getOperand(idx++);
515     } else {
516       reifiedReturnShapes[0][dim] = builder.create<arith::ConstantIndexOp>(
517           getLoc(), getType().getDimSize(dim));
518     }
519   }
520   return success();
521 }
522 
523 LogicalResult GenerateOp::verify() {
524   // Ensure that the tensor type has as many dynamic dimensions as are specified
525   // by the operands.
526   RankedTensorType resultTy = getType().cast<RankedTensorType>();
527   if (getNumOperands() != resultTy.getNumDynamicDims())
528     return emitError("must have as many index operands as dynamic extents "
529                      "in the result type");
530 
531   return success();
532 }
533 
534 LogicalResult GenerateOp::verifyRegions() {
535   RankedTensorType resultTy = getType().cast<RankedTensorType>();
536   // Ensure that region arguments span the index space.
537   if (!llvm::all_of(body().getArgumentTypes(),
538                     [](Type ty) { return ty.isIndex(); }))
539     return emitError("all body arguments must be index");
540   if (body().getNumArguments() != resultTy.getRank())
541     return emitError("must have one body argument per input dimension");
542 
543   // Ensure that the region yields an element of the right type.
544   auto yieldOp = cast<YieldOp>(body().getBlocks().front().getTerminator());
545 
546   if (yieldOp.value().getType() != resultTy.getElementType())
547     return emitOpError(
548         "body must be terminated with a `yield` operation of the tensor "
549         "element type");
550 
551   return success();
552 }
553 
554 void GenerateOp::build(
555     OpBuilder &b, OperationState &result, Type resultTy,
556     ValueRange dynamicExtents,
557     function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuilder) {
558   build(b, result, resultTy, dynamicExtents);
559 
560   // Build and populate body.
561   OpBuilder::InsertionGuard guard(b);
562   Region *bodyRegion = result.regions.front().get();
563   auto rank = resultTy.cast<RankedTensorType>().getRank();
564   SmallVector<Type, 2> argumentTypes(rank, b.getIndexType());
565   SmallVector<Location, 2> argumentLocs(rank, result.location);
566   Block *bodyBlock =
567       b.createBlock(bodyRegion, bodyRegion->end(), argumentTypes, argumentLocs);
568   bodyBuilder(b, result.location, bodyBlock->getArguments());
569 }
570 
571 namespace {
572 
/// Canonicalizes tensor.generate operations whose dynamic extents are defined
/// by constants into the equivalent operation with those extents folded into
/// the result type. A tensor.cast back to the original type is inserted to
/// keep the resulting IR well-typed.
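///
/// For example, with %c8 a constant index:
/// ```mlir
///   %0 = tensor.generate %c8, %y {
///     ...
///   } : tensor<?x?xf32>
/// ```
/// is rewritten to:
/// ```mlir
///   %new = tensor.generate %y {
///     ...
///   } : tensor<8x?xf32>
///   %0 = tensor.cast %new : tensor<8x?xf32> to tensor<?x?xf32>
/// ```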
577 struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
578   using OpRewritePattern<GenerateOp>::OpRewritePattern;
579 
580   LogicalResult matchAndRewrite(GenerateOp tensorFromElements,
581                                 PatternRewriter &rewriter) const final {
582     auto resultType =
583         tensorFromElements.getResult().getType().cast<RankedTensorType>();
584 
585     if (resultType.hasStaticShape())
586       return failure();
587 
588     SmallVector<Value, 4> newOperands;
589     SmallVector<int64_t, 4> newShape;
590     auto operandsIt = tensorFromElements.dynamicExtents().begin();
591 
592     for (int64_t dim : resultType.getShape()) {
593       if (!ShapedType::isDynamic(dim)) {
594         newShape.push_back(dim);
595         continue;
596       }
597       APInt index;
598       if (!matchPattern(*operandsIt, m_ConstantInt(&index))) {
599         newShape.push_back(ShapedType::kDynamicSize);
600         newOperands.push_back(*operandsIt++);
601         continue;
602       }
603       newShape.push_back(index.getSExtValue());
604       operandsIt++;
605     }
606 
607     if (newOperands.size() == tensorFromElements.dynamicExtents().size())
608       return failure();
609 
610     auto loc = tensorFromElements.getLoc();
611     auto newOp = rewriter.create<GenerateOp>(
612         loc, RankedTensorType::get(newShape, resultType.getElementType()),
613         newOperands);
614     rewriter.inlineRegionBefore(tensorFromElements.body(), newOp.body(),
615                                 newOp.body().begin());
616     rewriter.replaceOpWithNewOp<tensor::CastOp>(tensorFromElements, resultType,
617                                                 newOp);
618     return success();
619   }
620 };
621 
622 /// Canonicalizes the pattern of the form
623 ///
624 /// %tensor = tensor.generate %x {
625 ///   ^bb0(%arg0: index):
626 ///   <computation>
627 ///   yield %1 : index
628 /// } : tensor<?xindex>
/// %extracted_element = tensor.extract %tensor[%c0] : tensor<?xindex>
630 ///
631 /// to just <computation> with %arg0 replaced by %c0. We only do this if the
632 /// tensor.generate operation has no side-effects.
633 struct ExtractFromTensorGenerate : public OpRewritePattern<tensor::ExtractOp> {
634   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
635 
636   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
637                                 PatternRewriter &rewriter) const final {
638     auto tensorFromElements = extract.tensor().getDefiningOp<GenerateOp>();
639     if (!tensorFromElements || !wouldOpBeTriviallyDead(tensorFromElements))
640       return failure();
641 
642     BlockAndValueMapping mapping;
643     Block *body = tensorFromElements.getBody();
644     mapping.map(body->getArguments(), extract.indices());
645     for (auto &op : body->without_terminator())
646       rewriter.clone(op, mapping);
647 
648     auto yield = cast<YieldOp>(body->getTerminator());
649 
650     rewriter.replaceOp(extract, mapping.lookupOrDefault(yield.value()));
651     return success();
652   }
653 };
654 
655 /// Canonicalizes the pattern of the form
656 ///
/// %val = tensor.cast %source : tensor<?xi32> to tensor<2xi32>
658 /// %extracted_element = tensor.extract %val[%c0] : tensor<2xi32>
659 ///
660 /// to
661 ///
662 /// %extracted_element = tensor.extract %source[%c0] : tensor<?xi32>
663 struct ExtractFromTensorCast : public OpRewritePattern<tensor::ExtractOp> {
664   using OpRewritePattern<tensor::ExtractOp>::OpRewritePattern;
665 
666   LogicalResult matchAndRewrite(tensor::ExtractOp extract,
667                                 PatternRewriter &rewriter) const final {
668     auto tensorCast = extract.tensor().getDefiningOp<tensor::CastOp>();
669     if (!tensorCast)
670       return failure();
671 
672     rewriter.replaceOpWithNewOp<tensor::ExtractOp>(extract, tensorCast.source(),
673                                                    extract.indices());
674     return success();
675   }
676 };
677 
678 } // namespace
679 
680 void GenerateOp::getCanonicalizationPatterns(RewritePatternSet &results,
681                                              MLIRContext *context) {
682   // TODO: Move extract patterns to tensor::ExtractOp.
683   results.add<ExtractFromTensorGenerate, ExtractFromTensorCast,
684               StaticTensorGenerate>(context);
685 }
686 
687 //===----------------------------------------------------------------------===//
688 // RankOp
689 //===----------------------------------------------------------------------===//
690 
691 OpFoldResult RankOp::fold(ArrayRef<Attribute> operands) {
692   // Constant fold rank when the rank of the operand is known.
693   auto type = getOperand().getType();
694   auto shapedType = type.dyn_cast<ShapedType>();
695   if (shapedType && shapedType.hasRank())
696     return IntegerAttr::get(IndexType::get(getContext()), shapedType.getRank());
697   return IntegerAttr();
698 }
699 
700 //===----------------------------------------------------------------------===//
701 // ReshapeOp
702 //===----------------------------------------------------------------------===//
703 
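/// Compute the total number of elements of `type`. Callers must ensure the
/// shape is static; a dynamic extent would corrupt the product.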
704 static int64_t getNumElements(ShapedType type) {
705   int64_t numElements = 1;
706   for (auto dim : type.getShape())
707     numElements *= dim;
708   return numElements;
709 }
710 
711 LogicalResult ReshapeOp::verify() {
712   TensorType operandType = source().getType().cast<TensorType>();
713   TensorType resultType = result().getType().cast<TensorType>();
714 
715   if (operandType.getElementType() != resultType.getElementType())
716     return emitOpError("element types of source and destination tensor "
717                        "types should be the same");
718 
719   int64_t shapeSize = shape().getType().cast<RankedTensorType>().getDimSize(0);
720   auto resultRankedType = resultType.dyn_cast<RankedTensorType>();
721   auto operandRankedType = operandType.dyn_cast<RankedTensorType>();
722 
723   if (resultRankedType) {
724     if (operandRankedType && resultRankedType.hasStaticShape() &&
725         operandRankedType.hasStaticShape()) {
726       if (getNumElements(operandRankedType) != getNumElements(resultRankedType))
727         return emitOpError("source and destination tensor should have the "
728                            "same number of elements");
729     }
730     if (ShapedType::isDynamic(shapeSize))
731       return emitOpError("cannot use shape operand with dynamic length to "
732                          "reshape to statically-ranked tensor type");
733     if (shapeSize != resultRankedType.getRank())
734       return emitOpError(
735           "length of shape operand differs from the result's tensor rank");
736   }
737   return success();
738 }
739 
740 //===----------------------------------------------------------------------===//
741 // Reassociative reshape ops
742 //===----------------------------------------------------------------------===//
743 
744 SmallVector<AffineMap, 4> CollapseShapeOp::getReassociationMaps() {
745   return getSymbolLessAffineMaps(getReassociationExprs());
746 }
747 SmallVector<ReassociationExprs, 4> CollapseShapeOp::getReassociationExprs() {
748   return convertReassociationIndicesToExprs(getContext(),
749                                             getReassociationIndices());
750 }
751 
752 SmallVector<AffineMap, 4> ExpandShapeOp::getReassociationMaps() {
753   return getSymbolLessAffineMaps(getReassociationExprs());
754 }
755 SmallVector<ReassociationExprs, 4> ExpandShapeOp::getReassociationExprs() {
756   return convertReassociationIndicesToExprs(getContext(),
757                                             getReassociationIndices());
758 }
759 
760 /// Compute the RankedTensorType obtained by applying `reassociation` to `type`.
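///
/// For example, collapsing tensor<4x5x6xf32> with the reassociation
/// [[0, 1], [2]] yields tensor<20x6xf32>; a dynamic size anywhere within a
/// group makes the corresponding collapsed dimension dynamic.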
761 static RankedTensorType
762 computeTensorReshapeCollapsedType(RankedTensorType type,
763                                   ArrayRef<AffineMap> reassociation) {
764   auto shape = type.getShape();
765   SmallVector<int64_t, 4> newShape;
766   newShape.reserve(reassociation.size());
767 
768   // Use the fact that reassociation is valid to simplify the logic: only use
769   // each map's rank.
770   assert(isReassociationValid(reassociation) && "invalid reassociation");
771   unsigned currentDim = 0;
772   for (AffineMap m : reassociation) {
773     unsigned dim = m.getNumResults();
774     auto band = shape.slice(currentDim, dim);
775     int64_t size = 1;
776     if (llvm::is_contained(band, ShapedType::kDynamicSize))
777       size = ShapedType::kDynamicSize;
778     else
779       for (unsigned d = 0; d < dim; ++d)
780         size *= shape[currentDim + d];
781     newShape.push_back(size);
782     currentDim += dim;
783   }
784 
785   return RankedTensorType::get(newShape, type.getElementType());
786 }
787 
788 void CollapseShapeOp::build(OpBuilder &b, OperationState &result, Value src,
789                             ArrayRef<ReassociationIndices> reassociation,
790                             ArrayRef<NamedAttribute> attrs) {
791   auto resultType = computeTensorReshapeCollapsedType(
792       src.getType().cast<RankedTensorType>(),
793       getSymbolLessAffineMaps(
794           convertReassociationIndicesToExprs(b.getContext(), reassociation)));
795   build(b, result, resultType, src, attrs);
796   result.addAttribute(getReassociationAttrName(),
797                       getReassociationIndicesAttribute(b, reassociation));
798 }
799 
800 template <typename TensorReshapeOp, bool isExpansion = std::is_same<
801                                         TensorReshapeOp, ExpandShapeOp>::value>
802 static LogicalResult verifyTensorReshapeOp(TensorReshapeOp op,
803                                            RankedTensorType expandedType,
804                                            RankedTensorType collapsedType) {
805   if (failed(
806           verifyReshapeLikeTypes(op, expandedType, collapsedType, isExpansion)))
807     return failure();
808 
809   auto maps = op.getReassociationMaps();
810   RankedTensorType expectedType =
811       computeTensorReshapeCollapsedType(expandedType, maps);
812   if (collapsedType != expectedType)
813     return op.emitOpError("expected collapsed type to be ")
814            << expectedType << ", but got " << collapsedType;
815   return success();
816 }
817 
818 LogicalResult ExpandShapeOp::verify() {
819   return verifyTensorReshapeOp(*this, getResultType(), getSrcType());
820 }
821 
822 LogicalResult CollapseShapeOp::verify() {
823   return verifyTensorReshapeOp(*this, getSrcType(), getResultType());
824 }
825 
826 namespace {
827 /// Reshape of a splat constant can be replaced with a constant of the result
828 /// type.
829 template <typename TensorReshapeOp>
830 struct FoldReshapeWithConstant : OpRewritePattern<TensorReshapeOp> {
831   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
832   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
833                                 PatternRewriter &rewriter) const override {
834     DenseElementsAttr attr;
835     if (!matchPattern(reshapeOp.src(), m_Constant(&attr)))
836       return failure();
837     if (!attr || !attr.isSplat())
838       return failure();
839     DenseElementsAttr newAttr = DenseElementsAttr::getFromRawBuffer(
840         reshapeOp.getResultType(), attr.getRawData());
841     rewriter.replaceOpWithNewOp<arith::ConstantOp>(reshapeOp, newAttr);
842     return success();
843   }
844 };
845 
846 /// Reshape of a FromElements can be replaced with a FromElements of the result
/// type.
848 template <typename TensorReshapeOp>
849 struct FoldReshapeWithFromElements : OpRewritePattern<TensorReshapeOp> {
850   using OpRewritePattern<TensorReshapeOp>::OpRewritePattern;
851   LogicalResult matchAndRewrite(TensorReshapeOp reshapeOp,
852                                 PatternRewriter &rewriter) const override {
853     auto fromElements =
854         reshapeOp.src().template getDefiningOp<FromElementsOp>();
855     if (!fromElements)
856       return failure();
857 
858     auto shapedTy = reshapeOp.getType().template cast<ShapedType>();
859 
860     if (!shapedTy.hasStaticShape())
861       return failure();
862 
863     rewriter.replaceOpWithNewOp<FromElementsOp>(reshapeOp, reshapeOp.getType(),
864                                                 fromElements.elements());
865     return success();
866   }
867 };
868 
869 } // namespace
870 
871 void ExpandShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
872                                                 MLIRContext *context) {
873   results.add<ComposeReassociativeReshapeOps<ExpandShapeOp>,
874               ComposeExpandOfCollapseOp<ExpandShapeOp, CollapseShapeOp>,
875               FoldReshapeWithConstant<ExpandShapeOp>,
876               FoldReshapeWithFromElements<ExpandShapeOp>>(context);
877 }
878 
879 void CollapseShapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
880                                                   MLIRContext *context) {
881   results.add<ComposeReassociativeReshapeOps<CollapseShapeOp>,
882               ComposeCollapseOfExpandOp<CollapseShapeOp, ExpandShapeOp>,
883               FoldReshapeWithConstant<CollapseShapeOp>,
884               FoldReshapeWithFromElements<CollapseShapeOp>>(context);
885 }
886 
887 OpFoldResult ExpandShapeOp::fold(ArrayRef<Attribute> operands) {
888   return foldReshapeOp<ExpandShapeOp, CollapseShapeOp>(*this, operands);
889 }
890 OpFoldResult CollapseShapeOp::fold(ArrayRef<Attribute> operands) {
891   return foldReshapeOp<CollapseShapeOp, ExpandShapeOp>(*this, operands);
892 }
893 
894 //===----------------------------------------------------------------------===//
895 // ExtractSliceOp
896 //===----------------------------------------------------------------------===//
897 
898 /// An extract_slice op result type can be fully inferred from the source type
899 /// and the static representation of offsets, sizes and strides. Special
900 /// sentinels encode the dynamic case.
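///
/// For example, extracting from tensor<8x16x4xf32> with `staticSizes`
/// [4, kDynamicSize, 4] infers the result type tensor<4x?x4xf32>.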
901 RankedTensorType ExtractSliceOp::inferResultType(
902     RankedTensorType sourceRankedTensorType, ArrayRef<int64_t> staticOffsets,
903     ArrayRef<int64_t> staticSizes, ArrayRef<int64_t> staticStrides) {
  // An extract_slice op may specify only a leading subset of offsets/sizes/
  // strides, in which case we complete with offset=0, sizes from the source
  // tensor type, and strides=1.
907   unsigned rank = sourceRankedTensorType.getRank();
908   (void)rank;
909   assert(staticSizes.size() == rank &&
910          "unexpected staticSizes not equal to rank of source");
911   return RankedTensorType::get(staticSizes,
912                                sourceRankedTensorType.getElementType());
913 }
914 
915 RankedTensorType ExtractSliceOp::inferResultType(
916     RankedTensorType sourceRankedTensorType, ArrayRef<OpFoldResult> offsets,
917     ArrayRef<OpFoldResult> sizes, ArrayRef<OpFoldResult> strides) {
918   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
919   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
920   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
921                              ShapedType::kDynamicStrideOrOffset);
922   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
923                              ShapedType::kDynamicSize);
924   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
925                              ShapedType::kDynamicStrideOrOffset);
926   return ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
927                                          staticSizes, staticStrides);
928 }
929 
/// An extract_slice op result type can be fully inferred from the source type
/// and the static representation of offsets, sizes and strides. Special
/// sentinels encode the dynamic case. This variant additionally drops unit
/// dimensions (starting from the leading ones) until the inferred type
/// reaches rank `resultRank`.
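///
/// For example, with inferred sizes [1, 4, 1, 8] and `resultRank` 2, both
/// unit dimensions are dropped and the result type is tensor<4x8xf32>.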
933 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
934     unsigned resultRank, RankedTensorType sourceRankedTensorType,
935     ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
936     ArrayRef<int64_t> strides) {
937   auto inferredType =
938       inferResultType(sourceRankedTensorType, offsets, sizes, strides)
939           .cast<RankedTensorType>();
940   int rankDiff = inferredType.getRank() - resultRank;
941   if (rankDiff > 0) {
942     auto shape = inferredType.getShape();
943     llvm::SmallBitVector dimsToProject =
944         getPositionsOfShapeOne(rankDiff, shape);
945     SmallVector<int64_t> projectedShape;
946     for (unsigned pos = 0, e = shape.size(); pos < e; ++pos)
947       if (!dimsToProject.test(pos))
948         projectedShape.push_back(shape[pos]);
949     inferredType =
950         RankedTensorType::get(projectedShape, inferredType.getElementType());
951   }
952   return inferredType;
953 }
954 
955 RankedTensorType ExtractSliceOp::inferRankReducedResultType(
956     unsigned resultRank, RankedTensorType sourceRankedTensorType,
957     ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
958     ArrayRef<OpFoldResult> strides) {
959   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
960   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
961   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
962                              ShapedType::kDynamicStrideOrOffset);
963   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
964                              ShapedType::kDynamicSize);
965   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
966                              ShapedType::kDynamicStrideOrOffset);
967   return ExtractSliceOp::inferRankReducedResultType(
968       resultRank, sourceRankedTensorType, staticOffsets, staticSizes,
969       staticStrides);
970 }
971 
972 /// Build an ExtractSliceOp with mixed static and dynamic entries and custom
973 /// result type. If the type passed is nullptr, it is inferred.
974 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
975                            RankedTensorType resultType, Value source,
976                            ArrayRef<OpFoldResult> offsets,
977                            ArrayRef<OpFoldResult> sizes,
978                            ArrayRef<OpFoldResult> strides,
979                            ArrayRef<NamedAttribute> attrs) {
980   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
981   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
982   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
983                              ShapedType::kDynamicStrideOrOffset);
984   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
985                              ShapedType::kDynamicSize);
986   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
987                              ShapedType::kDynamicStrideOrOffset);
988   auto sourceRankedTensorType = source.getType().cast<RankedTensorType>();
  // Structuring the implementation this way avoids duplication between
  // builders.
990   if (!resultType) {
991     resultType =
992         ExtractSliceOp::inferResultType(sourceRankedTensorType, staticOffsets,
993                                         staticSizes, staticStrides)
994             .cast<RankedTensorType>();
995   }
996   build(b, result, resultType, source, dynamicOffsets, dynamicSizes,
997         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
998         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
999   result.addAttributes(attrs);
1000 }
1001 
1002 /// Build an ExtractSliceOp with mixed static and dynamic entries and inferred
1003 /// result type.
1004 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1005                            ArrayRef<OpFoldResult> offsets,
1006                            ArrayRef<OpFoldResult> sizes,
1007                            ArrayRef<OpFoldResult> strides,
1008                            ArrayRef<NamedAttribute> attrs) {
1009   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1010 }
1011 
1012 /// Build an ExtractSliceOp with dynamic entries and custom result type. If the
1013 /// type passed is nullptr, it is inferred.
1014 void ExtractSliceOp::build(OpBuilder &b, OperationState &result,
1015                            RankedTensorType resultType, Value source,
1016                            ValueRange offsets, ValueRange sizes,
1017                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1018   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1019       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1020   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1021       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1022   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1023       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1024   build(b, result, resultType, source, offsetValues, sizeValues, strideValues);
1025 }
1026 
1027 /// Build an ExtractSliceOp with dynamic entries and inferred result type.
1028 void ExtractSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1029                            ValueRange offsets, ValueRange sizes,
1030                            ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1031   build(b, result, RankedTensorType(), source, offsets, sizes, strides, attrs);
1032 }
1033 
1034 template <typename OpTy>
1035 static LogicalResult produceSliceErrorMsg(SliceVerificationResult result,
1036                                           OpTy op, Type expectedType) {
  auto shapedType = expectedType.cast<ShapedType>();
1038   switch (result) {
1039   case SliceVerificationResult::Success:
1040     return success();
1041   case SliceVerificationResult::RankTooLarge:
1042     return op.emitError("expected rank to be smaller or equal to ")
1043            << "the other rank. ";
1044   case SliceVerificationResult::SizeMismatch:
1045     return op.emitError("expected type to be ")
1046            << expectedType << " or a rank-reduced version. (size mismatch) ";
1047   case SliceVerificationResult::ElemTypeMismatch:
    return op.emitError("expected element type to be ")
           << shapedType.getElementType();
1050   default:
1051     llvm_unreachable("unexpected extract_slice op verification result");
1052   }
1053 }
1054 
1055 /// Verifier for ExtractSliceOp.
1056 LogicalResult ExtractSliceOp::verify() {
1057   // Verify result type against inferred type.
1058   auto expectedType = ExtractSliceOp::inferResultType(
1059       getSourceType(), getMixedOffsets(), getMixedSizes(), getMixedStrides());
1060   auto result = isRankReducedType(expectedType.cast<ShapedType>(), getType());
1061   return produceSliceErrorMsg(result, *this, expectedType);
1062 }
1063 
/// Infer the canonical type of the result of an extract_slice op. Returns the
/// rank-reduced type if its rank matches `resultRank`, and the
/// non-rank-reduced type otherwise.
1067 static RankedTensorType
1068 getCanonicalSliceResultType(unsigned resultRank, RankedTensorType sourceType,
1069                             ArrayRef<OpFoldResult> mixedOffsets,
1070                             ArrayRef<OpFoldResult> mixedSizes,
1071                             ArrayRef<OpFoldResult> mixedStrides) {
1072   auto resultType =
1073       ExtractSliceOp::inferRankReducedResultType(
1074           resultRank, sourceType, mixedOffsets, mixedSizes, mixedStrides)
1075           .cast<RankedTensorType>();
1076   if (resultType.getRank() != resultRank) {
1077     resultType = ExtractSliceOp::inferResultType(sourceType, mixedOffsets,
1078                                                  mixedSizes, mixedStrides)
1079                      .cast<RankedTensorType>();
1080   }
1081   return resultType;
1082 }
1083 
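/// Return a bit vector with one bit per slice size; a set bit means the
/// corresponding unit dimension is rank-reduced away in the result. For
/// example, extracting sizes [1, 4] from tensor<8x16xf32> into tensor<4xf32>
/// drops dimension 0.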
1084 llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
1085   ArrayRef<int64_t> resultShape = getType().getShape();
1086   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1087   llvm::SmallBitVector droppedDims(mixedSizes.size());
1088   unsigned shapePos = 0;
1089   for (const auto &size : enumerate(mixedSizes)) {
1090     Optional<int64_t> sizeVal = getConstantIntValue(size.value());
    // The dimension is preserved if the size is not 1, or if the size is 1
    // but the corresponding result dimension is also statically 1 (i.e. the
    // unit dimension survived rank reduction).
1094     if (!sizeVal || sizeVal.getValue() != 1 ||
1095         (shapePos < resultShape.size() && resultShape[shapePos] == 1)) {
1096       shapePos++;
1097       continue;
1098     }
1099     droppedDims.set(size.index());
1100   }
1101   return droppedDims;
1102 }
1103 
1104 LogicalResult ExtractSliceOp::reifyResultShapes(
1105     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1106   reifiedReturnShapes.resize(1);
1107   reifiedReturnShapes[0].reserve(getType().getRank());
1108   SmallVector<OpFoldResult> mixedSizes = getMixedSizes();
1109   llvm::SmallBitVector droppedDims = getDroppedDims();
1110   Location loc = getLoc();
1111   for (const auto &size : enumerate(mixedSizes)) {
1112     if (droppedDims.test(size.index()))
1113       continue;
1114     if (auto attr = size.value().dyn_cast<Attribute>()) {
1115       reifiedReturnShapes[0].push_back(builder.create<arith::ConstantIndexOp>(
1116           loc, attr.cast<IntegerAttr>().getInt()));
1117       continue;
1118     }
1119     reifiedReturnShapes[0].push_back(size.value().get<Value>());
1120   }
1121   return success();
1122 }
1123 
1124 namespace {
1125 /// Pattern to rewrite an extract_slice op with tensor::Cast arguments.
/// This essentially pushes the tensor.cast past its consuming slice when
1127 /// `canFoldIntoConsumerOp` is true.
1128 ///
1129 /// Example:
1130 /// ```
1131 ///   %0 = tensor.cast %V : tensor<16x16xf32> to tensor<?x?xf32>
1132 ///   %1 = tensor.extract_slice %0[0, 0][3, 4][1, 1] : tensor<?x?xf32> to
1133 ///   tensor<3x4xf32>
1134 /// ```
1135 /// is rewritten into:
1136 /// ```
///   %0 = tensor.extract_slice %V[0, 0][3, 4][1, 1] : tensor<16x16xf32> to
///   tensor<3x4xf32>
///   %1 = tensor.cast %0 : tensor<3x4xf32> to tensor<3x4xf32>
1139 /// ```
1140 class ExtractSliceOpCastFolder final : public OpRewritePattern<ExtractSliceOp> {
1141 public:
1142   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1143 
1144   LogicalResult matchAndRewrite(ExtractSliceOp sliceOp,
1145                                 PatternRewriter &rewriter) const override {
    // If any operand is a constant, bail out and let the constant-argument
    // folder (OpWithOffsetSizesAndStridesConstantArgumentFolder) kick in.
1147     if (llvm::any_of(sliceOp.getOperands(), [](Value operand) {
1148           return matchPattern(operand, matchConstantIndex());
1149         }))
1150       return failure();
1151 
1152     auto castOp = sliceOp.source().getDefiningOp<tensor::CastOp>();
1153     if (!castOp)
1154       return failure();
1155 
1156     if (!canFoldIntoConsumerOp(castOp))
1157       return failure();
1158 
1159     /// Deduce the type of the result to use for the canonicalized operation.
1160     RankedTensorType resultType = getCanonicalSliceResultType(
1161         sliceOp.getType().getRank(), sliceOp.getSourceType(),
1162         sliceOp.getMixedOffsets(), sliceOp.getMixedSizes(),
1163         sliceOp.getMixedStrides());
1164     Value newSlice = rewriter.create<ExtractSliceOp>(
1165         sliceOp.getLoc(), resultType, castOp.source(), sliceOp.offsets(),
1166         sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
1167         sliceOp.static_sizes(), sliceOp.static_strides());
1168     rewriter.replaceOpWithNewOp<tensor::CastOp>(sliceOp, sliceOp.getType(),
1169                                                 newSlice);
1170     return success();
1171   }
1172 };
1173 
/// Slice elements from `values` into `outValues`. `counts` holds the
/// row-major strides of the original values, i.e. the number of elements one
/// step along each dimension advances. The output values can be used to
/// construct a DenseElementsAttr.
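///
/// For example, for a 2x3 source with `counts` [3, 1], offsets [0, 1], sizes
/// [2, 2] and strides [1, 1], this visits both rows and copies the last two
/// elements of each row.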
1177 template <typename IterTy, typename ElemTy>
1178 static void sliceElements(IterTy values, ArrayRef<int64_t> counts,
1179                           ArrayRef<int64_t> offsets, ArrayRef<int64_t> sizes,
1180                           ArrayRef<int64_t> strides,
1181                           llvm::SmallVectorImpl<ElemTy> *outValues) {
1182   assert(offsets.size() == sizes.size());
1183   assert(offsets.size() == strides.size());
1184   if (offsets.empty())
1185     return;
1186 
1187   int64_t offset = offsets.front();
1188   int64_t size = sizes.front();
1189   int64_t stride = strides.front();
1190   if (offsets.size() == 1) {
1191     for (int64_t i = 0; i < size; ++i, offset += stride)
1192       outValues->push_back(*(values + offset));
1193 
1194     return;
1195   }
1196 
1197   for (int64_t i = 0; i < size; ++i, offset += stride) {
1198     auto begin = values + offset * counts.front();
1199     sliceElements<IterTy, ElemTy>(begin, counts.drop_front(),
1200                                   offsets.drop_front(), sizes.drop_front(),
1201                                   strides.drop_front(), outValues);
1202   }
1203 }
1204 
/// Fold arith.constant and tensor.extract_slice into arith.constant. The
/// folded operation might introduce more constant data; users can control
/// this tradeoff via the control function.
1208 class ConstantOpExtractSliceFolder final
1209     : public OpRewritePattern<ExtractSliceOp> {
1210 public:
1211   using OpRewritePattern<ExtractSliceOp>::OpRewritePattern;
1212 
1213   ConstantOpExtractSliceFolder(MLIRContext *context,
1214                                ControlConstantExtractSliceFusionFn controlFn)
1215       : OpRewritePattern<ExtractSliceOp>(context),
1216         controlFn(std::move(controlFn)) {}
1217 
1218   LogicalResult matchAndRewrite(ExtractSliceOp op,
1219                                 PatternRewriter &rewriter) const override {
1220     DenseElementsAttr attr;
1221     if (!matchPattern(op.source(), m_Constant(&attr)))
1222       return failure();
1223 
1224     // A constant splat is handled by fold().
1225     if (attr.isSplat())
1226       return failure();
1227 
1228     // Dynamic result shape is not supported.
1229     auto sourceType = op.source().getType().cast<ShapedType>();
1230     auto resultType = op.result().getType().cast<ShapedType>();
1231     if (!sourceType.hasStaticShape() || !resultType.hasStaticShape())
1232       return failure();
1233 
1234     // Customized control over the folding.
1235     if (!controlFn(op))
1236       return failure();
1237 
1238     int64_t count = sourceType.getNumElements();
1239     if (count == 0)
1240       return failure();
1241 
1242     // Check if there are any dynamic parts, which are not supported.
1243     auto offsets = extractFromI64ArrayAttr(op.static_offsets());
1244     if (llvm::is_contained(offsets, ShapedType::kDynamicStrideOrOffset))
1245       return failure();
1246     auto sizes = extractFromI64ArrayAttr(op.static_sizes());
1247     if (llvm::is_contained(sizes, ShapedType::kDynamicSize))
1248       return failure();
1249     auto strides = extractFromI64ArrayAttr(op.static_strides());
1250     if (llvm::is_contained(strides, ShapedType::kDynamicStrideOrOffset))
1251       return failure();
1252 
1253     // Compute the stride for each dimension.
1254     SmallVector<int64_t> counts;
1255     ArrayRef<int64_t> shape = sourceType.getShape();
1256     counts.reserve(shape.size());
1257     for (int64_t v : shape) {
1258       count = count / v;
1259       counts.push_back(count);
1260     }
1261 
1262     // New attribute constructed by the sliced values.
1263     DenseElementsAttr newAttr;
1264 
1265     if (auto elems = attr.dyn_cast<DenseIntElementsAttr>()) {
1266       SmallVector<APInt> outValues;
1267       outValues.reserve(sourceType.getNumElements());
1268       sliceElements<DenseElementsAttr::IntElementIterator, APInt>(
1269           elems.begin(), counts, offsets, sizes, strides, &outValues);
1270       newAttr = DenseElementsAttr::get(resultType, outValues);
1271     } else if (auto elems = attr.dyn_cast<DenseFPElementsAttr>()) {
1272       SmallVector<APFloat> outValues;
1273       outValues.reserve(sourceType.getNumElements());
1274       sliceElements<DenseElementsAttr::FloatElementIterator, APFloat>(
1275           elems.begin(), counts, offsets, sizes, strides, &outValues);
1276       newAttr = DenseElementsAttr::get(resultType, outValues);
1277     }
1278 
1279     if (newAttr) {
1280       rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, resultType, newAttr);
1281       return success();
1282     }
1283 
1284     return failure();
1285   }
1286 
1287 private:
  /// Additionally controls whether the fold happens or not. Users can impose
  /// their own heuristics in this function.
1290   ControlConstantExtractSliceFusionFn controlFn;
1291 };
1292 
1293 } // namespace
1294 
1295 void mlir::tensor::populateFoldConstantExtractSlicePatterns(
1296     RewritePatternSet &patterns,
1297     const ControlConstantExtractSliceFusionFn &controlFn) {
1298   patterns.add<ConstantOpExtractSliceFolder>(patterns.getContext(), controlFn);
1299 }
1300 
1301 /// Return the canonical type of the result of an extract_slice op.
1302 struct SliceReturnTypeCanonicalizer {
1303   RankedTensorType operator()(ExtractSliceOp op,
1304                               ArrayRef<OpFoldResult> mixedOffsets,
1305                               ArrayRef<OpFoldResult> mixedSizes,
1306                               ArrayRef<OpFoldResult> mixedStrides) {
1307     return getCanonicalSliceResultType(op.getType().getRank(),
1308                                        op.getSourceType(), mixedOffsets,
1309                                        mixedSizes, mixedStrides);
1310   }
1311 };
1312 
1313 /// A canonicalizer wrapper to replace ExtractSliceOps.
1314 struct SliceCanonicalizer {
1315   void operator()(PatternRewriter &rewriter, ExtractSliceOp op,
1316                   ExtractSliceOp newOp) {
1317     Value replacement = newOp.getResult();
1318     if (replacement.getType() != op.getType())
1319       replacement = rewriter.create<tensor::CastOp>(op.getLoc(), op.getType(),
1320                                                     replacement);
1321     rewriter.replaceOp(op, replacement);
1322   }
1323 };
1324 
1325 void ExtractSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1326                                                  MLIRContext *context) {
1327   results.add<
1328       OpWithOffsetSizesAndStridesConstantArgumentFolder<
1329           ExtractSliceOp, SliceReturnTypeCanonicalizer, SliceCanonicalizer>,
1330       ExtractSliceOpCastFolder>(context);
1331 }
1332 
/// Return success if `op` is an identity (possibly rank-reducing) slice:
/// all offsets are zero, the leading sizes match the corresponding
/// dimensions of `shapedType`, and all strides are one.
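///
/// An illustrative example (hypothetical IR): the following slice is such a
/// no-op, so folding it yields the source tensor directly.
///
/// ```mlir
///   %0 = tensor.extract_slice %t[0, 0] [4, 8] [1, 1]
///       : tensor<4x8xf32> to tensor<4x8xf32>
/// ```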
1334 static LogicalResult
1335 foldIdentityOffsetSizeAndStrideOpInterface(OffsetSizeAndStrideOpInterface op,
1336                                            ShapedType shapedType) {
1337   OpBuilder b(op.getContext());
1338   for (OpFoldResult ofr : op.getMixedOffsets())
1339     if (getConstantIntValue(ofr) != static_cast<int64_t>(0))
1340       return failure();
  // Rank-reducing no-ops only need to inspect the leading dimensions:
  // llvm::zip, which stops at the shorter range, is appropriate.
1343   auto shape = shapedType.getShape();
1344   for (auto it : llvm::zip(op.getMixedSizes(), shape))
1345     if (getConstantIntValue(std::get<0>(it)) != std::get<1>(it))
1346       return failure();
1347   for (OpFoldResult ofr : op.getMixedStrides())
1348     if (getConstantIntValue(ofr) != static_cast<int64_t>(1))
1349       return failure();
1350   return success();
1351 }
1352 
1353 /// If we have an ExtractSliceOp consuming an InsertSliceOp with the same slice,
1354 /// we can return the InsertSliceOp's source directly.
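///
/// An illustrative example (hypothetical IR):
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dest[0, 16] [16, 16] [1, 1]
///       : tensor<16x16xf32> into tensor<64x64xf32>
///   %1 = tensor.extract_slice %0[0, 16] [16, 16] [1, 1]
///       : tensor<64x64xf32> to tensor<16x16xf32>
/// ```
///
/// Here %1 folds to %src: the extracted slice is exactly the slice that was
/// just inserted, and the types match.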
1355 // TODO: This only checks the immediate producer; extend to go up the
1356 // insert/extract chain if the slices are disjoint.
1357 static Value foldExtractAfterInsertSlice(ExtractSliceOp extractOp) {
1358   auto insertOp = extractOp.source().getDefiningOp<InsertSliceOp>();
1359 
1360   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1361   if (insertOp && insertOp.source().getType() == extractOp.getType() &&
1362       insertOp.isSameAs(extractOp, isSame))
1363     return insertOp.source();
1364 
1365   return {};
1366 }
1367 
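/// Fold an extract_slice of a constant splat, an identity (no-op) slice, or
/// an extract_slice that reads back the slice just written by a matching
/// insert_slice.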
1368 OpFoldResult ExtractSliceOp::fold(ArrayRef<Attribute> operands) {
1369   if (auto splat = operands[0].dyn_cast_or_null<SplatElementsAttr>()) {
1370     auto resultType = result().getType().cast<ShapedType>();
1371     if (resultType.hasStaticShape())
1372       return splat.resizeSplat(resultType);
1373   }
1374   if (getSourceType() == getType() &&
1375       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1376     return this->source();
1377   if (Value slice = foldExtractAfterInsertSlice(*this))
1378     return slice;
1379 
1380   return OpFoldResult();
1381 }
1382 
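/// Create a rank-reducing ExtractSliceOp that reads all of `tensor` (zero
/// offsets, full sizes, unit strides) with the given `targetType`.
///
/// An illustrative example (hypothetical types): for a `tensor` of type
/// tensor<4x1x8xf32> and a `targetType` of tensor<4x8xf32>, this creates:
///
/// ```mlir
///   %0 = tensor.extract_slice %t[0, 0, 0] [4, 1, 8] [1, 1, 1]
///       : tensor<4x1x8xf32> to tensor<4x8xf32>
/// ```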
1383 Value mlir::tensor::createCanonicalRankReducingExtractSliceOp(
1384     OpBuilder &b, Location loc, Value tensor, RankedTensorType targetType) {
1385   auto rankedTensorType = tensor.getType().cast<RankedTensorType>();
1386   unsigned rank = rankedTensorType.getRank();
1387   auto shape = rankedTensorType.getShape();
1388   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1389   SmallVector<OpFoldResult> sizes;
1390   for (unsigned i = 0, e = rank; i < e; ++i) {
1391     OpFoldResult dim;
1392     if (rankedTensorType.isDynamicDim(i))
1393       dim = b.createOrFold<tensor::DimOp>(
1394           loc, tensor, b.create<arith::ConstantIndexOp>(loc, i));
1395     else
1396       dim = b.getIndexAttr(shape[i]);
1397     sizes.push_back(dim);
1398   }
1399   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1400   return b.createOrFold<tensor::ExtractSliceOp>(loc, targetType, tensor,
1401                                                 offsets, sizes, strides);
1402 }
1403 
1404 //===----------------------------------------------------------------------===//
1405 // InsertSliceOp
1406 //===----------------------------------------------------------------------===//
1407 
// Build an InsertSliceOp with mixed static and dynamic entries.
1409 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1410                           Value dest, ArrayRef<OpFoldResult> offsets,
1411                           ArrayRef<OpFoldResult> sizes,
1412                           ArrayRef<OpFoldResult> strides,
1413                           ArrayRef<NamedAttribute> attrs) {
1414   SmallVector<int64_t> staticOffsets, staticSizes, staticStrides;
1415   SmallVector<Value> dynamicOffsets, dynamicSizes, dynamicStrides;
1416   dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets,
1417                              ShapedType::kDynamicStrideOrOffset);
1418   dispatchIndexOpFoldResults(sizes, dynamicSizes, staticSizes,
1419                              ShapedType::kDynamicSize);
1420   dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides,
1421                              ShapedType::kDynamicStrideOrOffset);
1422   build(b, result, dest.getType(), source, dest, dynamicOffsets, dynamicSizes,
1423         dynamicStrides, b.getI64ArrayAttr(staticOffsets),
1424         b.getI64ArrayAttr(staticSizes), b.getI64ArrayAttr(staticStrides));
1425   result.addAttributes(attrs);
1426 }
1427 
// Build an InsertSliceOp with dynamic entries.
1429 void InsertSliceOp::build(OpBuilder &b, OperationState &result, Value source,
1430                           Value dest, ValueRange offsets, ValueRange sizes,
1431                           ValueRange strides, ArrayRef<NamedAttribute> attrs) {
1432   SmallVector<OpFoldResult> offsetValues = llvm::to_vector<4>(
1433       llvm::map_range(offsets, [](Value v) -> OpFoldResult { return v; }));
1434   SmallVector<OpFoldResult> sizeValues = llvm::to_vector<4>(
1435       llvm::map_range(sizes, [](Value v) -> OpFoldResult { return v; }));
1436   SmallVector<OpFoldResult> strideValues = llvm::to_vector<4>(
1437       llvm::map_range(strides, [](Value v) -> OpFoldResult { return v; }));
1438   build(b, result, source, dest, offsetValues, sizeValues, strideValues);
1439 }
1440 
1441 static SliceVerificationResult
1442 verifyInsertSliceOp(ShapedType srcType, ShapedType dstType,
1443                     ArrayAttr staticOffsets, ArrayAttr staticSizes,
1444                     ArrayAttr staticStrides,
1445                     ShapedType *expectedType = nullptr) {
  // insert_slice is the inverse of extract_slice, so use the same type
  // inference.
1447   auto expected = ExtractSliceOp::inferRankReducedResultType(
1448                       srcType.getRank(), dstType.cast<RankedTensorType>(),
1449                       extractFromI64ArrayAttr(staticOffsets),
1450                       extractFromI64ArrayAttr(staticSizes),
1451                       extractFromI64ArrayAttr(staticStrides))
1452                       .cast<ShapedType>();
1453   if (expectedType)
1454     *expectedType = expected;
1455   return isRankReducedType(expected, srcType);
1456 }
1457 
1458 /// Verifier for InsertSliceOp.
1459 LogicalResult InsertSliceOp::verify() {
1460   ShapedType expectedType;
1461   auto result =
1462       verifyInsertSliceOp(getSourceType(), getType(), static_offsets(),
1463                           static_sizes(), static_strides(), &expectedType);
1464   return produceSliceErrorMsg(result, *this, expectedType);
1465 }
1466 
/// If we have two consecutive InsertSliceOps writing to the same slice, we
/// can mutate the second InsertSliceOp's destination to the first one's
/// destination.
1469 ///
1470 /// Example:
1471 ///
1472 /// ```mlir
1473 ///   %0 = tensor.insert_slice %slice0 into %input[0, 0] [64, 64] [1, 1]
1474 ///   %1 = tensor.insert_slice %slice1 into %0[0, 0] [64, 64] [1, 1]
1475 /// ```
1476 ///
1477 /// folds into:
1478 ///
1479 /// ```mlir
1480 ///   %1 = tensor.insert_slice %slice1 into %input[0, 0] [64, 64] [1, 1]
1481 /// ```
1482 static LogicalResult foldInsertAfterInsertSlice(InsertSliceOp insertOp) {
1483   auto prevInsertOp = insertOp.dest().getDefiningOp<InsertSliceOp>();
1484 
1485   auto isSame = [](OpFoldResult a, OpFoldResult b) { return a == b; };
1486   if (!prevInsertOp ||
1487       prevInsertOp.source().getType() != insertOp.source().getType() ||
1488       !prevInsertOp.isSameAs(insertOp, isSame))
1489     return failure();
1490 
1491   insertOp.destMutable().assign(prevInsertOp.dest());
1492   return success();
1493 }
1494 
1495 OpFoldResult InsertSliceOp::fold(ArrayRef<Attribute>) {
1496   if (getSourceType().hasStaticShape() && getType().hasStaticShape() &&
1497       getSourceType() == getType() &&
1498       succeeded(foldIdentityOffsetSizeAndStrideOpInterface(*this, getType())))
1499     return this->source();
1500   if (succeeded(foldInsertAfterInsertSlice(*this)))
1501     return getResult();
1502   return OpFoldResult();
1503 }
1504 
1505 LogicalResult InsertSliceOp::reifyResultShapes(
1506     OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
1507   reifiedReturnShapes.resize(1, SmallVector<Value>(getType().getRank()));
1508   for (auto dim : llvm::seq<int64_t>(0, getType().getRank())) {
1509     reifiedReturnShapes[0][dim] =
1510         builder.createOrFold<tensor::DimOp>(getLoc(), dest(), dim);
1511   }
1512   return success();
1513 }
1514 
1515 namespace {
/// Pattern to rewrite an insert_slice op with constant arguments.
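///
/// An illustrative example (hypothetical IR):
///
/// ```mlir
///   %c0 = arith.constant 0 : index
///   %0 = tensor.insert_slice %src into %dest[%c0, %c0] [8, 8] [1, 1]
///       : tensor<8x8xf32> into tensor<16x16xf32>
/// ```
///
/// folds the constant offsets into the static attributes:
///
/// ```mlir
///   %0 = tensor.insert_slice %src into %dest[0, 0] [8, 8] [1, 1]
///       : tensor<8x8xf32> into tensor<16x16xf32>
/// ```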
1517 class InsertSliceOpConstantArgumentFolder final
1518     : public OpRewritePattern<InsertSliceOp> {
1519 public:
1520   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1521 
1522   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1523                                 PatternRewriter &rewriter) const override {
    // No constant operand, just return failure.
1525     if (llvm::none_of(insertSliceOp.getOperands(), [](Value operand) {
1526           return matchPattern(operand, matchConstantIndex());
1527         }))
1528       return failure();
1529 
1530     // At least one of offsets/sizes/strides is a new constant.
    // Form the new list of operands and constant attributes from the
    // existing ones.
1533     SmallVector<OpFoldResult> mixedOffsets(insertSliceOp.getMixedOffsets());
1534     SmallVector<OpFoldResult> mixedSizes(insertSliceOp.getMixedSizes());
1535     SmallVector<OpFoldResult> mixedStrides(insertSliceOp.getMixedStrides());
1536     canonicalizeSubViewPart(mixedOffsets, ShapedType::isDynamicStrideOrOffset);
1537     canonicalizeSubViewPart(mixedSizes, ShapedType::isDynamic);
1538     canonicalizeSubViewPart(mixedStrides, ShapedType::isDynamicStrideOrOffset);
1539 
1540     // Create the new op in canonical form.
1541     auto sourceType = ExtractSliceOp::inferRankReducedResultType(
1542         insertSliceOp.getSourceType().getRank(), insertSliceOp.getType(),
1543         mixedOffsets, mixedSizes, mixedStrides);
1544     Value toInsert = insertSliceOp.source();
1545     if (sourceType != insertSliceOp.getSourceType())
1546       toInsert = rewriter.create<tensor::CastOp>(insertSliceOp.getLoc(),
1547                                                  sourceType, toInsert);
1548     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1549         insertSliceOp, toInsert, insertSliceOp.dest(), mixedOffsets, mixedSizes,
1550         mixedStrides);
1551     return success();
1552   }
1553 };
1554 
/// Fold tensor.cast ops into insert_slice operations. If the source or
/// destination tensor is produced by a tensor.cast that removes static type
/// information, the cast is folded into the insert_slice operation. E.g.:
1558 ///
1559 /// ```mlir
1560 ///   %1 = tensor.cast %0 : tensor<8x16xf32> to tensor<?x?xf32>
1561 ///   %2 = tensor.insert_slice %1 into ... : tensor<?x?xf32> into ...
1562 /// ```
1563 ///
1564 /// folds into:
1565 ///
1566 /// ```mlir
1567 ///   %2 = tensor.insert_slice %0 into ... : tensor<8x16xf32> into ...
1568 /// ```
1569 ///
/// Note: When folding a cast on the destination tensor, the result of the
/// insert_slice operation is cast back so that the type of the result does
/// not change.
1573 struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertSliceOp> {
1574   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1575 
1576   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1577                                 PatternRewriter &rewriter) const override {
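    // If any offset/size/stride operand is a constant index, bail out and let
    // InsertSliceOpConstantArgumentFolder fold it into the static attributes
    // first.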
1578     if (llvm::any_of(insertSliceOp.getOperands(), [](Value operand) {
1579           return matchPattern(operand, matchConstantIndex());
1580         }))
1581       return failure();
1582 
1583     auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
1584       auto castOp = v.getDefiningOp<tensor::CastOp>();
1585       if (!castOp || !canFoldIntoConsumerOp(castOp))
1586         return llvm::None;
1587       return castOp.source();
1588     };
1589     Optional<Value> sourceCastSource =
1590         getSourceOfCastOp(insertSliceOp.source());
1591     Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.dest());
1592     if (!sourceCastSource && !destCastSource)
1593       return failure();
1594 
1595     auto src = (sourceCastSource ? *sourceCastSource : insertSliceOp.source());
1596     auto dst = (destCastSource ? *destCastSource : insertSliceOp.dest());
1597 
1598     auto srcType = src.getType().cast<ShapedType>();
1599     auto dstType = dst.getType().cast<ShapedType>();
1600     if (verifyInsertSliceOp(srcType, dstType, insertSliceOp.static_offsets(),
1601                             insertSliceOp.static_sizes(),
1602                             insertSliceOp.static_strides()) !=
1603         SliceVerificationResult::Success)
1604       return failure();
1605 
1606     Value replacement = rewriter.create<InsertSliceOp>(
1607         insertSliceOp.getLoc(), src, dst, insertSliceOp.getMixedOffsets(),
1608         insertSliceOp.getMixedSizes(), insertSliceOp.getMixedStrides());
1609 
1610     if (replacement.getType() != insertSliceOp.getType()) {
1611       replacement = rewriter.create<tensor::CastOp>(
1612           insertSliceOp.getLoc(), insertSliceOp.getType(), replacement);
1613     }
1614     rewriter.replaceOp(insertSliceOp, replacement);
1615     return success();
1616   }
1617 };
1618 
/// If additional static type information can be deduced from an insert_slice's
1620 /// size operands, insert an explicit cast of the op's source operand. This
1621 /// enables other canonicalization patterns that are matching for tensor_cast
1622 /// ops such as `ForOpTensorCastFolder` in SCF.
1623 ///
1624 /// Example:
1625 ///
1626 /// ```mlir
1627 ///   %r = tensor.insert_slice %0 into %1[...] [64, 64] [1, 1]
1628 ///       : tensor<?x?xf32> into ...
1629 /// ```
1630 ///
1631 /// folds into:
1632 ///
1633 /// ```mlir
1634 ///   %tmp = tensor.cast %0 : tensor<?x?xf32> to tensor<64x64xf32>
1635 ///   %r = tensor.insert_slice %tmp into %1[...] [64, 64] [1, 1]
1636 ///       : tensor<64x64xf32> into ...
1637 /// ```
1638 struct InsertSliceOpSourceCastInserter final
1639     : public OpRewritePattern<InsertSliceOp> {
1640   using OpRewritePattern<InsertSliceOp>::OpRewritePattern;
1641 
1642   LogicalResult matchAndRewrite(InsertSliceOp insertSliceOp,
1643                                 PatternRewriter &rewriter) const override {
1644     RankedTensorType srcType = insertSliceOp.getSourceType();
1645     if (srcType.getRank() != insertSliceOp.getType().getRank())
1646       return failure();
1647     SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
1648                                      srcType.getShape().end());
1649     for (int64_t i = 0; i < srcType.getRank(); ++i) {
1650       if (Optional<int64_t> constInt =
1651               getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
1652         newSrcShape[i] = *constInt;
1653     }
1654 
1655     RankedTensorType newSrcType =
1656         RankedTensorType::get(newSrcShape, srcType.getElementType());
1657     if (srcType == newSrcType ||
1658         !preservesStaticInformation(srcType, newSrcType) ||
1659         !tensor::CastOp::areCastCompatible(srcType, newSrcType))
1660       return failure();
1661 
1662     // newSrcType is:
1663     //   1) Different from srcType.
1664     //   2) "More static" than srcType.
1665     //   3) Cast-compatible with srcType.
1666     // Insert the cast.
1667     Value cast = rewriter.create<tensor::CastOp>(
1668         insertSliceOp.getLoc(), newSrcType, insertSliceOp.source());
1669     rewriter.replaceOpWithNewOp<InsertSliceOp>(
1670         insertSliceOp, cast, insertSliceOp.dest(),
1671         insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
1672         insertSliceOp.getMixedStrides());
1673     return success();
1674   }
1675 };
1676 } // namespace
1677 
1678 void InsertSliceOp::getCanonicalizationPatterns(RewritePatternSet &results,
1679                                                 MLIRContext *context) {
1680   results.add<InsertSliceOpConstantArgumentFolder, InsertSliceOpCastFolder,
1681               InsertSliceOpSourceCastInserter>(context);
1682 }
1683 
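/// Create a rank-reducing InsertSliceOp that writes all of `tensor` into
/// `dest` (zero offsets, sizes matching `dest`'s shape, unit strides).
///
/// An illustrative example (hypothetical types): for a `tensor` of type
/// tensor<4x8xf32> and a `dest` of type tensor<4x1x8xf32>, this creates:
///
/// ```mlir
///   %0 = tensor.insert_slice %t into %d[0, 0, 0] [4, 1, 8] [1, 1, 1]
///       : tensor<4x8xf32> into tensor<4x1x8xf32>
/// ```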
1684 Value mlir::tensor::createCanonicalRankReducingInsertSliceOp(OpBuilder &b,
1685                                                              Location loc,
1686                                                              Value tensor,
1687                                                              Value dest) {
1688   auto rankedTensorType = dest.getType().cast<RankedTensorType>();
1689   unsigned rank = rankedTensorType.getRank();
1690   auto shape = rankedTensorType.getShape();
1691   SmallVector<OpFoldResult> offsets(rank, b.getIndexAttr(0));
1692   SmallVector<OpFoldResult> sizes;
1693   for (unsigned i = 0, e = rank; i < e; ++i) {
1694     OpFoldResult dim;
1695     if (rankedTensorType.isDynamicDim(i))
1696       dim = b.createOrFold<tensor::DimOp>(
1697           loc, dest, b.create<arith::ConstantIndexOp>(loc, i));
1698     else
1699       dim = b.getIndexAttr(shape[i]);
1700     sizes.push_back(dim);
1701   }
1702   SmallVector<OpFoldResult> strides(rank, b.getIndexAttr(1));
1703   return b.createOrFold<tensor::InsertSliceOp>(loc, tensor, dest, offsets,
1704                                                sizes, strides);
1705 }
1706 
1707 //===----------------------------------------------------------------------===//
1708 // PadOp
1709 //===----------------------------------------------------------------------===//
1710 
1711 // TODO: Replace custom<InferType> directive with AllTypesMatch as soon as it
1712 // supports optional types.
1713 void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
1714                     Type typeToInfer, Type typeToInferFrom) {}
1715 
1716 ParseResult parseInferType(OpAsmParser &parser,
1717                            Optional<OpAsmParser::UnresolvedOperand> optOperand,
1718                            Type &typeToInfer, Type typeToInferFrom) {
1719   if (optOperand)
1720     typeToInfer = typeToInferFrom;
1721   return success();
1722 }
1723 
1724 LogicalResult PadOp::verify() {
1725   auto sourceType = source().getType().cast<RankedTensorType>();
1726   auto resultType = result().getType().cast<RankedTensorType>();
1727   auto expectedType =
1728       PadOp::inferResultType(sourceType, extractFromI64ArrayAttr(static_low()),
1729                              extractFromI64ArrayAttr(static_high()));
1730   for (int i = 0, e = sourceType.getRank(); i < e; ++i) {
1731     if (resultType.getDimSize(i) == expectedType.getDimSize(i))
1732       continue;
1733     if (expectedType.isDynamicDim(i))
1734       continue;
1735     return emitError("specified type ")
1736            << resultType << " does not match the inferred type "
1737            << expectedType;
1738   }
1739 
1740   return success();
1741 }
1742 
1743 LogicalResult PadOp::verifyRegions() {
1744   auto &region = getRegion();
1745   unsigned rank = result().getType().cast<RankedTensorType>().getRank();
1746   Block &block = region.front();
1747   if (block.getNumArguments() != rank)
1748     return emitError("expected the block to have ") << rank << " arguments";
1749 
1750   // Note: the number and type of yield values are checked in the YieldOp.
1751   for (const auto &en : llvm::enumerate(block.getArgumentTypes())) {
1752     if (!en.value().isIndex())
1753       return emitOpError("expected block argument ")
1754              << (en.index() + 1) << " to be an index";
1755   }
1756 
1757   // Ensure that the region yields an element of the right type.
1758   auto yieldOp = llvm::cast<YieldOp>(block.getTerminator());
1759   if (yieldOp.value().getType() !=
1760       getType().cast<ShapedType>().getElementType())
1761     return emitOpError("expected yield type to match shape element type");
1762 
1763   return success();
1764 }
1765 
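/// Infer the result type of PadOp from the source type and the static
/// low/high padding amounts. A dimension with a dynamic source size or a
/// dynamic padding amount becomes dynamic (or takes the corresponding
/// `resultShape` entry when one is provided).
///
/// An illustrative example (hypothetical types): for a sourceType of
/// tensor<4x?xf32>, staticLow of [1, 0], staticHigh of [1, 2], and an empty
/// resultShape, the inferred type is tensor<6x?xf32>: dimension 0 becomes
/// 4 + 1 + 1 = 6 and dimension 1 stays dynamic.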
1766 RankedTensorType PadOp::inferResultType(RankedTensorType sourceType,
1767                                         ArrayRef<int64_t> staticLow,
1768                                         ArrayRef<int64_t> staticHigh,
1769                                         ArrayRef<int64_t> resultShape) {
1770   unsigned rank = sourceType.getRank();
1771   assert(staticLow.size() == rank && "unexpected staticLow size mismatch");
1772   assert(staticHigh.size() == rank && "unexpected staticHigh size mismatch");
1773   assert((resultShape.empty() || resultShape.size() == rank) &&
1774          "unexpected resultShape size mismatch");
1775 
1776   SmallVector<int64_t, 4> inferredShape;
1777   for (auto i : llvm::seq<unsigned>(0, rank)) {
1778     if (sourceType.isDynamicDim(i) ||
1779         staticLow[i] == ShapedType::kDynamicSize ||
1780         staticHigh[i] == ShapedType::kDynamicSize) {
1781       inferredShape.push_back(resultShape.empty() ? ShapedType::kDynamicSize
1782                                                   : resultShape[i]);
1783     } else {
1784       int64_t size = sourceType.getDimSize(i) + staticLow[i] + staticHigh[i];
1785       assert((resultShape.empty() || size == resultShape[i] ||
1786               resultShape[i] == ShapedType::kDynamicSize) &&
1787              "mismatch between inferred shape and result shape");
1788       inferredShape.push_back(size);
1789     }
1790   }
1791 
1792   return RankedTensorType::get(inferredShape, sourceType.getElementType());
1793 }
1794 
1795 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1796                   ArrayRef<int64_t> staticLow, ArrayRef<int64_t> staticHigh,
1797                   ValueRange low, ValueRange high, bool nofold,
1798                   ArrayRef<NamedAttribute> attrs) {
1799   auto sourceType = source.getType().cast<RankedTensorType>();
1800   auto resultType = inferResultType(sourceType, staticLow, staticHigh);
1801   build(b, result, resultType, source, low, high, b.getI64ArrayAttr(staticLow),
1802         b.getI64ArrayAttr(staticHigh), nofold ? b.getUnitAttr() : UnitAttr());
1803   result.addAttributes(attrs);
1804 }
1805 
1806 void PadOp::build(OpBuilder &b, OperationState &result, Value source,
1807                   ValueRange low, ValueRange high, bool nofold,
1808                   ArrayRef<NamedAttribute> attrs) {
1809   auto sourceType = source.getType().cast<RankedTensorType>();
1810   unsigned rank = sourceType.getRank();
1811   SmallVector<int64_t, 4> staticVector(rank, ShapedType::kDynamicSize);
1812   build(b, result, source, staticVector, staticVector, low, high, nofold,
1813         attrs);
1814 }
1815 
1816 void PadOp::build(OpBuilder &b, OperationState &result, Type resultType,
1817                   Value source, ArrayRef<OpFoldResult> low,
1818                   ArrayRef<OpFoldResult> high, bool nofold,
1819                   ArrayRef<NamedAttribute> attrs) {
1820   assert(resultType.isa<RankedTensorType>());
1821   auto sourceType = source.getType().cast<RankedTensorType>();
1822   SmallVector<Value, 4> dynamicLow, dynamicHigh;
1823   SmallVector<int64_t, 4> staticLow, staticHigh;
  // staticLow and staticHigh carry the full padding configuration: each entry
  // of `low` and `high` grows staticLow/staticHigh by one value. If an entry
  // is dynamic (i.e. not a constant), dynamicLow/dynamicHigh grow by one
  // value as well.
1828   dispatchIndexOpFoldResults(low, dynamicLow, staticLow,
1829                              ShapedType::kDynamicSize);
1830   dispatchIndexOpFoldResults(high, dynamicHigh, staticHigh,
1831                              ShapedType::kDynamicSize);
1832   if (!resultType) {
1833     resultType = PadOp::inferResultType(sourceType, staticLow, staticHigh);
1834   }
1835   build(b, result, resultType, source, dynamicLow, dynamicHigh,
1836         b.getI64ArrayAttr(staticLow), b.getI64ArrayAttr(staticHigh),
1837         nofold ? b.getUnitAttr() : UnitAttr());
1838   result.addAttributes(attrs);
1839 }
1840 
1841 llvm::SmallBitVector PadOp::getPaddedDims() {
1842   llvm::SmallBitVector paddedDims(getSourceType().getRank());
1843   auto extractPaddedDims = [&](ArrayRef<OpFoldResult> paddingWidths) {
1844     for (const auto &en : enumerate(paddingWidths))
1845       if (getConstantIntValue(en.value()) != static_cast<int64_t>(0))
1846         paddedDims.set(en.index());
1847   };
1848   extractPaddedDims(getMixedLowPad());
1849   extractPaddedDims(getMixedHighPad());
1850   return paddedDims;
1851 }
1852 
1853 namespace {
// Folds tensor.pad when all padding amounts are statically zero, unless the
// nofold attribute requests otherwise.
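//
// An illustrative example (hypothetical IR):
//
// ```mlir
//   %0 = tensor.pad %t low[0, 0] high[0, 0] { ...
//     } : tensor<?x8xf32> to tensor<8x8xf32>
// ```
//
// folds into:
//
// ```mlir
//   %0 = tensor.cast %t : tensor<?x8xf32> to tensor<8x8xf32>
// ```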
1856 struct FoldStaticZeroPadding : public OpRewritePattern<PadOp> {
1857   using OpRewritePattern<PadOp>::OpRewritePattern;
1858 
1859   LogicalResult matchAndRewrite(PadOp padTensorOp,
1860                                 PatternRewriter &rewriter) const override {
1861     if (!padTensorOp.hasZeroLowPad() || !padTensorOp.hasZeroHighPad())
1862       return failure();
1863     if (padTensorOp.nofold())
1864       return failure();
1865     rewriter.replaceOpWithNewOp<tensor::CastOp>(
1866         padTensorOp, padTensorOp.result().getType(), padTensorOp.source());
1867     return success();
1868   }
1869 };
1870 
1871 // Fold CastOp into PadOp when adding static information.
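//
// An illustrative example (hypothetical IR):
//
// ```mlir
//   %0 = tensor.cast %t : tensor<8x16xf32> to tensor<?x?xf32>
//   %1 = tensor.pad %0 low[0, 0] high[1, 1] { ...
//     } : tensor<?x?xf32> to tensor<?x?xf32>
// ```
//
// folds into:
//
// ```mlir
//   %0 = tensor.pad %t low[0, 0] high[1, 1] { ...
//     } : tensor<8x16xf32> to tensor<9x17xf32>
//   %1 = tensor.cast %0 : tensor<9x17xf32> to tensor<?x?xf32>
// ```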
1872 struct FoldSourceTensorCast : public OpRewritePattern<PadOp> {
1873   using OpRewritePattern<PadOp>::OpRewritePattern;
1874 
1875   LogicalResult matchAndRewrite(PadOp padTensorOp,
1876                                 PatternRewriter &rewriter) const override {
1877     auto castOp = padTensorOp.source().getDefiningOp<tensor::CastOp>();
1878     if (!tensor::canFoldIntoConsumerOp(castOp))
1879       return failure();
1880 
1881     auto newResultType = PadOp::inferResultType(
1882         castOp.source().getType().cast<RankedTensorType>(),
1883         extractFromI64ArrayAttr(padTensorOp.static_low()),
1884         extractFromI64ArrayAttr(padTensorOp.static_high()),
1885         padTensorOp.getResultType().getShape());
1886 
1887     if (newResultType == padTensorOp.getResultType()) {
1888       rewriter.updateRootInPlace(padTensorOp, [&]() {
1889         padTensorOp.sourceMutable().assign(castOp.source());
1890       });
1891     } else {
1892       auto newOp = rewriter.create<PadOp>(
1893           padTensorOp->getLoc(), newResultType, padTensorOp.source(),
1894           padTensorOp.low(), padTensorOp.high(), padTensorOp.static_low(),
1895           padTensorOp.static_high(), padTensorOp.nofold());
1896       BlockAndValueMapping mapper;
1897       padTensorOp.getRegion().cloneInto(&newOp.getRegion(), mapper);
1898 
1899       rewriter.replaceOpWithNewOp<tensor::CastOp>(
1900           padTensorOp, padTensorOp.getResultType(), newOp);
1901     }
1902     return success();
1903   }
1904 };
1905 
// Fold a CastOp consuming the result of a PadOp back into the PadOp, if
// doing so adds static information.
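//
// An illustrative example (hypothetical IR):
//
// ```mlir
//   %0 = tensor.pad %t low[0, 0] high[1, 1] { ...
//     } : tensor<?x16xf32> to tensor<?x17xf32>
//   %1 = tensor.cast %0 : tensor<?x17xf32> to tensor<9x17xf32>
// ```
//
// folds into:
//
// ```mlir
//   %1 = tensor.pad %t low[0, 0] high[1, 1] { ...
//     } : tensor<?x16xf32> to tensor<9x17xf32>
// ```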
1908 struct FoldTargetTensorCast : public OpRewritePattern<PadOp> {
1909   using OpRewritePattern<PadOp>::OpRewritePattern;
1910 
1911   LogicalResult matchAndRewrite(PadOp padTensorOp,
1912                                 PatternRewriter &rewriter) const override {
1913     if (!padTensorOp.result().hasOneUse())
1914       return failure();
1915     auto tensorCastOp =
1916         dyn_cast<tensor::CastOp>(*padTensorOp->getUsers().begin());
1917     if (!tensorCastOp)
1918       return failure();
1919     if (!tensor::preservesStaticInformation(padTensorOp.result().getType(),
1920                                             tensorCastOp.dest().getType()))
1921       return failure();
1922 
1923     auto replacementOp = rewriter.create<PadOp>(
1924         padTensorOp.getLoc(), tensorCastOp.dest().getType(),
1925         padTensorOp.source(), padTensorOp.low(), padTensorOp.high(),
1926         padTensorOp.static_low(), padTensorOp.static_high(),
1927         padTensorOp.nofold());
1928     replacementOp.region().takeBody(padTensorOp.region());
1929 
1930     rewriter.replaceOp(padTensorOp, replacementOp.result());
1931     rewriter.replaceOp(tensorCastOp, replacementOp.result());
1932     return success();
1933   }
1934 };
1935 
1936 /// Fold chains of tensor::ExtractSliceOp, tensor::PadOp pairs that pad
1937 /// different dimensions. The pattern applies if the following preconditions
1938 /// hold:
1939 ///   1) the tensor::ExtractSliceOps are not rank-reducing,
1940 ///   2) the tensor::ExtractSliceOps have only unit-strides,
1941 ///   3) the tensor::PadOps perform only high-padding,
1942 ///   4) the tensor::PadOps have the same constant padding value,
1943 ///   5) the tensor::PadOps do not have common padding dimensions,
1944 ///   6) one tensor::ExtractSliceOp, tensor::PadOp pair has zero-padding and
///      zero-offset for every dimension, and
1946 ///   7) the tensor::ExtractSliceOp sizes match the source tensor sizes for the
1947 ///      padded source dimensions.
1948 ///
1949 /// Example:
1950 ///
1951 /// ```mlir
1952 ///   %0 = tensor.extract_slice %input[16, 0] [%sz0, 64] [1, 1]
1953 ///       : tensor<64x64xf32> to tensor<?x64xf32>
1954 ///   %1 = tensor.pad %0 low[0, 0] high[%pw0, 0] { ...
1955 ///     } : tensor<?x64xf32> to tensor<8x64xf32>
1956 ///   %2 = tensor.extract_slice %1[0, 4] [8, %sz1] [1, 1]
1957 ///        : tensor<8x64xf32> to tensor<8x?xf32>
1958 ///   %res = tensor.pad %2 nofold low[0, 0] high[0, %pw1] { ...
1959 ///     } : tensor<8x?xf32> to tensor<8x4xf32>
1960 /// ```
1961 ///
1962 /// folds into:
1963 ///
1964 /// ```mlir
1965 ///   %0 = tensor.extract_slice %input[16, 4] [%sz0, %sz1] [1, 1]
1966 ///        : tensor<64x64xf32> to tensor<?x?xf32>
1967 ///   %res = tensor.pad %0 nofold low[0, 0] high[%pw0, %pw1] { ...
1968 ///     } : tensor<?x?xf32> to tensor<8x4xf32>
1969 /// ```
1970 struct FoldOrthogonalPaddings : public OpRewritePattern<PadOp> {
1971   using OpRewritePattern<PadOp>::OpRewritePattern;
1972 
1973   LogicalResult matchAndRewrite(PadOp padOp,
1974                                 PatternRewriter &rewriter) const override {
1975     auto innerSliceOp = padOp.source().getDefiningOp<ExtractSliceOp>();
1976     if (!innerSliceOp)
1977       return failure();
1978     auto outerPadOp = innerSliceOp.source().getDefiningOp<PadOp>();
1979     if (!outerPadOp || outerPadOp.nofold())
1980       return failure();
1981     auto outerSliceOp = outerPadOp.source().getDefiningOp<ExtractSliceOp>();
1982     if (!outerSliceOp)
1983       return failure();
1984 
1985     // 1) Fail if the chain is rank-reducing.
1986     int64_t rank = padOp.getSourceType().getRank();
1987     if (outerSliceOp.getSourceType().getRank() != rank) {
1988       return rewriter.notifyMatchFailure(padOp,
1989                                          "cannot fold rank-reducing chain");
1990     }
1991 
1992     // 2) Fail if the tensor::ExtractSliceOps have non-unit strides.
1993     if (!innerSliceOp.hasUnitStride() || !outerSliceOp.hasUnitStride()) {
1994       return rewriter.notifyMatchFailure(
1995           padOp, "cannot fold non-unit stride ExtractSliceOps");
1996     }
1997 
1998     // 3) Fail if the tensor::PadOps have non-zero low padding.
1999     if (!padOp.hasZeroLowPad() || !outerPadOp.hasZeroLowPad()) {
2000       return rewriter.notifyMatchFailure(padOp,
2001                                          "cannot fold PadOps with low padding");
2002     }
2003 
2004     // 4) Fail if the tensor::PadOps padding values do not match.
2005     Attribute innerAttr, outerAttr;
2006     Value innerValue = padOp.getConstantPaddingValue();
2007     Value outerValue = outerPadOp.getConstantPaddingValue();
2008     if (!innerValue || !outerValue ||
2009         !matchPattern(innerValue, m_Constant(&innerAttr)) ||
2010         !matchPattern(outerValue, m_Constant(&outerAttr)) ||
2011         innerAttr != outerAttr) {
2012       return rewriter.notifyMatchFailure(
2013           padOp, "cannot fold PadOps with different padding values");
2014     }
2015 
2016     // 5) Fail if a dimension is padded by both tensor::PadOps.
2017     llvm::SmallBitVector innerDims = padOp.getPaddedDims();
2018     llvm::SmallBitVector outerDims = outerPadOp.getPaddedDims();
2019     if (innerDims.anyCommon(outerDims)) {
2020       return rewriter.notifyMatchFailure(
2021           padOp, "cannot fold PadOps with common padding dimensions");
2022     }
2023 
2024     // 6) Combine the offsets of the two tensor::ExtractSliceOps. Find the
2025     // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
    // for every dimension, and use the offset of the other pair. Fail if no
2027     // zero-offset and zero-padding tensor::ExtractSliceOp, tensor::PadOp pair
2028     // exists.
2029     SmallVector<OpFoldResult> newOffsets(rank, rewriter.getIndexAttr(0));
2030     for (auto &en : enumerate(newOffsets)) {
2031       OpFoldResult innerOffset = innerSliceOp.getMixedOffsets()[en.index()];
2032       OpFoldResult outerOffset = outerSliceOp.getMixedOffsets()[en.index()];
2033       if (!innerDims.test(en.index()) &&
2034           (getConstantIntValue(innerOffset) == static_cast<int64_t>(0))) {
2035         en.value() = outerOffset;
2036         continue;
2037       }
2038       if (!outerDims.test(en.index()) &&
2039           (getConstantIntValue(outerOffset) == static_cast<int64_t>(0))) {
2040         en.value() = innerOffset;
2041         continue;
2042       }
2043       return rewriter.notifyMatchFailure(
2044           padOp, "cannot find zero-offset and zero-padding pair");
2045     }
2046 
2047     // 7) Combine the sizes of the two tensor::ExtractSliceOps. Take the size of
2048     // the outer tensor::ExtractSliceOp for the dimensions padded by the outer
2049     // tensor::PadOp and fail if the size of the inner tensor::ExtractSliceOp
2050     // does not match the size of the padded dimension. Otherwise, take the size
2051     // of the inner tensor::ExtractSliceOp.
2052     SmallVector<OpFoldResult> newSizes = innerSliceOp.getMixedSizes();
2053     for (auto &en : enumerate(newSizes)) {
2054       if (!outerDims.test(en.index()))
2055         continue;
2056       OpFoldResult sliceSize = innerSliceOp.getMixedSizes()[en.index()];
2057       int64_t sourceSize = innerSliceOp.getSourceType().getShape()[en.index()];
2058       assert(!ShapedType::isDynamic(sourceSize) &&
2059              "expected padded dimension to have a static size");
2060       if (getConstantIntValue(sliceSize) != sourceSize) {
2061         return rewriter.notifyMatchFailure(
2062             padOp, "cannot fold since the inner ExtractSliceOp size does not "
2063                    "match the size of the outer padding");
2064       }
2065       en.value() = outerSliceOp.getMixedSizes()[en.index()];
2066     }
2067 
2068     // Combine the high paddings of the two tensor::PadOps.
2069     SmallVector<OpFoldResult> newHighPad(rank, rewriter.getIndexAttr(0));
2070     for (auto &en : enumerate(newHighPad)) {
2071       if (innerDims.test(en.index()))
2072         newHighPad[en.index()] = padOp.getMixedHighPad()[en.index()];
2073       if (outerDims.test(en.index()))
2074         newHighPad[en.index()] = outerPadOp.getMixedHighPad()[en.index()];
2075     }
2076 
2077     // Create a new tensor::ExtractSliceOp, tensor::PadOp pair that performs the
2078     // two paddings in one step.
2079     auto newSliceOp = rewriter.create<ExtractSliceOp>(
2080         padOp.getLoc(), outerSliceOp.source(), newOffsets, newSizes,
2081         innerSliceOp.getMixedStrides());
2082     auto newPadOp = rewriter.create<PadOp>(
2083         padOp.getLoc(), padOp.getResultType(), newSliceOp.getResult(),
2084         padOp.getMixedLowPad(), newHighPad, padOp.nofold());
2085     rewriter.inlineRegionBefore(padOp.getRegion(), newPadOp.getRegion(),
2086                                 newPadOp.getRegion().begin());
2087     rewriter.replaceOp(padOp, newPadOp.getResult());
2088     return success();
2089   }
2090 };
2091 
2092 } // namespace
2093 
2094 void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
2095                                         MLIRContext *context) {
2096   results.add<FoldStaticZeroPadding, FoldSourceTensorCast, FoldTargetTensorCast,
2097               FoldOrthogonalPaddings>(context);
2098 }
2099 
/// Return the padding value of the PadOp if it is constant. In this context,
2101 /// "constant" means an actual constant or "defined outside of the block".
2102 ///
2103 /// Values are considered constant in three cases:
2104 ///  - A ConstantLike value.
2105 ///  - A basic block argument from a different block.
2106 ///  - A value defined outside of the block.
2107 ///
2108 /// If the padding value is not constant, an empty Value is returned.
2109 Value PadOp::getConstantPaddingValue() {
2110   auto yieldOp = dyn_cast<YieldOp>(getRegion().front().getTerminator());
2111   if (!yieldOp)
2112     return {};
2113   Value padValue = yieldOp.value();
2114   // Check if yield value is a constant.
2115   if (matchPattern(padValue, m_Constant()))
2116     return padValue;
2117   // Check if yield value is defined inside the PadOp block.
2118   if (padValue.getParentBlock() == &getRegion().front())
2119     return {};
2120   // Else: Yield value defined outside of the PadOp block.
2121   return padValue;
2122 }
2123 
2124 OpFoldResult PadOp::fold(ArrayRef<Attribute>) {
2125   if (getResultType().hasStaticShape() && getResultType() == getSourceType() &&
2126       !nofold())
2127     return source();
2128   return {};
2129 }
2130 
2131 //===----------------------------------------------------------------------===//
2132 // SplatOp
2133 //===----------------------------------------------------------------------===//
2134 
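/// Fold a splat of a constant scalar into a SplatElementsAttr.
///
/// An illustrative example (hypothetical IR):
///
/// ```mlir
///   %cst = arith.constant 1.0 : f32
///   %0 = tensor.splat %cst : tensor<4xf32>
/// ```
///
/// folds to the attribute dense<1.0> : tensor<4xf32>.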
2135 OpFoldResult SplatOp::fold(ArrayRef<Attribute> operands) {
2136   auto constOperand = operands.front();
2137   if (!constOperand.isa_and_nonnull<IntegerAttr, FloatAttr>())
2138     return {};
2139 
  // SplatElementsAttr::get treats a single value for the second arg as a
  // splat.
2141   return SplatElementsAttr::get(getType(), {constOperand});
2142 }
2143 
2144 //===----------------------------------------------------------------------===//
2145 // TableGen'd op method definitions
2146 //===----------------------------------------------------------------------===//
2147 
2148 #define GET_OP_CLASSES
2149 #include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
2150