//===- BuiltinTypes.cpp - MLIR Builtin Type Classes -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/IR/BuiltinTypes.h"
#include "TypeDetail.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Dialect.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/Twine.h"

using namespace mlir;
using namespace mlir::detail;

//===----------------------------------------------------------------------===//
// Tablegen Type Definitions
//===----------------------------------------------------------------------===//

#define GET_TYPEDEF_CLASSES
#include "mlir/IR/BuiltinTypes.cpp.inc"

//===----------------------------------------------------------------------===//
// ComplexType
//===----------------------------------------------------------------------===//

/// Verify the construction of a complex type.
LogicalResult ComplexType::verifyConstructionInvariants(Location loc,
                                                        Type elementType) {
  if (!elementType.isIntOrFloat())
    return emitError(loc, "invalid element type for complex");
  return success();
}

//===----------------------------------------------------------------------===//
// IntegerType
//===----------------------------------------------------------------------===//

// static constexpr members must have a definition (until C++17 introduced
// inline variables).
constexpr unsigned IntegerType::kMaxWidth;

/// Verify the construction of an integer type.
LogicalResult
IntegerType::verifyConstructionInvariants(Location loc, unsigned width,
                                          SignednessSemantics signedness) {
  if (width > IntegerType::kMaxWidth) {
    return emitError(loc) << "integer bitwidth is limited to "
                          << IntegerType::kMaxWidth << " bits";
  }
  return success();
}

unsigned IntegerType::getWidth() const { return getImpl()->width; }

IntegerType::SignednessSemantics IntegerType::getSignedness() const {
  return getImpl()->signedness;
}

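/// Return the integer type obtained by multiplying this type's bitwidth by
/// `scale`, preserving signedness; returns a null IntegerType when `scale` is
/// zero. For example, scaling i8 by 4 yields i32.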
IntegerType IntegerType::scaleElementBitwidth(unsigned scale) {
  if (!scale)
    return IntegerType();
  return IntegerType::get(getContext(), scale * getWidth(), getSignedness());
}

//===----------------------------------------------------------------------===//
// FloatType
//===----------------------------------------------------------------------===//

unsigned FloatType::getWidth() {
  if (isa<Float16Type, BFloat16Type>())
    return 16;
  if (isa<Float32Type>())
    return 32;
  if (isa<Float64Type>())
    return 64;
  if (isa<Float80Type>())
    return 80;
  if (isa<Float128Type>())
    return 128;
  llvm_unreachable("unexpected float type");
}

/// Returns the floating semantics for the given type.
const llvm::fltSemantics &FloatType::getFloatSemantics() {
  if (isa<BFloat16Type>())
    return APFloat::BFloat();
  if (isa<Float16Type>())
    return APFloat::IEEEhalf();
  if (isa<Float32Type>())
    return APFloat::IEEEsingle();
  if (isa<Float64Type>())
    return APFloat::IEEEdouble();
  if (isa<Float80Type>())
    return APFloat::x87DoubleExtended();
  if (isa<Float128Type>())
    return APFloat::IEEEquad();
  llvm_unreachable("non-floating point type used");
}

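/// Return the float type obtained by scaling this type's bitwidth by `scale`,
/// or a null FloatType if the result is not a supported type: only
/// f16/bf16 -> f32 (x2), f16/bf16 -> f64 (x4), and f32 -> f64 (x2) are
/// representable.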
FloatType FloatType::scaleElementBitwidth(unsigned scale) {
  if (!scale)
    return FloatType();
  MLIRContext *ctx = getContext();
  if (isF16() || isBF16()) {
    if (scale == 2)
      return FloatType::getF32(ctx);
    if (scale == 4)
      return FloatType::getF64(ctx);
  }
  if (isF32())
    if (scale == 2)
      return FloatType::getF64(ctx);
  return FloatType();
}

//===----------------------------------------------------------------------===//
// FunctionType
//===----------------------------------------------------------------------===//

unsigned FunctionType::getNumInputs() const { return getImpl()->numInputs; }

ArrayRef<Type> FunctionType::getInputs() const {
  return getImpl()->getInputs();
}

unsigned FunctionType::getNumResults() const { return getImpl()->numResults; }

ArrayRef<Type> FunctionType::getResults() const {
  return getImpl()->getResults();
}

/// Helper to call a callback once on each index in the range
/// [0, `totalIndices`), *except* for the indices given in `indices`.
/// `indices` is allowed to have duplicates and can be in any order.
inline void iterateIndicesExcept(unsigned totalIndices,
                                 ArrayRef<unsigned> indices,
                                 function_ref<void(unsigned)> callback) {
  llvm::BitVector skipIndices(totalIndices);
  for (unsigned i : indices)
    skipIndices.set(i);

  for (unsigned i = 0; i < totalIndices; ++i)
    if (!skipIndices.test(i))
      callback(i);
}

/// Returns a new function type without the specified arguments and results.
FunctionType
FunctionType::getWithoutArgsAndResults(ArrayRef<unsigned> argIndices,
                                       ArrayRef<unsigned> resultIndices) {
  ArrayRef<Type> newInputTypes = getInputs();
  SmallVector<Type, 4> newInputTypesBuffer;
  if (!argIndices.empty()) {
    unsigned originalNumArgs = getNumInputs();
    iterateIndicesExcept(originalNumArgs, argIndices, [&](unsigned i) {
      newInputTypesBuffer.emplace_back(getInput(i));
    });
    newInputTypes = newInputTypesBuffer;
  }

  ArrayRef<Type> newResultTypes = getResults();
  SmallVector<Type, 4> newResultTypesBuffer;
  if (!resultIndices.empty()) {
    unsigned originalNumResults = getNumResults();
    iterateIndicesExcept(originalNumResults, resultIndices, [&](unsigned i) {
      newResultTypesBuffer.emplace_back(getResult(i));
    });
    newResultTypes = newResultTypesBuffer;
  }

  return get(getContext(), newInputTypes, newResultTypes);
}

//===----------------------------------------------------------------------===//
// OpaqueType
//===----------------------------------------------------------------------===//

/// Verify the construction of an opaque type.
LogicalResult OpaqueType::verifyConstructionInvariants(Location loc,
                                                       Identifier dialect,
                                                       StringRef typeData) {
  if (!Dialect::isValidNamespace(dialect.strref()))
    return emitError(loc, "invalid dialect namespace '") << dialect << "'";
  return success();
}

//===----------------------------------------------------------------------===//
// ShapedType
//===----------------------------------------------------------------------===//
constexpr int64_t ShapedType::kDynamicSize;
constexpr int64_t ShapedType::kDynamicStrideOrOffset;

Type ShapedType::getElementType() const {
  return static_cast<ImplType *>(impl)->elementType;
}

unsigned ShapedType::getElementTypeBitWidth() const {
  return getElementType().getIntOrFloatBitWidth();
}

int64_t ShapedType::getNumElements() const {
  assert(hasStaticShape() && "cannot get element count of dynamic shaped type");
  auto shape = getShape();
  int64_t num = 1;
  for (auto dim : shape)
    num *= dim;
  return num;
}

int64_t ShapedType::getRank() const {
  assert(hasRank() && "cannot query rank of unranked shaped type");
  return getShape().size();
}

bool ShapedType::hasRank() const {
  return !isa<UnrankedMemRefType, UnrankedTensorType>();
}

int64_t ShapedType::getDimSize(unsigned idx) const {
  assert(idx < getRank() && "invalid index for shaped type");
  return getShape()[idx];
}

bool ShapedType::isDynamicDim(unsigned idx) const {
  assert(idx < getRank() && "invalid index for shaped type");
  return isDynamic(getShape()[idx]);
}

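/// Return the position of dimension `index` among this type's dynamic
/// dimensions, i.e. the number of dynamic dimensions that precede it. For
/// example, in tensor<2x?x4x?xf32> the dynamic dimension at index 3 has
/// dynamic-dim index 1.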
unsigned ShapedType::getDynamicDimIndex(unsigned index) const {
  assert(index < getRank() && "invalid index");
  assert(ShapedType::isDynamic(getDimSize(index)) && "invalid index");
  return llvm::count_if(getShape().take_front(index), ShapedType::isDynamic);
}

/// Get the number of bits required to store a value of the given shaped type.
/// Compute the value recursively since tensors are allowed to have vectors as
/// elements.
int64_t ShapedType::getSizeInBits() const {
  assert(hasStaticShape() &&
         "cannot get the bit size of an aggregate with a dynamic shape");

  auto elementType = getElementType();
  if (elementType.isIntOrFloat())
    return elementType.getIntOrFloatBitWidth() * getNumElements();

  if (auto complexType = elementType.dyn_cast<ComplexType>()) {
    elementType = complexType.getElementType();
    return elementType.getIntOrFloatBitWidth() * getNumElements() * 2;
  }

  // Tensors can have vectors and other tensors as elements, other shaped types
  // cannot.
  assert(isa<TensorType>() && "unsupported element type");
  assert((elementType.isa<VectorType, TensorType>()) &&
         "unsupported tensor element type");
  return getNumElements() * elementType.cast<ShapedType>().getSizeInBits();
}

ArrayRef<int64_t> ShapedType::getShape() const {
  if (auto vectorType = dyn_cast<VectorType>())
    return vectorType.getShape();
  if (auto tensorType = dyn_cast<RankedTensorType>())
    return tensorType.getShape();
  return cast<MemRefType>().getShape();
}

int64_t ShapedType::getNumDynamicDims() const {
  return llvm::count_if(getShape(), isDynamic);
}

bool ShapedType::hasStaticShape() const {
  return hasRank() && llvm::none_of(getShape(), isDynamic);
}

bool ShapedType::hasStaticShape(ArrayRef<int64_t> shape) const {
  return hasStaticShape() && getShape() == shape;
}

//===----------------------------------------------------------------------===//
// VectorType
//===----------------------------------------------------------------------===//

VectorType VectorType::get(ArrayRef<int64_t> shape, Type elementType) {
  return Base::get(elementType.getContext(), shape, elementType);
}

VectorType VectorType::getChecked(Location location, ArrayRef<int64_t> shape,
                                  Type elementType) {
  return Base::getChecked(location, shape, elementType);
}

LogicalResult VectorType::verifyConstructionInvariants(Location loc,
                                                       ArrayRef<int64_t> shape,
                                                       Type elementType) {
  if (shape.empty())
    return emitError(loc, "vector types must have at least one dimension");

  if (!isValidElementType(elementType))
    return emitError(loc, "vector elements must be int or float type");

  if (any_of(shape, [](int64_t i) { return i <= 0; }))
    return emitError(loc, "vector types must have positive constant sizes");

  return success();
}

ArrayRef<int64_t> VectorType::getShape() const { return getImpl()->getShape(); }

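/// Return a vector type with the same shape and an element type whose
/// bitwidth is scaled by `scale`, or a null VectorType if the element type
/// cannot be scaled (see IntegerType/FloatType::scaleElementBitwidth). For
/// example, scaling vector<4xf16> by 2 yields vector<4xf32>.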
VectorType VectorType::scaleElementBitwidth(unsigned scale) {
  if (!scale)
    return VectorType();
  if (auto et = getElementType().dyn_cast<IntegerType>())
    if (auto scaledEt = et.scaleElementBitwidth(scale))
      return VectorType::get(getShape(), scaledEt);
  if (auto et = getElementType().dyn_cast<FloatType>())
    if (auto scaledEt = et.scaleElementBitwidth(scale))
      return VectorType::get(getShape(), scaledEt);
  return VectorType();
}

//===----------------------------------------------------------------------===//
// TensorType
//===----------------------------------------------------------------------===//

// Check if "elementType" can be an element type of a tensor. Emits an error
// at `location` and returns failure if it cannot.
static LogicalResult checkTensorElementType(Location location,
                                            Type elementType) {
  if (!TensorType::isValidElementType(elementType))
    return emitError(location, "invalid tensor element type: ") << elementType;
  return success();
}

/// Return true if the specified element type is ok in a tensor.
bool TensorType::isValidElementType(Type type) {
  // Note: Non standard/builtin types are allowed to exist within tensor
  // types. Dialects are expected to verify that tensor types have a valid
  // element type within that dialect.
  return type.isa<ComplexType, FloatType, IntegerType, OpaqueType, VectorType,
                  IndexType>() ||
         !type.getDialect().getNamespace().empty();
}

//===----------------------------------------------------------------------===//
// RankedTensorType
//===----------------------------------------------------------------------===//

RankedTensorType RankedTensorType::get(ArrayRef<int64_t> shape,
                                       Type elementType) {
  return Base::get(elementType.getContext(), shape, elementType);
}

RankedTensorType RankedTensorType::getChecked(Location location,
                                              ArrayRef<int64_t> shape,
                                              Type elementType) {
  return Base::getChecked(location, shape, elementType);
}

LogicalResult RankedTensorType::verifyConstructionInvariants(
    Location loc, ArrayRef<int64_t> shape, Type elementType) {
  for (int64_t s : shape) {
    if (s < -1)
      return emitError(loc, "invalid tensor dimension size");
  }
  return checkTensorElementType(loc, elementType);
}

ArrayRef<int64_t> RankedTensorType::getShape() const {
  return getImpl()->getShape();
}

//===----------------------------------------------------------------------===//
// UnrankedTensorType
//===----------------------------------------------------------------------===//

UnrankedTensorType UnrankedTensorType::get(Type elementType) {
  return Base::get(elementType.getContext(), elementType);
}

UnrankedTensorType UnrankedTensorType::getChecked(Location location,
                                                  Type elementType) {
  return Base::getChecked(location, elementType);
}

LogicalResult
UnrankedTensorType::verifyConstructionInvariants(Location loc,
                                                 Type elementType) {
  return checkTensorElementType(loc, elementType);
}

//===----------------------------------------------------------------------===//
// BaseMemRefType
//===----------------------------------------------------------------------===//

unsigned BaseMemRefType::getMemorySpace() const {
  return static_cast<ImplType *>(impl)->memorySpace;
}

//===----------------------------------------------------------------------===//
// MemRefType
//===----------------------------------------------------------------------===//

/// Get or create a new MemRefType based on shape, element type, affine
/// map composition, and memory space.  Assumes the arguments define a
/// well-formed MemRef type.  Use getChecked to gracefully handle MemRefType
/// construction failures.
MemRefType MemRefType::get(ArrayRef<int64_t> shape, Type elementType,
                           ArrayRef<AffineMap> affineMapComposition,
                           unsigned memorySpace) {
  auto result = getImpl(shape, elementType, affineMapComposition, memorySpace,
                        /*location=*/llvm::None);
  assert(result && "Failed to construct instance of MemRefType.");
  return result;
}

/// Get or create a new MemRefType based on shape, element type, affine
/// map composition, and memory space declared at the given location.
/// If the location is unknown, the last argument should be an instance of
/// UnknownLoc.  If the MemRefType defined by the arguments would be
/// ill-formed, emits errors (to the handler registered with the context or to
/// the error stream) and returns nullptr.
MemRefType MemRefType::getChecked(Location location, ArrayRef<int64_t> shape,
                                  Type elementType,
                                  ArrayRef<AffineMap> affineMapComposition,
                                  unsigned memorySpace) {
  return getImpl(shape, elementType, affineMapComposition, memorySpace,
                 location);
}

/// Get or create a new MemRefType defined by the arguments.  If the resulting
/// type would be ill-formed, return nullptr.  If the location is provided,
/// emit detailed error messages.  To emit errors when the location is unknown,
/// pass in an instance of UnknownLoc.
MemRefType MemRefType::getImpl(ArrayRef<int64_t> shape, Type elementType,
                               ArrayRef<AffineMap> affineMapComposition,
                               unsigned memorySpace,
                               Optional<Location> location) {
  auto *context = elementType.getContext();

  if (!BaseMemRefType::isValidElementType(elementType))
    return (void)emitOptionalError(location, "invalid memref element type"),
           MemRefType();

  for (int64_t s : shape) {
    // Negative sizes are not allowed except for `-1` that means dynamic size.
    if (s < -1)
      return (void)emitOptionalError(location, "invalid memref size"),
             MemRefType();
  }

  // Check that the structure of the composition is valid, i.e. that each
  // subsequent affine map has as many inputs as the previous map has results.
  // Take the dimensionality of the MemRef for the first map.
  auto dim = shape.size();
  unsigned i = 0;
  for (const auto &affineMap : affineMapComposition) {
    if (affineMap.getNumDims() != dim) {
      if (location)
        emitError(*location)
            << "memref affine map dimension mismatch between "
            << (i == 0 ? Twine("memref rank") : "affine map " + Twine(i))
            << " and affine map " << i + 1 << ": " << dim
            << " != " << affineMap.getNumDims();
      return nullptr;
    }

    dim = affineMap.getNumResults();
    ++i;
  }

  // Drop identity maps from the composition.
  // This may lead to the composition becoming empty, which is interpreted as an
  // implicit identity.
  SmallVector<AffineMap, 2> cleanedAffineMapComposition;
  for (const auto &map : affineMapComposition) {
    if (map.isIdentity())
      continue;
    cleanedAffineMapComposition.push_back(map);
  }

  return Base::get(context, shape, elementType, cleanedAffineMapComposition,
                   memorySpace);
}

ArrayRef<int64_t> MemRefType::getShape() const { return getImpl()->getShape(); }

ArrayRef<AffineMap> MemRefType::getAffineMaps() const {
  return getImpl()->getAffineMaps();
}

//===----------------------------------------------------------------------===//
// UnrankedMemRefType
//===----------------------------------------------------------------------===//

UnrankedMemRefType UnrankedMemRefType::get(Type elementType,
                                           unsigned memorySpace) {
  return Base::get(elementType.getContext(), elementType, memorySpace);
}

UnrankedMemRefType UnrankedMemRefType::getChecked(Location location,
                                                  Type elementType,
                                                  unsigned memorySpace) {
  return Base::getChecked(location, elementType, memorySpace);
}

LogicalResult
UnrankedMemRefType::verifyConstructionInvariants(Location loc, Type elementType,
                                                 unsigned memorySpace) {
  if (!BaseMemRefType::isValidElementType(elementType))
    return emitError(loc, "invalid memref element type");
  return success();
}

// Fallback case for a terminal dim/sym/cst that is not part of a binary op
// (i.e. a single term). Accumulate the AffineExpr into the existing one.
static void extractStridesFromTerm(AffineExpr e,
                                   AffineExpr multiplicativeFactor,
                                   MutableArrayRef<AffineExpr> strides,
                                   AffineExpr &offset) {
  if (auto dim = e.dyn_cast<AffineDimExpr>())
    strides[dim.getPosition()] =
        strides[dim.getPosition()] + multiplicativeFactor;
  else
    offset = offset + e * multiplicativeFactor;
}

/// Takes a single AffineExpr `e` and populates the `strides` array with the
/// stride expressions for each dim position.
/// The convention is that the strides for dimensions d0, ..., dn appear in
/// order, to make indexing into the result intuitive.
static LogicalResult extractStrides(AffineExpr e,
                                    AffineExpr multiplicativeFactor,
                                    MutableArrayRef<AffineExpr> strides,
                                    AffineExpr &offset) {
  auto bin = e.dyn_cast<AffineBinaryOpExpr>();
  if (!bin) {
    extractStridesFromTerm(e, multiplicativeFactor, strides, offset);
    return success();
  }

  if (bin.getKind() == AffineExprKind::CeilDiv ||
      bin.getKind() == AffineExprKind::FloorDiv ||
      bin.getKind() == AffineExprKind::Mod)
    return failure();

  if (bin.getKind() == AffineExprKind::Mul) {
    auto dim = bin.getLHS().dyn_cast<AffineDimExpr>();
    if (dim) {
      strides[dim.getPosition()] =
          strides[dim.getPosition()] + bin.getRHS() * multiplicativeFactor;
      return success();
    }
    // LHS and RHS may both contain complex expressions of dims. Try one path
    // and if it fails try the other. This is guaranteed to succeed because
    // only one path may have a `dim`, otherwise this is not an AffineExpr in
    // the first place.
    if (bin.getLHS().isSymbolicOrConstant())
      return extractStrides(bin.getRHS(), multiplicativeFactor * bin.getLHS(),
                            strides, offset);
    return extractStrides(bin.getLHS(), multiplicativeFactor * bin.getRHS(),
                          strides, offset);
  }

  if (bin.getKind() == AffineExprKind::Add) {
    auto res1 =
        extractStrides(bin.getLHS(), multiplicativeFactor, strides, offset);
    auto res2 =
        extractStrides(bin.getRHS(), multiplicativeFactor, strides, offset);
    return success(succeeded(res1) && succeeded(res2));
  }

  llvm_unreachable("unexpected binary operation");
}

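/// Decompose the layout of `t` into stride expressions and an offset
/// expression. A layout is compatible with strided semantics when it can be
/// written as `offset + stride_0 * d0 + ... + stride_n-1 * dn-1`. For
/// example, the layout `(d0, d1) -> (d0 * 8 + d1 + 4)` yields strides [8, 1]
/// and offset 4, while an empty layout on memref<4x8xf32> yields the
/// canonical row-major strides [8, 1] and offset 0.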
LogicalResult mlir::getStridesAndOffset(MemRefType t,
                                        SmallVectorImpl<AffineExpr> &strides,
                                        AffineExpr &offset) {
  auto affineMaps = t.getAffineMaps();
  // For now strides are only computed on a single affine map with a single
  // result (i.e. the closed subset of linearization maps that are compatible
  // with striding semantics).
  // TODO: support more forms on a per-need basis.
  if (affineMaps.size() > 1)
    return failure();
  if (affineMaps.size() == 1 && affineMaps[0].getNumResults() != 1)
    return failure();

  auto zero = getAffineConstantExpr(0, t.getContext());
  auto one = getAffineConstantExpr(1, t.getContext());
  offset = zero;
  strides.assign(t.getRank(), zero);

  AffineMap m;
  if (!affineMaps.empty()) {
    m = affineMaps.front();
    assert(!m.isIdentity() && "unexpected identity map");
  }

  // Canonical case for empty map.
  if (!m) {
    // 0-D corner case, offset is already 0.
    if (t.getRank() == 0)
      return success();
    auto stridedExpr =
        makeCanonicalStridedLayoutExpr(t.getShape(), t.getContext());
    if (succeeded(extractStrides(stridedExpr, one, strides, offset)))
      return success();
    llvm_unreachable("unexpected failure: extract strides in canonical layout");
  }

  // Non-canonical case requires more work.
  auto stridedExpr =
      simplifyAffineExpr(m.getResult(0), m.getNumDims(), m.getNumSymbols());
  if (failed(extractStrides(stridedExpr, one, strides, offset))) {
    offset = AffineExpr();
    strides.clear();
    return failure();
  }

  // Simplify results to allow folding to constants and simple checks.
  unsigned numDims = m.getNumDims();
  unsigned numSymbols = m.getNumSymbols();
  offset = simplifyAffineExpr(offset, numDims, numSymbols);
  for (auto &stride : strides)
    stride = simplifyAffineExpr(stride, numDims, numSymbols);

  // In practice, a strided memref must be internally non-aliasing. Test
  // against 0 as a proxy.
  // TODO: static cases can have more advanced checks.
  // TODO: dynamic cases would require a way to compare symbolic
  // expressions and would probably need an affine set context propagated
  // everywhere.
  if (llvm::any_of(strides, [](AffineExpr e) {
        return e == getAffineConstantExpr(0, e.getContext());
      })) {
    offset = AffineExpr();
    strides.clear();
    return failure();
  }

  return success();
}

LogicalResult mlir::getStridesAndOffset(MemRefType t,
                                        SmallVectorImpl<int64_t> &strides,
                                        int64_t &offset) {
  AffineExpr offsetExpr;
  SmallVector<AffineExpr, 4> strideExprs;
  if (failed(::getStridesAndOffset(t, strideExprs, offsetExpr)))
    return failure();
  if (auto cst = offsetExpr.dyn_cast<AffineConstantExpr>())
    offset = cst.getValue();
  else
    offset = ShapedType::kDynamicStrideOrOffset;
  for (auto e : strideExprs) {
    if (auto c = e.dyn_cast<AffineConstantExpr>())
      strides.push_back(c.getValue());
    else
      strides.push_back(ShapedType::kDynamicStrideOrOffset);
  }
  return success();
}

//===----------------------------------------------------------------------===//
// TupleType
//===----------------------------------------------------------------------===//

/// Return the element types for this tuple.
ArrayRef<Type> TupleType::getTypes() const { return getImpl()->getTypes(); }

/// Accumulate the types contained in this tuple and tuples nested within it.
/// Note that this only flattens nested tuples, not any other container type,
/// e.g. a tuple<i32, tensor<i32>, tuple<f32, tuple<i64>>> is flattened to
/// (i32, tensor<i32>, f32, i64).
void TupleType::getFlattenedTypes(SmallVectorImpl<Type> &types) {
  for (Type type : getTypes()) {
    if (auto nestedTuple = type.dyn_cast<TupleType>())
      nestedTuple.getFlattenedTypes(types);
    else
      types.push_back(type);
  }
}

/// Return the number of element types.
size_t TupleType::size() const { return getImpl()->size(); }

//===----------------------------------------------------------------------===//
// Type Utilities
//===----------------------------------------------------------------------===//

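/// Build the single-result affine map
///   (d0, ..., dn-1)[symbols] -> (offset + d0 * stride_0 + ... +
///                                dn-1 * stride_n-1)
/// from the given strides and offset. Each dynamic stride or offset
/// (ShapedType::kDynamicStrideOrOffset) introduces a fresh symbol; static
/// values are embedded as constants. For example, strides
/// [kDynamicStrideOrOffset, 1] with offset 0 produce
/// `(d0, d1)[s0] -> (d0 * s0 + d1)`.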
AffineMap mlir::makeStridedLinearLayoutMap(ArrayRef<int64_t> strides,
                                           int64_t offset,
                                           MLIRContext *context) {
  AffineExpr expr;
  unsigned nSymbols = 0;

  // AffineExpr for offset.
  // Static case.
  if (offset != MemRefType::getDynamicStrideOrOffset()) {
    auto cst = getAffineConstantExpr(offset, context);
    expr = cst;
  } else {
    // Dynamic case, new symbol for the offset.
    auto sym = getAffineSymbolExpr(nSymbols++, context);
    expr = sym;
  }

  // AffineExpr for strides.
  for (auto en : llvm::enumerate(strides)) {
    auto dim = en.index();
    auto stride = en.value();
    assert(stride != 0 && "Invalid stride specification");
    auto d = getAffineDimExpr(dim, context);
    AffineExpr mult;
    // Static case.
    if (stride != MemRefType::getDynamicStrideOrOffset())
      mult = getAffineConstantExpr(stride, context);
    else
      // Dynamic case, new symbol for each new stride.
      mult = getAffineSymbolExpr(nSymbols++, context);
    expr = expr + d * mult;
  }

  return AffineMap::get(strides.size(), nSymbols, expr);
}

/// Return a version of `t` with identity layout if it can be determined
/// statically that the layout is the canonical contiguous strided layout.
/// Otherwise, simplify `t`'s layout with `simplifyAffineExpr` and return a
/// copy of `t` with the simplified layout.
/// If `t` has multiple layout maps or a multi-result layout, just return `t`.
MemRefType mlir::canonicalizeStridedLayout(MemRefType t) {
  auto affineMaps = t.getAffineMaps();
  // Already in canonical form.
  if (affineMaps.empty())
    return t;

  // Can't reduce to canonical identity form, return in canonical form.
  if (affineMaps.size() > 1 || affineMaps[0].getNumResults() > 1)
    return t;

  // Corner-case for 0-D affine maps.
  auto m = affineMaps[0];
  if (m.getNumDims() == 0 && m.getNumSymbols() == 0) {
    if (auto cst = m.getResult(0).dyn_cast<AffineConstantExpr>())
      if (cst.getValue() == 0)
        return MemRefType::Builder(t).setAffineMaps({});
    return t;
  }

  // 0-D corner case for an empty shape that still has an affine map. Example:
  // `memref<f32, affine_map<()[s0] -> (s0)>>`. This is a 1-element memref with
  // an offset that needs to remain, so just return t.
  if (t.getShape().empty())
    return t;

  // If the canonical strided layout for the sizes of `t` is equal to the
  // simplified layout of `t` we can just return an empty layout. Otherwise,
  // just simplify the existing layout.
  AffineExpr expr =
      makeCanonicalStridedLayoutExpr(t.getShape(), t.getContext());
  auto simplifiedLayoutExpr =
      simplifyAffineExpr(m.getResult(0), m.getNumDims(), m.getNumSymbols());
  if (expr != simplifiedLayoutExpr)
    return MemRefType::Builder(t).setAffineMaps({AffineMap::get(
        m.getNumDims(), m.getNumSymbols(), simplifiedLayoutExpr)});
  return MemRefType::Builder(t).setAffineMaps({});
}

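/// Compute the canonical (row-major, contiguous) strided layout expression
/// for the given sizes, using one expression per dimension. Static sizes
/// yield constant strides; once a dynamic size is seen, all more-major
/// strides become fresh symbols. For example, sizes [3, 4, 5] with exprs
/// (d0, d1, d2) yield `d0 * 20 + d1 * 5 + d2`.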
AffineExpr mlir::makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
                                                ArrayRef<AffineExpr> exprs,
                                                MLIRContext *context) {
  assert(!sizes.empty() && !exprs.empty() &&
         "expected non-empty sizes and exprs");

  // Size 0 corner case is useful for canonicalizations.
  if (llvm::is_contained(sizes, 0))
    return getAffineConstantExpr(0, context);

  auto maps = AffineMap::inferFromExprList(exprs);
  assert(!maps.empty() && "Expected one non-empty map");
  unsigned numDims = maps[0].getNumDims(), nSymbols = maps[0].getNumSymbols();

  AffineExpr expr;
  bool dynamicPoisonBit = false;
  int64_t runningSize = 1;
  for (auto en : llvm::zip(llvm::reverse(exprs), llvm::reverse(sizes))) {
    int64_t size = std::get<1>(en);
    // Degenerate case, no size -> no stride.
    if (size == 0)
      continue;
    AffineExpr dimExpr = std::get<0>(en);
    AffineExpr stride = dynamicPoisonBit
                            ? getAffineSymbolExpr(nSymbols++, context)
                            : getAffineConstantExpr(runningSize, context);
    expr = expr ? expr + dimExpr * stride : dimExpr * stride;
    if (size > 0)
      runningSize *= size;
    else
      dynamicPoisonBit = true;
  }
  return simplifyAffineExpr(expr, numDims, nSymbols);
}

/// Return a version of `t` with a layout that has all dynamic offset and
/// strides. This is used to erase the static layout.
MemRefType mlir::eraseStridedLayout(MemRefType t) {
  auto val = ShapedType::kDynamicStrideOrOffset;
  return MemRefType::Builder(t).setAffineMaps(makeStridedLinearLayoutMap(
      SmallVector<int64_t, 4>(t.getRank(), val), val, t.getContext()));
}

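/// Convenience overload of the above that builds the dimension expressions
/// d0 ... dn-1 for `sizes` itself and forwards to the expression-based form.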
AffineExpr mlir::makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
                                                MLIRContext *context) {
  SmallVector<AffineExpr, 4> exprs;
  exprs.reserve(sizes.size());
  for (auto dim : llvm::seq<unsigned>(0, sizes.size()))
    exprs.push_back(getAffineDimExpr(dim, context));
  return makeCanonicalStridedLayoutExpr(sizes, exprs, context);
}

/// Return true if the layout for `t` is compatible with strided semantics.
bool mlir::isStrided(MemRefType t) {
  int64_t offset;
  SmallVector<int64_t, 4> stridesAndOffset;
  auto res = getStridesAndOffset(t, stridesAndOffset, offset);
  return succeeded(res);
}