//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
  : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
    Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
    TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
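  // The name is built from the tag kind, the qualified (or typedef) name, and
  // the given suffix; e.g., "struct.Foo", "class.ns::Bar", or "union.anon".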
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // Prefer the tag name for the codegen type; for an anonymous tag, fall back
  // to the name of the typedef it was declared with, if any.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS);
    else
      RD->printName(OS);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.  For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
  if (T->isConstantMatrixType()) {
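    // The in-memory form of a matrix is a flat array of its elements; e.g., a
    // 4x4 matrix of float is laid out as [16 x float].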
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // If this is a bool type, or an ExtIntType in a bitfield representation,
  // map this integer to the target-specified size.
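  // For example, on typical targets _Bool is widened here from i1 to i8, and
  // an _ExtInt used as a bit-field gets an integer type of the _ExtInt's full
  // storage size rather than its exact bit width.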
  if ((ForBitField && T->isExtIntType()) ||
      (!T->isExtIntType() && R->isIntegerTy(1)))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);


/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple structure fields), don't check again.
  if (!AlreadyChecked.insert(RD).second)
    return true;

  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();

  // If this type is already laid out, converting it is a noop.
  if (CGT.isRecordLayoutComplete(Key)) return true;

  // If this type is currently being laid out, we can't recursively compile it.
  if (CGT.isRecordBeingLaidOut(Key))
    return false;

  // If this type would require laying out bases that are currently being laid
  // out, don't do it.  This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases())
      if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT,
                           AlreadyChecked))
        return false;
  }

  // If this type would require laying out members that are currently being laid
  // out, don't do it.
  for (const auto *I : RD->fields())
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
      return false;

  // If there are no problems, let's do it.
  return true;
}

/// isSafeToConvert - Return true if it is safe to convert this field type,
/// which requires the structure elements contained by-value to all be
/// recursively safe to convert.
static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // Strip off atomic type sugar.
  if (const auto *AT = T->getAs<AtomicType>())
    T = AT->getValueType();

  // If this is a record, check it.
  if (const auto *RT = T->getAs<RecordType>())
    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);

  // If this is an array, check the elements, which are embedded inline.
  if (const auto *AT = CGT.getContext().getAsArrayType(T))
    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);

  // Otherwise, there is no concern about transforming this.  We only care about
  // things that are contained by-value in a structure that can have another
  // structure as a member.
  return true;
}


/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
  // If no structs are being laid out, we can certainly do this one.
  if (CGT.noRecordsBeingLaidOut()) return true;

  llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
  return isSafeToConvert(RD, CGT, AlreadyChecked);
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point.  This boils down to whether the type is complete and whether
/// we've temporarily deferred expanding it because we're in a recursive
/// context.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  if (TT->isIncompleteType())
    return false;

  // If this is an enum, then it is always safe to convert.
  const RecordType *RT = dyn_cast<RecordType>(TT);
  if (!RT) return true;

  // Otherwise, we have to be careful.  If it is a struct that we're in the
  // process of expanding, then we can't convert the function type.  That's ok
  // though because we must be in a pointer context under the struct, so we can
  // just convert it to a dummy type.
  //
  // We decide this by checking whether ConvertRecordDeclType returns us an
  // opaque type for a struct that we know is defined.
  return isSafeToConvert(RT->getDecl(), *this);
}


/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete.  Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended.  If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
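///
/// For example, given only a forward declaration "struct S;", a function type
/// like "void (struct S)" has an incomplete parameter type, so this returns
/// false and the caller falls back to a placeholder type for the function.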
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types from
  // the cache.  This allows function types and other things that may be derived
  // from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this.  We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // opaque type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already.  If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const Type *Ty = QFT.getTypePtr();
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type.  If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // While we're converting the parameter types for a function, we don't want
  // to recursively convert any pointed-to structs.  Converting directly-used
  // structs is ok though.
  if (!RecordsBeingLaidOut.insert(Ty).second) {
    SkippedLayout = true;
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go; go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  RecordsBeingLaidOut.erase(Ty);

  if (SkippedLayout)
    TypeCache.clear();

  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());
  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types may
  // be represented by different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  // See if type is already cached.
  llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
  // If type is found in map then use it. Otherwise, convert type T.
  if (TCI != TypeCache.end())
    return TCI->second;

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.  Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
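      // E.g., svint32_t (SveInt32) lowers to <vscale x 4 x i32>; the x2/x3/x4
      // tuple variants multiply the minimum element count accordingly.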
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    if (PointeeType->isVoidTy())
      PointeeType = llvm::Type::getInt8Ty(getLLVMContext());

    unsigned AS = PointeeType->isFunctionTy()
                      ? getDataLayout().getProgramAddressSpace()
                      : Context.getTargetAddressSpace(ETy);
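    // (On targets with a separate code address space, e.g. AVR, function
    //  pointers live in the program address space rather than the default
    //  data address space.)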

    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(Ty);
    ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
                                            VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
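    // As an IR value, a matrix is a single flat vector; e.g., a 4x4 matrix of
    // float becomes <16 x float>, while its in-memory form (see
    // ConvertTypeForMem above) is [16 x float].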
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type; we just return a
    // pointer to the underlying interface type.  We don't need to worry about
    // recursive conversion.
    llvm::Type *T =
      ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    ResultType = T->getPointerTo();
    break;
  }

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type.  This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
                                  ? CGM.getGenericBlockLiteralType()
                                  : ConvertTypeForMem(FTy);
    unsigned AS = Context.getTargetAddressSpace(FTy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
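      // E.g., in the Microsoft ABI the representation of a member pointer can
      // depend on the inheritance model of the class, which may not be known
      // yet, so an opaque placeholder is used until the class is completed.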
      RecordsWithOpaqueMemberPointers.insert(MPTy->getClass());
      ResultType = llvm::StructType::create(getLLVMContext());
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
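      // E.g., a 3-byte value type padded to a 4-byte atomic size becomes
      // { <value type>, [1 x i8] }.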
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType = llvm::StructType::get(getLLVMContext(),
                                         llvm::makeArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::ExtInt: {
    const auto &EIT = cast<ExtIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // If converting this type would cause us to infinitely loop, don't do it!
  if (!isSafeToConvert(RD, *this)) {
    DeferredRecords.push_back(RD);
    return Ty;
  }

  // Okay, this is a definition of a type.  Compile the implementation now.
  bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
  (void)InsertResult;
  assert(InsertResult && "Recursively compiling a struct?");

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // We're done laying out this struct.
  bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
  assert(EraseResult && "struct not in RecordsBeingLaidOut set?");

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  // If we're done converting the outer-most record, then convert any deferred
  // structs as well.
  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
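  // (For example, Itanium-style C++ ABIs encode a null data member pointer as
  //  -1 rather than 0, so zero-filled storage would not be a null member
  //  pointer and such a type is not zero-initializable.)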
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}