//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

#ifndef NDEBUG
#include "llvm/Support/CommandLine.h"
// TODO: turn on by default when defined(EXPENSIVE_CHECKS) once check-clang is
// -verify-type-cache clean.
static llvm::cl::opt<bool> VerifyTypeCache(
    "verify-type-cache",
    llvm::cl::desc("Verify that the type cache matches the computed type"),
    llvm::cl::init(false), llvm::cl::Hidden);
#endif

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
      TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name
  // if there is no tag type name available
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // If this is a bool type, or a bit-precise integer type in a bitfield
  // representation, map this integer to the target-specified size.
  if ((ForBitField && T->isBitIntType()) ||
      (!T->isBitIntType() && R->isIntegerTy(1)))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);


/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple structure fields), don't check again.
  if (!AlreadyChecked.insert(RD).second)
    return true;

  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();

  // If this type is already laid out, converting it is a noop.
  if (CGT.isRecordLayoutComplete(Key)) return true;

  // If this type is currently being laid out, we can't recursively compile it.
  if (CGT.isRecordBeingLaidOut(Key))
    return false;

  // If this type would require laying out bases that are currently being laid
  // out, don't do it. This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases())
      if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT,
                           AlreadyChecked))
        return false;
  }

  // If this type would require laying out members that are currently being
  // laid out, don't do it.
  for (const auto *I : RD->fields())
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
      return false;

  // If there are no problems, let's do it.
  return true;
}

/// isSafeToConvert - Return true if it is safe to convert this field type,
/// which requires the structure elements contained by-value to all be
/// recursively safe to convert.
static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // Strip off atomic type sugar.
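  // (An _Atomic(struct S) field still embeds S by value, so S itself must be
  // checked below.)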
  if (const auto *AT = T->getAs<AtomicType>())
    T = AT->getValueType();

  // If this is a record, check it.
  if (const auto *RT = T->getAs<RecordType>())
    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);

  // If this is an array, check the elements, which are embedded inline.
  if (const auto *AT = CGT.getContext().getAsArrayType(T))
    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);

  // Otherwise, there is no concern about transforming this. We only care about
  // things that are contained by-value in a structure that can have another
  // structure as a member.
  return true;
}


/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
  // If no structs are being laid out, we can certainly do this one.
  if (CGT.noRecordsBeingLaidOut()) return true;

  llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
  return isSafeToConvert(RD, CGT, AlreadyChecked);
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether it is complete, as well as whether
/// we've temporarily deferred expanding the type because we're in a recursive
/// context.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  if (TT->isIncompleteType())
    return false;

  // If this is an enum, then it is always safe to convert.
  const RecordType *RT = dyn_cast<RecordType>(TT);
  if (!RT) return true;

  // Otherwise, we have to be careful. If it is a struct that we're in the
  // process of expanding, then we can't convert the function type. That's ok
  // though because we must be in a pointer context under the struct, so we can
  // just convert it to a dummy type.
  //
  // We decide this by checking whether ConvertRecordDeclType returns us an
  // opaque type for a struct that we know is defined.
  return isSafeToConvert(RT->getDecl(), *this);
}


/// Code to verify a given function type is complete, i.e. the return type
/// and all of the parameter types are complete. Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
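///
/// For example, 'void f(struct S);' cannot be lowered to a real IR function
/// type while 'S' is only forward-declared; a placeholder is used instead.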
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types from
  // the cache. This allows function types and other things that may be derived
  // from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const Type *Ty = QFT.getTypePtr();
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // While we're converting the parameter types for a function, we don't want
  // to recursively convert any pointed-to structs. Converting directly-used
  // structs is ok though.
  if (!RecordsBeingLaidOut.insert(Ty).second) {
    SkippedLayout = true;
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
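  // The placeholder struct returned in that case is recomputed later:
  // SkippedLayout causes the TypeCache to be flushed once the blocking record
  // finishes layout (see ConvertRecordDeclType).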
  if (FunctionsBeingProcessed.count(FI)) {

    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {

    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  RecordsBeingLaidOut.erase(Ty);

  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());
  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For the device-side compilation, CUDA device builtin surface/texture types
  // may be represented in different types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  // The LLVM type we return for a given Clang type may not always be the same,
  // most notably when dealing with recursive structs. We mark these potential
  // cases with ShouldUseCache below. Builtin types cannot be recursive.
  // TODO: when clang uses LLVM opaque pointers we won't be able to represent
  // recursive types with LLVM types, making this logic much simpler.
  llvm::Type *CachedType = nullptr;
  bool ShouldUseCache =
      Ty->isBuiltinType() ||
      (noRecordsBeingLaidOut() && FunctionsBeingProcessed.empty());
  if (ShouldUseCache) {
    llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI =
        TypeCache.find(Ty);
    if (TCI != TypeCache.end())
      CachedType = TCI->second;
    if (CachedType) {
#ifndef NDEBUG
      if (!VerifyTypeCache)
        return CachedType;
#else
      return CachedType;
#endif
    }
  }

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
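      // ConvertTypeForMem widens it to the target's in-memory bool size.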
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue() *
                                                 Info.NumVectors);
      }
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    if (PointeeType->isVoidTy())
      PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(Ty);
    ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
                                            VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type, we just return a
    // pointer to the underlying interface type. We don't need to worry about
    // recursive conversion.
    llvm::Type *T =
        ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    ResultType = T->getPointerTo();
    break;
  }

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
                                  ? CGM.getGenericBlockLiteralType()
                                  : ConvertTypeForMem(FTy);
    // Block pointers lower to function type. For function type,
    // getTargetAddressSpace() returns default address space for
    // function pointer i.e. program address space. Therefore, for block
    // pointers, it is important to pass qualifiers when calling
    // getTargetAddressSpace(), to ensure that we get the address space
    // for data pointers and not function pointers.
    unsigned AS = Context.getTargetAddressSpace(FTy.getQualifiers());
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      RecordsWithOpaqueMemberPointers.insert(MPTy->getClass());
      ResultType = llvm::StructType::create(getLLVMContext());
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
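    // The padded form is { <value type>, [N x i8] }, where N covers the bytes
    // the atomic representation adds beyond the value representation.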
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType = llvm::StructType::get(getLLVMContext(),
                                         llvm::makeArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");

#ifndef NDEBUG
  if (CachedType) {
    assert(CachedType == ResultType &&
           "Cached type doesn't match computed type");
  }
#endif

  if (ShouldUseCache)
    TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead use the (clang) type
  // connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // If converting this type would cause us to infinitely loop, don't do it!
  if (!isSafeToConvert(RD, *this)) {
    DeferredRecords.push_back(RD);
    return Ty;
  }

  // Okay, this is a definition of a type. Compile the implementation now.
  bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
  (void)InsertResult;
  assert(InsertResult && "Recursively compiling a struct?");

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // We're done laying out this struct.
  bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
  assert(EraseResult && "struct not in RecordsBeingLaidOut set?");

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  // If we're done converting the outer-most record, then convert any deferred
  // structs as well.
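  // (These were pushed onto DeferredRecords above when isSafeToConvert
  // refused a nested conversion.)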
  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}