//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
    : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
      Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
      TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name if there is no tag type
  // name available.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into an llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
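/// Constant matrix types are handled the same way: ConvertType lowers them to
/// a single flat vector, while the memory representation here is a flat
/// [rows * columns] array of the element type.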
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // If this is a bool type, or a bit-precise integer type in a bitfield
  // representation, map this integer to the target-specified size.
  if ((ForBitField && T->isBitIntType()) ||
      (!T->isBitIntType() && R->isIntegerTy(1)))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);

/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple structure fields), don't check again.
  if (!AlreadyChecked.insert(RD).second)
    return true;

  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();

  // If this type is already laid out, converting it is a no-op.
  if (CGT.isRecordLayoutComplete(Key)) return true;

  // If this type is currently being laid out, we can't recursively compile it.
  if (CGT.isRecordBeingLaidOut(Key))
    return false;

  // If this type would require laying out bases that are currently being laid
  // out, don't do it. This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases())
      if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT,
                           AlreadyChecked))
        return false;
  }

  // If this type would require laying out members that are currently being
  // laid out, don't do it.
  for (const auto *I : RD->fields())
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
      return false;

  // If there are no problems, let's do it.
  return true;
}

/// isSafeToConvert - Return true if it is safe to convert this field type,
/// which requires the structure elements contained by-value to all be
/// recursively safe to convert.
static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // Strip off atomic type sugar.
  if (const auto *AT = T->getAs<AtomicType>())
    T = AT->getValueType();

  // If this is a record, check it.
  if (const auto *RT = T->getAs<RecordType>())
    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);

  // If this is an array, check the elements, which are embedded inline.
  if (const auto *AT = CGT.getContext().getAsArrayType(T))
    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);

  // Otherwise, there is no concern about transforming this. We only care about
  // things that are contained by-value in a structure that can have another
  // structure as a member.
  return true;
}

/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
  // If no structs are being laid out, we can certainly do this one.
  if (CGT.noRecordsBeingLaidOut()) return true;

  llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
  return isSafeToConvert(RD, CGT, AlreadyChecked);
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete, as well as
/// whether we've temporarily deferred expanding it because we're in a
/// recursive context.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot have their member pointers represented in IR unless
  // certain circumstances have been reached.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  if (TT->isIncompleteType())
    return false;

  // If this is an enum, then it is always safe to convert.
  const RecordType *RT = dyn_cast<RecordType>(TT);
  if (!RT) return true;

  // Otherwise, we have to be careful. If it is a struct that we're in the
  // process of expanding, then we can't convert the function type. That's okay
  // though because we must be in a pointer context under the struct, so we can
  // just convert it to a dummy type.
  //
  // We decide this by checking whether ConvertRecordDeclType returns us an
  // opaque type for a struct that we know is defined.
  return isSafeToConvert(RT->getDecl(), *this);
}

/// Code to verify a given function type is complete, i.e. the return type and
/// all of the parameter types are complete. Also check to see if we are in a
/// RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
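/// Note that completing an enum can flush the entire TypeCache: other cached
/// types (e.g. function types) may have been built from the speculative i32
/// we hand out for forward-declared enums.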
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const Type *Ty = QFT.getTypePtr();
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type. If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
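    // For an incomplete record this merely creates (or finds) the named
    // opaque IR struct; a record that is mid-layout is pushed onto
    // DeferredRecords and finished once the outermost record completes.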
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // While we're converting the parameter types for a function, we don't want
  // to recursively convert any pointed-to structs. Converting directly-used
  // structs is ok though.
  if (!RecordsBeingLaidOut.insert(Ty).second) {
    SkippedLayout = true;
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go; go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  RecordsBeingLaidOut.erase(Ty);

  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());
  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types
  // may be represented as different LLVM types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  // The LLVM type we return for a given Clang type may not always be the same,
  // most notably when dealing with recursive structs. We mark these potential
  // cases with ShouldUseCache below. Builtin types cannot be recursive.
  // TODO: when clang uses LLVM opaque pointers we won't be able to represent
  // recursive types with LLVM types, making this logic much simpler.
  llvm::Type *CachedType = nullptr;
  bool ShouldUseCache =
      Ty->isBuiltinType() ||
      (noRecordsBeingLaidOut() && FunctionsBeingProcessed.empty());
  if (ShouldUseCache) {
    llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI =
        TypeCache.find(Ty);
    if (TCI != TypeCache.end())
      CachedType = TCI->second;
    // With expensive checks, check that the type we compute matches the
    // cached type.
#ifndef EXPENSIVE_CHECKS
    if (CachedType)
      return CachedType;
#endif
  }

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.
      // Just map these to the same type as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
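      // We use the native IR half type when the language guarantees it
      // (NativeHalfType) or when the target can handle fp16 without
      // conversion intrinsics.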
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*.
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
          llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue() *
                                                 Info.NumVectors);
      }
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    if (PointeeType->isVoidTy())
      PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(Ty);
    ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
                                            VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type; we just return
    // a pointer to the underlying interface type. We don't need to worry
    // about recursive conversion.
    llvm::Type *T =
        ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    ResultType = T->getPointerTo();
    break;
  }

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
                                  ? CGM.getGenericBlockLiteralType()
                                  : ConvertTypeForMem(FTy);
    // Block pointers lower to a function type. For function types,
    // getTargetAddressSpace() returns the default address space for
    // functions, i.e. the program address space. Therefore, for block
    // pointers, it is important to pass qualifiers when calling
    // getTargetAddressSpace(), to ensure that we get the address space
    // for data pointers and not function pointers.
    unsigned AS = Context.getTargetAddressSpace(FTy.getQualifiers());
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
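    // An atomic can be wider than its value type, e.g. when the target rounds
    // the size up to a power of two so the operation can be lock-free;
    // represent the extra bits as trailing i8 padding.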
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType = llvm::StructType::get(getLLVMContext(),
                                         llvm::makeArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  if (ShouldUseCache)
    TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // If converting this type would cause us to infinitely loop, don't do it!
  if (!isSafeToConvert(RD, *this)) {
    DeferredRecords.push_back(RD);
    return Ty;
  }

  // Okay, this is a definition of a type. Compile the implementation now.
  bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
  (void)InsertResult;
  assert(InsertResult && "Recursively compiling a struct?");

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // We're done laying out this struct.
  bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
  assert(EraseResult && "struct not in RecordsBeingLaidOut set?");

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  // If we're done converting the outer-most record, then convert any deferred
  // structs as well.
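  // Note that DeferredRecords can grow while we drain it (converting one
  // record may defer others), so this is a worklist loop rather than a
  // single pass.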
  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}