//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
                           const llvm::TargetData &TD, const ABIInfo &Info,
                           CGCXXABI &CXXABI)
  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
    TheABIInfo(Info), TheCXXABI(CXXABI) {
}

CodeGenTypes::~CodeGenTypes() {
  // CGRecordLayout objects are heap-allocated (by ComputeRecordLayout) and
  // owned by this map; destroy them all here.
  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
       I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
      I != E; ++I)
    delete I->second;

  // Note: the iterator is advanced *before* the node it points at is
  // deleted, since deleting a FoldingSet node invalidates iterators to it.
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

/// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
/// pointers that are referenced but have not been converted yet.  This is used
/// to handle cyclic structures properly.
void CodeGenTypes::HandleLateResolvedPointers() {
  assert(!PointersToResolve.empty() && "No pointers to resolve!");

  // Any pointers that were converted deferred evaluation of their pointee
  // type, creating an opaque type instead.  This is in order to avoid problems
  // with circular types.  Loop through all these deferred pointees, if any,
  // and resolve them now.
  //
  // Note: ConvertTypeForMemRecursive may push more entries onto
  // PointersToResolve, which is why this loop re-tests empty() each time
  // instead of iterating over a snapshot.
  while (!PointersToResolve.empty()) {
    std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();

    // We can handle bare pointers here because we know that the only pointers
    // to the Opaque type are P.second and from other types.  Refining the
    // opaque type away will invalidate P.second, but we don't mind :).
    const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
    P.second->refineAbstractTypeTo(NT);
  }
}


/// ConvertType - Convert the specified type to its LLVM form.
///
/// \param IsRecursive - True when called from within another type conversion;
/// in that case late pointer resolution is left to the outermost call.
const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
  const llvm::Type *Result = ConvertTypeRecursive(T);

  // If this is a top-level call to ConvertType and sub-conversions caused
  // pointers to get lazily built as opaque types, resolve the pointers, which
  // might cause Result to be merged away.  The PATypeHolder tracks Result
  // through any such refinement.
  if (!IsRecursive && !PointersToResolve.empty()) {
    llvm::PATypeHolder ResultHandle = Result;
    HandleLateResolvedPointers();
    Result = ResultHandle;
  }
  return Result;
}

const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
  // The cache is keyed on canonical types only, so sugared variants of the
  // same type share one entry.
  T = Context.getCanonicalType(T);

  // See if type is already cached.
  llvm::DenseMap<const Type *, llvm::PATypeHolder>::iterator
    I = TypeCache.find(T.getTypePtr());
  // If type is found in map and this is not a definition for an opaque
  // place holder type then use it. Otherwise, convert type T.
  if (I != TypeCache.end())
    return I->second.get();

  const llvm::Type *ResultType = ConvertNewType(T);
  // PATypeHolder keeps the cache entry valid even if ResultType is later
  // refined to a different type.
  TypeCache.insert(std::make_pair(T.getTypePtr(),
                                  llvm::PATypeHolder(ResultType)));
  return ResultType;
}

/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.
For example, the scalar representation for _Bool is i1, but the 104 /// memory representation is usually i8 or i32, depending on the target. 105 const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){ 106 const llvm::Type *R = ConvertType(T, IsRecursive); 107 108 // If this is a non-bool type, don't map it. 109 if (!R->isIntegerTy(1)) 110 return R; 111 112 // Otherwise, return an integer of the target-specified size. 113 return llvm::IntegerType::get(getLLVMContext(), 114 (unsigned)Context.getTypeSize(T)); 115 116 } 117 118 // Code to verify a given function type is complete, i.e. the return type 119 // and all of the argument types are complete. 120 const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) { 121 const FunctionType *FT = cast<FunctionType>(T); 122 if (const TagType* TT = FT->getResultType()->getAs<TagType>()) 123 if (!TT->getDecl()->isDefinition()) 124 return TT; 125 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T)) 126 for (unsigned i = 0; i < FPT->getNumArgs(); i++) 127 if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>()) 128 if (!TT->getDecl()->isDefinition()) 129 return TT; 130 return 0; 131 } 132 133 /// UpdateCompletedType - When we find the full definition for a TagDecl, 134 /// replace the 'opaque' type we previously made for it if applicable. 135 void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { 136 const Type *Key = Context.getTagDeclType(TD).getTypePtr(); 137 llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI = 138 TagDeclTypes.find(Key); 139 if (TDTI == TagDeclTypes.end()) return; 140 141 // Remember the opaque LLVM type for this tagdecl. 142 llvm::PATypeHolder OpaqueHolder = TDTI->second; 143 assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) && 144 "Updating compilation of an already non-opaque type?"); 145 146 // Remove it from TagDeclTypes so that it will be regenerated. 147 TagDeclTypes.erase(TDTI); 148 149 // Generate the new type. 
150 const llvm::Type *NT = ConvertTagDeclType(TD); 151 152 // Refine the old opaque type to its new definition. 153 cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT); 154 155 // Since we just completed a tag type, check to see if any function types 156 // were completed along with the tag type. 157 // FIXME: This is very inefficient; if we track which function types depend 158 // on which tag types, though, it should be reasonably efficient. 159 llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i; 160 for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) { 161 if (const TagType* TT = VerifyFuncTypeComplete(i->first)) { 162 // This function type still depends on an incomplete tag type; make sure 163 // that tag type has an associated opaque type. 164 ConvertTagDeclType(TT->getDecl()); 165 } else { 166 // This function no longer depends on an incomplete tag type; create the 167 // function type, and refine the opaque type to the new function type. 168 llvm::PATypeHolder OpaqueHolder = i->second; 169 const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0)); 170 cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT); 171 FunctionTypes.erase(i); 172 } 173 } 174 } 175 176 static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext, 177 const llvm::fltSemantics &format) { 178 if (&format == &llvm::APFloat::IEEEsingle) 179 return llvm::Type::getFloatTy(VMContext); 180 if (&format == &llvm::APFloat::IEEEdouble) 181 return llvm::Type::getDoubleTy(VMContext); 182 if (&format == &llvm::APFloat::IEEEquad) 183 return llvm::Type::getFP128Ty(VMContext); 184 if (&format == &llvm::APFloat::PPCDoubleDouble) 185 return llvm::Type::getPPC_FP128Ty(VMContext); 186 if (&format == &llvm::APFloat::x87DoubleExtended) 187 return llvm::Type::getX86_FP80Ty(VMContext); 188 assert(0 && "Unknown float format!"); 189 return 0; 190 } 191 192 const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { 193 const clang::Type &Ty = 
*Context.getCanonicalType(T).getTypePtr(); 194 195 switch (Ty.getTypeClass()) { 196 #define TYPE(Class, Base) 197 #define ABSTRACT_TYPE(Class, Base) 198 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 199 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 200 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 201 #include "clang/AST/TypeNodes.def" 202 assert(false && "Non-canonical or dependent types aren't possible."); 203 break; 204 205 case Type::Builtin: { 206 switch (cast<BuiltinType>(Ty).getKind()) { 207 case BuiltinType::Void: 208 case BuiltinType::ObjCId: 209 case BuiltinType::ObjCClass: 210 case BuiltinType::ObjCSel: 211 // LLVM void type can only be used as the result of a function call. Just 212 // map to the same as char. 213 return llvm::Type::getInt8Ty(getLLVMContext()); 214 215 case BuiltinType::Bool: 216 // Note that we always return bool as i1 for use as a scalar type. 217 return llvm::Type::getInt1Ty(getLLVMContext()); 218 219 case BuiltinType::Char_S: 220 case BuiltinType::Char_U: 221 case BuiltinType::SChar: 222 case BuiltinType::UChar: 223 case BuiltinType::Short: 224 case BuiltinType::UShort: 225 case BuiltinType::Int: 226 case BuiltinType::UInt: 227 case BuiltinType::Long: 228 case BuiltinType::ULong: 229 case BuiltinType::LongLong: 230 case BuiltinType::ULongLong: 231 case BuiltinType::WChar_S: 232 case BuiltinType::WChar_U: 233 case BuiltinType::Char16: 234 case BuiltinType::Char32: 235 return llvm::IntegerType::get(getLLVMContext(), 236 static_cast<unsigned>(Context.getTypeSize(T))); 237 238 case BuiltinType::Float: 239 case BuiltinType::Double: 240 case BuiltinType::LongDouble: 241 return getTypeForFormat(getLLVMContext(), 242 Context.getFloatTypeSemantics(T)); 243 244 case BuiltinType::NullPtr: { 245 // Model std::nullptr_t as i8* 246 const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext()); 247 return llvm::PointerType::getUnqual(Ty); 248 } 249 250 case BuiltinType::UInt128: 251 case 
BuiltinType::Int128: 252 return llvm::IntegerType::get(getLLVMContext(), 128); 253 254 case BuiltinType::Overload: 255 case BuiltinType::Dependent: 256 case BuiltinType::UnknownAny: 257 llvm_unreachable("Unexpected builtin type!"); 258 break; 259 } 260 llvm_unreachable("Unknown builtin type!"); 261 break; 262 } 263 case Type::Complex: { 264 const llvm::Type *EltTy = 265 ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType()); 266 return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL); 267 } 268 case Type::LValueReference: 269 case Type::RValueReference: { 270 const ReferenceType &RTy = cast<ReferenceType>(Ty); 271 QualType ETy = RTy.getPointeeType(); 272 llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext()); 273 PointersToResolve.push_back(std::make_pair(ETy, PointeeType)); 274 unsigned AS = Context.getTargetAddressSpace(ETy); 275 return llvm::PointerType::get(PointeeType, AS); 276 } 277 case Type::Pointer: { 278 const PointerType &PTy = cast<PointerType>(Ty); 279 QualType ETy = PTy.getPointeeType(); 280 llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext()); 281 PointersToResolve.push_back(std::make_pair(ETy, PointeeType)); 282 unsigned AS = Context.getTargetAddressSpace(ETy); 283 return llvm::PointerType::get(PointeeType, AS); 284 } 285 286 case Type::VariableArray: { 287 const VariableArrayType &A = cast<VariableArrayType>(Ty); 288 assert(A.getIndexTypeCVRQualifiers() == 0 && 289 "FIXME: We only handle trivial array types so far!"); 290 // VLAs resolve to the innermost element type; this matches 291 // the return of alloca, and there isn't any obviously better choice. 
292 return ConvertTypeForMemRecursive(A.getElementType()); 293 } 294 case Type::IncompleteArray: { 295 const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty); 296 assert(A.getIndexTypeCVRQualifiers() == 0 && 297 "FIXME: We only handle trivial array types so far!"); 298 // int X[] -> [0 x int] 299 return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 300 0); 301 } 302 case Type::ConstantArray: { 303 const ConstantArrayType &A = cast<ConstantArrayType>(Ty); 304 const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType()); 305 return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue()); 306 } 307 case Type::ExtVector: 308 case Type::Vector: { 309 const VectorType &VT = cast<VectorType>(Ty); 310 return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()), 311 VT.getNumElements()); 312 } 313 case Type::FunctionNoProto: 314 case Type::FunctionProto: { 315 // First, check whether we can build the full function type. If the 316 // function type depends on an incomplete type (e.g. a struct or enum), we 317 // cannot lower the function type. Instead, turn it into an Opaque pointer 318 // and have UpdateCompletedType revisit the function type when/if the opaque 319 // argument type is defined. 320 if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) { 321 // This function's type depends on an incomplete tag type; make sure 322 // we have an opaque type corresponding to the tag type. 323 ConvertTagDeclType(TT->getDecl()); 324 // Create an opaque type for this function type, save it, and return it. 325 llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext()); 326 FunctionTypes.insert(std::make_pair(&Ty, ResultType)); 327 return ResultType; 328 } 329 330 // The function type can be built; call the appropriate routines to 331 // build it. 
332 const CGFunctionInfo *FI; 333 bool isVariadic; 334 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) { 335 FI = &getFunctionInfo( 336 CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)), 337 true /*Recursive*/); 338 isVariadic = FPT->isVariadic(); 339 } else { 340 const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty); 341 FI = &getFunctionInfo( 342 CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)), 343 true /*Recursive*/); 344 isVariadic = true; 345 } 346 347 return GetFunctionType(*FI, isVariadic, true); 348 } 349 350 case Type::ObjCObject: 351 return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType()); 352 353 case Type::ObjCInterface: { 354 // Objective-C interfaces are always opaque (outside of the 355 // runtime, which can do whatever it likes); we never refine 356 // these. 357 const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)]; 358 if (!T) 359 T = llvm::OpaqueType::get(getLLVMContext()); 360 return T; 361 } 362 363 case Type::ObjCObjectPointer: { 364 // Protocol qualifications do not influence the LLVM type, we just return a 365 // pointer to the underlying interface type. We don't need to worry about 366 // recursive conversion. 367 const llvm::Type *T = 368 ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType()); 369 return llvm::PointerType::getUnqual(T); 370 } 371 372 case Type::Record: 373 case Type::Enum: { 374 const TagDecl *TD = cast<TagType>(Ty).getDecl(); 375 const llvm::Type *Res = ConvertTagDeclType(TD); 376 377 llvm::SmallString<256> TypeName; 378 llvm::raw_svector_ostream OS(TypeName); 379 OS << TD->getKindName() << '.'; 380 381 // Name the codegen type after the typedef name 382 // if there is no tag type name available 383 if (TD->getIdentifier()) { 384 // FIXME: We should not have to check for a null decl context here. 385 // Right now we do it because the implicit Obj-C decls don't have one. 
386 if (TD->getDeclContext()) 387 OS << TD->getQualifiedNameAsString(); 388 else 389 TD->printName(OS); 390 } else if (const TypedefNameDecl *TDD = TD->getTypedefNameForAnonDecl()) { 391 // FIXME: We should not have to check for a null decl context here. 392 // Right now we do it because the implicit Obj-C decls don't have one. 393 if (TDD->getDeclContext()) 394 OS << TDD->getQualifiedNameAsString(); 395 else 396 TDD->printName(OS); 397 } else 398 OS << "anon"; 399 400 TheModule.addTypeName(OS.str(), Res); 401 return Res; 402 } 403 404 case Type::BlockPointer: { 405 const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType(); 406 llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext()); 407 PointersToResolve.push_back(std::make_pair(FTy, PointeeType)); 408 unsigned AS = Context.getTargetAddressSpace(FTy); 409 return llvm::PointerType::get(PointeeType, AS); 410 } 411 412 case Type::MemberPointer: { 413 return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty)); 414 } 415 } 416 417 // FIXME: implement. 418 return llvm::OpaqueType::get(getLLVMContext()); 419 } 420 421 /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or 422 /// enum. 423 const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) { 424 // TagDecl's are not necessarily unique, instead use the (clang) 425 // type connected to the decl. 426 const Type *Key = 427 Context.getTagDeclType(TD).getTypePtr(); 428 llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI = 429 TagDeclTypes.find(Key); 430 431 // If we've already compiled this tag type, use the previous definition. 432 if (TDTI != TagDeclTypes.end()) 433 return TDTI->second; 434 435 const EnumDecl *ED = dyn_cast<EnumDecl>(TD); 436 437 // If this is still a forward declaration, just define an opaque 438 // type to use for this tagged decl. 439 // C++0x: If this is a enumeration type with fixed underlying type, 440 // consider it complete. 
441 if (!TD->isDefinition() && !(ED && ED->isFixed())) { 442 llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext()); 443 TagDeclTypes.insert(std::make_pair(Key, ResultType)); 444 return ResultType; 445 } 446 447 // Okay, this is a definition of a type. Compile the implementation now. 448 449 if (ED) // Don't bother storing enums in TagDeclTypes. 450 return ConvertTypeRecursive(ED->getIntegerType()); 451 452 // This decl could well be recursive. In this case, insert an opaque 453 // definition of this type, which the recursive uses will get. We will then 454 // refine this opaque version later. 455 456 // Create new OpaqueType now for later use in case this is a recursive 457 // type. This will later be refined to the actual type. 458 llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext()); 459 TagDeclTypes.insert(std::make_pair(Key, ResultHolder)); 460 461 const RecordDecl *RD = cast<const RecordDecl>(TD); 462 463 // Force conversion of non-virtual base classes recursively. 464 if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) { 465 for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(), 466 e = RD->bases_end(); i != e; ++i) { 467 if (!i->isVirtual()) { 468 const CXXRecordDecl *Base = 469 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl()); 470 ConvertTagDeclType(Base); 471 } 472 } 473 } 474 475 // Layout fields. 476 CGRecordLayout *Layout = ComputeRecordLayout(RD); 477 478 CGRecordLayouts[Key] = Layout; 479 const llvm::Type *ResultType = Layout->getLLVMType(); 480 481 // Refine our Opaque type to ResultType. This can invalidate ResultType, so 482 // make sure to read the result out of the holder. 483 cast<llvm::OpaqueType>(ResultHolder.get()) 484 ->refineAbstractTypeTo(ResultType); 485 486 return ResultHolder.get(); 487 } 488 489 /// getCGRecordLayout - Return record layout info for the given record decl. 
490 const CGRecordLayout & 491 CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) { 492 const Type *Key = Context.getTagDeclType(RD).getTypePtr(); 493 494 const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key); 495 if (!Layout) { 496 // Compute the type information. 497 ConvertTagDeclType(RD); 498 499 // Now try again. 500 Layout = CGRecordLayouts.lookup(Key); 501 } 502 503 assert(Layout && "Unable to find record layout information for type"); 504 return *Layout; 505 } 506 507 bool CodeGenTypes::isZeroInitializable(QualType T) { 508 // No need to check for member pointers when not compiling C++. 509 if (!Context.getLangOptions().CPlusPlus) 510 return true; 511 512 T = Context.getBaseElementType(T); 513 514 // Records are non-zero-initializable if they contain any 515 // non-zero-initializable subobjects. 516 if (const RecordType *RT = T->getAs<RecordType>()) { 517 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); 518 return isZeroInitializable(RD); 519 } 520 521 // We have to ask the ABI about member pointers. 522 if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) 523 return getCXXABI().isZeroInitializable(MPT); 524 525 // Everything else is okay. 526 return true; 527 } 528 529 bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) { 530 return getCGRecordLayout(RD).isZeroInitializable(); 531 } 532