//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"

#include "CGCall.h"
#include "CGRecordLayoutBuilder.h"

using namespace clang;
using namespace CodeGen;

namespace {
  /// RecordOrganizer - This helper class, used by CGRecordLayout, lays out
  /// structs and unions. It manages transient information used during layout.
  /// FIXME: Handle field alignments. Handle packed structs.
  class RecordOrganizer {
  public:
    explicit RecordOrganizer(CodeGenTypes &Types, const RecordDecl& Record) :
      CGT(Types), RD(Record), STy(NULL) {}

    /// layoutStructFields - Do the actual work and lay out all fields. Create
    /// the corresponding LLVM struct type. This should be invoked only after
    /// all fields are added.
    void layoutStructFields(const ASTRecordLayout &RL);

    /// layoutUnionFields - Do the actual work and lay out all fields. Create
    /// the corresponding LLVM struct type. This should be invoked only after
    /// all fields are added.
    void layoutUnionFields(const ASTRecordLayout &RL);

    /// getLLVMType - Return the associated LLVM struct type. This may be NULL
    /// if the fields have not yet been laid out.
    llvm::Type *getLLVMType() const {
      return STy;
    }

    llvm::SmallSet<unsigned, 8> &getPaddingFields() {
      return PaddingFields;
    }

  private:
    CodeGenTypes &CGT;
    const RecordDecl& RD;
    llvm::Type *STy;
    llvm::SmallSet<unsigned, 8> PaddingFields;
  };
}

CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
                           const llvm::TargetData &TD)
  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
    TheABIInfo(0) {
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
         I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
       I != E; ++I)
    delete I->second;
  CGRecordLayouts.clear();
}

/// ConvertType - Convert the specified type to its LLVM form.
const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  llvm::PATypeHolder Result = ConvertTypeRecursive(T);

  // Any pointers that were converted deferred evaluation of their pointee
  // type, creating an opaque type instead. This is in order to avoid problems
  // with circular types. Loop through all these deferred pointees, if any,
  // and resolve them now.
  while (!PointersToResolve.empty()) {
    std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.back();
    PointersToResolve.pop_back();
    // We can handle bare pointers here because we know that the only pointers
    // to the Opaque type are P.second and from other types. Refining the
    // opaque type away will invalidate P.second, but we don't mind :).
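    // Illustrative example (not in the original comments): for a
    // self-referential type such as
    //   struct Node { struct Node *next; };
    // converting 'next' builds a pointer to a fresh OpaqueType instead of
    // recursing into 'struct Node' again; the (pointee, opaque) pair queued
    // above is resolved here by refining the opaque type to the real one.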
    const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
    P.second->refineAbstractTypeTo(NT);
  }

  return Result;
}

const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
  T = Context.getCanonicalType(T);

  // See if the type is already cached.
  llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
    I = TypeCache.find(T.getTypePtr());
  // If the type is found in the map and this is not a definition for an
  // opaque placeholder type, then use it. Otherwise, convert type T.
  if (I != TypeCache.end())
    return I->second.get();

  const llvm::Type *ResultType = ConvertNewType(T);
  TypeCache.insert(std::make_pair(T.getTypePtr(),
                                  llvm::PATypeHolder(ResultType)));
  return ResultType;
}

const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
  const llvm::Type *ResultType = ConvertTypeRecursive(T);
  if (ResultType == llvm::Type::Int1Ty)
    return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
  return ResultType;
}

/// ConvertTypeForMem - Convert type T into an llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  const llvm::Type *R = ConvertType(T);

  // If this is a non-bool type, don't map it.
  if (R != llvm::Type::Int1Ty)
    return R;

  // Otherwise, return an integer of the target-specified size.
  return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
}

// VerifyFuncTypeComplete - Check that a given function type is complete, i.e.
// that the return type and all of the argument types are complete. Returns
// the first incomplete tag type found, or null if the function type is
// complete.
static const TagType *VerifyFuncTypeComplete(const Type* T) {
  const FunctionType *FT = cast<FunctionType>(T);
  if (const TagType* TT = FT->getResultType()->getAsTagType())
    if (!TT->getDecl()->isDefinition())
      return TT;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
    for (unsigned i = 0; i < FPT->getNumArgs(); i++)
      if (const TagType* TT = FPT->getArgType(i)->getAsTagType())
        if (!TT->getDecl()->isDefinition())
          return TT;
  return 0;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);
  if (TDTI == TagDeclTypes.end()) return;

  // Remember the opaque LLVM type for this tagdecl.
  llvm::PATypeHolder OpaqueHolder = TDTI->second;
  assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
         "Updating completion of an already non-opaque type?");

  // Remove it from TagDeclTypes so that it will be regenerated.
  TagDeclTypes.erase(TDTI);

  // Generate the new type.
  const llvm::Type *NT = ConvertTagDeclType(TD);

  // Refine the old opaque type to its new definition.
  cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);

  // Since we just completed a tag type, check to see if any function types
  // were completed along with the tag type.
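  // Illustrative example (not in the original comments): given
  //   struct S;
  //   void f(struct S);
  // the type of 'f' could not be built while 'S' was incomplete, so an opaque
  // placeholder was recorded in FunctionTypes. Once 'S' is defined, the loop
  // below builds the real function type and refines the placeholder to it.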
  // FIXME: This is very inefficient; if we track which function types depend
  // on which tag types, though, it should be reasonably efficient.
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
  for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
    if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
      // This function type still depends on an incomplete tag type; make sure
      // that tag type has an associated opaque type.
      ConvertTagDeclType(TT->getDecl());
    } else {
      // This function type no longer depends on an incomplete tag type;
      // create the function type, and refine the opaque type to the new
      // function type.
      llvm::PATypeHolder OpaqueHolder = i->second;
      const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
      cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
      FunctionTypes.erase(i);
    }
  }
}

static const llvm::Type* getTypeForFormat(const llvm::fltSemantics &format) {
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::FloatTy;
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::DoubleTy;
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::FP128Ty;
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::PPC_FP128Ty;
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::X86_FP80Ty;
  assert(0 && "Unknown float format!");
  return 0;
}

const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
  const clang::Type &Ty = *Context.getCanonicalType(T);

  switch (Ty.getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    assert(false && "Non-canonical or dependent types aren't possible.");
    break;

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty).getKind()) {
    default: assert(0 && "Unknown builtin type!");
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
      // The LLVM void type can only be used as the result of a function call.
      // Just map these to the same type as char.
      return llvm::IntegerType::get(8);

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
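      // The memory representation (i8 or wider, per the target) is produced
      // separately by ConvertTypeForMem/ConvertTypeForMemRecursive above.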
      return llvm::Type::Int1Ty;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return llvm::IntegerType::get(
        static_cast<unsigned>(Context.getTypeSize(T)));

    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      return getTypeForFormat(Context.getFloatTypeSemantics(T));

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      return llvm::IntegerType::get(128);
    }
    break;
  }
  case Type::FixedWidthInt:
    return llvm::IntegerType::get(cast<FixedWidthIntType>(T)->getWidth());
  case Type::Complex: {
    const llvm::Type *EltTy =
      ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
    return llvm::StructType::get(EltTy, EltTy, NULL);
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType &RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }
  case Type::Pointer: {
    const PointerType &PTy = cast<PointerType>(Ty);
    QualType ETy = PTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }

  case Type::VariableArray: {
    const VariableArrayType &A = cast<VariableArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    return ConvertTypeForMemRecursive(A.getElementType());
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int]
    return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
                                0);
  }
  case Type::ConstantArray: {
    const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
    const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
    return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType &VT = cast<VectorType>(Ty);
    return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
                                 VT.getNumElements());
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // First, check whether we can build the full function type.
    if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
      // This function's type depends on an incomplete tag type; make sure
      // we have an opaque type corresponding to the tag type.
      ConvertTagDeclType(TT->getDecl());
      // Create an opaque type for this function type, save it, and return it.
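      // The placeholder is remembered in FunctionTypes so UpdateCompletedType
      // can refine it to the real function type once the tag is completed.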
      llvm::Type *ResultType = llvm::OpaqueType::get();
      FunctionTypes.insert(std::make_pair(&Ty, ResultType));
      return ResultType;
    }
    // The function type can be built; call the appropriate routines to
    // build it.
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
      return GetFunctionType(getFunctionInfo(FPT), FPT->isVariadic());

    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
    return GetFunctionType(getFunctionInfo(FNPT), true);
  }

  case Type::ExtQual:
    return
      ConvertTypeRecursive(QualType(cast<ExtQualType>(Ty).getBaseType(), 0));

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
    if (!T)
      T = llvm::OpaqueType::get();
    return T;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type; we just return
    // a pointer to the underlying interface type. We don't need to worry
    // about recursive conversion.
    const llvm::Type *T =
      ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
    return llvm::PointerType::getUnqual(T);
  }

  case Type::Record:
  case Type::Enum: {
    const TagDecl *TD = cast<TagType>(Ty).getDecl();
    const llvm::Type *Res = ConvertTagDeclType(TD);

    std::string TypeName(TD->getKindName());
    TypeName += '.';

    // Name the codegen type after the typedef name
    // if there is no tag type name available.
    if (TD->getIdentifier())
      TypeName += TD->getNameAsString();
    else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
      TypeName += TdT->getDecl()->getNameAsString();
    else
      TypeName += "anon";

    TheModule.addTypeName(TypeName, Res);
    return Res;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
    return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
  }

  case Type::MemberPointer: {
    // FIXME: This is ABI dependent. We use the Itanium C++ ABI:
    // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
    // If we ever want to support other ABIs this needs to be abstracted.

    QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
    if (ETy->isFunctionType()) {
      return llvm::StructType::get(ConvertType(Context.getPointerDiffType()),
                                   ConvertType(Context.getPointerDiffType()),
                                   NULL);
    }
    return ConvertType(Context.getPointerDiffType());
  }

  case Type::TemplateSpecialization:
    assert(false && "Dependent types can't get here");
  }

  // FIXME: implement.
  return llvm::OpaqueType::get();
}

/// ConvertTagDeclType - Lay out a tagged decl type like a struct, union, or
/// enum.
const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);

  // If we've already compiled this tag type, use the previous definition.
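  // (Forward declarations are cached here as OpaqueTypes as well, so repeated
  // references to the same incomplete tag share a single placeholder.)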
  if (TDTI != TagDeclTypes.end())
    return TDTI->second;

  // If this is still a forward declaration, just define an opaque type to use
  // for this tagged decl.
  if (!TD->isDefinition()) {
    llvm::Type *ResultType = llvm::OpaqueType::get();
    TagDeclTypes.insert(std::make_pair(Key, ResultType));
    return ResultType;
  }

  // Okay, this is a definition of a type. Compile the implementation now.

  if (TD->isEnum()) {
    // Don't bother storing enums in TagDeclTypes.
    return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
  }

  // This decl could well be recursive. In this case, insert an opaque
  // definition of this type, which the recursive uses will get. We will then
  // refine this opaque version later.

  // Create a new OpaqueType now for later use in case this is a recursive
  // type. This will later be refined to the actual type.
  llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get();
  TagDeclTypes.insert(std::make_pair(Key, ResultHolder));

  const llvm::Type *ResultType;
  const RecordDecl *RD = cast<const RecordDecl>(TD);

  // There isn't any extra information for empty structures/unions.
  if (RD->field_empty()) {
    ResultType = llvm::StructType::get(std::vector<const llvm::Type*>());
  } else {
    // Layout fields.
    CGRecordLayout *Layout = CGRecordLayoutBuilder::ComputeLayout(*this, RD);

    if (!Layout) {
      // Fall back to the old record layout code in RecordOrganizer.
      RecordOrganizer RO(*this, *RD);

      if (TD->isStruct() || TD->isClass())
        RO.layoutStructFields(Context.getASTRecordLayout(RD));
      else {
        assert(TD->isUnion() && "unknown tag decl kind!");
        RO.layoutUnionFields(Context.getASTRecordLayout(RD));
      }

      Layout = new CGRecordLayout(RO.getLLVMType(), RO.getPaddingFields());
    }

    // Store the layout under the (clang) type key computed above.
    CGRecordLayouts[Key] = Layout;
    ResultType = Layout->getLLVMType();
  }

  // Refine our Opaque type to ResultType. This can invalidate ResultType, so
  // make sure to read the result out of the holder.
  cast<llvm::OpaqueType>(ResultHolder.get())
    ->refineAbstractTypeTo(ResultType);

  return ResultHolder.get();
}

/// getLLVMFieldNo - Return the llvm::StructType element number that
/// corresponds to the field FD.
unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
  assert(!FD->isBitField() && "Don't use getLLVMFieldNo on bit fields!");

  llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
  assert(I != FieldInfo.end() && "Unable to find field info");
  return I->second;
}

/// addFieldInfo - Assign the field number No to the field FD.
void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
  FieldInfo[FD] = No;
}

/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
  llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
    I = BitFields.find(FD);
  assert(I != BitFields.end() && "Unable to find bitfield info");
  return I->second;
}

/// addBitFieldInfo - Assign a start bit and a size to the field FD.
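/// For example (illustrative, assuming the packed layout produced by
/// layoutStructFields below): in 'struct { int a : 3; int b : 5; };', field
/// 'b' is recorded as addBitFieldInfo(b, 0, 3, 5), i.e. LLVM field 0,
/// starting at bit 3, 5 bits wide.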
void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo,
                                   unsigned Start, unsigned Size) {
  BitFields.insert(std::make_pair(FD, BitFieldInfo(FieldNo, Start, Size)));
}

/// getCGRecordLayout - Return the record layout info for the given tag decl.
const CGRecordLayout *
CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, CGRecordLayout *>::const_iterator I =
    CGRecordLayouts.find(Key);
  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return I->second;
}

/// layoutStructFields - Do the actual work and lay out all fields. Create the
/// corresponding LLVM struct type.
/// Note that this doesn't actually try to do struct layout; it depends on the
/// layout built by the AST. (We have to do struct layout to do Sema, and
/// there's no point in duplicating the work.)
void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
  // FIXME: This code currently always generates packed structures.
  // Unpacked structures are more readable, and sometimes more efficient!
  // (But note that any changes here are likely to impact CGExprConstant,
  // which makes some messy assumptions.)
  uint64_t llvmSize = 0;
  // FIXME: Make this a SmallVector.
  std::vector<const llvm::Type*> LLVMFields;

  unsigned curField = 0;
  for (RecordDecl::field_iterator Field = RD.field_begin(),
                                  FieldEnd = RD.field_end();
       Field != FieldEnd; ++Field) {
    uint64_t offset = RL.getFieldOffset(curField);
    const llvm::Type *Ty = CGT.ConvertTypeForMemRecursive(Field->getType());
    uint64_t size = CGT.getTargetData().getTypeAllocSizeInBits(Ty);

    if (Field->isBitField()) {
      uint64_t BitFieldSize =
        Field->getBitWidth()->EvaluateAsInt(CGT.getContext()).getZExtValue();

      // Bitfield field info is different from other field info;
      // it actually ignores the underlying LLVM struct because
      // there isn't any convenient mapping.
      CGT.addBitFieldInfo(*Field, offset / size, offset % size, BitFieldSize);
    } else {
      // Put the element into the struct. This would be simpler
      // if we didn't bother, but it seems a bit too strange to
      // allocate all structs as i8 arrays.
      while (llvmSize < offset) {
        LLVMFields.push_back(llvm::Type::Int8Ty);
        llvmSize += 8;
      }

      llvmSize += size;
      CGT.addFieldInfo(*Field, LLVMFields.size());
      LLVMFields.push_back(Ty);
    }
    ++curField;
  }

  while (llvmSize < RL.getSize()) {
    LLVMFields.push_back(llvm::Type::Int8Ty);
    llvmSize += 8;
  }

  STy = llvm::StructType::get(LLVMFields, true);
  assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
}

/// layoutUnionFields - Do the actual work and lay out all fields. Create the
/// corresponding LLVM struct type. This should be invoked only after all
/// fields are added.
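/// For example (illustrative): 'union { char c; double d; };' becomes the
/// packed struct '{ [8 x i8] }', and every member is mapped to field 0.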
void RecordOrganizer::layoutUnionFields(const ASTRecordLayout &RL) {
  unsigned curField = 0;
  for (RecordDecl::field_iterator Field = RD.field_begin(),
                                  FieldEnd = RD.field_end();
       Field != FieldEnd; ++Field) {
    // The offset should usually be zero, but bitfields could be strange.
    uint64_t offset = RL.getFieldOffset(curField);
    CGT.ConvertTypeRecursive(Field->getType());

    if (Field->isBitField()) {
      Expr *BitWidth = Field->getBitWidth();
      uint64_t BitFieldSize =
        BitWidth->EvaluateAsInt(CGT.getContext()).getZExtValue();

      CGT.addBitFieldInfo(*Field, 0, offset, BitFieldSize);
    } else {
      CGT.addFieldInfo(*Field, 0);
    }
    ++curField;
  }

  // This looks stupid, but it is correct in the sense that it works no matter
  // how complicated the sizes and alignments of the union elements are. The
  // natural alignment of the result doesn't matter, because anyone allocating
  // structures should be aligning them appropriately anyway.
  // FIXME: We can be a bit more intuitive in a lot of cases.
  // FIXME: Make this a struct type to work around PR2399; the
  // C backend doesn't like structs using array types.
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(llvm::Type::Int8Ty,
                                            RL.getSize() / 8));
  STy = llvm::StructType::get(LLVMFields, true);
  assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
}