//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds a field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  unsigned BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - lay out a single base, virtual or non-virtual.
  void LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - lay out a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - lay out the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - lay out the virtual bases of a record decl,
  /// like MSVC.
  void MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - lay out a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - lay out the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the
  /// previous base needs to be changed to an array of i8. Returns true if the
  /// last laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions,
  // so reverse the bit offset. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always
  // access at aligned indices of that type. If such an access would fail
  // because it extends past the bound of the type, then we reduce the size to
  // the next smaller power of two and retry. The current algorithm assumes
  // pow2 sized types, although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the
  // same structure to be coalesced, and that the backend should be smart
  // enough to narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth);
    // the intersection with [FieldOffset, FieldOffset + FieldSize) gives the
    // bits in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset because the first
    // fields are in higher bits. But this also reverses the bytes, so fix
    // that here by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
        ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
      llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
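
// A worked example of the decomposition above (an illustration only; it
// assumes a little-endian target with a 32-bit, 4-byte-aligned 'unsigned'
// and UseRegisterSizedBitfieldAccess disabled):
//
//   struct S { unsigned a : 3; unsigned b : 6; };
//
// For 'b', FieldOffset is 3 and FieldSize is 6, and the containing type is
// 32 bits. The initial AccessWidth is 32, AccessStart rounds down to 0, and
// the access stays within the record, so a single component results:
// FieldByteOffset 0, FieldBitStart 3, AccessWidth 32, TargetBitWidth 6.
// Packed records or -fno-bitfield-type-align can instead force the width
// down toward the char width and emit multiple components.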

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
    CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
        Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
      CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}
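
// A sketch of how the selection loop in LayoutUnion behaves (illustration
// only, assuming a target with a 32-bit, 4-byte-aligned 'int'):
//
//   union U { int i; char c[7]; };
//
// 'i' is seen first and becomes the union type (i32, align 4, size 4).
// 'c' is skipped even though it is bigger, because its alignment (1) is
// less than the union's alignment so far (4). The AST size of U is 8
// (7 rounded up to the alignment of 4), so tail padding is appended and
// U lowers to { i32, [4 x i8] }.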

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
      Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
      Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  // Fields and bases can be laid out in the tail padding of previous
  // bases. If this happens, we need to allocate the base as an i8
  // array; otherwise, we can use the subobject type. However,
  // actually doing that would require knowledge of what immediately
  // follows this base in the layout, so instead we do a conservative
  // approximation, which is to use the base subobject type if it
  // has the same LLVM storage size as the nvsize.

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  AppendField(baseOffset, subobjectType);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  LayoutBase(base, baseLayout, baseOffset);
  NonVirtualBases[base] = (FieldTypes.size() - 1);
}

void
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  LayoutBase(base, baseLayout, baseOffset);
  VirtualBases[base] = (FieldTypes.size() - 1);
}

void
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
       E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    LayoutVirtualBase(BaseDecl, vbaseOffset);
  }
}

/// LayoutVirtualBases - lay out the virtual bases of a record decl.
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      LayoutVirtualBase(BaseDecl, vbaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    LayoutVirtualBases(BaseDecl, Layout);
  }
}
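
// An illustration of the indirect-primary-base check above (a sketch,
// assuming the Itanium C++ ABI):
//
//   struct V { virtual void f(); };
//   struct A : virtual V { };       // V is A's primary base.
//   struct B : virtual V, A { };    // A is B's primary base.
//
// V is an indirect primary base of B: its storage already lives inside
// the A subobject at offset 0, so LayoutVirtualBases must not lay it out
// again as a separate field.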

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual())
      LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
    else
      LayoutVirtualBase(PrimaryBase, CharUnits::Zero());

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
               ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
               : RD->isDynamicClass()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = (*Field);
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases. The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      LayoutVirtualBases(RD, Layout);
    } else {
      MSLayoutVirtualBases(RD, Layout);
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}
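
// ResizeLastBaseFieldIfNecessary, below, undoes the optimistic choice made
// in LayoutBase. For illustration (a sketch): if the last laid out base
// subobject has an LLVM alloc size of 8 bytes but a non-virtual size of
// only 5, a subsequent field or base placed at offset 5 lands inside the
// base's tail padding. The base field is then popped off FieldTypes and
// re-appended as [5 x i8] so that offsets 5..7 become available.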

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}
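
// An example of a type that defeats zeroinitializer (a sketch, assuming
// the Itanium C++ ABI):
//
//   struct S { int S::*p; };
//
// Itanium represents a null member data pointer as -1, not 0, so an
// all-zero S would leave 'p' pointing at the member with offset 0.
// CheckZeroInitializable, below, detects this and clears the
// IsZeroInitializable flags.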

/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}