//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds a mapping from each field to its corresponding LLVM
  /// field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about each bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
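  ///
  /// For example (illustrative, assuming a 4-byte int): given
  ///   struct A { A(); int i; char c; };  // non-POD, nvsize is 5 bytes
  ///   struct B : A { char x; };          // x goes in A's tail padding
  /// the field emitted for the A subobject must be replaced with [5 x i8]
  /// so that x can be laid out at offset 5.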
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - layout a single base, virtual or non-virtual.
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions,
  // so reverse the bit offset. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce the size to the next
  // smaller power of two and retry. The current algorithm assumes pow2 sized
  // types, although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;        // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits;  // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust the initial access size to fit within the record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above because the
    // first fields are in higher bits. But that also reversed the bytes, so
    // fix this here by reversing the byte offset on big-endian machines.
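    //
    // For illustration (hypothetical numbers): an 8-bit bit-field of 8-bit
    // type at declared bit offset 0 in a 32-bit record had its offset
    // reversed above to 32 - 0 - 8 = 24, so the access starts at bit 24;
    // the byte offset computed here is (32 - 24 - 8) / 8 = 0, matching the
    // field's actual storage location on a big-endian target.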
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                                 charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
      CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
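  // For example (illustrative):
  //   struct S { char c; int i __attribute__((packed)); };
  // places 'i' at byte offset 1, which only a packed LLVM struct can
  // represent.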
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(FieldSize,
            Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
        CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
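  // All union members start at offset zero, so the member is simply recorded
  // as LLVM field number 0 of the union's struct type.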
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
         fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
        Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
        Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
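  // They occupy no storage of their own; e.g. (illustrative) with
  //   struct Empty { };  struct D : Empty { int x; };
  // D lowers to just { i32 }.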
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
         E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}

/// LayoutVirtualBases - layout the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
               ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
               : RD->isDynamicClass()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
         E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false;  // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
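  // The padding belongs only to the base subobject type created above;
  // FieldTypes will continue to grow as the complete-object type gets its
  // virtual bases and tail padding.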
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
         FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = (*Field);
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases. The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
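  // (Unions are excluded below: they cannot be used as base classes, so the
  // complete-object type always suffices for them.)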
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}