//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds each field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
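  /// For example, if a later field or base must start inside that base's
  /// tail padding, the base's struct type is popped off FieldTypes and
  /// re-appended as an i8 array of its non-virtual size
  /// (see ResizeLastBaseFieldIfNecessary).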
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Try to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - Lay out a single base, virtual or non-virtual.
  void LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - Lay out a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - Lay out the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - Lay out a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - Lay out the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - Lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the
  /// previous base needs to be changed to an array of i8. Returns true if the
  /// last laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
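  /// The last laid-out base is resized first, since the record may end
  /// inside that base's tail padding.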
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions, so
  // reverse the bit offset here. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always
  // access at aligned indices of that type. If such an access would fail
  // because it extends past the bound of the type, then we reduce the size to
  // the next smaller power of two and retry. The current algorithm assumes
  // pow2 sized types, although this is easy to fix.
  //
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the
  // same structure to be coalesced, and that the backend should be smart
  // enough to narrow the store if no coalescing is ever done.
  //
  // The subsequent code will align these accesses to common boundaries and
  // guarantee that we do not access past the end of the structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth);
    // the intersection with [FieldOffset, FieldOffset + FieldSize) gives the
    // bits in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above because the
    // first fields are in higher bits. But this also reverses the bytes, so
    // fix that here by reversing the byte offset on big-endian machines.
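    // For example, given a 32-bit container whose first field is
    // 'unsigned a : 8' on a big-endian target, the bit offset was reversed
    // above to 32 - 0 - 8 == 24 (the field occupies the most significant
    // byte). For a 32-bit access starting at bit 0, the byte offset computed
    // below is (32 - 0 - 32) / 8 == 0, so the load still starts at byte 0 of
    // the container.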
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
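  // For example:
  //
  //   struct S { char c; int i __attribute__((packed)); };
  //
  // places 'i' at byte offset 1, which a naturally aligned i32 field can
  // never match.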
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero-sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                     CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
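  // All non-bit-field members of a union share LLVM field index 0; the union
  // is represented by the single storage field that LayoutUnion selects.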
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                    Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                    Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  // Fields and bases can be laid out in the tail padding of previous
  // bases. If this happens, we need to allocate the base as an i8
  // array; otherwise, we can use the subobject type. However,
  // actually doing that would require knowledge of what immediately
  // follows this base in the layout, so instead we do a conservative
  // approximation, which is to use the base subobject type if it
  // has the same LLVM storage size as the nvsize.

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  AppendField(baseOffset, subobjectType);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
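  // An empty base contributes no storage; e.g. given 'struct E {};' and
  // 'struct D : E { int x; };', D lowers to %struct.D = type { i32 }.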
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  LayoutBase(base, baseLayout, baseOffset);
  NonVirtualBases[base] = (FieldTypes.size() - 1);
}

void
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  LayoutBase(base, baseLayout, baseOffset);
  VirtualBases[base] = (FieldTypes.size() - 1);
}

/// LayoutVirtualBases - Lay out the virtual bases of a record decl.
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      LayoutVirtualBase(BaseDecl, vbaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    LayoutVirtualBases(BaseDecl, Layout);
  }
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffset.isZero() &&
             "VTable pointer must come first!");
      AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
    } else {
      if (!Layout.isPrimaryBaseVirtual())
        LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
      else
        LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
    }
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
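    // (It was laid out at offset zero above, together with the vtable
    // pointer handling.)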
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = (*Field);
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // And lay out the virtual bases.
    RD->getIndirectPrimaryBases(IndirectPrimaryBases);
    if (Layout.isPrimaryBaseVirtual())
      IndirectPrimaryBases.insert(Layout.getPrimaryBase());
    LayoutVirtualBases(RD, Layout);
  }

  // Append tail padding if necessary.
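  // Note that explicit padding is only materialized when LLVM's own size
  // rounding would not produce it; e.g. 'struct __attribute__((aligned(8)))
  // S { char c; };' needs an explicit 7-byte array because no field forces
  // 8-byte alignment.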
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(fieldAlignment);

  if (alignedNextFieldOffset < fieldOffset) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    CharUnits padding = fieldOffset - NextFieldOffset;

    AppendBytes(padding);
  }
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
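  // For instance, the Itanium C++ ABI represents a null pointer to data
  // member as -1 rather than 0, so a struct containing 'int S::*p;' cannot
  // be zero-initialized with an LLVM zeroinitializer.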
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}