//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds a field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;
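
  // Note: a zero NonVirtualSize doubles as the "invalid" state above. Empty
  // bases never become resize candidates, since LayoutNonVirtualBase and
  // LayoutVirtualBase return before recording them.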

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// Lay out a sequence of contiguous bitfields.
  bool LayoutBitfields(const ASTRecordLayout &Layout,
                       unsigned &FirstFieldNo,
                       RecordDecl::field_iterator &FI,
                       RecordDecl::field_iterator FE);

  /// LayoutFields - try to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - lay out a single base, virtual or non-virtual.
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - lay out a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - lay out the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - lay out the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - lay out a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - lay out the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// MSLayoutNonVirtualBases - lay out the non-virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutNonVirtualBases(const CXXRecordDecl *RD,
                               const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the
  /// previous base needs to be changed to an array of i8. Returns true if the
  /// last laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);
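
  // Illustrative: when implicit ABI padding cannot produce the required
  // offsets, padding is materialized as explicit i8 / [N x i8] fields; e.g.
  // 'struct __attribute__((aligned(4))) S { char c; };' lowers to
  // %struct.S = type { i8, [3 x i8] } so the LLVM type reaches the full
  // 4-byte record size.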

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), Types(Types) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
  Alignment = Layout.getAlignment();
  Packed = D->hasAttr<PackedAttr>() || Layout.getSize() % Alignment != 0;

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        uint64_t StorageAlignment) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageAlignment);
}
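
// Illustrative example of the big-endian reversal above: with 32 bits of
// storage holding 'int a : 3; int b : 5;', the little-endian offsets are
// a = 0 and b = 3; on a big-endian target they become a = 32 - (0 + 3) = 29
// and b = 32 - (3 + 5) = 24, counting from the most significant bit.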

/// \brief Lay out the range of bitfields from BFI to BFE as contiguous
/// storage.
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getTarget().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits);
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use packed structs for packed fields, and also unnamed bit
    // fields since they don't affect the struct alignment.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
  RecordDecl::field_iterator BFE = llvm::next(FI);
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);
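
  // Illustrative: for 'struct S { int a : 10; int b : 6; char c; };' the
  // contiguous run [a, b] spans bits [0, 16); rounded up to the char
  // alignment this requires 2 bytes of storage before any widening below.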

  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage a power-of-two. There are two cases when we can
  // do this. The first is when we have a subsequent field and can widen up to
  // its offset. The second is when the data size of the AST record layout is
  // past the end of the current storage. The latter is true when there is tail
  // padding on a struct and no members of a super class can be packed into it.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these in conjunction with shifts and masks to narrower operations where
  // beneficial.
  uint64_t EndOffset;
  if (Types.getContext().getLangOpts().CPlusPlus)
    // Do not grow the bitfield storage into the following virtual base.
    EndOffset = Types.getContext().toBits(Layout.getNonVirtualSize());
  else
    EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getTarget().getLongWidth();
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bitfields associating them with this field of storage and
  // building up the bitfield specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
  FirstFieldNo = LastFieldNo;
  return true;
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  assert(!D->isBitField() && "Bitfields should be laid out separately.");

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }
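
  // Illustrative example of the alignment checks above: under
  // '#pragma pack(2)', 'struct S { char c; int i; };' has record alignment 2
  // while i32's natural alignment is 4, so the first, non-packed attempt
  // fails here and Layout() retries the record with Packed = true, placing
  // fields at byte offsets with explicit padding, e.g. <{ i8, i8, i32 }>.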

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  Fields[Field] = 0;
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero-sized bit fields.
    if (FieldSize == 0)
      return 0;

    unsigned StorageBits = llvm::RoundUpToAlignment(
      FieldSize, Types.getTarget().getCharAlign());
    CharUnits NumBytesToAppend
      = Types.getContext().toCharUnitsFromBits(StorageBits);

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
                                                StorageBits,
                                                Alignment.getQuantity());
    return FieldTy;
  }

  // This is a regular union field.
  return Types.ConvertTypeForMem(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
      Types.getDataLayout().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
      Types.getDataLayout().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }
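
  // Illustrative: for 'union U { char c; int i; double d; };' the loop above
  // keeps 'double', the member with the greatest alignment and size, so U
  // lowers to %union.U = type { double }; the remaining members are accessed
  // through pointer casts of the union's storage.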

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  if (LastLaidOutBase.NonVirtualSize < CharUnits::fromQuantity(
        Types.getDataLayout().getStructLayout(subobjectType)->getSizeInBytes()))
    AppendBytes(LastLaidOutBase.NonVirtualSize);
  else
    AppendField(baseOffset, subobjectType);

  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
       E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}
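
// Illustrative example of the indirect-primary-base rule used below: in
//
//   struct A { virtual void f(); };
//   struct B : virtual A { };
//   struct C : B { };
//
// A is the (virtual) primary base of B, so when laying out C it is an
// indirect primary base: it already lives at B's primary-base location and
// must not be laid out again as a separate virtual base.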

/// LayoutVirtualBases - lay out the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.hasOwnVBPtr()) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}
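
// Illustrative: for 'struct A { virtual void f(); int x; };' the code above
// produces %struct.A = type { i32 (...)**, i32 } under the Itanium ABI; the
// vtable pointer is modelled as a pointer to a pointer to a vararg function
// returning i32.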

bool
CGRecordLayoutBuilder::MSLayoutNonVirtualBases(const CXXRecordDecl *RD,
                                               const ASTRecordLayout &Layout) {
  // Add a vfptr if the layout says to do so.
  if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases that have leading vfptrs.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
    const ASTRecordLayout &BaseLayout
      = Types.getContext().getASTRecordLayout(BaseDecl);

    if (!BaseLayout.hasExtendableVFPtr())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Lay out the non-virtual bases that don't have leading vfptrs.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
    const ASTRecordLayout &BaseLayout
      = Types.getContext().getASTRecordLayout(BaseDecl);

    if (BaseLayout.hasExtendableVFPtr())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.hasOwnVBPtr()) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlignment();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
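
  // Illustrative: this is what produces the "%struct.C.base" type in the
  // BaseSubobjectType example near the top of this file; the ".base" type
  // reuses the field types laid out so far, padded to the aligned
  // non-virtual size, and omits storage that only the complete object has.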

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD) {
    if (Types.getTarget().getCXXABI().isMicrosoft()) {
      if (!MSLayoutNonVirtualBases(RD, Layout))
        return false;
    } else if (!LayoutNonVirtualBases(RD, Layout))
      return false;
  }

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
       FI != FE; ++FI, ++FieldNo) {
    FieldDecl *FD = *FI;

    // If this field is a bitfield, lay out all of the consecutive
    // non-zero-length bitfields and the last zero-length bitfield; these will
    // all share storage.
    if (FD->isBitField()) {
      // If all we have is a zero-width bitfield, skip it.
      if (FD->getBitWidthValue(Types.getContext()) == 0)
        continue;

      // Lay out this range of bitfields.
      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
        assert(!Packed &&
               "Could not layout bitfields even with a packed LLVM struct!");
        return false;
      }
      assert(FI != FE && "Advanced past the last bitfield");
      continue;
    }

    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases. The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getTarget().getCXXABI().hasPrimaryVBases()) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
}
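
// Illustrative mechanics (hypothetical offsets): AppendField above records an
// i32 appended at offset 4 by setting NextFieldOffset to 8. AppendPadding is
// then responsible for any gap: called with fieldOffset 12 and alignment 1
// (as LayoutBase does), no implicit ABI padding can close the gap, so an
// explicit [4 x i8] field is appended first.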

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // Return if this offset does not overlap with the base's tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getDataLayout().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}
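
// Illustrative: in the Itanium C++ ABI a null pointer to data member is
// represented as -1 rather than 0, so a record such as
// 'struct S { int S::*p; };' cannot be zero-initialized with an LLVM
// zeroinitializer; CheckZeroInitializable below detects this through the
// target C++ ABI.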

/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOpts().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlignment();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    // Don't inspect zero-length bitfields.
    if (FD->getBitWidthValue(getContext()) == 0)
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset
     << " Size:" << Size
     << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageAlignment:" << StorageAlignment << ">";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
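
// Usage note (assumed invocation): the dump in ComputeRecordLayout() can be
// triggered from the command line with 'clang -Xclang -fdump-record-layouts',
// which sets the DumpRecordLayouts language option consulted above.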