//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds each field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about each bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

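  // Illustrative note: when a later field or base must start inside the tail
  // padding of the base recorded here, ResizeLastBaseFieldIfNecessary()
  // (below) pops that base's struct type off FieldTypes and re-appends it as
  // an i8 array of its non-virtual size, so the padding bytes become
  // addressable by the overlapping field.
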
  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// Lay out a sequence of contiguous bitfields.
  bool LayoutBitfields(const ASTRecordLayout &Layout,
                       unsigned &FirstFieldNo,
                       RecordDecl::field_iterator &FI,
                       RecordDecl::field_iterator FE);

  /// LayoutFields - try to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - lay out a single base, virtual or non-virtual.
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - lay out a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - lay out the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - lay out the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - lay out a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - lay out the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// MSLayoutNonVirtualBases - lay out the non-virtual bases of a record
  /// decl, like MSVC.
  bool MSLayoutNonVirtualBases(const CXXRecordDecl *RD,
                               const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the
  /// previous base needs to be changed to an array of i8. Returns true if the
  /// last laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

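  // For example (illustrative): AppendBytes(CharUnits::fromQuantity(3))
  // appends a [3 x i8] field at NextFieldOffset and advances NextFieldOffset
  // by three bytes.
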
  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), Types(Types) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
  Alignment = Layout.getAlignment();
  Packed = D->hasAttr<PackedAttr>() || Layout.getSize() % Alignment != 0;

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        uint64_t StorageAlignment) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T) * 8;
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageAlignment);
}

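// For example (illustrative): a bit-field with Size == 3 at Offset == 0 in a
// 32-bit storage unit keeps Offset == 0 on a little-endian target, but on a
// big-endian target MakeInfo rewrites it to 32 - (0 + 3) == 29, since the
// field occupies the most significant bits of the storage integer.
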
/// \brief Layout the range of bitfields from BFI to BFE as contiguous storage.
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getTarget().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits);
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use packed structs for packed fields, and also for unnamed bit
    // fields since they don't affect the struct alignment.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
  RecordDecl::field_iterator BFE = llvm::next(FI);
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);

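  // For example (illustrative): in
  //   struct S { unsigned a : 3; unsigned b : 6; };
  // a and b form one contiguous run with FirstFieldOffset == 0 and
  // TotalBits == 9, which rounds up to 16 bits (two bytes) of storage.
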
  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage a power-of-two. There are two cases when we can
  // do this. The first is when we have a subsequent field and can widen up to
  // its offset. The second is when the data size of the AST record layout is
  // past the end of the current storage. The latter is true when there is tail
  // padding on a struct and no members of a derived class can be packed into
  // it.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these in conjunction with shifts and masks to narrower operations where
  // beneficial.
  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getTarget().getLongWidth();
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bitfields, associating each with this field of storage and
  // building up the bitfield-specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
  FirstFieldNo = LastFieldNo;
  return true;
}

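// Illustrative widening example for the code above: a run occupying 24 bits
// of storage with 32-bit longs gives WidenedBits == NextPowerOf2(23) == 32,
// so if at least 32 bits of space lie before the next field (or within the
// data size), the run is stored in a single 4-byte unit instead of 3 bytes.
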
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  assert(!D->isBitField() && "Bitfields should be laid out separately.");

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  Fields[Field] = 0;
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    unsigned StorageBits = llvm::RoundUpToAlignment(
      FieldSize, Types.getTarget().getCharAlign());
    CharUnits NumBytesToAppend
      = Types.getContext().toCharUnitsFromBits(StorageBits);

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
                                                StorageBits,
                                                Alignment.getQuantity());
    return FieldTy;
  }

  // This is a regular union field.
  return Types.ConvertTypeForMem(Field->getType());
}

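// For example (illustrative): a union member declared as
//   unsigned x : 11;
// needs RoundUpToAlignment(11, 8) == 16 bits of storage, so LayoutUnionField
// returns [2 x i8] and records bit-field info spanning bits [0, 11) of that
// storage on a little-endian target.
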
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
      Types.getDataLayout().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
      Types.getDataLayout().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
       E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}

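// Illustrative note: a virtual base that serves as the primary base of some
// other base class has already been emitted at that base's offset, so
// LayoutVirtualBases below consults IndirectPrimaryBases to avoid laying it
// out a second time.
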
/// LayoutVirtualBases - lay out the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary
    // bases of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.hasOwnVBPtr()) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

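// Illustrative note on the MSVC-style path below: bases whose layout carries
// an extendable vfptr are laid out before the bases without one, mirroring
// (per the comments here) how MSVC groups non-virtual bases.
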
bool
CGRecordLayoutBuilder::MSLayoutNonVirtualBases(const CXXRecordDecl *RD,
                                               const ASTRecordLayout &Layout) {
  // Add a vfptr if the layout says to do so.
  if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases that have leading vfptrs.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
    const ASTRecordLayout &BaseLayout
      = Types.getContext().getASTRecordLayout(BaseDecl);

    if (!BaseLayout.hasExtendableVFPtr())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Lay out the non-virtual bases that don't have leading vfptrs.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
    const ASTRecordLayout &BaseLayout
      = Types.getContext().getASTRecordLayout(BaseDecl);

    if (BaseLayout.hasExtendableVFPtr())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.hasOwnVBPtr()) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

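// Illustrative note: ComputeNonVirtualBaseType below runs after the
// non-virtual bases and fields have been laid out but before any virtual
// bases (see LayoutFields), so at that point FieldTypes describes exactly
// the contents of the %struct.<name>.base type shown in the
// BaseSubobjectType comment at the top of this file, modulo trailing padding
// up to the aligned non-virtual size.
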
bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlignment();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD) {
    if (Types.getTarget().getCXXABI().isMicrosoft()) {
      if (!MSLayoutNonVirtualBases(RD, Layout))
        return false;
    } else if (!LayoutNonVirtualBases(RD, Layout))
      return false;
  }

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
       FI != FE; ++FI, ++FieldNo) {
    FieldDecl *FD = *FI;

    // If this field is a bitfield, lay out all of the consecutive
    // non-zero-length bitfields and the last zero-length bitfield; these will
    // all share storage.
    if (FD->isBitField()) {
      // If all we have is a zero-width bitfield, skip it.
      if (FD->getBitWidthValue(Types.getContext()) == 0)
        continue;

      // Lay out this range of bitfields.
      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
        assert(!Packed &&
               "Could not layout bitfields even with a packed LLVM struct!");
        return false;
      }
      assert(FI != FE && "Advanced past the last bitfield");
      continue;
    }

    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields; now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases. The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getTarget().getCXXABI().hasPrimaryVBases()) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
}

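// For example (illustrative): with NextFieldOffset == 1, calling
// AppendPadding(CharUnits::fromQuantity(4), CharUnits::One()) below cannot
// close the gap by alignment rounding alone, so it appends a [3 x i8]
// padding field before the next real field at offset 4.
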
void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getDataLayout().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

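// Illustrative note (Itanium C++ ABI): a pointer to data member such as
//   int Foo::*p;
// uses -1 rather than 0 as its null value, so a record containing one cannot
// be zero-initialized with an LLVM zeroinitializer. CheckZeroInitializable
// below asks the C++ ABI object about exactly this case.
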
/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOpts().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlignment();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

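    // The checks below validate the CGBitFieldInfo computed for each named,
    // non-zero-width bit-field against the storage unit it was assigned.
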
    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    // Don't inspect zero-length bitfields.
    if (FD->getBitWidthValue(getContext()) == 0)
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset
     << " Size:" << Size
     << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageAlignment:" << StorageAlignment << ">";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}