//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  llvm::SmallVector<const llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  const llvm::StructType *BaseSubobjectType;

  /// Fields - Holds each field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to
  /// avoid laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;
  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Tries to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - Lay out a single base, virtual or non-virtual.
  void LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - Lay out a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - Lay out the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - Lay out a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - Lay out the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - Lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(const llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to a data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in the higher bit positions,
  // so reverse the bit offset here. The byte offsets are reversed back later.
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }
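
  // For illustration only (hypothetical values): in a 32-bit container, a
  // bit-field declared at offset 0 with size 9 becomes
  // FieldOffset = 32 - 0 - 9 = 23, i.e. the topmost 9 bits of the container.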

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
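  // As a worked example (hypothetical values, not tied to a particular
  // target): a 10-bit field of a 16-bit type at bit offset 12 in a 32-bit
  // record produces two components: a 16-bit access at bit 0 covering field
  // bits [0, 4), and a 16-bit access at bit 16 covering field bits [4, 10).
  //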
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above, because the
    // first fields are in the higher bits. But this also reverses the bytes,
    // so compensate here by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().Target.getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bit-field begins inside the previously laid out field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }
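
  // For illustration only (hypothetical values, assuming charAlign == 8): a
  // 10-bit field with 6 bits still available in the last byte appends
  // RoundUpToAlignment(10 - 6, 8) = 8 bits (one byte); the same field
  // starting on a fresh byte boundary appends RoundUpToAlignment(10, 8) = 16
  // bits (two bytes).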

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero-sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().Target.getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
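
    // E.g. (illustrative): a 13-bit field rounds up to 16 bits and is
    // stored as [2 x i8].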

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                         CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *fieldType = LayoutUnionField(*field, layout);

    if (!fieldType)
      continue;

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }
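
  // Illustrative example: for 'union { char c; double d; }' the loop above
  // picks double (size 8, alignment 8) as the union's storage type.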

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  // Fields and bases can be laid out in the tail padding of previous
  // bases.  If this happens, we need to allocate the base as an i8
  // array; otherwise, we can use the subobject type.  However,
  // actually doing that would require knowledge of what immediately
  // follows this base in the layout, so instead we do a conservative
  // approximation, which is to use the base subobject type if it
  // has the same LLVM storage size as the nvsize.

  const llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  AppendField(baseOffset, subobjectType);

  Types.addBaseSubobjectTypeName(base, baseLayout);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  LayoutBase(base, baseLayout, baseOffset);
  NonVirtualBases[base] = (FieldTypes.size() - 1);
}

void
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  LayoutBase(base, baseLayout, baseOffset);
  VirtualBases[base] = (FieldTypes.size() - 1);
}

/// LayoutVirtualBases - Lay out the virtual bases of a record decl.
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      LayoutVirtualBase(BaseDecl, vbaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    LayoutVirtualBases(BaseDecl, Layout);
  }
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();
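
      // Note that the field appended below is VTableTy->getPointerTo(), i.e.
      // 'i32 (...)**', matching the vtable pointer slot shown in the
      // BaseSubobjectType example at the top of this file.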

      assert(NextFieldOffset.isZero() &&
             "VTable pointer must come first!");
      AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
    } else {
      if (!Layout.isPrimaryBaseVirtual())
        LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero());
      else
        LayoutVirtualBase(PrimaryBase, CharUnits::Zero());
    }
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
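
  // E.g. (illustrative): a non-virtual size of 5 bytes with a non-virtual
  // alignment of 4 gives an aligned non-virtual type size of 8 bytes.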

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot layout even as packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::get(Types.getLLVMContext(),
                                            FieldTypes, Packed);

  if (needsPadding) {
    // Pull the padding back off.
    FieldTypes.pop_back();
  }

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = *Field;
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields; now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // And lay out the virtual bases.
    RD->getIndirectPrimaryBases(IndirectPrimaryBases);
    if (Layout.isPrimaryBaseVirtual())
      IndirectPrimaryBases.insert(Layout.getPrimaryBase());
    LayoutVirtualBases(RD, Layout);
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        const llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(fieldAlignment);
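
  // For illustration only (hypothetical values): with NextFieldOffset == 5
  // and fieldAlignment == 4, alignedNextFieldOffset is 8; a field at offset
  // 12 then still needs 12 - 5 = 7 bytes of padding, counted from
  // NextFieldOffset rather than from the aligned offset.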

  if (alignedNextFieldOffset < fieldOffset) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    CharUnits padding = fieldOffset - NextFieldOffset;

    AppendBytes(padding);
  }
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

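  // For example, under the Itanium C++ ABI a null pointer to a data member
  // is represented as -1, not 0, so a record containing one cannot be
  // zero-initialized with an LLVM zeroinitializer.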
  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
                                                     Builder.FieldTypes,
                                                     Builder.Packed);

  // If we're in C++, compute the base subobject type.
  const llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}