//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Holds each field and its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about a bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not.
  bool IsMsStruct;

private:
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Tries to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct needed to be
  /// packed but is not.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - Lay out a single base, virtual or non-virtual.
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - Lay out a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - Lay out the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - Lay out the virtual bases of a record decl,
  /// the way MSVC does.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - Lay out a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - Lay out the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - Lay out a single field. Returns false if the operation
  /// failed because the struct needed to be packed but is not.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
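  ///
  /// For example (a sketch; exact offsets depend on the target ABI):
  ///
  /// struct A { virtual ~A(); int i; char c; };
  /// struct B : A { char c2; };
  ///
  /// c2 may be placed in A's tail padding, where A's subobject type would
  /// overlap it; the base field is then replaced with an i8 array covering
  /// only A's non-virtual size.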
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to a data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types), BitsAvailableInLastField(0) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
  Packed = D->hasAttr<PackedAttr>();

  IsMsStruct = D->hasAttr<MsStructAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
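  // For example (a sketch, assuming a typical 32-bit target): with
  //
  //   #pragma pack(2)
  //   struct S { char c; int i; };   // i lives at byte offset 2
  //
  // the i32 field needs 4-byte alignment but the record is only 2-byte
  // aligned, so the first pass fails and the fields are laid out again as a
  // packed LLVM struct: %struct.S = type <{ i8, i8, i32 }>.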
  Packed = true;
  LastLaidOutBase.invalidate();
  NextFieldOffset = CharUnits::Zero();
  FieldTypes.clear();
  Fields.clear();
  BitFields.clear();
  NonVirtualBases.clear();
  VirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                               const FieldDecl *FD,
                               uint64_t FieldOffset,
                               uint64_t FieldSize,
                               uint64_t ContainingTypeSizeInBits,
                               unsigned ContainingTypeAlign) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // On big-endian machines the first fields are in higher bit positions,
  // so reverse the offset. The byte offsets are reversed back later.
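  // For example (a sketch): a 5-bit field at bit offset 0 of a 32-bit record
  // occupies the five most significant bits, so its offset counted from the
  // little end becomes 32 - 0 - 5 == 27.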
  if (Types.getTargetData().isBigEndian()) {
    FieldOffset = ContainingTypeSizeInBits - FieldOffset - FieldSize;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
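  // For example (a sketch, assuming a little-endian target with 8-bit chars
  // and register-sized accesses disabled):
  //
  //   struct __attribute__((packed)) S { char c; int b : 16; };
  //
  // Here b occupies record bits [8, 24) of a 24-bit record, while its type is
  // 32 bits wide. A 32-bit access would run past the end of the record, so
  // the width is halved to 16: the first component is a 16-bit access at byte
  // offset 0 that yields the field's low 8 bits, and the second is an 8-bit
  // access at byte offset 2 that yields the remaining 8 bits.
  //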
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // If requested, widen the initial bit-field access to be register sized. The
  // theory is that this is most likely to allow multiple accesses into the same
  // structure to be coalesced, and that the backend should be smart enough to
  // narrow the store if no coalescing is ever done.
  //
  // The subsequent code will handle aligning these accesses to common
  // boundaries and guaranteeing that we do not access past the end of the
  // structure.
  if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
    if (AccessWidth < Types.getTarget().getRegisterWidth())
      AccessWidth = Types.getTarget().getRegisterWidth();
  }

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > Types.getTarget().getCharWidth() &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= Types.getTarget().getCharWidth()
             && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.

    // On big-endian machines we reversed the bit offset above because the
    // first fields are in higher bits. But that also reverses the bytes, so
    // fix it here by reversing the byte offset on big-endian machines.
    if (Types.getTargetData().isBigEndian()) {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
          ContainingTypeSizeInBits - AccessStart - AccessWidth);
    } else {
      AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
    }
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
        llvm::MinAlign(ContainingTypeAlign, AccessStart));
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
  unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t fieldOffset) {
  uint64_t fieldSize = D->getBitWidthValue(Types.getContext());

  if (fieldSize == 0)
    return;

  uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  CharUnits numBytesToAppend;
  unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();

  if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    CharUnits fieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(fieldOffset);

    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
      nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
  }

  if (fieldOffset < nextFieldOffsetInBits) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");

    // The bitfield begins in the previous bit-field.
    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
                               charAlign));
  } else {
    assert(fieldOffset % charAlign == 0 &&
           "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
                  CharUnits::One());

    numBytesToAppend = Types.getContext().toCharUnitsFromBits(
        llvm::RoundUpToAlignment(fieldSize, charAlign));

    assert(!numBytesToAppend.isZero() && "No bytes to append!");
  }

  // Add the bit field info.
  BitFields.insert(std::make_pair(D,
                   CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));

  AppendBytes(numBytesToAppend);

  BitsAvailableInLastField =
    Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
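
  // For example (a sketch): given
  //
  //   struct S { char a : 3; char b : 3; };
  //
  // laying out a appends a single i8 and leaves 5 bits available; b then
  // begins in that same byte, so no new storage is appended for it.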
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t fieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, fieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(fieldOffset % Types.getTarget().getCharWidth() == 0
         && "field offset is not on a byte boundary!");
  CharUnits fieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(fieldOffset);

  llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
  CharUnits typeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (typeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (!Packed) {
    if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
      const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
      if (const MaxFieldAlignmentAttr *MFAA =
            RD->getAttr<MaxFieldAlignmentAttr>()) {
        if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
          return false;
      }
    }
  }

  // Round up the field offset to the alignment of the field type.
  CharUnits alignedNextFieldOffsetInBytes =
    NextFieldOffset.RoundUpToAlignment(typeAlignment);

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    // Try to resize the last base field.
    if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
      alignedNextFieldOffsetInBytes =
        NextFieldOffset.RoundUpToAlignment(typeAlignment);
    }
  }

  if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(fieldOffsetInBytes, typeAlignment);

  // Now append the field.
  Fields[D] = FieldTypes.size();
  AppendField(fieldOffsetInBytes, Ty);

  LastLaidOutBase.invalidate();
  return true;
}

llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());

    // Ignore zero-sized bit fields.
    if (FieldSize == 0)
      return 0;

    llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
      llvm::RoundUpToAlignment(FieldSize,
                               Types.getContext().getTargetInfo().getCharAlign()));

    if (NumBytesToAppend > CharUnits::One())
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());

    // Add the bit field info.
    BitFields.insert(std::make_pair(Field,
                         CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  Fields[Field] = 0;
  return Types.ConvertTypeForMem(Field->getType());
}

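// LayoutUnion represents the union with its single "best" field: the one with
// the strictest alignment, with ties broken by size. For example (a sketch,
// assuming a typical 64-bit target):
//
//   union U { int i; double d; char buf[12]; };
//
// seeds the layout with the double and then appends tail padding up to the
// 16-byte record size:
//
//   %union.U = type { double, [8 x i8] }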
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(&*field, layout);

    if (!fieldType)
      continue;

    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getTargetData().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getTargetData().getTypeAllocSize(fieldType));

    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}

bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
                                       const CGRecordLayout &baseLayout,
                                       CharUnits baseOffset) {
  ResizeLastBaseFieldIfNecessary(baseOffset);

  AppendPadding(baseOffset, CharUnits::One());

  const ASTRecordLayout &baseASTLayout
    = Types.getContext().getASTRecordLayout(base);

  LastLaidOutBase.Offset = NextFieldOffset;
  LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();

  llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
  if (getTypeAlignment(subobjectType) > Alignment)
    return false;

  AppendField(baseOffset, subobjectType);
  return true;
}

bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
                                                 CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializableAsBase) {
    assert(IsZeroInitializable &&
           "class zero-initializable as base but not as complete object");

    IsZeroInitializable = IsZeroInitializableAsBase =
      baseLayout.isZeroInitializableAsBase();
  }

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  NonVirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
                                         CharUnits baseOffset) {
  // Ignore empty bases.
  if (base->isEmpty()) return true;

  const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
  if (IsZeroInitializable)
    IsZeroInitializable = baseLayout.isZeroInitializableAsBase();

  if (!LayoutBase(base, baseLayout, baseOffset))
    return false;
  VirtualBases[base] = (FieldTypes.size() - 1);
  return true;
}

bool
CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
                                            const ASTRecordLayout &Layout) {
  if (!RD->getNumVBases())
    return true;

  // The vbases list is uniqued and ordered by a depth-first
  // traversal, which is what we need here.
  for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
        E = RD->vbases_end(); I != E; ++I) {

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());

    CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
    if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
      return false;
  }
  return true;
}

/// LayoutVirtualBases - Lay out the virtual bases of a record decl.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}

bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Layout.hasOwnVFPtr()) {
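    // For example (a sketch): a dynamic class with no bases, such as
    //
    //   struct A { virtual void f(); int i; };
    //
    // has no primary base to reuse, so it gets a vtable pointer as its first
    // field and lowers to something like
    //
    //   %struct.A = type { i32 (...)**, i32 }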
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
  CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
  CharUnits AlignedNonVirtualTypeSize =
    NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

  // First check if we can use the same fields as for the complete class.
  CharUnits RecordSize = Layout.getSize();
  if (AlignedNonVirtualTypeSize == RecordSize)
    return true;

  // Check if we need padding.
  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
    assert(!Packed && "cannot lay out even as a packed struct");
    return false; // Needs packing.
  }

  bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
  if (needsPadding) {
    CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
    FieldTypes.push_back(getByteArrayType(NumBytes));
  }

  BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
                                               FieldTypes, "", Packed);
  Types.addRecordTypeName(RD, BaseSubobjectType, ".base");

  // Pull the padding back off.
  if (needsPadding)
    FieldTypes.pop_back();

  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      const FieldDecl *FD = &*Field;
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    if (!LayoutField(&*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not lay out fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not lay out even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases.  The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
  ResizeLastBaseFieldIfNecessary(RecordSize);

  assert(NextFieldOffset <= RecordSize && "Size mismatch!");

  CharUnits AlignedNextFieldOffset =
    NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSize) {
    // We don't need any padding.
    return;
  }

  CharUnits NumPadBytes = RecordSize - NextFieldOffset;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
                                        llvm::Type *fieldType) {
  CharUnits fieldSize =
    CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));

  FieldTypes.push_back(fieldType);

  NextFieldOffset = fieldOffset + fieldSize;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
                                          CharUnits fieldAlignment) {
  assert(NextFieldOffset <= fieldOffset &&
         "Incorrect field layout!");

  // Do nothing if we're already at the right offset.
  if (fieldOffset == NextFieldOffset) return;

  // If we're not emitting a packed LLVM type, try to avoid adding
  // unnecessary padding fields.
  if (!Packed) {
    // Round up the field offset to the alignment of the field type.
    CharUnits alignedNextFieldOffset =
      NextFieldOffset.RoundUpToAlignment(fieldAlignment);
    assert(alignedNextFieldOffset <= fieldOffset);

    // If that's the right offset, we're done.
    if (alignedNextFieldOffset == fieldOffset) return;
  }

  // Otherwise we need explicit padding.
  CharUnits padding = fieldOffset - NextFieldOffset;
  AppendBytes(padding);
}

bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
  // Check if we have a base to resize.
  if (!LastLaidOutBase.isValid())
    return false;

  // This offset does not overlap with the tail padding.
  if (offset >= NextFieldOffset)
    return false;

  // Restore the field offset and append an i8 array instead.
  FieldTypes.pop_back();
  NextFieldOffset = LastLaidOutBase.Offset;
  AppendBytes(LastLaidOutBase.NonVirtualSize);
  LastLaidOutBase.invalidate();

  return true;
}

llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
  assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");

  llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (numBytes > CharUnits::One())
    Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
  if (numBytes.isZero())
    return;

  // Append the padding field.
  AppendField(NextFieldOffset, getByteArrayType(numBytes));
}

CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
  if (Packed)
    return CharUnits::One();

  return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
}

CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return CharUnits::One();

  CharUnits maxAlignment = CharUnits::One();
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));

  return maxAlignment;
}

/// Merge in whether a field of the given type is zero-initializable.
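///
/// For example (a sketch): in the Itanium C++ ABI a null pointer to data
/// member is represented as -1 rather than 0, so a record containing
///
///   int S::*ptr;
///
/// cannot be zero-initialized with an LLVM zeroinitializer.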
void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializableAsBase)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOpts().CPlusPlus)
    return;

  const Type *elementType = T->getBaseElementTypeUnsafe();

  if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
    if (!Layout.isZeroInitializable())
      IsZeroInitializable = IsZeroInitializableAsBase = false;
  }
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->hasAttr<MsStructAttr>();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = &*it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset +
        getContext().toBits(AI.FieldByteOffset);
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); &*it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}