1 //===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Builder implementation for CGRecordLayout objects.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGRecordLayout.h"
15 #include "CGCXXABI.h"
16 #include "CodeGenTypes.h"
17 #include "clang/AST/ASTContext.h"
18 #include "clang/AST/Attr.h"
19 #include "clang/AST/CXXInheritance.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/Expr.h"
22 #include "clang/AST/RecordLayout.h"
23 #include "clang/Frontend/CodeGenOptions.h"
24 #include "llvm/DataLayout.h"
25 #include "llvm/DerivedTypes.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/Type.h"
29 using namespace clang;
30 using namespace CodeGen;
31 
32 namespace {
33 
class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  ///
  SmallVector<llvm::Type *, 16> FieldTypes;

  /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  ///
  /// This only gets initialized if the base subobject type is
  /// different from the complete-object type.
  llvm::StructType *BaseSubobjectType;

  /// Fields - Maps each FieldDecl to its corresponding LLVM field number.
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;

  /// BitFields - Holds location and size information about each bit field.
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;

  /// NonVirtualBases - Maps each laid-out non-virtual base class to the
  /// LLVM field number of its base subobject.
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;

  /// VirtualBases - Maps each laid-out virtual base class to the LLVM
  /// field number of its base subobject.
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to
  /// avoid laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;
  /// IsZeroInitializableAsBase - Same, but when used as a base subobject.
  bool IsZeroInitializableAsBase;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

  /// IsMsStruct - Whether ms_struct is in effect or not
  bool IsMsStruct;

private:
  /// Types - The CodeGenTypes object this builder works for; source of the
  /// ASTContext, DataLayout, and record-layout queries below.
  CodeGenTypes &Types;

  /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
  /// last base laid out. Used so that we can replace the last laid out base
  /// type with an i8 array if needed.
  struct LastLaidOutBaseInfo {
    CharUnits Offset;
    CharUnits NonVirtualSize;

    // A zero non-virtual size doubles as the "no resizable base" sentinel.
    bool isValid() const { return !NonVirtualSize.isZero(); }
    void invalidate() { NonVirtualSize = CharUnits::Zero(); }

  } LastLaidOutBase;

  /// Alignment - Contains the alignment of the RecordDecl.
  CharUnits Alignment;

  /// NextFieldOffset - Holds the next field offset.
  CharUnits NextFieldOffset;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  llvm::Type *LayoutUnionField(const FieldDecl *Field,
                               const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutBitfields - Lay out a sequence of contiguous bitfields sharing
  /// one unit of storage. Returns false if a packed struct is required.
  bool LayoutBitfields(const ASTRecordLayout &Layout,
                       unsigned &FirstFieldNo,
                       RecordDecl::field_iterator &FI,
                       RecordDecl::field_iterator FE);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - Layout a single base, virtual or non-virtual
  bool LayoutBase(const CXXRecordDecl *base,
                  const CGRecordLayout &baseLayout,
                  CharUnits baseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  bool LayoutVirtualBase(const CXXRecordDecl *base,
                         CharUnits baseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  bool LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
  /// like MSVC.
  bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  bool LayoutNonVirtualBase(const CXXRecordDecl *base,
                            CharUnits baseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);

  /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
  /// tail padding of a previous base. If this happens, the type of the previous
  /// base needs to be changed to an array of i8. Returns true if the last
  /// laid out base was resized.
  bool ResizeLastBaseFieldIfNecessary(CharUnits offset);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  llvm::Type *getByteArrayType(CharUnits NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(CharUnits numBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(CharUnits RecordSize);

  CharUnits getTypeAlignment(llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  CharUnits getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : BaseSubobjectType(0),
      IsZeroInitializable(true), IsZeroInitializableAsBase(true),
      Packed(false), IsMsStruct(false),
      Types(Types) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};
204 
205 }
206 
207 void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
208   Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
209   Packed = D->hasAttr<PackedAttr>();
210 
211   IsMsStruct = D->isMsStruct(Types.getContext());
212 
213   if (D->isUnion()) {
214     LayoutUnion(D);
215     return;
216   }
217 
218   if (LayoutFields(D))
219     return;
220 
221   // We weren't able to layout the struct. Try again with a packed struct
222   Packed = true;
223   LastLaidOutBase.invalidate();
224   NextFieldOffset = CharUnits::Zero();
225   FieldTypes.clear();
226   Fields.clear();
227   BitFields.clear();
228   NonVirtualBases.clear();
229   VirtualBases.clear();
230 
231   LayoutFields(D);
232 }
233 
234 CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
235                                         const FieldDecl *FD,
236                                         uint64_t Offset, uint64_t Size,
237                                         uint64_t StorageSize,
238                                         uint64_t StorageAlignment) {
239   llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
240   CharUnits TypeSizeInBytes =
241     CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
242   uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
243 
244   bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
245 
246   if (Size > TypeSizeInBits) {
247     // We have a wide bit-field. The extra bits are only used for padding, so
248     // if we have a bitfield of type T, with size N:
249     //
250     // T t : N;
251     //
252     // We can just assume that it's:
253     //
254     // T t : sizeof(T);
255     //
256     Size = TypeSizeInBits;
257   }
258 
259   // Reverse the bit offsets for big endian machines.
260   if (Types.getDataLayout().isBigEndian())
261     Offset = Size - Offset - 1;
262 
263   return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageAlignment);
264 }
265 
/// \brief Layout the range of bitfields from BFI to BFE as contiguous storage.
///
/// On entry FI points at the first bitfield of the run; on exit FI is left on
/// the *last* bitfield of the run and FirstFieldNo is updated to its field
/// number, so the caller's loop increment steps past the whole run.
/// Returns false if the run can only be laid out with a packed struct.
bool CGRecordLayoutBuilder::LayoutBitfields(const ASTRecordLayout &Layout,
                                            unsigned &FirstFieldNo,
                                            RecordDecl::field_iterator &FI,
                                            RecordDecl::field_iterator FE) {
  assert(FI != FE);
  uint64_t FirstFieldOffset = Layout.getFieldOffset(FirstFieldNo);
  uint64_t NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);

  unsigned CharAlign = Types.getContext().getTargetInfo().getCharAlign();
  assert(FirstFieldOffset % CharAlign == 0 &&
         "First field offset is misaligned");
  CharUnits FirstFieldOffsetInBytes
    = Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

  // The storage can only be assumed to be aligned to whatever both the record
  // alignment and the byte offset the storage starts at allow.
  unsigned StorageAlignment
    = llvm::MinAlign(Alignment.getQuantity(),
                     FirstFieldOffsetInBytes.getQuantity());

  // If the run starts before the current end of the struct, the bits must be
  // packed into the tail padding of the last laid-out base.
  if (FirstFieldOffset < NextFieldOffsetInBits) {
    CharUnits FieldOffsetInCharUnits =
      Types.getContext().toCharUnitsFromBits(FirstFieldOffset);

    // Try to resize the last base field.
    if (!ResizeLastBaseFieldIfNecessary(FieldOffsetInCharUnits))
      llvm_unreachable("We must be able to resize the last base if we need to "
                       "pack bits into it.");

    NextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
    assert(FirstFieldOffset >= NextFieldOffsetInBits);
  }

  // Append padding if necessary.
  AppendPadding(Types.getContext().toCharUnitsFromBits(FirstFieldOffset),
                CharUnits::One());

  // Find the last bitfield in a contiguous run of bitfields. A run ends at a
  // non-bitfield, a zero-width bitfield, or a gap in the bit offsets.
  RecordDecl::field_iterator BFI = FI;
  unsigned LastFieldNo = FirstFieldNo;
  uint64_t NextContiguousFieldOffset = FirstFieldOffset;
  for (RecordDecl::field_iterator FJ = FI;
       (FJ != FE && (*FJ)->isBitField() &&
        NextContiguousFieldOffset == Layout.getFieldOffset(LastFieldNo) &&
        (*FJ)->getBitWidthValue(Types.getContext()) != 0); FI = FJ++) {
    NextContiguousFieldOffset += (*FJ)->getBitWidthValue(Types.getContext());
    ++LastFieldNo;

    // We must use packed structs for packed fields, and also unnamed bit
    // fields since they don't affect the struct alignment.
    if (!Packed && ((*FJ)->hasAttr<PackedAttr>() || !(*FJ)->getDeclName()))
      return false;
  }
  // [BFI, BFE) is the half-open range of bitfields sharing this storage.
  RecordDecl::field_iterator BFE = llvm::next(FI);
  --LastFieldNo;
  assert(LastFieldNo >= FirstFieldNo && "Empty run of contiguous bitfields");
  FieldDecl *LastFD = *FI;

  // Find the last bitfield's offset, add its size, and round it up to the
  // character alignment to compute the storage required.
  uint64_t LastFieldOffset = Layout.getFieldOffset(LastFieldNo);
  uint64_t LastFieldSize = LastFD->getBitWidthValue(Types.getContext());
  uint64_t TotalBits = (LastFieldOffset + LastFieldSize) - FirstFieldOffset;
  CharUnits StorageBytes = Types.getContext().toCharUnitsFromBits(
    llvm::RoundUpToAlignment(TotalBits, CharAlign));
  uint64_t StorageBits = Types.getContext().toBits(StorageBytes);

  // Grow the storage to encompass any known padding in the layout when doing
  // so will make the storage a power-of-two. There are two cases when we can
  // do this. The first is when we have a subsequent field and can widen up to
  // its offset. The second is when the data size of the AST record layout is
  // past the end of the current storage. The latter is true when there is tail
  // padding on a struct and no members of a super class can be packed into it.
  //
  // Note that we widen the storage as much as possible here to express the
  // maximum latitude the language provides, and rely on the backend to lower
  // these in conjunction with shifts and masks to narrower operations where
  // beneficial.
  uint64_t EndOffset = Types.getContext().toBits(Layout.getDataSize());
  if (BFE != FE)
    // If there are more fields to be laid out, the offset at the end of the
    // bitfield is the offset of the next field in the record.
    EndOffset = Layout.getFieldOffset(LastFieldNo + 1);
  assert(EndOffset >= (FirstFieldOffset + TotalBits) &&
         "End offset is not past the end of the known storage bits.");
  uint64_t SpaceBits = EndOffset - FirstFieldOffset;
  uint64_t LongBits = Types.getContext().getTargetInfo().getLongWidth();
  // Round the sub-long remainder of the storage up to a power of two; whole
  // longs are kept as-is.
  uint64_t WidenedBits = (StorageBits / LongBits) * LongBits +
                         llvm::NextPowerOf2(StorageBits % LongBits - 1);
  assert(WidenedBits >= StorageBits && "Widening shrunk the bits!");
  if (WidenedBits <= SpaceBits) {
    StorageBits = WidenedBits;
    StorageBytes = Types.getContext().toCharUnitsFromBits(StorageBits);
    assert(StorageBits == (uint64_t)Types.getContext().toBits(StorageBytes));
  }

  // All bitfields of the run share this single LLVM field.
  unsigned FieldIndex = FieldTypes.size();
  AppendBytes(StorageBytes);

  // Now walk the bitfields associating them with this field of storage and
  // building up the bitfield specific info.
  unsigned FieldNo = FirstFieldNo;
  for (; BFI != BFE; ++BFI, ++FieldNo) {
    FieldDecl *FD = *BFI;
    // Offsets handed to MakeInfo are relative to the start of the storage.
    uint64_t FieldOffset = Layout.getFieldOffset(FieldNo) - FirstFieldOffset;
    uint64_t FieldSize = FD->getBitWidthValue(Types.getContext());
    Fields[FD] = FieldIndex;
    BitFields[FD] = CGBitFieldInfo::MakeInfo(Types, FD, FieldOffset, FieldSize,
                                             StorageBits, StorageAlignment);
  }
  FirstFieldNo = LastFieldNo;
  return true;
}
378 
379 bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
380                                         uint64_t fieldOffset) {
381   // If the field is packed, then we need a packed struct.
382   if (!Packed && D->hasAttr<PackedAttr>())
383     return false;
384 
385   assert(!D->isBitField() && "Bitfields should be laid out seperately.");
386 
387   CheckZeroInitializable(D->getType());
388 
389   assert(fieldOffset % Types.getTarget().getCharWidth() == 0
390          && "field offset is not on a byte boundary!");
391   CharUnits fieldOffsetInBytes
392     = Types.getContext().toCharUnitsFromBits(fieldOffset);
393 
394   llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
395   CharUnits typeAlignment = getTypeAlignment(Ty);
396 
397   // If the type alignment is larger then the struct alignment, we must use
398   // a packed struct.
399   if (typeAlignment > Alignment) {
400     assert(!Packed && "Alignment is wrong even with packed struct!");
401     return false;
402   }
403 
404   if (!Packed) {
405     if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
406       const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
407       if (const MaxFieldAlignmentAttr *MFAA =
408             RD->getAttr<MaxFieldAlignmentAttr>()) {
409         if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
410           return false;
411       }
412     }
413   }
414 
415   // Round up the field offset to the alignment of the field type.
416   CharUnits alignedNextFieldOffsetInBytes =
417     NextFieldOffset.RoundUpToAlignment(typeAlignment);
418 
419   if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
420     // Try to resize the last base field.
421     if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
422       alignedNextFieldOffsetInBytes =
423         NextFieldOffset.RoundUpToAlignment(typeAlignment);
424     }
425   }
426 
427   if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
428     assert(!Packed && "Could not place field even with packed struct!");
429     return false;
430   }
431 
432   AppendPadding(fieldOffsetInBytes, typeAlignment);
433 
434   // Now append the field.
435   Fields[D] = FieldTypes.size();
436   AppendField(fieldOffsetInBytes, Ty);
437 
438   LastLaidOutBase.invalidate();
439   return true;
440 }
441 
442 llvm::Type *
443 CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
444                                         const ASTRecordLayout &Layout) {
445   Fields[Field] = 0;
446   if (Field->isBitField()) {
447     uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());
448 
449     // Ignore zero sized bit fields.
450     if (FieldSize == 0)
451       return 0;
452 
453     unsigned StorageBits = llvm::RoundUpToAlignment(
454       FieldSize, Types.getContext().getTargetInfo().getCharAlign());
455     CharUnits NumBytesToAppend
456       = Types.getContext().toCharUnitsFromBits(StorageBits);
457 
458     llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
459     if (NumBytesToAppend > CharUnits::One())
460       FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
461 
462     // Add the bit field info.
463     BitFields[Field] = CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize,
464                                                 StorageBits,
465                                                 Alignment.getQuantity());
466     return FieldTy;
467   }
468 
469   // This is a regular union field.
470   return Types.ConvertTypeForMem(Field->getType());
471 }
472 
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);

  // The single member whose type will represent the union in LLVM IR, plus
  // its size and alignment for the selection below.
  llvm::Type *unionType = 0;
  CharUnits unionSize = CharUnits::Zero();
  CharUnits unionAlign = CharUnits::Zero();

  bool hasOnlyZeroSizedBitFields = true;
  bool checkedFirstFieldZeroInit = false;

  unsigned fieldNo = 0;
  for (RecordDecl::field_iterator field = D->field_begin(),
       fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
    assert(layout.getFieldOffset(fieldNo) == 0 &&
          "Union field offset did not start at the beginning of record!");
    llvm::Type *fieldType = LayoutUnionField(*field, layout);

    // Zero-width bit fields produce no storage type; skip them entirely.
    if (!fieldType)
      continue;

    // Zero-initializability is checked on the first named member only.
    if (field->getDeclName() && !checkedFirstFieldZeroInit) {
      CheckZeroInitializable(field->getType());
      checkedFirstFieldZeroInit = true;
    }

    hasOnlyZeroSizedBitFields = false;

    CharUnits fieldAlign = CharUnits::fromQuantity(
                          Types.getDataLayout().getABITypeAlignment(fieldType));
    CharUnits fieldSize = CharUnits::fromQuantity(
                             Types.getDataLayout().getTypeAllocSize(fieldType));

    // Pick the member with the strictest alignment; among equally-aligned
    // members, prefer the larger one.
    if (fieldAlign < unionAlign)
      continue;

    if (fieldAlign > unionAlign || fieldSize > unionSize) {
      unionType = fieldType;
      unionAlign = fieldAlign;
      unionSize = fieldSize;
    }
  }

  // Now add our field.
  if (unionType) {
    AppendField(CharUnits::Zero(), unionType);

    if (getTypeAlignment(unionType) > layout.getAlignment()) {
      // We need a packed struct.
      Packed = true;
      unionAlign = CharUnits::One();
    }
  }
  if (unionAlign.isZero()) {
    (void)hasOnlyZeroSizedBitFields;
    assert(hasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    unionAlign = CharUnits::One();
  }

  // Append tail padding so the LLVM type reaches the full AST record size.
  CharUnits recordSize = layout.getSize();
  if (recordSize > unionSize)
    AppendPadding(recordSize, unionAlign);
}
539 
540 bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
541                                        const CGRecordLayout &baseLayout,
542                                        CharUnits baseOffset) {
543   ResizeLastBaseFieldIfNecessary(baseOffset);
544 
545   AppendPadding(baseOffset, CharUnits::One());
546 
547   const ASTRecordLayout &baseASTLayout
548     = Types.getContext().getASTRecordLayout(base);
549 
550   LastLaidOutBase.Offset = NextFieldOffset;
551   LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();
552 
553   llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
554   if (getTypeAlignment(subobjectType) > Alignment)
555     return false;
556 
557   AppendField(baseOffset, subobjectType);
558   return true;
559 }
560 
561 bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
562                                                  CharUnits baseOffset) {
563   // Ignore empty bases.
564   if (base->isEmpty()) return true;
565 
566   const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
567   if (IsZeroInitializableAsBase) {
568     assert(IsZeroInitializable &&
569            "class zero-initializable as base but not as complete object");
570 
571     IsZeroInitializable = IsZeroInitializableAsBase =
572       baseLayout.isZeroInitializableAsBase();
573   }
574 
575   if (!LayoutBase(base, baseLayout, baseOffset))
576     return false;
577   NonVirtualBases[base] = (FieldTypes.size() - 1);
578   return true;
579 }
580 
581 bool
582 CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
583                                          CharUnits baseOffset) {
584   // Ignore empty bases.
585   if (base->isEmpty()) return true;
586 
587   const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
588   if (IsZeroInitializable)
589     IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
590 
591   if (!LayoutBase(base, baseLayout, baseOffset))
592     return false;
593   VirtualBases[base] = (FieldTypes.size() - 1);
594   return true;
595 }
596 
597 bool
598 CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
599                                           const ASTRecordLayout &Layout) {
600   if (!RD->getNumVBases())
601     return true;
602 
603   // The vbases list is uniqued and ordered by a depth-first
604   // traversal, which is what we need here.
605   for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
606         E = RD->vbases_end(); I != E; ++I) {
607 
608     const CXXRecordDecl *BaseDecl =
609       cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
610 
611     CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
612     if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
613       return false;
614   }
615   return true;
616 }
617 
/// LayoutVirtualBases - layout the virtual bases of a record decl,
/// recursing into all bases so that indirect virtual bases are found too.
/// Returns false if layout requires a packed struct.
bool
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
      if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
        return false;
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    // Recurse to pick up virtual bases reachable through this base.
    if (!LayoutVirtualBases(BaseDecl, Layout))
      return false;
  }
  return true;
}
649 
/// LayoutNonVirtualBases - lay out, in order: the primary base (or a
/// vf-table pointer if there is no primary base but the layout demands one),
/// then the remaining non-virtual bases, then a vb-table pointer if present.
/// Returns false if layout requires a packed struct.
bool
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // If we have a primary base, lay it out first; it always lives at offset 0.
  if (PrimaryBase) {
    if (!Layout.isPrimaryBaseVirtual()) {
      if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    } else {
      if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
        return false;
    }

  // Otherwise, add a vtable / vf-table if the layout says to do so.
  } else if (Layout.hasOwnVFPtr()) {
    llvm::Type *FunctionType =
      llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                              /*isVarArg=*/true);
    llvm::Type *VTableTy = FunctionType->getPointerTo();

    if (getTypeAlignment(VTableTy) > Alignment) {
      // FIXME: Should we allow this to happen in Sema?
      assert(!Packed && "Alignment is wrong even with packed struct!");
      return false;
    }

    assert(NextFieldOffset.isZero() &&
           "VTable pointer must come first!");
    // The field is a pointer to the vtable, i.e. i32 (...)**.
    AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
      return false;
  }

  // Add a vb-table pointer if the layout insists. An offset of -1 is the
  // "no vb-ptr" sentinel.
  if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
    CharUnits VBPtrOffset = Layout.getVBPtrOffset();
    llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
    AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
    AppendField(VBPtrOffset, Vbptr);
  }

  return true;
}
710 
711 bool
712 CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
713   const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);
714 
715   CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
716   CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
717   CharUnits AlignedNonVirtualTypeSize =
718     NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
719 
720   // First check if we can use the same fields as for the complete class.
721   CharUnits RecordSize = Layout.getSize();
722   if (AlignedNonVirtualTypeSize == RecordSize)
723     return true;
724 
725   // Check if we need padding.
726   CharUnits AlignedNextFieldOffset =
727     NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
728 
729   if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
730     assert(!Packed && "cannot layout even as packed struct");
731     return false; // Needs packing.
732   }
733 
734   bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
735   if (needsPadding) {
736     CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
737     FieldTypes.push_back(getByteArrayType(NumBytes));
738   }
739 
740   BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
741                                                FieldTypes, "", Packed);
742   Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
743 
744   // Pull the padding back off.
745   if (needsPadding)
746     FieldTypes.pop_back();
747 
748   return true;
749 }
750 
/// LayoutFields - lay out non-virtual bases, then all fields (grouping runs
/// of bitfields into shared storage), then the base-subobject type and the
/// virtual bases, and finally tail padding. Returns false as soon as any
/// step requires a packed struct.
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(!Alignment.isZero() && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    if (!LayoutNonVirtualBases(RD, Layout))
      return false;

  unsigned FieldNo = 0;
  const FieldDecl *LastFD = 0;

  for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
       FI != FE; ++FI, ++FieldNo) {
    FieldDecl *FD = *FI;
    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored:
      if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --FieldNo;
        continue;
      }
      LastFD = FD;
    }

    // If this field is a bitfield, layout all of the consecutive
    // non-zero-length bitfields and the last zero-length bitfield; these will
    // all share storage.
    if (FD->isBitField()) {
      // If all we have is a zero-width bitfield, skip it.
      if (FD->getBitWidthValue(Types.getContext()) == 0)
        continue;

      // Layout this range of bitfields. LayoutBitfields advances FI and
      // FieldNo to the last bitfield of the run; the loop increment then
      // steps past it.
      if (!LayoutBitfields(Layout, FieldNo, FI, FE)) {
        assert(!Packed &&
               "Could not layout bitfields even with a packed LLVM struct!");
        return false;
      }
      assert(FI != FE && "Advanced past the last bitfield");
      continue;
    }

    if (!LayoutField(FD, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // Lay out the virtual bases.  The MS ABI uses a different
    // algorithm here due to the lack of primary virtual bases.
    if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
      RD->getIndirectPrimaryBases(IndirectPrimaryBases);
      if (Layout.isPrimaryBaseVirtual())
        IndirectPrimaryBases.insert(Layout.getPrimaryBase());

      if (!LayoutVirtualBases(RD, Layout))
        return false;
    } else {
      if (!MSLayoutVirtualBases(RD, Layout))
        return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}
831 
832 void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
833   ResizeLastBaseFieldIfNecessary(RecordSize);
834 
835   assert(NextFieldOffset <= RecordSize && "Size mismatch!");
836 
837   CharUnits AlignedNextFieldOffset =
838     NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
839 
840   if (AlignedNextFieldOffset == RecordSize) {
841     // We don't need any padding.
842     return;
843   }
844 
845   CharUnits NumPadBytes = RecordSize - NextFieldOffset;
846   AppendBytes(NumPadBytes);
847 }
848 
849 void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
850                                         llvm::Type *fieldType) {
851   CharUnits fieldSize =
852     CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(fieldType));
853 
854   FieldTypes.push_back(fieldType);
855 
856   NextFieldOffset = fieldOffset + fieldSize;
857 }
858 
859 void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
860                                           CharUnits fieldAlignment) {
861   assert(NextFieldOffset <= fieldOffset &&
862          "Incorrect field layout!");
863 
864   // Do nothing if we're already at the right offset.
865   if (fieldOffset == NextFieldOffset) return;
866 
867   // If we're not emitting a packed LLVM type, try to avoid adding
868   // unnecessary padding fields.
869   if (!Packed) {
870     // Round up the field offset to the alignment of the field type.
871     CharUnits alignedNextFieldOffset =
872       NextFieldOffset.RoundUpToAlignment(fieldAlignment);
873     assert(alignedNextFieldOffset <= fieldOffset);
874 
875     // If that's the right offset, we're done.
876     if (alignedNextFieldOffset == fieldOffset) return;
877   }
878 
879   // Otherwise we need explicit padding.
880   CharUnits padding = fieldOffset - NextFieldOffset;
881   AppendBytes(padding);
882 }
883 
884 bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
885   // Check if we have a base to resize.
886   if (!LastLaidOutBase.isValid())
887     return false;
888 
889   // This offset does not overlap with the tail padding.
890   if (offset >= NextFieldOffset)
891     return false;
892 
893   // Restore the field offset and append an i8 array instead.
894   FieldTypes.pop_back();
895   NextFieldOffset = LastLaidOutBase.Offset;
896   AppendBytes(LastLaidOutBase.NonVirtualSize);
897   LastLaidOutBase.invalidate();
898 
899   return true;
900 }
901 
902 llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
903   assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
904 
905   llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
906   if (numBytes > CharUnits::One())
907     Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
908 
909   return Ty;
910 }
911 
912 void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
913   if (numBytes.isZero())
914     return;
915 
916   // Append the padding field
917   AppendField(NextFieldOffset, getByteArrayType(numBytes));
918 }
919 
920 CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
921   if (Packed)
922     return CharUnits::One();
923 
924   return CharUnits::fromQuantity(Types.getDataLayout().getABITypeAlignment(Ty));
925 }
926 
927 CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
928   if (Packed)
929     return CharUnits::One();
930 
931   CharUnits maxAlignment = CharUnits::One();
932   for (size_t i = 0; i != FieldTypes.size(); ++i)
933     maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
934 
935   return maxAlignment;
936 }
937 
938 /// Merge in whether a field of the given type is zero-initializable.
939 void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
940   // This record already contains a member pointer.
941   if (!IsZeroInitializableAsBase)
942     return;
943 
944   // Can only have member pointers if we're compiling C++.
945   if (!Types.getContext().getLangOpts().CPlusPlus)
946     return;
947 
948   const Type *elementType = T->getBaseElementTypeUnsafe();
949 
950   if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
951     if (!Types.getCXXABI().isZeroInitializable(MPT))
952       IsZeroInitializable = IsZeroInitializableAsBase = false;
953   } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
954     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
955     const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
956     if (!Layout.isZeroInitializable())
957       IsZeroInitializable = IsZeroInitializableAsBase = false;
958   }
959 }
960 
/// ComputeRecordLayout - Build the LLVM struct body for D into Ty and wrap
/// the result (base subobject type, field/base/bitfield maps) in a new
/// CGRecordLayout. In asserts builds, cross-checks the LLVM layout against
/// the AST record layout.
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  // Install the computed field types as the struct body.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
    BaseTy = Builder.BaseSubobjectType;
    // The builder only creates a distinct base subobject type when it differs
    // from the complete-object type; otherwise reuse Ty.
    if (!BaseTy) BaseTy = Ty;
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                       Builder.IsZeroInitializableAsBase);

  // Steal the builder's maps via swap rather than copying them.
  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    // The base subobject type must span the non-virtual size rounded up to
    // the non-virtual alignment.
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();
    CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
    CharUnits AlignedNonVirtualTypeSize =
      NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(AlignedNonVirtualTypeSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  const FieldDecl *LastFD = 0;
  bool IsMsStruct = D->isMsStruct(getContext());
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      LastFD = FD;
      continue;
    }

    if (IsMsStruct) {
      // Zero-length bitfields following non-bitfield members are
      // ignored: they get no AST field-offset slot, so back up i to stay in
      // sync (mirrors the same handling in LayoutFields).
      if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
        --i;
        continue;
      }
      LastFD = FD;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName()) {
      LastFD = FD;
      continue;
    }

    // Don't inspect zero-length bitfields.
    if (FD->getBitWidthValue(getContext()) == 0)
      continue;

      unsigned FieldNo = RL->getLLVMFieldNo(FD);
    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(FieldNo);
    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size. For unions we verify that the start is zero and the size
    // is in-bounds.
    if (D->isUnion()) {
      assert(Info.Offset == 0 && "Union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(Info.Offset + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}
1086 
1087 void CGRecordLayout::print(raw_ostream &OS) const {
1088   OS << "<CGRecordLayout\n";
1089   OS << "  LLVMType:" << *CompleteObjectType << "\n";
1090   if (BaseSubobjectType)
1091     OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
1092   OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
1093   OS << "  BitFields:[\n";
1094 
1095   // Print bit-field infos in declaration order.
1096   std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
1097   for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
1098          it = BitFields.begin(), ie = BitFields.end();
1099        it != ie; ++it) {
1100     const RecordDecl *RD = it->first->getParent();
1101     unsigned Index = 0;
1102     for (RecordDecl::field_iterator
1103            it2 = RD->field_begin(); *it2 != it->first; ++it2)
1104       ++Index;
1105     BFIs.push_back(std::make_pair(Index, &it->second));
1106   }
1107   llvm::array_pod_sort(BFIs.begin(), BFIs.end());
1108   for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
1109     OS.indent(4);
1110     BFIs[i].second->print(OS);
1111     OS << "\n";
1112   }
1113 
1114   OS << "]>\n";
1115 }
1116 
/// dump - Print this record layout to standard error; for debugger use.
void CGRecordLayout::dump() const {
  print(llvm::errs());
}
1120 
1121 void CGBitFieldInfo::print(raw_ostream &OS) const {
1122   OS << "<CGBitFieldInfo"
1123      << " Offset:" << Offset
1124      << " Size:" << Size
1125      << " IsSigned:" << IsSigned
1126      << " StorageSize:" << StorageSize
1127      << " StorageAlignment:" << StorageAlignment << ">";
1128 }
1129 
/// dump - Print this bit-field info to standard error; for debugger use.
void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
1133