//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type.  Some of the lowering is straightforward, some is not.  We
/// detail some of the complexities and weirdnesses here.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size.  We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run.  ASTRecordLayout
///   contains enough information to determine where the runs break.  Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types.  For example, unsigned x : 24 gets lowered to
///   i24.  This isn't always possible because i24 has a storage size of 32
///   bits, and if it is possible to use that extra byte of padding we must use
///   [3 x i8] instead of i24.  The function clipTailPadding does this.
///   C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; }; // a must be clipped because a struct like B
///   could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores zero-sized bitfields and zero-sized bases but *not*
///   zero-sized fields.  The existing asserts suggest that LLVM assumes that
///   *every* field has an underlying storage type.  Therefore empty structures
///   containing zero-sized subobjects such as empty records or zero-sized
///   arrays still get a zero-sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields.  Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.)  However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout.  This introduces a somewhat awkward extra unnecessary clip stage.
///   The location of the clip is stored internally as a sentinel of type
///   SCISSOR.  If LLVM were updated to read base types (which it probably
///   should, because locations of things such as VBases are bogus in the llvm
///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases.  These bases don't
///   get their own storage because they're laid out as part of another base
///   or at the beginning of the structure.  Determining if a VBase actually
///   gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member.  In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type.  We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// contiguous run.
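  ///
  /// Illustrative example: given struct S { short a : 4; int b : 4; }, the
  /// Itanium path packs both bitfields into one run sharing a single storage
  /// unit, while the discrete (Microsoft) path gives 'a' a short-sized unit
  /// and 'b' a separate int-sized unit because their formal types differ.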
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
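  /// For example, getIntNType(17) rounds up to the next byte multiple and
  /// returns i24.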
  llvm::Type *getIntNType(uint64_t NumBits) {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 (unsigned)llvm::alignTo(NumBits, 8));
  }
  /// \brief Gets an llvm type of size NumBytes and alignment 1.
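  /// For example, getByteArrayType(CharUnits::fromQuantity(3)) returns
  /// [3 x i8], while a single byte yields plain i8.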
  llvm::Type *getByteArrayType(CharUnits NumBytes) {
    assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
    return NumBytes == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
  }
  /// \brief Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
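  /// For example, an Itanium bitfield 'unsigned x : 24' gets i24 storage,
  /// while 'unsigned y : 40' is capped at the size of its declared type and
  /// gets i32.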
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                             (unsigned)Context.toBits(getSize(Type))));
  }
  /// \brief Gets the llvm BaseSubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// \brief Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// \brief Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// \brief Lowers bitfield storage types to i8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// \brief Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// \brief Inserts padding everywhere it's needed.
  void insertPadding();
  /// \brief Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
  : Types(Types), Context(Types.getContext()), D(D),
    RD(dyn_cast<CXXRecordDecl>(D)),
    Layout(Types.getContext().getASTRecordLayout(D)),
    DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
    IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
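  // For example, with a 32-bit storage unit, a field at little-endian
  // Offset 0 with Size 8 lands at big-endian Offset 24 (32 - (0 + 8)).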
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base.  The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing.  It's important that
  //    this phase occur after clipping, because clipping changes the llvm
  //    type.  This phase reads the offset of the capstone when determining
  //    packedness and updates the alignment of the capstone to be equal to
  //    the alignment of the record after doing so.
  // 5) Insert padding everywhere it is needed.  This phase requires 'Packed'
  //    to have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone; we don't need it anymore.
  // 7) Determine if this record can be zero-initialized.  This phase could
  //    have been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
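  // Illustrative walk-through: for struct { int a : 24; char b; } the sorted
  // member list includes an i24 storage member at offset 0, 'b' at offset 3,
  // and an i8 capstone at offset 4; clipping sees 'b' begin inside the i24's
  // 4-byte alloc size and rewrites that storage to [3 x i8] before the
  // capstone is popped back off.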
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion())
    return lowerUnion();
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty())
      return appendPaddingBytes(Size);
    if (!NVBaseType)
      accumulateVBases();
  }
  std::stable_sort(Members.begin(), Members.end());
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
}

void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.  The heuristic for finding the
  // storage type isn't necessary; the first (non-zero-length-bitfield) field's
  // type would work fine and be simpler, but it would differ from what we've
  // been doing and cause lit tests to change.
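  // Illustrative example: in union { int a; double b; char c[12]; } the
  // heuristic prefers double (alignment 8) over both int and the larger but
  // byte-aligned char array, so with a layout size of 16 we emit
  // { double, [8 x i8] }.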
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not try to come up with a "better"
    // type; it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD =
                dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl()))
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) > getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
        getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an i8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
    Field != FieldEnd;)
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
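  // Illustrative example (Itanium path): in
  // struct { unsigned a : 3; unsigned b : 5; } the two bitfields form one run
  // covering bits [0, 8) and share a single i8 storage member.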
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields
  // as contiguous.  StartBitOffset is the offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }


  // Check if the current field is better as a single field run. When the
  // current field has a legal integer width and its bitfield offset is
  // naturally aligned, it is better to make the bitfield a separate storage
  // component so that it can be accessed directly at lower cost.
  auto IsBetterAsSingleFieldRun = [&](RecordDecl::field_iterator Field) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    unsigned Width = Field->getBitWidthValue(Context);
    if (!DataLayout.isLegalInteger(Width))
      return false;
    // Make sure Field is naturally aligned if it is treated as an IType
    // integer.
    if (getFieldBitOffset(*Field) %
            Context.toBits(getAlignment(getIntNType(Width))) !=
        0)
      return false;
    return true;
  };
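  // Illustrative example (assuming i32 is a legal integer type for the
  // target): under -ffine-grained-bitfield-accesses, in
  // struct { unsigned a : 32; unsigned b : 8; } the field 'a' has a legal,
  // naturally aligned 32-bit width, so it becomes its own i32 run rather
  // than being merged into a run with 'b'.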

  // Whether the run's start field is better as a single field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Run);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field is better as a single run, or
    // if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // skip the block below and go ahead and emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Field) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize (data size)
  // that is smaller than the nvsize.  Here we check to see if such a base is
  // placed before the nvsize and set the scissor offset to that, instead of
  // the nvsize.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field && !Prior->FD &&
             "Only storage fields have tail padding!");
      Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
          cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
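    // Illustrative example: an i32 member at offset 2 (say, under
    // #pragma pack(2)) fails this check and forces the whole llvm struct to
    // be packed.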
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed.  We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}

void CGRecordLowering::insertPadding() {
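  // Illustrative example: for a char at offset 0 followed by an i32 at
  // offset 4, an unpacked struct needs no explicit padding (natural
  // alignment already places the i32 at 4), but a packed struct needs an
  // explicit [3 x i8] at offset 1.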
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
        Pad = Padding.begin(), PadEnd = Padding.end();
        Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  std::stable_sort(Members.begin(), Members.end());
}

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
      // on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively layout D while laying D out as a base type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                        Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    // Don't inspect zero-length bitfields.
    if (FD->isZeroLengthBitField(getContext()))
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset
     << " Size:" << Size
     << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}