//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace clang {
namespace CodeGen {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
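  /// (For example, under the Itanium C++ ABI the null data member pointer is
  /// represented as -1 rather than 0, so a record containing such a member
  /// pointer cannot be zero-initialized.)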
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will lay out a field in a union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Try to lay out all fields in the record decl. Returns
  /// false if laying out would require a packed struct but the current
  /// struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - Lay out a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - Lay out the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// LayoutField - Lay out a single field. Returns false if the field would
  /// need a packed struct but the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lay out a single bit-field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that a field with the
  /// given alignment will be laid out at the given byte offset.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to a data member.
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : IsZeroInitializable(true), Packed(false), Types(Types),
      Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

  /// Layout - Will lay out a RecordDecl.
  void Layout(const RecordDecl *D);
};

}
}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  AlignmentAsLLVMStruct = 1;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();
  LLVMNonVirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
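    // For example, with a 32-bit 'int', 'int t : 40' is laid out exactly as
    // 'int t : 32'; the remaining 8 bits are pure padding.
    //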
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
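  //
  // For example, a 16-bit bit-field of 16-bit type at bit offset 24 of a
  // 64-bit record is decomposed into two 16-bit accesses: one covering bits
  // [16,32) of the record (field bits [24,32)), and one covering bits
  // [32,48) (field bits [32,40)).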
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // This bit-field begins inside storage already laid out for the previous
    // bit-field; only append bytes for the bits that don't fit into the
    // space still available there.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

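  // Record how many bits of the last byte we appended are still free; a
  // following bit-field that starts before NextFieldOffsetInBytes * 8 will
  // be packed into them. For example, a 3-bit field appended into a fresh
  // byte leaves 5 bits available.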
  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the next field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero-sized bit-fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    LLVMBitFields.push_back(
      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
                                                       0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

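    // Keep the candidate field whose type is most strictly aligned; among
    // candidates with equal alignment, keep the largest one.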
    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                                                 uint64_t BaseOffset) {
  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  if (BaseDecl->isEmpty()) {
    // FIXME: Lay out empty bases.
    return;
  }

  CheckZeroInitializable(BaseDecl);

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
  AppendPadding(BaseOffset / 8, 1);

  // Append the base field.
  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));

  AppendBytes(NonVirtualSize / 8);
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

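      // The vtable pointer field has type i32 (...)**: a pointer into the
      // vtable, whose slots are themselves pointers to vararg functions.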
      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not lay out fields even with a packed LLVM struct!");
      return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

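  // If rounding the next field offset up to the struct's LLVM alignment
  // already reaches the record size, LLVM's implicit tail padding will
  // produce a struct of the right size on its own.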
  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
                                   getTypeAlignment(FieldTy));

  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignment) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the next field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}

void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  // Append the padding field.
  AppendField(NextFieldOffsetInBytes, Ty);
}

unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}

void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer that can't be
  // zero-initialized; no need to check further.
  if (!IsZeroInitializable)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

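  // Look through array types to the underlying element type; an array of
  // member pointers is just as problematic as a single one.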
  T = Types.getContext().getBaseElementType(T);

  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = false;
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    CheckZeroInitializable(RD);
  }
}

void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
  // This record already contains a member pointer that can't be
  // zero-initialized; no need to check further.
  if (!IsZeroInitializable)
    return;

  // FIXME: It would be better if there were a way to explicitly compute the
  // record layout instead of converting to a type.
  Types.ConvertTagDeclType(RD);

  const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);

  if (!Layout.isZeroInitializable())
    IsZeroInitializable = false;
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, Builder.IsZeroInitializable);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *LLVMType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}