//===--- CGRecordLayoutBuilder.cpp - Record builder helper ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is a helper class used to build CGRecordLayout objects and LLVM types.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayoutBuilder.h"

#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"

using namespace clang;
using namespace CodeGen;

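/// Layout - Lay out the given record decl and accumulate the resulting LLVM
/// field types.  Unions are handled separately; for structs, if the fields
/// cannot be laid out with the record's natural alignment, the builder state
/// is reset and layout is retried with a packed LLVM struct.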
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  AlignmentAsLLVMStruct = 1;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();

  LayoutFields(D);
}

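/// LayoutBitField - Lay out a single bit-field at the given bit offset.
/// Zero-width bit-fields are ignored; otherwise the bit-field is packed into
/// the bits left over from the previous bit-field, with additional bytes
/// appended as needed, and its access info is recorded in LLVMBitFields.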
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  uint64_t TypeSizeInBits = getTypeSizeInBytes(Ty) * 8;

  LLVMBitFields.push_back(LLVMBitFieldInfo(D, FieldOffset / TypeSizeInBits,
                                           FieldOffset % TypeSizeInBits,
                                           FieldSize));

  AppendBytes(NumBytesToAppend);

  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}

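/// LayoutField - Lay out a single struct field at the given bit offset.
/// Bit-fields are delegated to LayoutBitField.  Returns false if the field
/// cannot be placed without switching to a packed LLVM struct.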
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  // Check if we have a pointer to data member in this field.
  CheckForPointerToDataMember(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const PragmaPackAttr *PPA = RD->getAttr<PragmaPackAttr>()) {
      if (PPA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}

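/// LayoutUnion - Lay out a union.  Every non-zero-width member is recorded at
/// offset 0; the member type with the strictest alignment (preferring larger
/// sizes among equally aligned types) becomes the single LLVM field, and tail
/// padding is appended up to the union's full size.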
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");

    if (Field->isBitField()) {
      uint64_t FieldSize =
        Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

      // Ignore zero-sized bit fields.
      if (FieldSize == 0)
        continue;

      // Add the bit field info.
      Types.addBitFieldInfo(*Field, 0, 0, FieldSize);
    } else
      Types.addFieldInfo(*Field, 0);

    HasOnlyZeroSizedBitFields = false;

    const llvm::Type *FieldTy =
      Types.ConvertTypeForMemRecursive(Field->getType());
    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}

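/// LayoutBases - Lay out the bases of a C++ record.  This currently only adds
/// an i8** vtable pointer for dynamic classes that have no primary base.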
void CGRecordLayoutBuilder::LayoutBases(const CXXRecordDecl *RD,
                                        const ASTRecordLayout &Layout) {
  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass() && !Layout.getPrimaryBase()) {
    const llvm::Type *Int8PtrTy =
      llvm::Type::getInt8PtrTy(Types.getLLVMContext());

    assert(NextFieldOffsetInBytes == 0 &&
           "Vtable pointer must come first!");
    AppendField(NextFieldOffsetInBytes, Int8PtrTy->getPointerTo());
  }
}

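/// LayoutFields - Lay out the bases (for C++ records) and all fields of a
/// non-union record, followed by any tail padding.  Returns false if layout
/// failed and should be retried with a packed LLVM struct.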
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
    LayoutBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

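/// AppendTailPadding - Append padding so that the size of the LLVM struct
/// matches the record size (given in bits) from the AST record layout.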
void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}

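/// AppendField - Append a field of the given type at the given byte offset,
/// updating AlignmentAsLLVMStruct and the next field offset.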
void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
                                   getTypeAlignment(FieldTy));

  uint64_t FieldSizeInBytes = getTypeSizeInBytes(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}

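/// AppendPadding - Append padding bytes, if needed, so that the next field
/// can be placed at the given byte offset with the given alignment (or the
/// alignment of the given field type).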
void
CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                     const llvm::Type *FieldTy) {
  AppendPadding(FieldOffsetInBytes, getTypeAlignment(FieldTy));
}

void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignment) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place;
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}

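/// AppendBytes - Append NumBytes of padding as an i8 or [NumBytes x i8]
/// field; does nothing when NumBytes is 0.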
void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  // Append the padding field.
  AppendField(NextFieldOffsetInBytes, Ty);
}

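/// getTypeAlignment - Return the ABI alignment of the given LLVM type in
/// bytes, or 1 when building a packed struct.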
unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}

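/// getTypeSizeInBytes - Return the allocation size of the given LLVM type in
/// bytes.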
uint64_t CGRecordLayoutBuilder::getTypeSizeInBytes(const llvm::Type *Ty) const {
  return Types.getTargetData().getTypeAllocSize(Ty);
}

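/// CheckForPointerToDataMember - Record (in ContainsPointerToDataMember)
/// whether the given type is, or contains, a pointer to data member.  Arrays
/// are checked via their element type and record members via their
/// CGRecordLayout.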
void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
  // This record already contains a pointer to data member.
  if (ContainsPointerToDataMember)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  T = Types.getContext().getBaseElementType(T);

  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!MPT->getPointeeType()->isFunctionType()) {
      // We have a pointer to data member.
      ContainsPointerToDataMember = true;
    }
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    // FIXME: It would be better if there was a way to explicitly compute the
    // record layout instead of converting to a type.
    Types.ConvertTagDeclType(RD);

    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);

    if (Layout.containsPointerToDataMember())
      ContainsPointerToDataMember = true;
  }
}

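/// ComputeLayout - Build the CGRecordLayout (and the corresponding LLVM
/// struct type) for the given record decl, registering field and bit-field
/// access information with the CodeGenTypes instance.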
CGRecordLayout *
CGRecordLayoutBuilder::ComputeLayout(CodeGenTypes &Types,
                                     const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(Types);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(Types.getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);
  assert(Types.getContext().getASTRecordLayout(D).getSize() / 8 ==
         Types.getTargetData().getTypeAllocSize(Ty) &&
         "Type size mismatch!");

  // Add all the field numbers.
  for (unsigned i = 0, e = Builder.LLVMFields.size(); i != e; ++i) {
    const FieldDecl *FD = Builder.LLVMFields[i].first;
    unsigned FieldNo = Builder.LLVMFields[i].second;

    Types.addFieldInfo(FD, FieldNo);
  }

  // Add bitfield info.
  for (unsigned i = 0, e = Builder.LLVMBitFields.size(); i != e; ++i) {
    const LLVMBitFieldInfo &Info = Builder.LLVMBitFields[i];

    Types.addBitFieldInfo(Info.FD, Info.FieldNo, Info.Start, Info.Size);
  }

  return new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
}