1 //===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Constant Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGCXXABI.h"
17 #include "CGObjCRuntime.h"
18 #include "CGRecordLayout.h"
19 #include "clang/AST/APValue.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/RecordLayout.h"
22 #include "clang/AST/StmtVisitor.h"
23 #include "clang/Basic/Builtins.h"
24 #include "llvm/Constants.h"
25 #include "llvm/Function.h"
26 #include "llvm/GlobalVariable.h"
27 #include "llvm/Target/TargetData.h"
28 using namespace clang;
29 using namespace CodeGen;
30 
31 //===----------------------------------------------------------------------===//
32 //                            ConstStructBuilder
33 //===----------------------------------------------------------------------===//
34 
35 namespace {
36 class ConstStructBuilder {
37   CodeGenModule &CGM;
38   CodeGenFunction *CGF;
39 
40   bool Packed;
41   CharUnits NextFieldOffsetInChars;
42   CharUnits LLVMStructAlignment;
43   SmallVector<llvm::Constant *, 32> Elements;
44 public:
45   static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
46                                      InitListExpr *ILE);
47   static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
48                                      const APValue &Value, QualType ValTy);
49 
50 private:
51   ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
52     : CGM(CGM), CGF(CGF), Packed(false),
53     NextFieldOffsetInChars(CharUnits::Zero()),
54     LLVMStructAlignment(CharUnits::One()) { }
55 
56   void AppendVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
57                            const CXXRecordDecl *VTableClass);
58 
59   void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
60                    llvm::Constant *InitExpr);
61 
62   void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
63 
64   void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
65                       llvm::ConstantInt *InitExpr);
66 
67   void AppendPadding(CharUnits PadSize);
68 
69   void AppendTailPadding(CharUnits RecordSize);
70 
71   void ConvertStructToPacked();
72 
73   bool Build(InitListExpr *ILE);
74   void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
75              llvm::Constant *VTable, const CXXRecordDecl *VTableClass,
76              CharUnits BaseOffset);
77   llvm::Constant *Finalize(QualType Ty);
78 
79   CharUnits getAlignment(const llvm::Constant *C) const {
80     if (Packed)  return CharUnits::One();
81     return CharUnits::fromQuantity(
82         CGM.getTargetData().getABITypeAlignment(C->getType()));
83   }
84 
85   CharUnits getSizeInChars(const llvm::Constant *C) const {
86     return CharUnits::fromQuantity(
87         CGM.getTargetData().getTypeAllocSize(C->getType()));
88   }
89 };
90 
91 void ConstStructBuilder::AppendVTablePointer(BaseSubobject Base,
92                                              llvm::Constant *VTable,
93                                              const CXXRecordDecl *VTableClass) {
94   // Find the appropriate vtable within the vtable group.
95   uint64_t AddressPoint =
96     CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
97   llvm::Value *Indices[] = {
98     llvm::ConstantInt::get(CGM.Int64Ty, 0),
99     llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
100   };
101   llvm::Constant *VTableAddressPoint =
102     llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices);
103 
104   // Add the vtable at the start of the object.
105   AppendBytes(Base.getBaseOffset(), VTableAddressPoint);
106 }
107 
108 void ConstStructBuilder::
109 AppendField(const FieldDecl *Field, uint64_t FieldOffset,
110             llvm::Constant *InitCst) {
111   const ASTContext &Context = CGM.getContext();
112 
113   CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
114 
115   AppendBytes(FieldOffsetInChars, InitCst);
116 }
117 
118 void ConstStructBuilder::
119 AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
120 
121   assert(NextFieldOffsetInChars <= FieldOffsetInChars
122          && "Field offset mismatch!");
123 
124   CharUnits FieldAlignment = getAlignment(InitCst);
125 
126   // Round up the field offset to the alignment of the field type.
127   CharUnits AlignedNextFieldOffsetInChars =
128     NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
129 
130   if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
131     assert(!Packed && "Alignment is wrong even with a packed struct!");
132 
133     // Convert the struct to a packed struct.
134     ConvertStructToPacked();
135 
136     AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
137   }
138 
139   if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
140     // We need to append padding.
141     AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
142 
143     assert(NextFieldOffsetInChars == FieldOffsetInChars &&
144            "Did not add enough padding!");
145 
146     AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
147   }
148 
149   // Add the field.
150   Elements.push_back(InitCst);
151   NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
152                            getSizeInChars(InitCst);
153 
154   if (Packed)
155     assert(LLVMStructAlignment == CharUnits::One() &&
156            "Packed struct not byte-aligned!");
157   else
158     LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
159 }
160 
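/// Append the value CI of the bit-field Field, which begins at bit offset
/// FieldOffset, merging bits into the previously emitted byte when the
/// bit-field is not byte-aligned.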
161 void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
162                                         uint64_t FieldOffset,
163                                         llvm::ConstantInt *CI) {
164   const ASTContext &Context = CGM.getContext();
165   const uint64_t CharWidth = Context.getCharWidth();
166   uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
167   if (FieldOffset > NextFieldOffsetInBits) {
168     // We need to add padding.
169     CharUnits PadSize = Context.toCharUnitsFromBits(
170       llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
171                                Context.getTargetInfo().getCharAlign()));
172 
173     AppendPadding(PadSize);
174   }
175 
176   uint64_t FieldSize = Field->getBitWidthValue(Context);
177 
178   llvm::APInt FieldValue = CI->getValue();
179 
180   // Promote the size of FieldValue if necessary
181   // FIXME: This should never occur, but currently it can because initializer
182   // constants are cast to bool, and because clang is not enforcing bitfield
183   // width limits.
184   if (FieldSize > FieldValue.getBitWidth())
185     FieldValue = FieldValue.zext(FieldSize);
186 
187   // Truncate the size of FieldValue to the bit field size.
188   if (FieldSize < FieldValue.getBitWidth())
189     FieldValue = FieldValue.trunc(FieldSize);
190 
191   NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
192   if (FieldOffset < NextFieldOffsetInBits) {
193     // Either part of the field or the entire field can go into the previous
194     // byte.
195     assert(!Elements.empty() && "Elements can't be empty!");
196 
197     unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
198 
199     bool FitsCompletelyInPreviousByte =
200       BitsInPreviousByte >= FieldValue.getBitWidth();
201 
202     llvm::APInt Tmp = FieldValue;
203 
204     if (!FitsCompletelyInPreviousByte) {
205       unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
206 
207       if (CGM.getTargetData().isBigEndian()) {
208         Tmp = Tmp.lshr(NewFieldWidth);
209         Tmp = Tmp.trunc(BitsInPreviousByte);
210 
211         // We want the remaining high bits.
212         FieldValue = FieldValue.trunc(NewFieldWidth);
213       } else {
214         Tmp = Tmp.trunc(BitsInPreviousByte);
215 
216         // We want the remaining low bits.
217         FieldValue = FieldValue.lshr(BitsInPreviousByte);
218         FieldValue = FieldValue.trunc(NewFieldWidth);
219       }
220     }
221 
222     Tmp = Tmp.zext(CharWidth);
223     if (CGM.getTargetData().isBigEndian()) {
224       if (FitsCompletelyInPreviousByte)
225         Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
226     } else {
227       Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
228     }
229 
230     // 'or' in the bits that go into the previous byte.
231     llvm::Value *LastElt = Elements.back();
232     if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
233       Tmp |= Val->getValue();
234     else {
235       assert(isa<llvm::UndefValue>(LastElt));
236       // If there is an undef field that we're adding to, it can either be a
237       // scalar undef (in which case, we just replace it with our field) or it
238       // is an array.  If it is an array, we have to pull one byte off the
239       // array so that the other undef bytes stay around.
240       if (!isa<llvm::IntegerType>(LastElt->getType())) {
241         // The undef padding will be a multibyte array; create a new smaller
242         // padding and then a hole for our i8 to get plopped into.
243         assert(isa<llvm::ArrayType>(LastElt->getType()) &&
244                "Expected array padding of undefs");
245         llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
246         assert(AT->getElementType()->isIntegerTy(CharWidth) &&
247                AT->getNumElements() != 0 &&
248                "Expected non-empty array padding of undefs");
249 
250         // Remove the padding array.
251         NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
252         Elements.pop_back();
253 
254         // Add the padding back in two chunks.
255         AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
256         AppendPadding(CharUnits::One());
257         assert(isa<llvm::UndefValue>(Elements.back()) &&
258                Elements.back()->getType()->isIntegerTy(CharWidth) &&
259                "Padding addition didn't work right");
260       }
261     }
262 
263     Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
264 
265     if (FitsCompletelyInPreviousByte)
266       return;
267   }
268 
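  // Emit full bytes while more than CharWidth bits remain, taking the high or
  // low bits first depending on the target's endianness.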
269   while (FieldValue.getBitWidth() > CharWidth) {
270     llvm::APInt Tmp;
271 
272     if (CGM.getTargetData().isBigEndian()) {
273       // We want the high bits.
274       Tmp =
275         FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
276     } else {
277       // We want the low bits.
278       Tmp = FieldValue.trunc(CharWidth);
279 
280       FieldValue = FieldValue.lshr(CharWidth);
281     }
282 
283     Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
284     ++NextFieldOffsetInChars;
285 
286     FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
287   }
288 
289   assert(FieldValue.getBitWidth() > 0 &&
290          "Should have at least one bit left!");
291   assert(FieldValue.getBitWidth() <= CharWidth &&
292          "Should not have more than a byte left!");
293 
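  // If fewer than CharWidth bits remain, widen the value to a full byte,
  // positioning the bits according to the target's endianness.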
294   if (FieldValue.getBitWidth() < CharWidth) {
295     if (CGM.getTargetData().isBigEndian()) {
296       unsigned BitWidth = FieldValue.getBitWidth();
297 
298       FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
299     } else
300       FieldValue = FieldValue.zext(CharWidth);
301   }
302 
303   // Append the last element.
304   Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
305                                             FieldValue));
306   ++NextFieldOffsetInChars;
307 }
308 
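/// Append PadSize bytes of undef padding and advance the next field offset.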
309 void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
310   if (PadSize.isZero())
311     return;
312 
313   llvm::Type *Ty = CGM.Int8Ty;
314   if (PadSize > CharUnits::One())
315     Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
316 
317   llvm::Constant *C = llvm::UndefValue::get(Ty);
318   Elements.push_back(C);
319   assert(getAlignment(C) == CharUnits::One() &&
320          "Padding must have 1 byte alignment!");
321 
322   NextFieldOffsetInChars += getSizeInChars(C);
323 }
324 
325 void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
326   assert(NextFieldOffsetInChars <= RecordSize &&
327          "Size mismatch!");
328 
329   AppendPadding(RecordSize - NextFieldOffsetInChars);
330 }
331 
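/// Rebuild the accumulated elements as a packed (1-byte-aligned) struct,
/// replacing the padding that natural alignment previously provided implicitly
/// with explicit undef padding.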
332 void ConstStructBuilder::ConvertStructToPacked() {
333   SmallVector<llvm::Constant *, 16> PackedElements;
334   CharUnits ElementOffsetInChars = CharUnits::Zero();
335 
336   for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
337     llvm::Constant *C = Elements[i];
338 
339     CharUnits ElementAlign = CharUnits::fromQuantity(
340       CGM.getTargetData().getABITypeAlignment(C->getType()));
341     CharUnits AlignedElementOffsetInChars =
342       ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
343 
344     if (AlignedElementOffsetInChars > ElementOffsetInChars) {
345       // We need some padding.
346       CharUnits NumChars =
347         AlignedElementOffsetInChars - ElementOffsetInChars;
348 
349       llvm::Type *Ty = CGM.Int8Ty;
350       if (NumChars > CharUnits::One())
351         Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
352 
353       llvm::Constant *Padding = llvm::UndefValue::get(Ty);
354       PackedElements.push_back(Padding);
355       ElementOffsetInChars += getSizeInChars(Padding);
356     }
357 
358     PackedElements.push_back(C);
359     ElementOffsetInChars += getSizeInChars(C);
360   }
361 
362   assert(ElementOffsetInChars == NextFieldOffsetInChars &&
363          "Packing the struct changed its size!");
364 
365   Elements.swap(PackedElements);
366   LLVMStructAlignment = CharUnits::One();
367   Packed = true;
368 }
369 
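/// Emit the field initializers of a struct or union from an initializer list,
/// returning false if the result cannot be built as a constant.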
370 bool ConstStructBuilder::Build(InitListExpr *ILE) {
371   if (ILE->initializesStdInitializerList()) {
372     //CGM.ErrorUnsupported(ILE, "global std::initializer_list");
373     return false;
374   }
375 
376   RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
377   const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
378 
379   unsigned FieldNo = 0;
380   unsigned ElementNo = 0;
381   const FieldDecl *LastFD = 0;
382   bool IsMsStruct = RD->hasAttr<MsStructAttr>();
383 
384   for (RecordDecl::field_iterator Field = RD->field_begin(),
385        FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
386     if (IsMsStruct) {
387       // Zero-length bitfields following non-bitfield members are
388       // ignored:
389       if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
390         --FieldNo;
391         continue;
392       }
393       LastFD = *Field;
394     }
395 
396     // If this is a union, skip all the fields that aren't being initialized.
397     if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
398       continue;
399 
400     // Don't emit anonymous bitfields; they just affect layout.
401     if (Field->isUnnamedBitfield()) {
402       LastFD = *Field;
403       continue;
404     }
405 
406     // Get the initializer.  A struct can include fields without initializers;
407     // we just use explicit null values for them.
408     llvm::Constant *EltInit;
409     if (ElementNo < ILE->getNumInits())
410       EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
411                                      Field->getType(), CGF);
412     else
413       EltInit = CGM.EmitNullConstant(Field->getType());
414 
415     if (!EltInit)
416       return false;
417 
418     if (!Field->isBitField()) {
419       // Handle non-bitfield members.
420       AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
421     } else {
422       // Otherwise we have a bitfield.
423       AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
424                      cast<llvm::ConstantInt>(EltInit));
425     }
426   }
427 
428   return true;
429 }
430 
431 namespace {
432 struct BaseInfo {
433   BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
434     : Decl(Decl), Offset(Offset), Index(Index) {
435   }
436 
437   const CXXRecordDecl *Decl;
438   CharUnits Offset;
439   unsigned Index;
440 
441   bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
442 };
443 }
444 
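/// Append an already-evaluated struct or class value starting at Offset,
/// adding a vtable pointer where needed and visiting non-virtual bases in
/// address order before the fields.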
445 void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
446                                bool IsPrimaryBase, llvm::Constant *VTable,
447                                const CXXRecordDecl *VTableClass,
448                                CharUnits Offset) {
449   const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
450 
451   if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
452     // Add a vtable pointer, if we need one and it hasn't already been added.
453     if (CD->isDynamicClass() && !IsPrimaryBase)
454       AppendVTablePointer(BaseSubobject(CD, Offset), VTable, VTableClass);
455 
456     // Accumulate and sort bases, in order to visit them in address order, which
457     // may not be the same as declaration order.
458     llvm::SmallVector<BaseInfo, 8> Bases;
459     Bases.reserve(CD->getNumBases());
460     unsigned BaseNo = 0;
461     for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
462          BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
463       assert(!Base->isVirtual() && "should not have virtual bases here");
464       const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
465       CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
466       Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
467     }
468     std::stable_sort(Bases.begin(), Bases.end());
469 
470     for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
471       BaseInfo &Base = Bases[I];
472 
473       bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
474       Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
475             VTable, VTableClass, Offset + Base.Offset);
476     }
477   }
478 
479   unsigned FieldNo = 0;
480   const FieldDecl *LastFD = 0;
481   bool IsMsStruct = RD->hasAttr<MsStructAttr>();
482   uint64_t OffsetBits = CGM.getContext().toBits(Offset);
483 
484   for (RecordDecl::field_iterator Field = RD->field_begin(),
485        FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
486     if (IsMsStruct) {
487       // Zero-length bitfields following non-bitfield members are
488       // ignored:
489       if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
490         --FieldNo;
491         continue;
492       }
493       LastFD = *Field;
494     }
495 
496     // If this is a union, skip all the fields that aren't being initialized.
497     if (RD->isUnion() && Val.getUnionField() != *Field)
498       continue;
499 
500     // Don't emit anonymous bitfields; they just affect layout.
501     if (Field->isUnnamedBitfield()) {
502       LastFD = *Field;
503       continue;
504     }
505 
506     // Emit the value of the initializer.
507     const APValue &FieldValue =
508       RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
509     llvm::Constant *EltInit =
510       CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
511     assert(EltInit && "EmitConstantValue can't fail");
512 
513     if (!Field->isBitField()) {
514       // Handle non-bitfield members.
515       AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
516     } else {
517       // Otherwise we have a bitfield.
518       AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
519                      cast<llvm::ConstantInt>(EltInit));
520     }
521   }
522 }
523 
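/// Finish the struct: append any tail padding, fall back to a packed layout
/// if the natural layout would overflow the record size, and return the
/// completed constant.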
524 llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
525   RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
526   const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
527 
528   CharUnits LayoutSizeInChars = Layout.getSize();
529 
530   if (NextFieldOffsetInChars > LayoutSizeInChars) {
531     // If the struct is bigger than the size of the record type,
532     // we must have a flexible array member at the end.
533     assert(RD->hasFlexibleArrayMember() &&
534            "Must have flexible array member if struct is bigger than type!");
535 
536     // No tail padding is necessary.
537   } else {
538     // Append tail padding if necessary.
539     AppendTailPadding(LayoutSizeInChars);
540 
541     CharUnits LLVMSizeInChars =
542       NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
543 
544     // Check if we need to convert the struct to a packed struct.
545     if (NextFieldOffsetInChars <= LayoutSizeInChars &&
546         LLVMSizeInChars > LayoutSizeInChars) {
547       assert(!Packed && "Size mismatch!");
548 
549       ConvertStructToPacked();
550       assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
551              "Converting to packed did not help!");
552     }
553 
554     assert(LayoutSizeInChars == NextFieldOffsetInChars &&
555            "Tail padding mismatch!");
556   }
557 
558   // Pick the type to use.  If the type is layout-identical to the ConvertType
559   // type then use it; otherwise use whatever the builder produced for us.
560   llvm::StructType *STy =
561       llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
562                                                Elements, Packed);
563   llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
564   if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
565     if (ValSTy->isLayoutIdentical(STy))
566       STy = ValSTy;
567   }
568 
569   llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
570 
571   assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
572          getSizeInChars(Result) && "Size mismatch!");
573 
574   return Result;
575 }
576 
577 llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
578                                                 CodeGenFunction *CGF,
579                                                 InitListExpr *ILE) {
580   ConstStructBuilder Builder(CGM, CGF);
581 
582   if (!Builder.Build(ILE))
583     return 0;
584 
585   return Builder.Finalize(ILE->getType());
586 }
587 
588 llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
589                                                 CodeGenFunction *CGF,
590                                                 const APValue &Val,
591                                                 QualType ValTy) {
592   ConstStructBuilder Builder(CGM, CGF);
593 
594   const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
595   const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
596   llvm::Constant *VTable = 0;
597   if (CD && CD->isDynamicClass())
598     VTable = CGM.getVTables().GetAddrOfVTable(CD);
599 
600   Builder.Build(Val, RD, false, VTable, CD, CharUnits::Zero());
601 
602   return Builder.Finalize(ValTy);
603 }
604 
605 
606 //===----------------------------------------------------------------------===//
607 //                             ConstExprEmitter
608 //===----------------------------------------------------------------------===//
609 
610 /// This class only needs to handle two cases:
611 /// 1) Literals (this is used by APValue emission to emit literals).
612 /// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
613 ///    constant fold these types).
614 class ConstExprEmitter :
615   public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
616   CodeGenModule &CGM;
617   CodeGenFunction *CGF;
618   llvm::LLVMContext &VMContext;
619 public:
620   ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
621     : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
622   }
623 
624   //===--------------------------------------------------------------------===//
625   //                            Visitor Methods
626   //===--------------------------------------------------------------------===//
627 
628   llvm::Constant *VisitStmt(Stmt *S) {
629     return 0;
630   }
631 
632   llvm::Constant *VisitParenExpr(ParenExpr *PE) {
633     return Visit(PE->getSubExpr());
634   }
635 
636   llvm::Constant *
637   VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
638     return Visit(PE->getReplacement());
639   }
640 
641   llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
642     return Visit(GE->getResultExpr());
643   }
644 
645   llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
646     return Visit(E->getInitializer());
647   }
648 
649   llvm::Constant *VisitCastExpr(CastExpr* E) {
650     Expr *subExpr = E->getSubExpr();
651     llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
652     if (!C) return 0;
653 
654     llvm::Type *destType = ConvertType(E->getType());
655 
656     switch (E->getCastKind()) {
657     case CK_ToUnion: {
658       // GCC cast to union extension
659       assert(E->getType()->isUnionType() &&
660              "Destination type is not union type!");
661 
662       // Build a struct with the union sub-element as the first member,
663       // padded to the appropriate size.
664       SmallVector<llvm::Constant*, 2> Elts;
665       SmallVector<llvm::Type*, 2> Types;
666       Elts.push_back(C);
667       Types.push_back(C->getType());
668       unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
669       unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(destType);
670 
671       assert(CurSize <= TotalSize && "Union size mismatch!");
672       if (unsigned NumPadBytes = TotalSize - CurSize) {
673         llvm::Type *Ty = CGM.Int8Ty;
674         if (NumPadBytes > 1)
675           Ty = llvm::ArrayType::get(Ty, NumPadBytes);
676 
677         Elts.push_back(llvm::UndefValue::get(Ty));
678         Types.push_back(Ty);
679       }
680 
681       llvm::StructType* STy =
682         llvm::StructType::get(C->getType()->getContext(), Types, false);
683       return llvm::ConstantStruct::get(STy, Elts);
684     }
685 
686     case CK_LValueToRValue:
687     case CK_AtomicToNonAtomic:
688     case CK_NonAtomicToAtomic:
689     case CK_NoOp:
690       return C;
691 
692     case CK_Dependent: llvm_unreachable("saw dependent cast!");
693 
694     case CK_ReinterpretMemberPointer:
695     case CK_DerivedToBaseMemberPointer:
696     case CK_BaseToDerivedMemberPointer:
697       return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
698 
699     // These will never be supported.
700     case CK_ObjCObjectLValueCast:
701     case CK_ARCProduceObject:
702     case CK_ARCConsumeObject:
703     case CK_ARCReclaimReturnedObject:
704     case CK_ARCExtendBlockObject:
705     case CK_CopyAndAutoreleaseBlockObject:
706       return 0;
707 
708     // These don't need to be handled here because Evaluate knows how to
709     // evaluate them in the cases where they can be folded.
710     case CK_BitCast:
711     case CK_ToVoid:
712     case CK_Dynamic:
713     case CK_LValueBitCast:
714     case CK_NullToMemberPointer:
715     case CK_UserDefinedConversion:
716     case CK_ConstructorConversion:
717     case CK_CPointerToObjCPointerCast:
718     case CK_BlockPointerToObjCPointerCast:
719     case CK_AnyPointerToBlockPointerCast:
720     case CK_ArrayToPointerDecay:
721     case CK_FunctionToPointerDecay:
722     case CK_BaseToDerived:
723     case CK_DerivedToBase:
724     case CK_UncheckedDerivedToBase:
725     case CK_MemberPointerToBoolean:
726     case CK_VectorSplat:
727     case CK_FloatingRealToComplex:
728     case CK_FloatingComplexToReal:
729     case CK_FloatingComplexToBoolean:
730     case CK_FloatingComplexCast:
731     case CK_FloatingComplexToIntegralComplex:
732     case CK_IntegralRealToComplex:
733     case CK_IntegralComplexToReal:
734     case CK_IntegralComplexToBoolean:
735     case CK_IntegralComplexCast:
736     case CK_IntegralComplexToFloatingComplex:
737     case CK_PointerToIntegral:
738     case CK_PointerToBoolean:
739     case CK_NullToPointer:
740     case CK_IntegralCast:
741     case CK_IntegralToPointer:
742     case CK_IntegralToBoolean:
743     case CK_IntegralToFloating:
744     case CK_FloatingToIntegral:
745     case CK_FloatingToBoolean:
746     case CK_FloatingCast:
747       return 0;
748     }
749     llvm_unreachable("Invalid CastKind");
750   }
751 
752   llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
753     return Visit(DAE->getExpr());
754   }
755 
756   llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
757     return Visit(E->GetTemporaryExpr());
758   }
759 
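  // Emit a constant for an array initializer list, using the array filler (or
  // a null constant) for any elements that lack explicit initializers.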
760   llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
761     if (ILE->isStringLiteralInit())
762       return Visit(ILE->getInit(0));
763 
764     llvm::ArrayType *AType =
765         cast<llvm::ArrayType>(ConvertType(ILE->getType()));
766     llvm::Type *ElemTy = AType->getElementType();
767     unsigned NumInitElements = ILE->getNumInits();
768     unsigned NumElements = AType->getNumElements();
769 
770     // Initializing an array requires us to automatically
771     // initialize any elements that have not been initialized explicitly.
772     unsigned NumInitableElts = std::min(NumInitElements, NumElements);
773 
774     // Copy initializer elements.
775     std::vector<llvm::Constant*> Elts;
776     Elts.reserve(NumInitableElts + NumElements);
777 
778     bool RewriteType = false;
779     for (unsigned i = 0; i < NumInitableElts; ++i) {
780       Expr *Init = ILE->getInit(i);
781       llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
782       if (!C)
783         return 0;
784       RewriteType |= (C->getType() != ElemTy);
785       Elts.push_back(C);
786     }
787 
788     // Initialize remaining array elements.
789     // FIXME: This doesn't handle member pointers correctly!
790     llvm::Constant *fillC;
791     if (Expr *filler = ILE->getArrayFiller())
792       fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
793     else
794       fillC = llvm::Constant::getNullValue(ElemTy);
795     if (!fillC)
796       return 0;
797     RewriteType |= (fillC->getType() != ElemTy);
798     Elts.resize(NumElements, fillC);
799 
800     if (RewriteType) {
801       // FIXME: Try to avoid packing the array
802       std::vector<llvm::Type*> Types;
803       Types.reserve(NumInitableElts + NumElements);
804       for (unsigned i = 0, e = Elts.size(); i < e; ++i)
805         Types.push_back(Elts[i]->getType());
806       llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
807                                                             Types, true);
808       return llvm::ConstantStruct::get(SType, Elts);
809     }
810 
811     return llvm::ConstantArray::get(AType, Elts);
812   }
813 
814   llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
815     return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
816   }
817 
818   llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
819     return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
820   }
821 
822   llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
823     return CGM.EmitNullConstant(E->getType());
824   }
825 
826   llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
827     if (ILE->getType()->isArrayType())
828       return EmitArrayInitialization(ILE);
829 
830     if (ILE->getType()->isRecordType())
831       return EmitStructInitialization(ILE);
832 
833     if (ILE->getType()->isUnionType())
834       return EmitUnionInitialization(ILE);
835 
836     return 0;
837   }
838 
839   llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
840     if (!E->getConstructor()->isTrivial())
841       return 0;
842 
843     QualType Ty = E->getType();
844 
845     // FIXME: We should not have to call getBaseElementType here.
846     const RecordType *RT =
847       CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
848     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
849 
850     // If the class doesn't have a trivial destructor, we can't emit it as a
851     // constant expr.
852     if (!RD->hasTrivialDestructor())
853       return 0;
854 
855     // Only copy and default constructors can be trivial.
856 
857 
858     if (E->getNumArgs()) {
859       assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
860       assert(E->getConstructor()->isCopyOrMoveConstructor() &&
861              "trivial ctor has argument but isn't a copy/move ctor");
862 
863       Expr *Arg = E->getArg(0);
864       assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
865              "argument to copy ctor is of wrong type");
866 
867       return Visit(Arg);
868     }
869 
870     return CGM.EmitNullConstant(Ty);
871   }
872 
873   llvm::Constant *VisitStringLiteral(StringLiteral *E) {
874     return CGM.GetConstantArrayFromStringLiteral(E);
875   }
876 
877   llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
878     // This must be an @encode initializing an array in a static initializer.
879     // Don't emit it as the address of the string; emit the string data itself
880     // as an inline array.
881     std::string Str;
882     CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
883     const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
884 
885     // Resize the string to the right size, adding zeros at the end, or
886     // truncating as needed.
887     Str.resize(CAT->getSize().getZExtValue(), '\0');
888     return llvm::ConstantDataArray::getString(VMContext, Str, false);
889   }
890 
891   llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
892     return Visit(E->getSubExpr());
893   }
894 
895   // Utility methods
896   llvm::Type *ConvertType(QualType T) {
897     return CGM.getTypes().ConvertType(T);
898   }
899 
900 public:
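  // Emit the address constant for the base of a constant lvalue: either a
  // global declaration or one of the address-yielding expressions handled
  // below.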
901   llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
902     if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
903       if (Decl->hasAttr<WeakRefAttr>())
904         return CGM.GetWeakRefReference(Decl);
905       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
906         return CGM.GetAddrOfFunction(FD);
907       if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
908         // We can never refer to a variable with local storage.
909         if (!VD->hasLocalStorage()) {
910           if (VD->isFileVarDecl() || VD->hasExternalStorage())
911             return CGM.GetAddrOfGlobalVar(VD);
912           else if (VD->isLocalVarDecl()) {
913             assert(CGF && "Can't access static local vars without CGF");
914             return CGF->GetAddrOfStaticLocalVar(VD);
915           }
916         }
917       }
918       return 0;
919     }
920 
921     Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
922     switch (E->getStmtClass()) {
923     default: break;
924     case Expr::CompoundLiteralExprClass: {
925       // Note that due to the nature of compound literals, this is guaranteed
926       // to be the only use of the variable, so we just generate it here.
927       CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
928       llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
929                                                CLE->getType(), CGF);
930       // FIXME: "Leaked" on failure.
931       if (C)
932         C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
933                                      E->getType().isConstant(CGM.getContext()),
934                                      llvm::GlobalValue::InternalLinkage,
935                                      C, ".compoundliteral", 0,
936                                      llvm::GlobalVariable::NotThreadLocal,
937                           CGM.getContext().getTargetAddressSpace(E->getType()));
938       return C;
939     }
940     case Expr::StringLiteralClass:
941       return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
942     case Expr::ObjCEncodeExprClass:
943       return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
944     case Expr::ObjCStringLiteralClass: {
945       ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
946       llvm::Constant *C =
947           CGM.getObjCRuntime().GenerateConstantString(SL->getString());
948       return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
949     }
950     case Expr::PredefinedExprClass: {
951       unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
952       if (CGF) {
953         LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
954         return cast<llvm::Constant>(Res.getAddress());
955       } else if (Type == PredefinedExpr::PrettyFunction) {
956         return CGM.GetAddrOfConstantCString("top level", ".tmp");
957       }
958 
959       return CGM.GetAddrOfConstantCString("", ".tmp");
960     }
961     case Expr::AddrLabelExprClass: {
962       assert(CGF && "Invalid address of label expression outside function.");
963       llvm::Constant *Ptr =
964         CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
965       return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
966     }
967     case Expr::CallExprClass: {
968       CallExpr* CE = cast<CallExpr>(E);
969       unsigned builtin = CE->isBuiltinCall();
970       if (builtin !=
971             Builtin::BI__builtin___CFStringMakeConstantString &&
972           builtin !=
973             Builtin::BI__builtin___NSStringMakeConstantString)
974         break;
975       const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
976       const StringLiteral *Literal = cast<StringLiteral>(Arg);
977       if (builtin ==
978             Builtin::BI__builtin___NSStringMakeConstantString) {
979         return CGM.getObjCRuntime().GenerateConstantString(Literal);
980       }
981       // FIXME: need to deal with UCN conversion issues.
982       return CGM.GetAddrOfConstantCFString(Literal);
983     }
984     case Expr::BlockExprClass: {
985       std::string FunctionName;
986       if (CGF)
987         FunctionName = CGF->CurFn->getName();
988       else
989         FunctionName = "global";
990 
991       return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
992     }
993     case Expr::CXXTypeidExprClass: {
994       CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
995       QualType T;
996       if (Typeid->isTypeOperand())
997         T = Typeid->getTypeOperand();
998       else
999         T = Typeid->getExprOperand()->getType();
1000       return CGM.GetAddrOfRTTIDescriptor(T);
1001     }
1002     }
1003 
1004     return 0;
1005   }
1006 };
1007 
1008 }  // end anonymous namespace.
1009 
1010 llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
1011                                                 CodeGenFunction *CGF) {
1012   if (const APValue *Value = D.evaluateValue())
1013     return EmitConstantValueForMemory(*Value, D.getType(), CGF);
1014 
1015   // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
1016   // reference is a constant expression, and the reference binds to a temporary,
1017   // then constant initialization is performed. ConstExprEmitter will
1018   // incorrectly emit a prvalue constant in this case, and the calling code
1019   // interprets that as the (pointer) value of the reference, rather than the
1020   // desired value of the referee.
1021   if (D.getType()->isReferenceType())
1022     return 0;
1023 
1024   const Expr *E = D.getInit();
1025   assert(E && "No initializer to emit");
1026 
1027   llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
1028   if (C && C->getType()->isIntegerTy(1)) {
1029     llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
1030     C = llvm::ConstantExpr::getZExt(C, BoolTy);
1031   }
1032   return C;
1033 }
1034 
1035 llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
1036                                                 QualType DestType,
1037                                                 CodeGenFunction *CGF) {
1038   Expr::EvalResult Result;
1039 
1040   bool Success = false;
1041 
1042   if (DestType->isReferenceType())
1043     Success = E->EvaluateAsLValue(Result, Context);
1044   else
1045     Success = E->EvaluateAsRValue(Result, Context);
1046 
1047   llvm::Constant *C = 0;
1048   if (Success && !Result.HasSideEffects)
1049     C = EmitConstantValue(Result.Val, DestType, CGF);
1050   else
1051     C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
1052 
1053   if (C && C->getType()->isIntegerTy(1)) {
1054     llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
1055     C = llvm::ConstantExpr::getZExt(C, BoolTy);
1056   }
1057   return C;
1058 }
1059 
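/// Emit an llvm::Constant for an already-evaluated APValue of type DestType.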
1060 llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
1061                                                  QualType DestType,
1062                                                  CodeGenFunction *CGF) {
1063   switch (Value.getKind()) {
1064   case APValue::Uninitialized:
1065     llvm_unreachable("Constant expressions should be initialized.");
1066   case APValue::LValue: {
1067     llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
1068     llvm::Constant *Offset =
1069       llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
1070 
1071     llvm::Constant *C;
1072     if (APValue::LValueBase LVBase = Value.getLValueBase()) {
1073       // An array can be represented as an lvalue referring to the base.
1074       if (isa<llvm::ArrayType>(DestTy)) {
1075         assert(Offset->isNullValue() && "offset on array initializer");
1076         return ConstExprEmitter(*this, CGF).Visit(
1077           const_cast<Expr*>(LVBase.get<const Expr*>()));
1078       }
1079 
1080       C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
1081 
1082       // Apply offset if necessary.
1083       if (!Offset->isNullValue()) {
1084         llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Int8PtrTy);
1085         Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
1086         C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
1087       }
1088 
1089       // Convert to the appropriate type; this could be an lvalue for
1090       // an integer.
1091       if (isa<llvm::PointerType>(DestTy))
1092         return llvm::ConstantExpr::getBitCast(C, DestTy);
1093 
1094       return llvm::ConstantExpr::getPtrToInt(C, DestTy);
1095     } else {
1096       C = Offset;
1097 
1098       // Convert to the appropriate type; this could be an lvalue for
1099       // an integer.
1100       if (isa<llvm::PointerType>(DestTy))
1101         return llvm::ConstantExpr::getIntToPtr(C, DestTy);
1102 
1103       // If the types don't match, this should only be a truncate.
1104       if (C->getType() != DestTy)
1105         return llvm::ConstantExpr::getTrunc(C, DestTy);
1106 
1107       return C;
1108     }
1109   }
1110   case APValue::Int:
1111     return llvm::ConstantInt::get(VMContext, Value.getInt());
1112   case APValue::ComplexInt: {
1113     llvm::Constant *Complex[2];
1114 
1115     Complex[0] = llvm::ConstantInt::get(VMContext,
1116                                         Value.getComplexIntReal());
1117     Complex[1] = llvm::ConstantInt::get(VMContext,
1118                                         Value.getComplexIntImag());
1119 
1120     // FIXME: the target may want to specify that this is packed.
1121     llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1122                                                   Complex[1]->getType(),
1123                                                   NULL);
1124     return llvm::ConstantStruct::get(STy, Complex);
1125   }
1126   case APValue::Float: {
1127     const llvm::APFloat &Init = Value.getFloat();
1128     if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
1129       return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
1130     else
1131       return llvm::ConstantFP::get(VMContext, Init);
1132   }
1133   case APValue::ComplexFloat: {
1134     llvm::Constant *Complex[2];
1135 
1136     Complex[0] = llvm::ConstantFP::get(VMContext,
1137                                        Value.getComplexFloatReal());
1138     Complex[1] = llvm::ConstantFP::get(VMContext,
1139                                        Value.getComplexFloatImag());
1140 
1141     // FIXME: the target may want to specify that this is packed.
1142     llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
1143                                                   Complex[1]->getType(),
1144                                                   NULL);
1145     return llvm::ConstantStruct::get(STy, Complex);
1146   }
1147   case APValue::Vector: {
1148     SmallVector<llvm::Constant *, 4> Inits;
1149     unsigned NumElts = Value.getVectorLength();
1150 
1151     for (unsigned i = 0; i != NumElts; ++i) {
1152       const APValue &Elt = Value.getVectorElt(i);
1153       if (Elt.isInt())
1154         Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
1155       else
1156         Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
1157     }
1158     return llvm::ConstantVector::get(Inits);
1159   }
1160   case APValue::AddrLabelDiff: {
1161     const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
1162     const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
1163     llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
1164     llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
1165 
1166     // Compute the difference.
1167     llvm::Type *ResultType = getTypes().ConvertType(DestType);
1168     LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
1169     RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
1170     llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
1171 
1172     // LLVM is a bit sensitive about the exact format of the
1173     // address-of-label difference; make sure to truncate after
1174     // the subtraction.
1175     return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
1176   }
1177   case APValue::Struct:
1178   case APValue::Union:
1179     return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
1180   case APValue::Array: {
1181     const ArrayType *CAT = Context.getAsArrayType(DestType);
1182     unsigned NumElements = Value.getArraySize();
1183     unsigned NumInitElts = Value.getArrayInitializedElts();
1184 
1185     std::vector<llvm::Constant*> Elts;
1186     Elts.reserve(NumElements);
1187 
1188     // Emit array filler, if there is one.
1189     llvm::Constant *Filler = 0;
1190     if (Value.hasArrayFiller())
1191       Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
1192                                           CAT->getElementType(), CGF);
1193 
1194     // Emit initializer elements.
1195     llvm::Type *CommonElementType = 0;
1196     for (unsigned I = 0; I < NumElements; ++I) {
1197       llvm::Constant *C = Filler;
1198       if (I < NumInitElts)
1199         C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
1200                                        CAT->getElementType(), CGF);
1201       if (I == 0)
1202         CommonElementType = C->getType();
1203       else if (C->getType() != CommonElementType)
1204         CommonElementType = 0;
1205       Elts.push_back(C);
1206     }
1207 
1208     if (!CommonElementType) {
1209       // FIXME: Try to avoid packing the array
1210       std::vector<llvm::Type*> Types;
1211       Types.reserve(NumElements);
1212       for (unsigned i = 0, e = Elts.size(); i < e; ++i)
1213         Types.push_back(Elts[i]->getType());
1214       llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
1215       return llvm::ConstantStruct::get(SType, Elts);
1216     }
1217 
1218     llvm::ArrayType *AType =
1219       llvm::ArrayType::get(CommonElementType, NumElements);
1220     return llvm::ConstantArray::get(AType, Elts);
1221   }
1222   case APValue::MemberPointer:
1223     return getCXXABI().EmitMemberPointer(Value, DestType);
1224   }
1225   llvm_unreachable("Unknown APValue kind");
1226 }
1227 
1228 llvm::Constant *
1229 CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
1230                                           QualType DestType,
1231                                           CodeGenFunction *CGF) {
1232   llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
1233   if (C->getType()->isIntegerTy(1)) {
1234     llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
1235     C = llvm::ConstantExpr::getZExt(C, BoolTy);
1236   }
1237   return C;
1238 }
1239 
1240 llvm::Constant *
1241 CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
1242   assert(E->isFileScope() && "not a file-scope compound literal expr");
1243   return ConstExprEmitter(*this, 0).EmitLValue(E);
1244 }
1245 
1246 llvm::Constant *
1247 CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
1248   // Member pointer constants always have a very particular form.
1249   const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
1250   const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
1251 
1252   // A member function pointer.
1253   if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
1254     return getCXXABI().EmitMemberPointer(method);
1255 
1256   // Otherwise, a member data pointer.
1257   uint64_t fieldOffset = getContext().getFieldOffset(decl);
1258   CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
1259   return getCXXABI().EmitMemberDataPointer(type, chars);
1260 }
1261 
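/// Fill in the bytes of Elements that correspond to pointer-to-data-member
/// subobjects of T (which begins at bit offset StartOffset) with the null
/// representation, -1 (currently hardcoded to the Itanium ABI).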
1262 static void
1263 FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
1264                              SmallVectorImpl<llvm::Constant *> &Elements,
1265                              uint64_t StartOffset) {
1266   assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
1267          "StartOffset not byte aligned!");
1268 
1269   if (CGM.getTypes().isZeroInitializable(T))
1270     return;
1271 
1272   if (const ConstantArrayType *CAT =
1273         CGM.getContext().getAsConstantArrayType(T)) {
1274     QualType ElementTy = CAT->getElementType();
1275     uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
1276 
1277     for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
1278       FillInNullDataMemberPointers(CGM, ElementTy, Elements,
1279                                    StartOffset + I * ElementSize);
1280     }
1281   } else if (const RecordType *RT = T->getAs<RecordType>()) {
1282     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1283     const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
1284 
1285     // Go through all bases and fill in any null pointers to data members.
1286     for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
1287          E = RD->bases_end(); I != E; ++I) {
1288       if (I->isVirtual()) {
1289         // Ignore virtual bases.
1290         continue;
1291       }
1292 
1293       const CXXRecordDecl *BaseDecl =
1294       cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
1295 
1296       // Ignore empty bases.
1297       if (BaseDecl->isEmpty())
1298         continue;
1299 
1300       // Ignore bases that don't have any pointer to data members.
1301       if (CGM.getTypes().isZeroInitializable(BaseDecl))
1302         continue;
1303 
1304       uint64_t BaseOffset =
1305         CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
1306       FillInNullDataMemberPointers(CGM, I->getType(),
1307                                    Elements, StartOffset + BaseOffset);
1308     }
1309 
1310     // Visit all fields.
1311     unsigned FieldNo = 0;
1312     for (RecordDecl::field_iterator I = RD->field_begin(),
1313          E = RD->field_end(); I != E; ++I, ++FieldNo) {
1314       QualType FieldType = I->getType();
1315 
1316       if (CGM.getTypes().isZeroInitializable(FieldType))
1317         continue;
1318 
1319       uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
1320       FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
1321     }
1322   } else {
1323     assert(T->isMemberPointerType() && "Should only see member pointers here!");
1324     assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
1325            "Should only see pointers to data members here!");
1326 
1327     CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
1328     CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);
1329 
1330     // FIXME: hardcodes Itanium member pointer representation!
1331     llvm::Constant *NegativeOne =
1332       llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);
1333 
1334     // Fill in the null data member pointer.
1335     for (CharUnits I = StartIndex; I != EndIndex; ++I)
1336       Elements[I.getQuantity()] = NegativeOne;
1337   }
1338 }
1339 
1340 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1341                                                llvm::Type *baseType,
1342                                                const CXXRecordDecl *base);
1343 
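/// Build the null constant for a C++ record, emitting null constants for its
/// bases and non-bitfield fields; asCompleteObject selects the complete-object
/// layout (including virtual bases) rather than the base-subobject layout.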
1344 static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
1345                                         const CXXRecordDecl *record,
1346                                         bool asCompleteObject) {
1347   const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
1348   llvm::StructType *structure =
1349     (asCompleteObject ? layout.getLLVMType()
1350                       : layout.getBaseSubobjectLLVMType());
1351 
1352   unsigned numElements = structure->getNumElements();
1353   std::vector<llvm::Constant *> elements(numElements);
1354 
1355   // Fill in all the bases.
1356   for (CXXRecordDecl::base_class_const_iterator
1357          I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
1358     if (I->isVirtual()) {
1359       // Ignore virtual bases; if we're laying out for a complete
1360       // object, we'll lay these out later.
1361       continue;
1362     }
1363 
1364     const CXXRecordDecl *base =
1365       cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
1366 
1367     // Ignore empty bases.
1368     if (base->isEmpty())
1369       continue;
1370 
1371     unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
1372     llvm::Type *baseType = structure->getElementType(fieldIndex);
1373     elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1374   }
1375 
1376   // Fill in all the fields.
1377   for (RecordDecl::field_iterator I = record->field_begin(),
1378          E = record->field_end(); I != E; ++I) {
1379     const FieldDecl *field = *I;
1380 
1381     // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
1382     // will fill in later.)
1383     if (!field->isBitField()) {
1384       unsigned fieldIndex = layout.getLLVMFieldNo(field);
1385       elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
1386     }
1387 
1388     // For unions, stop after the first named field.
1389     if (record->isUnion() && field->getDeclName())
1390       break;
1391   }
1392 
1393   // Fill in the virtual bases, if we're working with the complete object.
1394   if (asCompleteObject) {
1395     for (CXXRecordDecl::base_class_const_iterator
1396            I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
1397       const CXXRecordDecl *base =
1398         cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
1399 
1400       // Ignore empty bases.
1401       if (base->isEmpty())
1402         continue;
1403 
1404       unsigned fieldIndex = layout.getVirtualBaseIndex(base);
1405 
1406       // We might have already laid this field out.
1407       if (elements[fieldIndex]) continue;
1408 
1409       llvm::Type *baseType = structure->getElementType(fieldIndex);
1410       elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
1411     }
1412   }
1413 
1414   // Now go through all other fields and zero them out.
1415   for (unsigned i = 0; i != numElements; ++i) {
1416     if (!elements[i])
1417       elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
1418   }
1419 
1420   return llvm::ConstantStruct::get(structure, elements);
1421 }
1422 
1423 /// Emit the null constant for a base subobject.
1424 static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
1425                                                llvm::Type *baseType,
1426                                                const CXXRecordDecl *base) {
1427   const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
1428 
1429   // Just zero out bases that don't have any pointer to data members.
1430   if (baseLayout.isZeroInitializableAsBase())
1431     return llvm::Constant::getNullValue(baseType);
1432 
1433   // If the base type is a struct, we can just use its null constant.
1434   if (isa<llvm::StructType>(baseType)) {
1435     return EmitNullConstant(CGM, base, /*complete*/ false);
1436   }
1437 
1438   // Otherwise, some bases are represented as arrays of i8 if the size
1439   // of the base is smaller than its corresponding LLVM type.  Figure
1440   // out how many elements this base array has.
1441   llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
1442   unsigned numBaseElements = baseArrayType->getNumElements();
1443 
1444   // Fill in null data member pointers.
1445   SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
1446   FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
1447                                baseElements, 0);
1448 
1449   // Now go through all other elements and zero them out.
1450   if (numBaseElements) {
1451     llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
1452     for (unsigned i = 0; i != numBaseElements; ++i) {
1453       if (!baseElements[i])
1454         baseElements[i] = i8_zero;
1455     }
1456   }
1457 
1458   return llvm::ConstantArray::get(baseArrayType, baseElements);
1459 }
1460 
1461 llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
1462   if (getTypes().isZeroInitializable(T))
1463     return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
1464 
1465   if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
1466     llvm::ArrayType *ATy =
1467       cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
1468 
1469     QualType ElementTy = CAT->getElementType();
1470 
1471     llvm::Constant *Element = EmitNullConstant(ElementTy);
1472     unsigned NumElements = CAT->getSize().getZExtValue();
1473 
1474     if (Element->isNullValue())
1475       return llvm::ConstantAggregateZero::get(ATy);
1476 
1477     SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
1478     return llvm::ConstantArray::get(ATy, Array);
1479   }
1480 
1481   if (const RecordType *RT = T->getAs<RecordType>()) {
1482     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1483     return ::EmitNullConstant(*this, RD, /*complete object*/ true);
1484   }
1485 
1486   assert(T->isMemberPointerType() && "Should only see member pointers here!");
1487   assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
1488          "Should only see pointers to data members here!");
1489 
1490   // Itanium C++ ABI 2.3:
1491   //   A NULL pointer is represented as -1.
1492   return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
1493 }
1494 
1495 llvm::Constant *
1496 CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
1497   return ::EmitNullConstant(*this, Record, false);
1498 }
1499