1 //===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Constant Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGObjCRuntime.h"
17 #include "clang/AST/APValue.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/AST/RecordLayout.h"
20 #include "clang/AST/StmtVisitor.h"
21 #include "clang/Basic/Builtins.h"
22 #include "llvm/Constants.h"
23 #include "llvm/Function.h"
24 #include "llvm/GlobalVariable.h"
25 #include "llvm/Support/Compiler.h"
26 #include "llvm/Target/TargetData.h"
27 using namespace clang;
28 using namespace CodeGen;
29 
30 namespace  {
31 
32 class VISIBILITY_HIDDEN ConstStructBuilder {
33   CodeGenModule &CGM;
34   CodeGenFunction *CGF;
35 
36   bool Packed;
37 
38   unsigned NextFieldOffsetInBytes;
39 
40   std::vector<llvm::Constant *> Elements;
41 
42   ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
43     : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0) { }
44 
45   bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
46                    const Expr *InitExpr) {
47     uint64_t FieldOffsetInBytes = FieldOffset / 8;
48 
49     assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
50            && "Field offset mismatch!");
51 
52     // Emit the field.
53     llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
54     if (!C)
55       return false;
56 
57     unsigned FieldAlignment = getAlignment(C);
58 
59     // Round up the field offset to the alignment of the field type.
60     uint64_t AlignedNextFieldOffsetInBytes =
61       llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
62 
63     if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
64       std::vector<llvm::Constant *> PackedElements;
65 
66       assert(!Packed && "Alignment is wrong even with a packed struct!");
67 
68       // Convert the struct to a packed struct.
69       uint64_t ElementOffsetInBytes = 0;
70 
71       for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
72         llvm::Constant *C = Elements[i];
73 
74         unsigned ElementAlign =
75           CGM.getTargetData().getABITypeAlignment(C->getType());
76         uint64_t AlignedElementOffsetInBytes =
77           llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
78 
79         if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
80           // We need some padding.
81           uint64_t NumBytes =
82             AlignedElementOffsetInBytes - ElementOffsetInBytes;
83 
84           const llvm::Type *Ty = llvm::Type::getInt8Ty(CGF->getLLVMContext());
85           if (NumBytes > 1)
86             Ty = llvm::ArrayType::get(Ty, NumBytes);
87 
88           llvm::Constant *Padding = llvm::Constant::getNullValue(Ty);
89           PackedElements.push_back(Padding);
90           ElementOffsetInBytes += getSizeInBytes(Padding);
91         }
92 
93         PackedElements.push_back(C);
94         ElementOffsetInBytes += getSizeInBytes(C);
95       }
96 
97       assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
98              "Packing the struct changed its size!");
99 
100       Elements = PackedElements;
101       Packed = true;
102       AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
103     }
104 
105     if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
106       // We need to append padding.
107       AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
108 
109       assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
110              "Did not add enough padding!");
111 
112       AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
113     }
114 
115     // Add the field.
116     Elements.push_back(C);
117     NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);
118 
119     return true;
120   }
121 
122   bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
123                       const Expr *InitExpr) {
124     llvm::ConstantInt *CI =
125       cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
126                                                            Field->getType(),
127                                                            CGF));
128     // FIXME: Can this ever happen?
129     if (!CI)
130       return false;
131 
132     if (FieldOffset > NextFieldOffsetInBytes * 8) {
133       // We need to add padding.
134       uint64_t NumBytes =
135         llvm::RoundUpToAlignment(FieldOffset -
136                                  NextFieldOffsetInBytes * 8, 8) / 8;
137 
138       AppendPadding(NumBytes);
139     }
140 
141     uint64_t FieldSize =
142       Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
143 
144     llvm::APInt FieldValue = CI->getValue();
145 
146     // Promote the size of FieldValue if necessary
147     // FIXME: This should never occur, but currently it can because initializer
148     // constants are cast to bool, and because clang is not enforcing bitfield
149     // width limits.
150     if (FieldSize > FieldValue.getBitWidth())
151       FieldValue.zext(FieldSize);
152 
153     // Truncate the size of FieldValue to the bit field size.
154     if (FieldSize < FieldValue.getBitWidth())
155       FieldValue.trunc(FieldSize);
156 
157     if (FieldOffset < NextFieldOffsetInBytes * 8) {
158       // Either part of the field or the entire field can go into the previous
159       // byte.
160       assert(!Elements.empty() && "Elements can't be empty!");
161 
162       unsigned BitsInPreviousByte =
163         NextFieldOffsetInBytes * 8 - FieldOffset;
164 
165       bool FitsCompletelyInPreviousByte =
166         BitsInPreviousByte >= FieldValue.getBitWidth();
167 
168       llvm::APInt Tmp = FieldValue;
169 
170       if (!FitsCompletelyInPreviousByte) {
171         unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
172 
173         if (CGM.getTargetData().isBigEndian()) {
174           Tmp = Tmp.lshr(NewFieldWidth);
175           Tmp.trunc(BitsInPreviousByte);
176 
177           // We want the remaining high bits.
178           FieldValue.trunc(NewFieldWidth);
179         } else {
180           Tmp.trunc(BitsInPreviousByte);
181 
182           // We want the remaining low bits.
183           FieldValue = FieldValue.lshr(BitsInPreviousByte);
184           FieldValue.trunc(NewFieldWidth);
185         }
186       }
187 
188       Tmp.zext(8);
189       if (CGM.getTargetData().isBigEndian()) {
190         if (FitsCompletelyInPreviousByte)
191           Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
192       } else {
193         Tmp = Tmp.shl(8 - BitsInPreviousByte);
194       }
195 
196       // Or in the bits that go into the previous byte.
197       Tmp |= cast<llvm::ConstantInt>(Elements.back())->getValue();
198       Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
199 
200       if (FitsCompletelyInPreviousByte)
201         return true;
202     }
203 
204     while (FieldValue.getBitWidth() > 8) {
205       llvm::APInt Tmp;
206 
207       if (CGM.getTargetData().isBigEndian()) {
208         // We want the high bits.
209         Tmp = FieldValue;
210         Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
211         Tmp.trunc(8);
212       } else {
213         // We want the low bits.
214         Tmp = FieldValue;
215         Tmp.trunc(8);
216 
217         FieldValue = FieldValue.lshr(8);
218       }
219 
220       Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
221       NextFieldOffsetInBytes++;
222 
223       FieldValue.trunc(FieldValue.getBitWidth() - 8);
224     }
225 
226     assert(FieldValue.getBitWidth() > 0 &&
227            "Should have at least one bit left!");
228     assert(FieldValue.getBitWidth() <= 8 &&
229            "Should not have more than a byte left!");
230 
231     if (FieldValue.getBitWidth() < 8) {
232       if (CGM.getTargetData().isBigEndian()) {
233         unsigned BitWidth = FieldValue.getBitWidth();
234 
235         FieldValue.zext(8);
236         FieldValue = FieldValue << (8 - BitWidth);
237       } else
238         FieldValue.zext(8);
239     }
240 
241     // Append the last element.
242     Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
243                                               FieldValue));
244     NextFieldOffsetInBytes++;
245     return true;
246   }
247 
248   void AppendPadding(uint64_t NumBytes) {
249     if (!NumBytes)
250       return;
251 
252     const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
253     if (NumBytes > 1)
254       Ty = llvm::ArrayType::get(Ty, NumBytes);
255 
256     llvm::Constant *C = llvm::Constant::getNullValue(Ty);
257     Elements.push_back(C);
258     assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
259 
260     NextFieldOffsetInBytes += getSizeInBytes(C);
261   }
262 
263   void AppendTailPadding(uint64_t RecordSize) {
264     assert(RecordSize % 8 == 0 && "Invalid record size!");
265 
266     uint64_t RecordSizeInBytes = RecordSize / 8;
267     assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
268 
269     unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
270     AppendPadding(NumPadBytes);
271   }
272 
273   bool Build(InitListExpr *ILE) {
274     RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
275     const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
276 
277     unsigned FieldNo = 0;
278     unsigned ElementNo = 0;
279     for (RecordDecl::field_iterator Field = RD->field_begin(),
280          FieldEnd = RD->field_end();
281          ElementNo < ILE->getNumInits() && Field != FieldEnd;
282          ++Field, ++FieldNo) {
283       if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
284         continue;
285 
286       if (Field->isBitField()) {
287         if (!Field->getIdentifier())
288           continue;
289 
290         if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
291                             ILE->getInit(ElementNo)))
292           return false;
293       } else {
294         if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
295                          ILE->getInit(ElementNo)))
296           return false;
297       }
298 
299       ElementNo++;
300     }
301 
302     uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
303 
304     if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
305       // If the struct is bigger than the size of the record type,
306       // we must have a flexible array member at the end.
307       assert(RD->hasFlexibleArrayMember() &&
308              "Must have flexible array member if struct is bigger than type!");
309 
310       // No tail padding is necessary.
311       return true;
312     }
313 
314     // Append tail padding if necessary.
315     AppendTailPadding(Layout.getSize());
316 
317     assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
318            "Tail padding mismatch!");
319 
320     return true;
321   }
322 
323   unsigned getAlignment(const llvm::Constant *C) const {
324     if (Packed)
325       return 1;
326 
327     return CGM.getTargetData().getABITypeAlignment(C->getType());
328   }
329 
330   uint64_t getSizeInBytes(const llvm::Constant *C) const {
331     return CGM.getTargetData().getTypeAllocSize(C->getType());
332   }
333 
334 public:
335   static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
336                                      InitListExpr *ILE) {
337     ConstStructBuilder Builder(CGM, CGF);
338 
339     if (!Builder.Build(ILE))
340       return 0;
341 
342     llvm::Constant *Result =
343       llvm::ConstantStruct::get(CGM.getLLVMContext(),
344                                 Builder.Elements, Builder.Packed);
345 
346     assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
347                                     Builder.getAlignment(Result)) ==
348            Builder.getSizeInBytes(Result) && "Size mismatch!");
349 
350     return Result;
351   }
352 };
353 
/// ConstExprEmitter - Lowers a constant initializer expression to an
/// llvm::Constant.  Every visitor returns null when the expression cannot
/// be emitted as a constant, letting the caller fall back to dynamic
/// initialization.
class VISIBILITY_HIDDEN ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF; // May be null when emitting a global initializer.
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // Default case: any statement kind not handled below is not a constant.
  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  // Parentheses are transparent for constant emission.
  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  // A compound literal's constant value is that of its initializer.
  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  // Casts: only union casts, null member-pointer casts, and no-op casts
  // (same unqualified type) are emitted as constants here.
  llvm::Constant *VisitCastExpr(CastExpr* E) {
    switch (E->getCastKind()) {
    case CastExpr::CK_ToUnion: {
      // GCC cast to union extension
      assert(E->getType()->isUnionType() &&
             "Destination type is not union type!");
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();

      llvm::Constant *C =
        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
      if (!C)
        return 0;

      // Build a struct with the union sub-element as the first member,
      // and padded to the appropriate size
      std::vector<llvm::Constant*> Elts;
      std::vector<const llvm::Type*> Types;
      Elts.push_back(C);
      Types.push_back(C->getType());
      unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
      unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

      assert(CurSize <= TotalSize && "Union size mismatch!");
      if (unsigned NumPadBytes = TotalSize - CurSize) {
        // Pad with zero i8s (or an i8 array) up to the union's full size.
        const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
        if (NumPadBytes > 1)
          Ty = llvm::ArrayType::get(Ty, NumPadBytes);

        Elts.push_back(llvm::Constant::getNullValue(Ty));
        Types.push_back(Ty);
      }

      llvm::StructType* STy =
        llvm::StructType::get(C->getType()->getContext(), Types, false);
      return llvm::ConstantStruct::get(STy, Elts);
    }
    case CastExpr::CK_NullToMemberPointer:
      return CGM.EmitNullConstant(E->getType());
    default: {
      // FIXME: This should be handled by the CK_NoOp cast kind.
      // Explicit and implicit no-op casts
      QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
      if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
          return Visit(E->getSubExpr());
      return 0;
    }
    }
  }

  // A defaulted argument's constant value is that of the default expression.
  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

  /// EmitArrayInitialization - Emit an init list for an array type.
  /// Elements beyond the explicit initializers are zero-filled; if element
  /// constants don't all match the array's element type, the result is
  /// emitted as a packed struct instead of an array.
  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    unsigned NumInitElements = ILE->getNumInits();
    // FIXME: Check for wide strings
    // FIXME: Check for NumInitElements exactly equal to 1??
    // A char array initialized from a string literal / @encode is emitted
    // directly from that sub-expression.
    if (NumInitElements > 0 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
        ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
      return Visit(ILE->getInit(0));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initialising an array requires us to automatically
    // initialise any elements that have not been initialised explicitly
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      // Track whether any element's type differs from the array element
      // type; if so we must fall back to a struct below.
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
                                                            Types, true);
      return llvm::ConstantStruct::get(SType, Elts);
    }

    return llvm::ConstantArray::get(AType, Elts);
  }

  /// EmitStructInitialization - Emit an init list for a struct type.
  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  /// EmitUnionInitialization - Emit an init list for a union type; shares
  /// the struct builder, which emits only the initialized union member.
  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
  }

  /// EmitVectorInitialization - Emit an init list for a vector type,
  /// zero-filling any elements without explicit initializers.
  llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
    const llvm::VectorType *VType =
        cast<llvm::VectorType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = VType->getElementType();
    std::vector<llvm::Constant*> Elts;
    unsigned NumElements = VType->getNumElements();
    unsigned NumInitElements = ILE->getNumInits();

    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      Elts.push_back(C);
    }

    for (; i < NumElements; ++i)
      Elts.push_back(llvm::Constant::getNullValue(ElemTy));

    return llvm::ConstantVector::get(VType, Elts);
  }

  // Value-initialization ({} with no initializers) is the null constant.
  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  // Dispatch an init list on the kind of type being initialized.
  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isStructureType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    if (ILE->getType()->isVectorType())
      return EmitVectorInitialization(ILE);

    assert(0 && "Unable to handle InitListExpr");
    // Get rid of control reaches end of void function warning.
    // Not reached.
    return 0;
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return llvm::ConstantArray::get(VMContext,
                                    CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return llvm::ConstantArray::get(VMContext, Str, false);
  }

  // __extension__ is transparent for constant emission.
  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
  /// EmitLValue - Emit the address denoted by E as a constant, or null if
  /// no constant address can be formed.  Used for the base of constant
  /// lvalue evaluation results.
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstant(CGM.getContext()),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral", 0, false,
                                     E->getType().getAddressSpace());
      return C;
    }
    case Expr::DeclRefExprClass:
    case Expr::QualifiedDeclRefExprClass: {
      // Functions and variables with static storage have constant addresses.
      NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(FD);
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isBlockVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      // __func__/__FUNCTION__ -> "".  __PRETTY_FUNCTION__ -> "top level".
      std::string Str;
      if (cast<PredefinedExpr>(E)->getIdentType() ==
          PredefinedExpr::PrettyFunction)
        Str = "top level";

      return CGM.GetAddrOfConstantCString(Str, ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      // &&label: emitted as an integer label ID converted to a pointer.
      assert(CGF && "Invalid address of label expression outside function.");
      unsigned id =
          CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      llvm::Constant *C =
            llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), id);
      return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      // Only __builtin___CFStringMakeConstantString calls are constant.
      CallExpr* CE = cast<CallExpr>(E);
      if (CE->isBuiltinCall(CGM.getContext()) !=
            Builtin::BI__builtin___CFStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      // A block with no captures is a global; name it after the enclosing
      // function (or "global" at file scope).
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};
668 
669 }  // end anonymous namespace.
670 
/// EmitConstantExpr - Try to emit E as an llvm::Constant of type DestType.
/// First runs the AST-level constant evaluator (Expr::Evaluate /
/// EvaluateAsLValue) and lowers the resulting APValue; if evaluation fails,
/// falls back to the syntax-directed ConstExprEmitter.  Returns null when
/// the expression cannot be emitted as a constant.  CGF may be null when
/// emitting a global initializer.
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  // Reference types bind to the lvalue of the initializer, so evaluate E as
  // an lvalue; everything else is evaluated as an rvalue.
  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success) {
    assert(!Result.HasSideEffects &&
           "Constant expr should not have any side effects!");
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      // An lvalue result is a base (an expression with a constant address,
      // or no base at all) plus a byte offset.
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                               Result.Val.getLValueOffset());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          // Offset via an i8* GEP so the addend is in bytes, then cast
          // back to the base's pointer type.
          const llvm::Type *Type =
            llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getBitCast(C, DestTy);

        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
      } else {
        // No base: the "address" is just the integer offset itself.
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return llvm::ConstantExpr::getIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return llvm::ConstantExpr::getTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      // An i1 (bool) result must be widened to bool's in-memory type.
      if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = llvm::ConstantExpr::getZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      // Complex values are emitted as a two-element { real, imag } struct.
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      return llvm::ConstantStruct::get(VMContext, Complex, 2);
    }
    case APValue::Float:
      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatReal());
      Complex[1] = llvm::ConstantFP::get(VMContext,
                                         Result.Val.getComplexFloatImag());

      return llvm::ConstantStruct::get(VMContext, Complex, 2);
    }
    case APValue::Vector: {
      // Vector elements are either all-int or per-element int/float APValues.
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
      }
      return llvm::ConstantVector::get(&Inits[0], Inits.size());
    }
    }
  }

  // Evaluation failed; fall back to the syntax-directed emitter.
  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  // Same i1 -> memory-type widening as the APValue::Int case above.
  if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = llvm::ConstantExpr::getZExt(C, BoolTy);
  }
  return C;
}
785 
786 llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
787   // No need to check for member pointers when not compiling C++.
788   if (!getContext().getLangOptions().CPlusPlus)
789     return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
790 
791   if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
792 
793     QualType ElementTy = CAT->getElementType();
794 
795     // FIXME: Handle arrays of structs that contain member pointers.
796     if (Context.getBaseElementType(ElementTy)->isMemberPointerType()) {
797       llvm::Constant *Element = EmitNullConstant(ElementTy);
798       uint64_t NumElements = CAT->getSize().getZExtValue();
799       std::vector<llvm::Constant *> Array(NumElements);
800       for (uint64_t i = 0; i != NumElements; ++i)
801         Array[i] = Element;
802 
803       const llvm::ArrayType *ATy =
804         cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
805       return llvm::ConstantArray::get(ATy, Array);
806     }
807   }
808 
809   if (const RecordType *RT = T->getAs<RecordType>()) {
810     const RecordDecl *RD = RT->getDecl();
811     // FIXME: It would be better if there was a way to explicitly compute the
812     // record layout instead of converting to a type.
813     Types.ConvertTagDeclType(RD);
814 
815     const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
816     if (Layout.containsMemberPointer()) {
817       assert(0 && "FIXME: No support for structs with member pointers yet!");
818     }
819   }
820 
821   // FIXME: Handle structs that contain member pointers.
822   if (T->isMemberPointerType())
823     return llvm::Constant::getAllOnesValue(getTypes().ConvertTypeForMem(T));
824 
825   return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
826 }
827