//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Constant Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

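/// ConstStructBuilder - Incrementally builds a constant struct for an
/// initializer list, appending each field at its expected offset and falling
/// back to a packed layout when a field's natural alignment would overshoot
/// its offset.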
class VISIBILITY_HIDDEN ConstStructBuilder {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;

  bool Packed;

  unsigned NextFieldOffsetInBytes;

  std::vector<llvm::Constant *> Elements;

  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0) { }

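  /// AppendField - Emit the constant initializer for the given field and
  /// append it at FieldOffset (specified in bits), inserting padding or
  /// converting the struct to a packed layout if the field's alignment
  /// requires it.  Returns false if the initializer is not a constant.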
  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
                   const Expr *InitExpr) {
    uint64_t FieldOffsetInBytes = FieldOffset / 8;

    assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
           && "Field offset mismatch!");

    // Emit the field.
    llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
    if (!C)
      return false;

    unsigned FieldAlignment = getAlignment(C);

    // Round up the field offset to the alignment of the field type.
    uint64_t AlignedNextFieldOffsetInBytes =
      llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

    if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
      std::vector<llvm::Constant *> PackedElements;

      assert(!Packed && "Alignment is wrong even with a packed struct!");

      // Convert the struct to a packed struct.
      uint64_t ElementOffsetInBytes = 0;

      for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
        llvm::Constant *C = Elements[i];

        unsigned ElementAlign =
          CGM.getTargetData().getABITypeAlignment(C->getType());
        uint64_t AlignedElementOffsetInBytes =
          llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);

        if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
          // We need some padding.
          uint64_t NumBytes =
            AlignedElementOffsetInBytes - ElementOffsetInBytes;

          const llvm::Type *Ty = llvm::Type::Int8Ty;
          if (NumBytes > 1)
            Ty = CGM.getLLVMContext().getArrayType(Ty, NumBytes);

          llvm::Constant *Padding = CGM.getLLVMContext().getNullValue(Ty);
          PackedElements.push_back(Padding);
          ElementOffsetInBytes += getSizeInBytes(Padding);
        }

        PackedElements.push_back(C);
        ElementOffsetInBytes += getSizeInBytes(C);
      }

      assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
             "Packing the struct changed its size!");

      Elements = PackedElements;
      Packed = true;
      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
      // We need to append padding.
      AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);

      assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
             "Did not add enough padding!");

      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
    }

    // Add the field.
    Elements.push_back(C);
    NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);

    return true;
  }

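  /// AppendPadding - Append NumBytes of zero padding, emitted as an i8 or an
  /// [N x i8] array constant.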
  void AppendPadding(uint64_t NumBytes) {
    if (!NumBytes)
      return;

    const llvm::Type *Ty = llvm::Type::Int8Ty;
    if (NumBytes > 1)
      Ty = CGM.getLLVMContext().getArrayType(Ty, NumBytes);

    llvm::Constant *C = CGM.getLLVMContext().getNullValue(Ty);
    Elements.push_back(C);
    assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");

    NextFieldOffsetInBytes += getSizeInBytes(C);
  }

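  /// AppendTailPadding - Pad the struct out to the total size of the record.
  /// RecordSize is specified in bits.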
  void AppendTailPadding(uint64_t RecordSize) {
    assert(RecordSize % 8 == 0 && "Invalid record size!");

    uint64_t RecordSizeInBytes = RecordSize / 8;
    assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

    unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
    AppendPadding(NumPadBytes);
  }

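  /// Build - Walk the record's fields and the initializer list in parallel,
  /// appending a constant for each initialized field.  Returns false if an
  /// initializer cannot be emitted as a constant or if a named bit-field is
  /// encountered (not supported yet).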
  bool Build(const InitListExpr *ILE) {
    RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);

    unsigned FieldNo = 0;
    unsigned ElementNo = 0;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
         FieldEnd = RD->field_end();
         ElementNo < ILE->getNumInits() && Field != FieldEnd;
         ++Field, ++FieldNo) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;

        // FIXME: Bitfield support.
        return false;
      } else {
        if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
                         ILE->getInit(ElementNo)))
          return false;
      }

      ElementNo++;
    }

    uint64_t LayoutSizeInBytes = Layout.getSize() / 8;

    if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
      // If the struct is bigger than the size of the record type,
      // we must have a flexible array member at the end.
      assert(RD->hasFlexibleArrayMember() &&
             "Must have flexible array member if struct is bigger than type!");

      // No tail padding is necessary.
      return true;
    }

    // Append tail padding if necessary.
    AppendTailPadding(Layout.getSize());

    assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
           "Tail padding mismatch!");

    return true;
  }

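  /// getAlignment - Return the ABI alignment of the constant's type, or 1 if
  /// the struct has already been converted to a packed layout.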
  unsigned getAlignment(const llvm::Constant *C) const {
    if (Packed)
      return 1;

    return CGM.getTargetData().getABITypeAlignment(C->getType());
  }

  uint64_t getSizeInBytes(const llvm::Constant *C) const {
    return CGM.getTargetData().getTypeAllocSize(C->getType());
  }

public:
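  /// BuildStruct - Attempt to build a constant struct for the given
  /// initializer list.  Returns null if the initializer cannot be emitted as
  /// a constant.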
  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
                                     const InitListExpr *ILE) {
    ConstStructBuilder Builder(CGM, CGF);

    if (!Builder.Build(ILE))
      return 0;

    llvm::Constant *Result =
      CGM.getLLVMContext().getConstantStruct(Builder.Elements, Builder.Packed);

    assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
                                    Builder.getAlignment(Result)) ==
           Builder.getSizeInBytes(Result) && "Size mismatch!");

    return Result;
  }
};

class VISIBILITY_HIDDEN ConstExprEmitter :
  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
  CodeGenModule &CGM;
  CodeGenFunction *CGF;
  llvm::LLVMContext &VMContext;
public:
  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  llvm::Constant *VisitStmt(Stmt *S) {
    return 0;
  }

  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }

  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    return Visit(E->getInitializer());
  }

  llvm::Constant *VisitCastExpr(CastExpr* E) {
    // GCC cast to union extension
    if (E->getType()->isUnionType()) {
      const llvm::Type *Ty = ConvertType(E->getType());
      Expr *SubExpr = E->getSubExpr();
      return EmitUnion(CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF),
                       Ty);
    }
    // Explicit and implicit no-op casts
    QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
    if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) {
      return Visit(E->getSubExpr());
    }
    return 0;
  }

  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    return Visit(DAE->getExpr());
  }

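  /// EmitArrayInitialization - Emit a constant for an array initializer list.
  /// String and @encode literals initializing char arrays are visited
  /// directly; otherwise each element is emitted in turn, remaining elements
  /// are zero-initialized, and a packed struct is emitted instead if any
  /// element's type differs from the array element type.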
  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
    std::vector<llvm::Constant*> Elts;
    const llvm::ArrayType *AType =
        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
    unsigned NumInitElements = ILE->getNumInits();
    // FIXME: Check for wide strings
    // FIXME: Check for NumInitElements exactly equal to 1??
    if (NumInitElements > 0 &&
        (isa<StringLiteral>(ILE->getInit(0)) ||
         isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
        ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
      return Visit(ILE->getInit(0));
    const llvm::Type *ElemTy = AType->getElementType();
    unsigned NumElements = AType->getNumElements();

    // Initializing an array requires us to automatically initialize any
    // elements that have not been initialized explicitly.
    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    bool RewriteType = false;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      RewriteType |= (C->getType() != ElemTy);
      Elts.push_back(C);
    }

    // Initialize remaining array elements.
    // FIXME: This doesn't handle member pointers correctly!
    for (; i < NumElements; ++i)
      Elts.push_back(VMContext.getNullValue(ElemTy));

    if (RewriteType) {
      // FIXME: Try to avoid packing the array
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      const llvm::StructType *SType = VMContext.getStructType(Types, true);
      return VMContext.getConstantStruct(SType, Elts);
    }

    return VMContext.getConstantArray(AType, Elts);
  }

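  /// InsertBitfieldIntoStruct - OR the bits of a bit-field initializer, byte
  /// by byte, into the already zero-initialized element list.  The elements
  /// are assumed to be completely packed (see the FIXMEs below).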
  void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
                                FieldDecl* Field, Expr* E) {
    // Calculate the value to insert
    llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF);
    if (!C)
      return;

    llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
    if (!CI) {
      CGM.ErrorUnsupported(E, "bitfield initialization");
      return;
    }
    llvm::APInt V = CI->getValue();

    // Calculate information about the relevant field
    const llvm::Type* Ty = CI->getType();
    const llvm::TargetData &TD = CGM.getTypes().getTargetData();
    unsigned size = TD.getTypeAllocSizeInBits(Ty);
    CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
    unsigned FieldOffset = Info.FieldNo * size;

    FieldOffset += Info.Start;

    // Find where to start the insertion
    // FIXME: This is O(n^2) in the number of bit-fields!
    // FIXME: This won't work if the struct isn't completely packed!
    unsigned offset = 0, i = 0;
    while (offset < (FieldOffset & -8))
      offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType());

    // Advance over zero-sized elements (must terminate in bounds since
    // the bitfield must have a size).
    while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0)
      ++i;

    // Promote the size of V if necessary
    // FIXME: This should never occur, but currently it can because initializer
    // constants are cast to bool, and because clang is not enforcing bitfield
    // width limits.
    if (Info.Size > V.getBitWidth())
      V.zext(Info.Size);

    // Insert the bits into the struct
    // FIXME: This algorithm is only correct on X86!
    // FIXME: This algorithm assumes bit-fields only have byte-size elements!
    unsigned bitsToInsert = Info.Size;
    unsigned curBits = std::min(8 - (FieldOffset & 7), bitsToInsert);
    unsigned byte = V.getLoBits(curBits).getZExtValue() << (FieldOffset & 7);
    do {
      llvm::Constant* byteC =
        llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
      Elts[i] = VMContext.getConstantExprOr(Elts[i], byteC);
      ++i;
      V = V.lshr(curBits);
      bitsToInsert -= curBits;

      if (!bitsToInsert)
        break;

      curBits = bitsToInsert > 8 ? 8 : bitsToInsert;
      byte = V.getLoBits(curBits).getZExtValue();
    } while (true);
  }

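  /// EmitStructInitialization - Emit a constant for a struct initializer
  /// list: zero-initialize every LLVM field, then overwrite the explicitly
  /// initialized fields, splicing bit-fields in with
  /// InsertBitfieldIntoStruct.  If any element's type no longer matches the
  /// struct type, the type is recomputed as a packed struct.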
  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
    // FIXME: Use the returned struct when the builder works well enough.
    ConstStructBuilder::BuildStruct(CGM, CGF, ILE);

    const llvm::StructType *SType =
        cast<llvm::StructType>(ConvertType(ILE->getType()));
    RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
    std::vector<llvm::Constant*> Elts;

    // Initialize the whole structure to zero.
    // FIXME: This doesn't handle member pointers correctly!
    for (unsigned i = 0; i < SType->getNumElements(); ++i) {
      const llvm::Type *FieldTy = SType->getElementType(i);
      Elts.push_back(VMContext.getNullValue(FieldTy));
    }

    // Copy initializer elements. Skip padding fields.
    unsigned EltNo = 0;  // Element number in the ILE.
    bool RewriteType = false;
    for (RecordDecl::field_iterator Field = RD->field_begin(),
                                 FieldEnd = RD->field_end();
         EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) {
      if (Field->isBitField()) {
        if (!Field->getIdentifier())
          continue;
        InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo));
      } else {
        unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field);
        llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo),
                                                 Field->getType(), CGF);
        if (!C) return 0;
        RewriteType |= (C->getType() != Elts[FieldNo]->getType());
        Elts[FieldNo] = C;
      }
      EltNo++;
    }

    if (RewriteType) {
      // FIXME: Make this work for non-packed structs
      assert(SType->isPacked() && "Cannot recreate unpacked structs");
      std::vector<const llvm::Type*> Types;
      for (unsigned i = 0; i < Elts.size(); ++i)
        Types.push_back(Elts[i]->getType());
      SType = VMContext.getStructType(Types, true);
    }

    return VMContext.getConstantStruct(SType, Elts);
  }

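  /// EmitUnion - Wrap the constant for a union member in an anonymous struct,
  /// padding it out to the full size of the union type Ty.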
  llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
    if (!C)
      return 0;

    // Build a struct with the union sub-element as the first member,
    // and pad it out to the appropriate size.
    std::vector<llvm::Constant*> Elts;
    std::vector<const llvm::Type*> Types;
    Elts.push_back(C);
    Types.push_back(C->getType());
    unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
    unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);

    assert(CurSize <= TotalSize && "Union size mismatch!");
    if (unsigned NumPadBytes = TotalSize - CurSize) {
      const llvm::Type *Ty = llvm::Type::Int8Ty;
      if (NumPadBytes > 1)
        Ty = VMContext.getArrayType(Ty, NumPadBytes);

      Elts.push_back(VMContext.getNullValue(Ty));
      Types.push_back(Ty);
    }

    llvm::StructType* STy = VMContext.getStructType(Types, false);
    return VMContext.getConstantStruct(STy, Elts);
  }

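  /// EmitUnionInitialization - Emit a constant for a union initializer list:
  /// value-initialize if no field is initialized, splice a bit-field member
  /// into a byte array, and otherwise emit the initialized member and pad it
  /// via EmitUnion.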
  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
    const llvm::Type *Ty = ConvertType(ILE->getType());

    FieldDecl* curField = ILE->getInitializedFieldInUnion();
    if (!curField) {
      // There's no field to initialize, so value-initialize the union.
#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                   FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return VMContext.getNullValue(Ty);
    }

    if (curField->isBitField()) {
      // Create a dummy byte array for bit-field insertion.
      unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
      llvm::Constant* NV =
        VMContext.getNullValue(llvm::Type::Int8Ty);
      std::vector<llvm::Constant*> Elts(NumElts, NV);

      InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
      const llvm::ArrayType *RetTy =
          VMContext.getArrayType(NV->getType(), NumElts);
      return VMContext.getConstantArray(RetTy, Elts);
    }

    llvm::Constant *InitElem;
    if (ILE->getNumInits() > 0) {
      Expr *Init = ILE->getInit(0);
      InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
    } else {
      InitElem = CGM.EmitNullConstant(curField->getType());
    }
    return EmitUnion(InitElem, Ty);
  }

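  /// EmitVectorInitialization - Emit a constant vector, zero-initializing any
  /// elements that are not explicitly initialized.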
  llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
    const llvm::VectorType *VType =
        cast<llvm::VectorType>(ConvertType(ILE->getType()));
    const llvm::Type *ElemTy = VType->getElementType();
    std::vector<llvm::Constant*> Elts;
    unsigned NumElements = VType->getNumElements();
    unsigned NumInitElements = ILE->getNumInits();

    unsigned NumInitableElts = std::min(NumInitElements, NumElements);

    // Copy initializer elements.
    unsigned i = 0;
    for (; i < NumInitableElts; ++i) {
      Expr *Init = ILE->getInit(i);
      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      if (!C)
        return 0;
      Elts.push_back(C);
    }

    for (; i < NumElements; ++i)
      Elts.push_back(VMContext.getNullValue(ElemTy));

    return VMContext.getConstantVector(VType, Elts);
  }

  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
    return CGM.EmitNullConstant(E->getType());
  }

  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
    if (ILE->getType()->isScalarType()) {
      // We have a scalar in braces. Just use the first element.
      if (ILE->getNumInits() > 0) {
        Expr *Init = ILE->getInit(0);
        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
      }
      return CGM.EmitNullConstant(ILE->getType());
    }

    if (ILE->getType()->isArrayType())
      return EmitArrayInitialization(ILE);

    if (ILE->getType()->isStructureType())
      return EmitStructInitialization(ILE);

    if (ILE->getType()->isUnionType())
      return EmitUnionInitialization(ILE);

    if (ILE->getType()->isVectorType())
      return EmitVectorInitialization(ILE);

    assert(0 && "Unable to handle InitListExpr");
    // Silence the "control reaches end of non-void function" warning.
    // Not reached.
    return 0;
  }

  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
    assert(!E->getType()->isPointerType() && "Strings are always arrays");

    // This must be a string initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    return VMContext.getConstantArray(CGM.GetStringForStringLiteral(E), false);
  }

  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
    // This must be an @encode initializing an array in a static initializer.
    // Don't emit it as the address of the string, emit the string data itself
    // as an inline array.
    std::string Str;
    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());

    // Resize the string to the right size, adding zeros at the end, or
    // truncating as needed.
    Str.resize(CAT->getSize().getZExtValue(), '\0');
    return VMContext.getConstantArray(Str, false);
  }

  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  // Utility methods
  const llvm::Type *ConvertType(QualType T) {
    return CGM.getTypes().ConvertType(T);
  }

public:
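  /// EmitLValue - Emit the constant address of an lvalue expression (compound
  /// literals, globals, functions, string literals, labels, blocks, etc.).
  /// Returns 0 if the expression is not a supported constant lvalue.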
  llvm::Constant *EmitLValue(Expr *E) {
    switch (E->getStmtClass()) {
    default: break;
    case Expr::CompoundLiteralExprClass: {
      // Note that due to the nature of compound literals, this is guaranteed
      // to be the only use of the variable, so we just generate it here.
      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
      llvm::Constant* C = Visit(CLE->getInitializer());
      // FIXME: "Leaked" on failure.
      if (C)
        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
                                     E->getType().isConstQualified(),
                                     llvm::GlobalValue::InternalLinkage,
                                     C, ".compoundliteral");
      return C;
    }
    case Expr::DeclRefExprClass:
    case Expr::QualifiedDeclRefExprClass: {
      NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
        return CGM.GetAddrOfFunction(GlobalDecl(FD));
      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
        // We can never refer to a variable with local storage.
        if (!VD->hasLocalStorage()) {
          if (VD->isFileVarDecl() || VD->hasExternalStorage())
            return CGM.GetAddrOfGlobalVar(VD);
          else if (VD->isBlockVarDecl()) {
            assert(CGF && "Can't access static local vars without CGF");
            return CGF->GetAddrOfStaticLocalVar(VD);
          }
        }
      }
      break;
    }
    case Expr::StringLiteralClass:
      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
    case Expr::ObjCEncodeExprClass:
      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
    case Expr::ObjCStringLiteralClass: {
      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
      llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL);
      return VMContext.getConstantExprBitCast(C, ConvertType(E->getType()));
    }
    case Expr::PredefinedExprClass: {
      // __func__/__FUNCTION__ -> "".  __PRETTY_FUNCTION__ -> "top level".
      std::string Str;
      if (cast<PredefinedExpr>(E)->getIdentType() ==
          PredefinedExpr::PrettyFunction)
        Str = "top level";

      return CGM.GetAddrOfConstantCString(Str, ".tmp");
    }
    case Expr::AddrLabelExprClass: {
      assert(CGF && "Invalid address of label expression outside function.");
      unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
      llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
      return VMContext.getConstantExprIntToPtr(C, ConvertType(E->getType()));
    }
    case Expr::CallExprClass: {
      CallExpr* CE = cast<CallExpr>(E);
      if (CE->isBuiltinCall(CGM.getContext()) !=
            Builtin::BI__builtin___CFStringMakeConstantString)
        break;
      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
      const StringLiteral *Literal = cast<StringLiteral>(Arg);
      // FIXME: need to deal with UCN conversion issues.
      return CGM.GetAddrOfConstantCFString(Literal);
    }
    case Expr::BlockExprClass: {
      std::string FunctionName;
      if (CGF)
        FunctionName = CGF->CurFn->getName();
      else
        FunctionName = "global";

      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
    }
    }

    return 0;
  }
};

}  // end anonymous namespace.

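/// EmitConstantExpr - Try to emit E as an LLVM constant of type DestType.
/// The expression is first handed to the AST-level constant evaluator
/// (EvaluateAsLValue for reference types); if that fails, the
/// ConstExprEmitter visitor is used as a fallback.  Boolean results (i1) are
/// zero-extended to their in-memory type.  Returns 0 if E cannot be emitted
/// as a constant.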
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
                                                QualType DestType,
                                                CodeGenFunction *CGF) {
  Expr::EvalResult Result;

  bool Success = false;

  if (DestType->isReferenceType())
    Success = E->EvaluateAsLValue(Result, Context);
  else
    Success = E->Evaluate(Result, Context);

  if (Success) {
    assert(!Result.HasSideEffects &&
           "Constant expr should not have any side effects!");
    switch (Result.Val.getKind()) {
    case APValue::Uninitialized:
      assert(0 && "Constant expressions should be initialized.");
      return 0;
    case APValue::LValue: {
      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
      llvm::Constant *Offset =
        llvm::ConstantInt::get(llvm::Type::Int64Ty,
                               Result.Val.getLValueOffset());

      llvm::Constant *C;
      if (const Expr *LVBase = Result.Val.getLValueBase()) {
        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));

        // Apply offset if necessary.
        if (!Offset->isNullValue()) {
          const llvm::Type *Type =
            VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
          llvm::Constant *Casted = VMContext.getConstantExprBitCast(C, Type);
          Casted = VMContext.getConstantExprGetElementPtr(Casted, &Offset, 1);
          C = VMContext.getConstantExprBitCast(Casted, C->getType());
        }

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return VMContext.getConstantExprBitCast(C, DestTy);

        return VMContext.getConstantExprPtrToInt(C, DestTy);
      } else {
        C = Offset;

        // Convert to the appropriate type; this could be an lvalue for
        // an integer.
        if (isa<llvm::PointerType>(DestTy))
          return VMContext.getConstantExprIntToPtr(C, DestTy);

        // If the types don't match this should only be a truncate.
        if (C->getType() != DestTy)
          return VMContext.getConstantExprTrunc(C, DestTy);

        return C;
      }
    }
    case APValue::Int: {
      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
                                                 Result.Val.getInt());

      if (C->getType() == llvm::Type::Int1Ty) {
        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
        C = VMContext.getConstantExprZExt(C, BoolTy);
      }
      return C;
    }
    case APValue::ComplexInt: {
      llvm::Constant *Complex[2];

      Complex[0] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntReal());
      Complex[1] = llvm::ConstantInt::get(VMContext,
                                          Result.Val.getComplexIntImag());

      return VMContext.getConstantStruct(Complex, 2);
    }
    case APValue::Float:
      return VMContext.getConstantFP(Result.Val.getFloat());
    case APValue::ComplexFloat: {
      llvm::Constant *Complex[2];

      Complex[0] = VMContext.getConstantFP(Result.Val.getComplexFloatReal());
      Complex[1] = VMContext.getConstantFP(Result.Val.getComplexFloatImag());

      return VMContext.getConstantStruct(Complex, 2);
    }
    case APValue::Vector: {
      llvm::SmallVector<llvm::Constant *, 4> Inits;
      unsigned NumElts = Result.Val.getVectorLength();

      for (unsigned i = 0; i != NumElts; ++i) {
        APValue &Elt = Result.Val.getVectorElt(i);
        if (Elt.isInt())
          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
        else
          Inits.push_back(VMContext.getConstantFP(Elt.getFloat()));
      }
      return VMContext.getConstantVector(&Inits[0], Inits.size());
    }
    }
  }

  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
  if (C && C->getType() == llvm::Type::Int1Ty) {
    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
    C = VMContext.getConstantExprZExt(C, BoolTy);
  }
  return C;
}

llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
  // Always return an LLVM null constant for now; this will change when we
  // get support for IRGen of member pointers.
  return getLLVMContext().getNullValue(getTypes().ConvertType(T));
}