1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the internal per-function state used for llvm translation.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15 #define CLANG_CODEGEN_CODEGENFUNCTION_H
16 
17 #include "clang/AST/Type.h"
18 #include "clang/AST/ExprCXX.h"
19 #include "clang/AST/ExprObjC.h"
20 #include "clang/AST/CharUnits.h"
21 #include "clang/Frontend/CodeGenOptions.h"
22 #include "clang/Basic/ABI.h"
23 #include "clang/Basic/TargetInfo.h"
24 #include "llvm/ADT/ArrayRef.h"
25 #include "llvm/ADT/DenseMap.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/Support/ValueHandle.h"
28 #include "llvm/Support/Debug.h"
29 #include "CodeGenModule.h"
30 #include "CGBuilder.h"
31 #include "CGDebugInfo.h"
32 #include "CGValue.h"
33 
34 namespace llvm {
35   class BasicBlock;
36   class LLVMContext;
37   class MDNode;
38   class Module;
39   class SwitchInst;
40   class Twine;
41   class Value;
42   class CallSite;
43 }
44 
45 namespace clang {
46   class ASTContext;
47   class BlockDecl;
48   class CXXDestructorDecl;
49   class CXXForRangeStmt;
50   class CXXTryStmt;
51   class Decl;
52   class LabelDecl;
53   class EnumConstantDecl;
54   class FunctionDecl;
55   class FunctionProtoType;
56   class LabelStmt;
57   class ObjCContainerDecl;
58   class ObjCInterfaceDecl;
59   class ObjCIvarDecl;
60   class ObjCMethodDecl;
61   class ObjCImplementationDecl;
62   class ObjCPropertyImplDecl;
63   class TargetInfo;
64   class TargetCodeGenInfo;
65   class VarDecl;
66   class ObjCForCollectionStmt;
67   class ObjCAtTryStmt;
68   class ObjCAtThrowStmt;
69   class ObjCAtSynchronizedStmt;
70   class ObjCAutoreleasePoolStmt;
71 
72 namespace CodeGen {
73   class CodeGenTypes;
74   class CGFunctionInfo;
75   class CGRecordLayout;
76   class CGBlockInfo;
77   class CGCXXABI;
78   class BlockFlags;
79   class BlockFieldFlags;
80 
81 /// A branch fixup.  These are required when emitting a goto to a
82 /// label which hasn't been emitted yet.  The goto is optimistically
83 /// emitted as a branch to the basic block for the label, and (if it
84 /// occurs in a scope with non-trivial cleanups) a fixup is added to
85 /// the innermost cleanup.  When a (normal) cleanup is popped, any
86 /// unresolved fixups in that scope are threaded through the cleanup.
87 struct BranchFixup {
88   /// The block containing the terminator which needs to be modified
89   /// into a switch if this fixup is resolved into the current scope.
  /// If null, InitialBranch points directly to the destination.
91   llvm::BasicBlock *OptimisticBranchBlock;
92 
93   /// The ultimate destination of the branch.
94   ///
95   /// This can be set to null to indicate that this fixup was
96   /// successfully resolved.
97   llvm::BasicBlock *Destination;
98 
99   /// The destination index value.
100   unsigned DestinationIndex;
101 
102   /// The initial branch of the fixup.
103   llvm::BranchInst *InitialBranch;
104 };
105 
106 template <class T> struct InvariantValue {
107   typedef T type;
108   typedef T saved_type;
109   static bool needsSaving(type value) { return false; }
110   static saved_type save(CodeGenFunction &CGF, type value) { return value; }
111   static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
112 };
113 
114 /// A metaprogramming class for ensuring that a value will dominate an
115 /// arbitrary position in a function.
116 template <class T> struct DominatingValue : InvariantValue<T> {};
117 
118 template <class T, bool mightBeInstruction =
119             llvm::is_base_of<llvm::Value, T>::value &&
120             !llvm::is_base_of<llvm::Constant, T>::value &&
121             !llvm::is_base_of<llvm::BasicBlock, T>::value>
122 struct DominatingPointer;
123 template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
124 // template <class T> struct DominatingPointer<T,true> at end of file
125 
126 template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
127 
128 enum CleanupKind {
129   EHCleanup = 0x1,
130   NormalCleanup = 0x2,
131   NormalAndEHCleanup = EHCleanup | NormalCleanup,
132 
133   InactiveCleanup = 0x4,
134   InactiveEHCleanup = EHCleanup | InactiveCleanup,
135   InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
136   InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
137 };
138 
139 /// A stack of scopes which respond to exceptions, including cleanups
140 /// and catch blocks.
141 class EHScopeStack {
142 public:
143   /// A saved depth on the scope stack.  This is necessary because
144   /// pushing scopes onto the stack invalidates iterators.
145   class stable_iterator {
146     friend class EHScopeStack;
147 
148     /// Offset from StartOfData to EndOfBuffer.
149     ptrdiff_t Size;
150 
151     stable_iterator(ptrdiff_t Size) : Size(Size) {}
152 
153   public:
154     static stable_iterator invalid() { return stable_iterator(-1); }
155     stable_iterator() : Size(-1) {}
156 
157     bool isValid() const { return Size >= 0; }
158 
159     /// Returns true if this scope encloses I.
160     /// Returns false if I is invalid.
161     /// This scope must be valid.
162     bool encloses(stable_iterator I) const { return Size <= I.Size; }
163 
164     /// Returns true if this scope strictly encloses I: that is,
165     /// if it encloses I and is not I.
    /// Returns false if I is invalid.
167     /// This scope must be valid.
168     bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
169 
170     friend bool operator==(stable_iterator A, stable_iterator B) {
171       return A.Size == B.Size;
172     }
173     friend bool operator!=(stable_iterator A, stable_iterator B) {
174       return A.Size != B.Size;
175     }
176   };
177 
178   /// Information for lazily generating a cleanup.  Subclasses must be
179   /// POD-like: cleanups will not be destructed, and they will be
180   /// allocated on the cleanup stack and freely copied and moved
181   /// around.
182   ///
183   /// Cleanup implementations should generally be declared in an
184   /// anonymous namespace.
185   class Cleanup {
    // Anchor the vtable to this translation unit.
187     virtual void anchor();
188   public:
189     /// Generation flags.
190     class Flags {
191       enum {
192         F_IsForEH             = 0x1,
193         F_IsNormalCleanupKind = 0x2,
194         F_IsEHCleanupKind     = 0x4
195       };
196       unsigned flags;
197 
198     public:
199       Flags() : flags(0) {}
200 
      /// isForEHCleanup - true if the current emission is for an EH cleanup.
202       bool isForEHCleanup() const { return flags & F_IsForEH; }
203       bool isForNormalCleanup() const { return !isForEHCleanup(); }
204       void setIsForEHCleanup() { flags |= F_IsForEH; }
205 
206       bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
207       void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
208 
209       /// isEHCleanupKind - true if the cleanup was pushed as an EH
210       /// cleanup.
211       bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
212       void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
213     };
214 
215     // Provide a virtual destructor to suppress a very common warning
216     // that unfortunately cannot be suppressed without this.  Cleanups
217     // should not rely on this destructor ever being called.
218     virtual ~Cleanup() {}
219 
220     /// Emit the cleanup.  For normal cleanups, this is run in the
221     /// same EH context as when the cleanup was pushed, i.e. the
222     /// immediately-enclosing context of the cleanup scope.  For
223     /// EH cleanups, this is run in a terminate context.
224     ///
    /// \param flags cleanup kind and emission state; use
    ///   flags.isForEHCleanup() to tell whether this emission is for
    ///   an EH cleanup or a normal cleanup.
227     virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
228   };
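  // A minimal sketch of a cleanup subclass, using hypothetical names
  // for illustration; real cleanups are declared in anonymous
  // namespaces in the CodeGen .cpp files:
  //
  //   namespace {
  //     struct MyCleanup : EHScopeStack::Cleanup {
  //       llvm::Value *Addr;   // POD-like saved state only.
  //       MyCleanup(llvm::Value *addr) : Addr(addr) {}
  //       void Emit(CodeGenFunction &CGF, Flags flags) {
  //         // Emit the teardown IR here; flags.isForEHCleanup() says
  //         // whether this emission is on the exceptional path.
  //       }
  //     };
  //   }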
229 
230   /// ConditionalCleanupN stores the saved form of its N parameters,
231   /// then restores them and performs the cleanup.
232   template <class T, class A0>
233   class ConditionalCleanup1 : public Cleanup {
234     typedef typename DominatingValue<A0>::saved_type A0_saved;
235     A0_saved a0_saved;
236 
237     void Emit(CodeGenFunction &CGF, Flags flags) {
238       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
239       T(a0).Emit(CGF, flags);
240     }
241 
242   public:
243     ConditionalCleanup1(A0_saved a0)
244       : a0_saved(a0) {}
245   };
246 
247   template <class T, class A0, class A1>
248   class ConditionalCleanup2 : public Cleanup {
249     typedef typename DominatingValue<A0>::saved_type A0_saved;
250     typedef typename DominatingValue<A1>::saved_type A1_saved;
251     A0_saved a0_saved;
252     A1_saved a1_saved;
253 
254     void Emit(CodeGenFunction &CGF, Flags flags) {
255       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
256       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
257       T(a0, a1).Emit(CGF, flags);
258     }
259 
260   public:
261     ConditionalCleanup2(A0_saved a0, A1_saved a1)
262       : a0_saved(a0), a1_saved(a1) {}
263   };
264 
265   template <class T, class A0, class A1, class A2>
266   class ConditionalCleanup3 : public Cleanup {
267     typedef typename DominatingValue<A0>::saved_type A0_saved;
268     typedef typename DominatingValue<A1>::saved_type A1_saved;
269     typedef typename DominatingValue<A2>::saved_type A2_saved;
270     A0_saved a0_saved;
271     A1_saved a1_saved;
272     A2_saved a2_saved;
273 
274     void Emit(CodeGenFunction &CGF, Flags flags) {
275       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
276       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
277       A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
278       T(a0, a1, a2).Emit(CGF, flags);
279     }
280 
281   public:
282     ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
283       : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
284   };
285 
286   template <class T, class A0, class A1, class A2, class A3>
287   class ConditionalCleanup4 : public Cleanup {
288     typedef typename DominatingValue<A0>::saved_type A0_saved;
289     typedef typename DominatingValue<A1>::saved_type A1_saved;
290     typedef typename DominatingValue<A2>::saved_type A2_saved;
291     typedef typename DominatingValue<A3>::saved_type A3_saved;
292     A0_saved a0_saved;
293     A1_saved a1_saved;
294     A2_saved a2_saved;
295     A3_saved a3_saved;
296 
297     void Emit(CodeGenFunction &CGF, Flags flags) {
298       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
299       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
300       A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
301       A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
302       T(a0, a1, a2, a3).Emit(CGF, flags);
303     }
304 
305   public:
306     ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
307       : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
308   };
309 
310 private:
311   // The implementation for this class is in CGException.h and
312   // CGException.cpp; the definition is here because it's used as a
313   // member of CodeGenFunction.
314 
315   /// The start of the scope-stack buffer, i.e. the allocated pointer
316   /// for the buffer.  All of these pointers are either simultaneously
317   /// null or simultaneously valid.
318   char *StartOfBuffer;
319 
320   /// The end of the buffer.
321   char *EndOfBuffer;
322 
323   /// The first valid entry in the buffer.
324   char *StartOfData;
325 
326   /// The innermost normal cleanup on the stack.
327   stable_iterator InnermostNormalCleanup;
328 
329   /// The innermost EH scope on the stack.
330   stable_iterator InnermostEHScope;
331 
332   /// The current set of branch fixups.  A branch fixup is a jump to
333   /// an as-yet unemitted label, i.e. a label for which we don't yet
334   /// know the EH stack depth.  Whenever we pop a cleanup, we have
335   /// to thread all the current branch fixups through it.
336   ///
337   /// Fixups are recorded as the Use of the respective branch or
338   /// switch statement.  The use points to the final destination.
339   /// When popping out of a cleanup, these uses are threaded through
340   /// the cleanup and adjusted to point to the new cleanup.
341   ///
342   /// Note that branches are allowed to jump into protected scopes
343   /// in certain situations;  e.g. the following code is legal:
344   ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
345   ///     goto foo;
346   ///     A a;
347   ///    foo:
348   ///     bar();
349   SmallVector<BranchFixup, 8> BranchFixups;
350 
351   char *allocate(size_t Size);
352 
353   void *pushCleanup(CleanupKind K, size_t DataSize);
354 
355 public:
356   EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
357                    InnermostNormalCleanup(stable_end()),
358                    InnermostEHScope(stable_end()) {}
359   ~EHScopeStack() { delete[] StartOfBuffer; }
360 
361   // Variadic templates would make this not terrible.
362 
363   /// Push a lazily-created cleanup on the stack.
364   template <class T>
365   void pushCleanup(CleanupKind Kind) {
366     void *Buffer = pushCleanup(Kind, sizeof(T));
367     Cleanup *Obj = new(Buffer) T();
368     (void) Obj;
369   }
370 
371   /// Push a lazily-created cleanup on the stack.
372   template <class T, class A0>
373   void pushCleanup(CleanupKind Kind, A0 a0) {
374     void *Buffer = pushCleanup(Kind, sizeof(T));
375     Cleanup *Obj = new(Buffer) T(a0);
376     (void) Obj;
377   }
378 
379   /// Push a lazily-created cleanup on the stack.
380   template <class T, class A0, class A1>
381   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
382     void *Buffer = pushCleanup(Kind, sizeof(T));
383     Cleanup *Obj = new(Buffer) T(a0, a1);
384     (void) Obj;
385   }
386 
387   /// Push a lazily-created cleanup on the stack.
388   template <class T, class A0, class A1, class A2>
389   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
390     void *Buffer = pushCleanup(Kind, sizeof(T));
391     Cleanup *Obj = new(Buffer) T(a0, a1, a2);
392     (void) Obj;
393   }
394 
395   /// Push a lazily-created cleanup on the stack.
396   template <class T, class A0, class A1, class A2, class A3>
397   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
398     void *Buffer = pushCleanup(Kind, sizeof(T));
399     Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
400     (void) Obj;
401   }
402 
403   /// Push a lazily-created cleanup on the stack.
404   template <class T, class A0, class A1, class A2, class A3, class A4>
405   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
406     void *Buffer = pushCleanup(Kind, sizeof(T));
407     Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
408     (void) Obj;
409   }
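  // Illustrative only: from inside CodeGenFunction (whose EHScopeStack
  // member is named EHStack), a hypothetical MyCleanup taking one
  // constructor argument would be pushed as
  //
  //   EHStack.pushCleanup<MyCleanup>(NormalAndEHCleanup, addr);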
410 
411   // Feel free to add more variants of the following:
412 
413   /// Push a cleanup with non-constant storage requirements on the
414   /// stack.  The cleanup type must provide an additional static method:
415   ///   static size_t getExtraSize(size_t);
416   /// The argument to this method will be the value N, which will also
417   /// be passed as the first argument to the constructor.
418   ///
419   /// The data stored in the extra storage must obey the same
420   /// restrictions as normal cleanup member data.
421   ///
422   /// The pointer returned from this method is valid until the cleanup
423   /// stack is modified.
424   template <class T, class A0, class A1, class A2>
425   T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
426     void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
427     return new (Buffer) T(N, a0, a1, a2);
428   }
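  // A sketch, with hypothetical names, of a cleanup sized at push time;
  // the extra bytes follow the object and must obey the same POD-like
  // restrictions as ordinary cleanup members:
  //
  //   struct MyArrayCleanup : EHScopeStack::Cleanup {
  //     size_t NumElts;
  //     static size_t getExtraSize(size_t N) {
  //       return N * sizeof(llvm::Value*);
  //     }
  //     MyArrayCleanup(size_t N, llvm::Value *addr, QualType ty, bool useEH)
  //       : NumElts(N) {}
  //     void Emit(CodeGenFunction &CGF, Flags flags) { /* teardown IR */ }
  //   };
  //
  //   MyArrayCleanup *cleanup =
  //     EHStack.pushCleanupWithExtra<MyArrayCleanup>(NormalCleanup, numElts,
  //                                                  addr, ty, useEH);
  //   // Fill in the trailing storage through 'cleanup' before the
  //   // cleanup stack is next modified.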
429 
430   /// Pops a cleanup scope off the stack.  This is private to CGCleanup.cpp.
431   void popCleanup();
432 
433   /// Push a set of catch handlers on the stack.  The catch is
434   /// uninitialized and will need to have the given number of handlers
435   /// set on it.
436   class EHCatchScope *pushCatch(unsigned NumHandlers);
437 
438   /// Pops a catch scope off the stack.  This is private to CGException.cpp.
439   void popCatch();
440 
  /// Push an exception filter on the stack.
442   class EHFilterScope *pushFilter(unsigned NumFilters);
443 
  /// Pops an exception filter off the stack.
445   void popFilter();
446 
447   /// Push a terminate handler on the stack.
448   void pushTerminate();
449 
450   /// Pops a terminate handler off the stack.
451   void popTerminate();
452 
453   /// Determines whether the exception-scopes stack is empty.
454   bool empty() const { return StartOfData == EndOfBuffer; }
455 
456   bool requiresLandingPad() const {
457     return InnermostEHScope != stable_end();
458   }
459 
460   /// Determines whether there are any normal cleanups on the stack.
461   bool hasNormalCleanups() const {
462     return InnermostNormalCleanup != stable_end();
463   }
464 
465   /// Returns the innermost normal cleanup on the stack, or
466   /// stable_end() if there are no normal cleanups.
467   stable_iterator getInnermostNormalCleanup() const {
468     return InnermostNormalCleanup;
469   }
470   stable_iterator getInnermostActiveNormalCleanup() const;
471 
472   stable_iterator getInnermostEHScope() const {
473     return InnermostEHScope;
474   }
475 
476   stable_iterator getInnermostActiveEHScope() const;
477 
478   /// An unstable reference to a scope-stack depth.  Invalidated by
479   /// pushes but not pops.
480   class iterator;
481 
482   /// Returns an iterator pointing to the innermost EH scope.
483   iterator begin() const;
484 
485   /// Returns an iterator pointing to the outermost EH scope.
486   iterator end() const;
487 
488   /// Create a stable reference to the top of the EH stack.  The
489   /// returned reference is valid until that scope is popped off the
490   /// stack.
491   stable_iterator stable_begin() const {
492     return stable_iterator(EndOfBuffer - StartOfData);
493   }
494 
495   /// Create a stable reference to the bottom of the EH stack.
496   static stable_iterator stable_end() {
497     return stable_iterator(0);
498   }
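  // A sketch of typical usage from CodeGenFunction (which owns an
  // EHScopeStack member named EHStack): capture the current depth,
  // push scopes, and later compare against or pop back to the saved
  // depth, since stable_iterators survive pushes.
  //
  //   EHScopeStack::stable_iterator depth = EHStack.stable_begin();
  //   // ... push some cleanups ...
  //   assert(depth.encloses(EHStack.stable_begin()));
  //   PopCleanupBlocks(depth);   // CodeGenFunction::PopCleanupBlocks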
499 
500   /// Translates an iterator into a stable_iterator.
501   stable_iterator stabilize(iterator it) const;
502 
  /// Turn a stable reference to a scope depth into an unstable pointer
504   /// to the EH stack.
505   iterator find(stable_iterator save) const;
506 
507   /// Removes the cleanup pointed to by the given stable_iterator.
508   void removeCleanup(stable_iterator save);
509 
510   /// Add a branch fixup to the current cleanup scope.
511   BranchFixup &addBranchFixup() {
512     assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
513     BranchFixups.push_back(BranchFixup());
514     return BranchFixups.back();
515   }
516 
517   unsigned getNumBranchFixups() const { return BranchFixups.size(); }
518   BranchFixup &getBranchFixup(unsigned I) {
519     assert(I < getNumBranchFixups());
520     return BranchFixups[I];
521   }
522 
523   /// Pops lazily-removed fixups from the end of the list.  This
524   /// should only be called by procedures which have just popped a
525   /// cleanup or resolved one or more fixups.
526   void popNullFixups();
527 
528   /// Clears the branch-fixups list.  This should only be called by
529   /// ResolveAllBranchFixups.
530   void clearFixups() { BranchFixups.clear(); }
531 };
532 
533 /// CodeGenFunction - This class organizes the per-function state that is used
534 /// while generating LLVM code.
535 class CodeGenFunction : public CodeGenTypeCache {
536   CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
537   void operator=(const CodeGenFunction&);  // DO NOT IMPLEMENT
538 
539   friend class CGCXXABI;
540 public:
541   /// A jump destination is an abstract label, branching to which may
542   /// require a jump out through normal cleanups.
543   struct JumpDest {
544     JumpDest() : Block(0), ScopeDepth(), Index(0) {}
545     JumpDest(llvm::BasicBlock *Block,
546              EHScopeStack::stable_iterator Depth,
547              unsigned Index)
548       : Block(Block), ScopeDepth(Depth), Index(Index) {}
549 
550     bool isValid() const { return Block != 0; }
551     llvm::BasicBlock *getBlock() const { return Block; }
552     EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
553     unsigned getDestIndex() const { return Index; }
554 
555   private:
556     llvm::BasicBlock *Block;
557     EHScopeStack::stable_iterator ScopeDepth;
558     unsigned Index;
559   };
560 
561   CodeGenModule &CGM;  // Per-module state.
562   const TargetInfo &Target;
563 
564   typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
565   CGBuilderTy Builder;
566 
567   /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
568   /// This excludes BlockDecls.
569   const Decl *CurFuncDecl;
570   /// CurCodeDecl - This is the inner-most code context, which includes blocks.
571   const Decl *CurCodeDecl;
572   const CGFunctionInfo *CurFnInfo;
573   QualType FnRetTy;
574   llvm::Function *CurFn;
575 
576   /// CurGD - The GlobalDecl for the current function being compiled.
577   GlobalDecl CurGD;
578 
579   /// PrologueCleanupDepth - The cleanup depth enclosing all the
580   /// cleanups associated with the parameters.
581   EHScopeStack::stable_iterator PrologueCleanupDepth;
582 
583   /// ReturnBlock - Unified return block.
584   JumpDest ReturnBlock;
585 
586   /// ReturnValue - The temporary alloca to hold the return value. This is null
587   /// iff the function has no return value.
588   llvm::Value *ReturnValue;
589 
  /// AllocaInsertPt - This is an instruction in the entry block before which
591   /// we prefer to insert allocas.
592   llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
593 
594   bool CatchUndefined;
595 
596   /// In ARC, whether we should autorelease the return value.
597   bool AutoreleaseResult;
598 
599   const CodeGen::CGBlockInfo *BlockInfo;
600   llvm::Value *BlockPointer;
601 
602   llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
603   FieldDecl *LambdaThisCaptureField;
604 
605   /// \brief A mapping from NRVO variables to the flags used to indicate
606   /// when the NRVO has been applied to this variable.
607   llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
608 
609   EHScopeStack EHStack;
610 
  /// An i32 alloca holding the index of the cleanup destination to branch to
  /// after running normal cleanups.
612   llvm::AllocaInst *NormalCleanupDest;
613 
614   unsigned NextCleanupDestIndex;
615 
616   /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
617   CGBlockInfo *FirstBlockInfo;
618 
619   /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
620   llvm::BasicBlock *EHResumeBlock;
621 
622   /// The exception slot.  All landing pads write the current exception pointer
623   /// into this alloca.
624   llvm::Value *ExceptionSlot;
625 
626   /// The selector slot.  Under the MandatoryCleanup model, all landing pads
627   /// write the current selector value into this alloca.
628   llvm::AllocaInst *EHSelectorSlot;
629 
630   /// Emits a landing pad for the current EH stack.
631   llvm::BasicBlock *EmitLandingPad();
632 
633   llvm::BasicBlock *getInvokeDestImpl();
634 
635   template <class T>
636   typename DominatingValue<T>::saved_type saveValueInCond(T value) {
637     return DominatingValue<T>::save(*this, value);
638   }
639 
640 public:
641   /// ObjCEHValueStack - Stack of Objective-C exception values, used for
642   /// rethrows.
643   SmallVector<llvm::Value*, 8> ObjCEHValueStack;
644 
645   /// A class controlling the emission of a finally block.
646   class FinallyInfo {
647     /// Where the catchall's edge through the cleanup should go.
648     JumpDest RethrowDest;
649 
650     /// A function to call to enter the catch.
651     llvm::Constant *BeginCatchFn;
652 
653     /// An i1 variable indicating whether or not the @finally is
654     /// running for an exception.
655     llvm::AllocaInst *ForEHVar;
656 
657     /// An i8* variable into which the exception pointer to rethrow
658     /// has been saved.
659     llvm::AllocaInst *SavedExnVar;
660 
661   public:
662     void enter(CodeGenFunction &CGF, const Stmt *Finally,
663                llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
664                llvm::Constant *rethrowFn);
665     void exit(CodeGenFunction &CGF);
666   };
667 
668   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
669   /// current full-expression.  Safe against the possibility that
670   /// we're currently inside a conditionally-evaluated expression.
671   template <class T, class A0>
672   void pushFullExprCleanup(CleanupKind kind, A0 a0) {
673     // If we're not in a conditional branch, or if none of the
674     // arguments requires saving, then use the unconditional cleanup.
675     if (!isInConditionalBranch())
676       return EHStack.pushCleanup<T>(kind, a0);
677 
678     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
679 
680     typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
681     EHStack.pushCleanup<CleanupType>(kind, a0_saved);
682     initFullExprCleanup();
683   }
684 
685   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
686   /// current full-expression.  Safe against the possibility that
687   /// we're currently inside a conditionally-evaluated expression.
688   template <class T, class A0, class A1>
689   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
690     // If we're not in a conditional branch, or if none of the
691     // arguments requires saving, then use the unconditional cleanup.
692     if (!isInConditionalBranch())
693       return EHStack.pushCleanup<T>(kind, a0, a1);
694 
695     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
696     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
697 
698     typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
699     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
700     initFullExprCleanup();
701   }
702 
703   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
704   /// current full-expression.  Safe against the possibility that
705   /// we're currently inside a conditionally-evaluated expression.
706   template <class T, class A0, class A1, class A2>
707   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
708     // If we're not in a conditional branch, or if none of the
709     // arguments requires saving, then use the unconditional cleanup.
710     if (!isInConditionalBranch()) {
711       return EHStack.pushCleanup<T>(kind, a0, a1, a2);
712     }
713 
714     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
715     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
716     typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
717 
718     typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
719     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
720     initFullExprCleanup();
721   }
722 
723   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
724   /// current full-expression.  Safe against the possibility that
725   /// we're currently inside a conditionally-evaluated expression.
726   template <class T, class A0, class A1, class A2, class A3>
727   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
728     // If we're not in a conditional branch, or if none of the
729     // arguments requires saving, then use the unconditional cleanup.
730     if (!isInConditionalBranch()) {
731       return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
732     }
733 
734     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
735     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
736     typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
737     typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
738 
739     typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
740     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
741                                      a2_saved, a3_saved);
742     initFullExprCleanup();
743   }
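  // Illustrative only: a full-expression cleanup for the hypothetical
  // MyCleanup sketched earlier would be pushed as
  //
  //   pushFullExprCleanup<MyCleanup>(NormalAndEHCleanup, addr);
  //
  // Outside a conditional branch this is exactly pushCleanup; inside
  // one, the arguments are saved, a ConditionalCleanup wrapper is
  // pushed instead, and initFullExprCleanup() marks it as a
  // conditional full-expression cleanup.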
744 
  /// Set up the last cleanup that was pushed as a conditional
746   /// full-expression cleanup.
747   void initFullExprCleanup();
748 
749   /// PushDestructorCleanup - Push a cleanup to call the
750   /// complete-object destructor of an object of the given type at the
751   /// given address.  Does nothing if T is not a C++ class type with a
752   /// non-trivial destructor.
753   void PushDestructorCleanup(QualType T, llvm::Value *Addr);
754 
755   /// PushDestructorCleanup - Push a cleanup to call the
756   /// complete-object variant of the given destructor on the object at
757   /// the given address.
758   void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
759                              llvm::Value *Addr);
760 
761   /// PopCleanupBlock - Will pop the cleanup entry on the stack and
762   /// process all branch fixups.
763   void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
764 
765   /// DeactivateCleanupBlock - Deactivates the given cleanup block.
766   /// The block cannot be reactivated.  Pops it if it's the top of the
767   /// stack.
768   ///
769   /// \param DominatingIP - An instruction which is known to
770   ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
772   ///   the point at which the cleanup comes into scope.
773   void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
774                               llvm::Instruction *DominatingIP);
775 
776   /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
777   /// Cannot be used to resurrect a deactivated cleanup.
778   ///
779   /// \param DominatingIP - An instruction which is known to
780   ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
782   ///   the point at which the cleanup comes into scope.
783   void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
784                             llvm::Instruction *DominatingIP);
785 
786   /// \brief Enters a new scope for capturing cleanups, all of which
787   /// will be executed once the scope is exited.
788   class RunCleanupsScope {
789     EHScopeStack::stable_iterator CleanupStackDepth;
790     bool OldDidCallStackSave;
791     bool PerformCleanup;
792 
793     RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
794     RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
795 
796   protected:
797     CodeGenFunction& CGF;
798 
799   public:
800     /// \brief Enter a new cleanup scope.
801     explicit RunCleanupsScope(CodeGenFunction &CGF)
802       : PerformCleanup(true), CGF(CGF)
803     {
804       CleanupStackDepth = CGF.EHStack.stable_begin();
805       OldDidCallStackSave = CGF.DidCallStackSave;
806       CGF.DidCallStackSave = false;
807     }
808 
809     /// \brief Exit this cleanup scope, emitting any accumulated
810     /// cleanups.
811     ~RunCleanupsScope() {
812       if (PerformCleanup) {
813         CGF.DidCallStackSave = OldDidCallStackSave;
814         CGF.PopCleanupBlocks(CleanupStackDepth);
815       }
816     }
817 
818     /// \brief Determine whether this scope requires any cleanups.
819     bool requiresCleanups() const {
820       return CGF.EHStack.stable_begin() != CleanupStackDepth;
821     }
822 
823     /// \brief Force the emission of cleanups now, instead of waiting
824     /// until this object is destroyed.
825     void ForceCleanup() {
826       assert(PerformCleanup && "Already forced cleanup");
827       CGF.DidCallStackSave = OldDidCallStackSave;
828       CGF.PopCleanupBlocks(CleanupStackDepth);
829       PerformCleanup = false;
830     }
831   };
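  // A sketch of typical RAII usage inside a statement emitter; the
  // emitter and statement names are illustrative:
  //
  //   void CodeGenFunction::EmitSomeStmt(const Stmt &S) {
  //     RunCleanupsScope Scope(*this);
  //     // ... emit declarations and statements; any cleanups they push
  //     // are popped and emitted when 'Scope' is destroyed, or earlier
  //     // via Scope.ForceCleanup().
  //   }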
832 
833   class LexicalScope: protected RunCleanupsScope {
834     SourceRange Range;
835     bool PopDebugStack;
836 
837     LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
838     LexicalScope &operator=(const LexicalScope &);
839 
840   public:
841     /// \brief Enter a new cleanup scope.
842     explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
843       : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
844       if (CGDebugInfo *DI = CGF.getDebugInfo())
845         DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
846     }
847 
848     /// \brief Exit this cleanup scope, emitting any accumulated
849     /// cleanups.
850     ~LexicalScope() {
851       if (PopDebugStack) {
852         CGDebugInfo *DI = CGF.getDebugInfo();
853         if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
854       }
855     }
856 
857     /// \brief Force the emission of cleanups now, instead of waiting
858     /// until this object is destroyed.
859     void ForceCleanup() {
860       RunCleanupsScope::ForceCleanup();
861       if (CGDebugInfo *DI = CGF.getDebugInfo()) {
862         DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
863         PopDebugStack = false;
864       }
865     }
866   };
867 
868 
869   /// PopCleanupBlocks - Takes the old cleanup stack size and emits
870   /// the cleanup blocks that have been added.
871   void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
872 
873   void ResolveBranchFixups(llvm::BasicBlock *Target);
874 
875   /// The given basic block lies in the current EH scope, but may be a
876   /// target of a potentially scope-crossing jump; get a stable handle
  /// through which such a jump can be performed later.
878   JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
879     return JumpDest(Target,
880                     EHStack.getInnermostNormalCleanup(),
881                     NextCleanupDestIndex++);
882   }
883 
884   /// The given basic block lies in the current EH scope, but may be a
885   /// target of a potentially scope-crossing jump; get a stable handle
  /// through which such a jump can be performed later.
887   JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
888     return getJumpDestInCurrentScope(createBasicBlock(Name));
889   }
890 
891   /// EmitBranchThroughCleanup - Emit a branch from the current insert
892   /// block through the normal cleanup handling code (if any) and then
893   /// on to \arg Dest.
894   void EmitBranchThroughCleanup(JumpDest Dest);
895 
896   /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
897   /// specified destination obviously has no cleanups to run.  'false' is always
898   /// a conservatively correct answer for this method.
899   bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
900 
901   /// popCatchScope - Pops the catch scope at the top of the EHScope
902   /// stack, emitting any required code (other than the catch handlers
903   /// themselves).
904   void popCatchScope();
905 
906   llvm::BasicBlock *getEHResumeBlock();
907   llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
908 
909   /// An object to manage conditionally-evaluated expressions.
910   class ConditionalEvaluation {
911     llvm::BasicBlock *StartBB;
912 
913   public:
914     ConditionalEvaluation(CodeGenFunction &CGF)
915       : StartBB(CGF.Builder.GetInsertBlock()) {}
916 
917     void begin(CodeGenFunction &CGF) {
918       assert(CGF.OutermostConditional != this);
919       if (!CGF.OutermostConditional)
920         CGF.OutermostConditional = this;
921     }
922 
923     void end(CodeGenFunction &CGF) {
924       assert(CGF.OutermostConditional != 0);
925       if (CGF.OutermostConditional == this)
926         CGF.OutermostConditional = 0;
927     }
928 
929     /// Returns a block which will be executed prior to each
930     /// evaluation of the conditional code.
931     llvm::BasicBlock *getStartingBlock() const {
932       return StartBB;
933     }
934   };
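  // A sketch of how an expression emitter brackets conditionally
  // evaluated code (the surrounding emission is elided):
  //
  //   ConditionalEvaluation eval(*this);
  //   // ... emit the branch on the condition ...
  //   eval.begin(*this);
  //   // ... emit the conditionally evaluated operand; cleanups pushed
  //   // here go through the pushFullExprCleanup machinery above ...
  //   eval.end(*this);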
935 
936   /// isInConditionalBranch - Return true if we're currently emitting
937   /// one branch or the other of a conditional expression.
938   bool isInConditionalBranch() const { return OutermostConditional != 0; }
939 
940   void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
941     assert(isInConditionalBranch());
942     llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
943     new llvm::StoreInst(value, addr, &block->back());
944   }
945 
946   /// An RAII object to record that we're evaluating a statement
947   /// expression.
948   class StmtExprEvaluation {
949     CodeGenFunction &CGF;
950 
951     /// We have to save the outermost conditional: cleanups in a
952     /// statement expression aren't conditional just because the
953     /// StmtExpr is.
954     ConditionalEvaluation *SavedOutermostConditional;
955 
956   public:
957     StmtExprEvaluation(CodeGenFunction &CGF)
958       : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
959       CGF.OutermostConditional = 0;
960     }
961 
962     ~StmtExprEvaluation() {
963       CGF.OutermostConditional = SavedOutermostConditional;
964       CGF.EnsureInsertPoint();
965     }
966   };
967 
968   /// An object which temporarily prevents a value from being
969   /// destroyed by aggressive peephole optimizations that assume that
970   /// all uses of a value have been realized in the IR.
971   class PeepholeProtection {
972     llvm::Instruction *Inst;
973     friend class CodeGenFunction;
974 
975   public:
976     PeepholeProtection() : Inst(0) {}
977   };
978 
979   /// A non-RAII class containing all the information about a bound
980   /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
981   /// this which makes individual mappings very simple; using this
982   /// class directly is useful when you have a variable number of
983   /// opaque values or don't want the RAII functionality for some
984   /// reason.
985   class OpaqueValueMappingData {
986     const OpaqueValueExpr *OpaqueValue;
987     bool BoundLValue;
988     CodeGenFunction::PeepholeProtection Protection;
989 
990     OpaqueValueMappingData(const OpaqueValueExpr *ov,
991                            bool boundLValue)
992       : OpaqueValue(ov), BoundLValue(boundLValue) {}
993   public:
994     OpaqueValueMappingData() : OpaqueValue(0) {}
995 
996     static bool shouldBindAsLValue(const Expr *expr) {
997       // gl-values should be bound as l-values for obvious reasons.
998       // Records should be bound as l-values because IR generation
999       // always keeps them in memory.  Expressions of function type
1000       // act exactly like l-values but are formally required to be
1001       // r-values in C.
1002       return expr->isGLValue() ||
1003              expr->getType()->isRecordType() ||
1004              expr->getType()->isFunctionType();
1005     }
1006 
1007     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1008                                        const OpaqueValueExpr *ov,
1009                                        const Expr *e) {
1010       if (shouldBindAsLValue(ov))
1011         return bind(CGF, ov, CGF.EmitLValue(e));
1012       return bind(CGF, ov, CGF.EmitAnyExpr(e));
1013     }
1014 
1015     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1016                                        const OpaqueValueExpr *ov,
1017                                        const LValue &lv) {
1018       assert(shouldBindAsLValue(ov));
1019       CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1020       return OpaqueValueMappingData(ov, true);
1021     }
1022 
1023     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1024                                        const OpaqueValueExpr *ov,
1025                                        const RValue &rv) {
1026       assert(!shouldBindAsLValue(ov));
1027       CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1028 
1029       OpaqueValueMappingData data(ov, false);
1030 
1031       // Work around an extremely aggressive peephole optimization in
1032       // EmitScalarConversion which assumes that all other uses of a
1033       // value are extant.
1034       data.Protection = CGF.protectFromPeepholes(rv);
1035 
1036       return data;
1037     }
1038 
1039     bool isValid() const { return OpaqueValue != 0; }
1040     void clear() { OpaqueValue = 0; }
1041 
1042     void unbind(CodeGenFunction &CGF) {
1043       assert(OpaqueValue && "no data to unbind!");
1044 
1045       if (BoundLValue) {
1046         CGF.OpaqueLValues.erase(OpaqueValue);
1047       } else {
1048         CGF.OpaqueRValues.erase(OpaqueValue);
1049         CGF.unprotectFromPeepholes(Protection);
1050       }
1051     }
1052   };
1053 
1054   /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1055   class OpaqueValueMapping {
1056     CodeGenFunction &CGF;
1057     OpaqueValueMappingData Data;
1058 
1059   public:
1060     static bool shouldBindAsLValue(const Expr *expr) {
1061       return OpaqueValueMappingData::shouldBindAsLValue(expr);
1062     }
1063 
1064     /// Build the opaque value mapping for the given conditional
1065     /// operator if it's the GNU ?: extension.  This is a common
    /// enough pattern that this convenience constructor is really
1067     /// helpful.
1068     ///
1069     OpaqueValueMapping(CodeGenFunction &CGF,
1070                        const AbstractConditionalOperator *op) : CGF(CGF) {
1071       if (isa<ConditionalOperator>(op))
1072         // Leave Data empty.
1073         return;
1074 
1075       const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1076       Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1077                                           e->getCommon());
1078     }
1079 
1080     OpaqueValueMapping(CodeGenFunction &CGF,
1081                        const OpaqueValueExpr *opaqueValue,
1082                        LValue lvalue)
1083       : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1084     }
1085 
1086     OpaqueValueMapping(CodeGenFunction &CGF,
1087                        const OpaqueValueExpr *opaqueValue,
1088                        RValue rvalue)
1089       : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1090     }
1091 
1092     void pop() {
1093       Data.unbind(CGF);
1094       Data.clear();
1095     }
1096 
1097     ~OpaqueValueMapping() {
1098       if (Data.isValid()) Data.unbind(CGF);
1099     }
1100   };
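  // A sketch of RAII usage while emitting an expression that binds an
  // OpaqueValueExpr as an l-value ('ov' and 'source' are assumed locals):
  //
  //   OpaqueValueMapping binding(*this, ov, EmitLValue(source));
  //   // ... emit the code that refers to 'ov'; lookups of the opaque
  //   // value find the bound LValue until 'binding' is destroyed or
  //   // binding.pop() is called.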
1101 
  /// getByRefValueLLVMField - Given a __block variable declaration, returns
  /// the LLVM field number that holds the value.
1104   unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
1105 
  /// BuildBlockByrefAddress - Computes the address of the variable which is
  /// declared as __block.
1108   llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
1109                                       const VarDecl *V);
1110 private:
1111   CGDebugInfo *DebugInfo;
1112   bool DisableDebugInfo;
1113 
1114   /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1115   /// calling llvm.stacksave for multiple VLAs in the same scope.
1116   bool DidCallStackSave;
1117 
1118   /// IndirectBranch - The first time an indirect goto is seen we create a block
1119   /// with an indirect branch.  Every time we see the address of a label taken,
1120   /// we add the label to the indirect goto.  Every subsequent indirect goto is
1121   /// codegen'd as a jump to the IndirectBranch's basic block.
1122   llvm::IndirectBrInst *IndirectBranch;
1123 
1124   /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1125   /// decls.
1126   typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
1127   DeclMapTy LocalDeclMap;
1128 
1129   /// LabelMap - This keeps track of the LLVM basic block for each C label.
1130   llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1131 
1132   // BreakContinueStack - This keeps track of where break and continue
1133   // statements should jump to.
1134   struct BreakContinue {
1135     BreakContinue(JumpDest Break, JumpDest Continue)
1136       : BreakBlock(Break), ContinueBlock(Continue) {}
1137 
1138     JumpDest BreakBlock;
1139     JumpDest ContinueBlock;
1140   };
1141   SmallVector<BreakContinue, 8> BreakContinueStack;
1142 
  /// SwitchInsn - This is the nearest enclosing switch instruction. It is
  /// null if the current context is not in a switch.
1145   llvm::SwitchInst *SwitchInsn;
1146 
  /// CaseRangeBlock - This block holds the condition check for the last
  /// case-range statement in the current switch instruction.
1149   llvm::BasicBlock *CaseRangeBlock;
1150 
1151   /// OpaqueLValues - Keeps track of the current set of opaque value
1152   /// expressions.
1153   llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1154   llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1155 
1156   // VLASizeMap - This keeps track of the associated size for each VLA type.
1157   // We track this by the size expression rather than the type itself because
  // in certain situations, like a const qualifier applied to a VLA typedef,
1159   // multiple VLA types can share the same size expression.
1160   // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1161   // enter/leave scopes.
1162   llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1163 
1164   /// A block containing a single 'unreachable' instruction.  Created
1165   /// lazily by getUnreachableBlock().
1166   llvm::BasicBlock *UnreachableBlock;
1167 
  /// CXXABIThisDecl - When generating code for a C++ member function,
1169   /// this will hold the implicit 'this' declaration.
1170   ImplicitParamDecl *CXXABIThisDecl;
1171   llvm::Value *CXXABIThisValue;
1172   llvm::Value *CXXThisValue;
1173 
1174   /// CXXVTTDecl - When generating code for a base object constructor or
1175   /// base object destructor with virtual bases, this will hold the implicit
1176   /// VTT parameter.
1177   ImplicitParamDecl *CXXVTTDecl;
1178   llvm::Value *CXXVTTValue;
1179 
1180   /// OutermostConditional - Points to the outermost active
1181   /// conditional control.  This is used so that we know if a
1182   /// temporary should be destroyed conditionally.
1183   ConditionalEvaluation *OutermostConditional;
1184 
1185 
  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
1187   /// type as well as the field number that contains the actual data.
1188   llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
1189                                               unsigned> > ByRefValueInfo;
1190 
1191   llvm::BasicBlock *TerminateLandingPad;
1192   llvm::BasicBlock *TerminateHandler;
1193   llvm::BasicBlock *TrapBB;
1194 
1195 public:
1196   CodeGenFunction(CodeGenModule &cgm);
1197   ~CodeGenFunction();
1198 
1199   CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1200   ASTContext &getContext() const { return CGM.getContext(); }
1201   CGDebugInfo *getDebugInfo() {
1202     if (DisableDebugInfo)
1203       return NULL;
1204     return DebugInfo;
1205   }
1206   void disableDebugInfo() { DisableDebugInfo = true; }
1207   void enableDebugInfo() { DisableDebugInfo = false; }
1208 
1209   bool shouldUseFusedARCCalls() {
1210     return CGM.getCodeGenOpts().OptimizationLevel == 0;
1211   }
1212 
1213   const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1214 
  /// Returns pointers to the function's exception object and selector slots,
  /// which are assigned in every landing pad.
1217   llvm::Value *getExceptionSlot();
1218   llvm::Value *getEHSelectorSlot();
1219 
1220   /// Returns the contents of the function's exception object and selector
1221   /// slots.
1222   llvm::Value *getExceptionFromSlot();
1223   llvm::Value *getSelectorFromSlot();
1224 
1225   llvm::Value *getNormalCleanupDestSlot();
1226 
1227   llvm::BasicBlock *getUnreachableBlock() {
1228     if (!UnreachableBlock) {
1229       UnreachableBlock = createBasicBlock("unreachable");
1230       new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1231     }
1232     return UnreachableBlock;
1233   }
1234 
1235   llvm::BasicBlock *getInvokeDest() {
1236     if (!EHStack.requiresLandingPad()) return 0;
1237     return getInvokeDestImpl();
1238   }
1239 
1240   llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1241 
1242   //===--------------------------------------------------------------------===//
1243   //                                  Cleanups
1244   //===--------------------------------------------------------------------===//
1245 
1246   typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
1247 
1248   void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1249                                         llvm::Value *arrayEndPointer,
1250                                         QualType elementType,
1251                                         Destroyer *destroyer);
1252   void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1253                                       llvm::Value *arrayEnd,
1254                                       QualType elementType,
1255                                       Destroyer *destroyer);
1256 
1257   void pushDestroy(QualType::DestructionKind dtorKind,
1258                    llvm::Value *addr, QualType type);
1259   void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
1260                    Destroyer *destroyer, bool useEHCleanupForArray);
1261   void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
1262                    bool useEHCleanupForArray);
1263   llvm::Function *generateDestroyHelper(llvm::Constant *addr,
1264                                         QualType type,
1265                                         Destroyer *destroyer,
1266                                         bool useEHCleanupForArray);
1267   void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1268                         QualType type, Destroyer *destroyer,
1269                         bool checkZeroLength, bool useEHCleanup);
1270 
1271   Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1272 
1273   /// Determines whether an EH cleanup is required to destroy a type
1274   /// with the given destruction kind.
1275   bool needsEHCleanup(QualType::DestructionKind kind) {
1276     switch (kind) {
1277     case QualType::DK_none:
1278       return false;
1279     case QualType::DK_cxx_destructor:
1280     case QualType::DK_objc_weak_lifetime:
1281       return getLangOpts().Exceptions;
1282     case QualType::DK_objc_strong_lifetime:
1283       return getLangOpts().Exceptions &&
1284              CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1285     }
1286     llvm_unreachable("bad destruction kind");
1287   }
1288 
1289   CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1290     return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1291   }
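  // Illustrative only: a declaration emitter would typically dispatch
  // on the destruction kind roughly like this, where 'type' and 'addr'
  // are assumed locals:
  //
  //   QualType::DestructionKind dtorKind = type.isDestructedType();
  //   if (dtorKind != QualType::DK_none)
  //     pushDestroy(getCleanupKind(dtorKind), addr, type,
  //                 getDestroyer(dtorKind), needsEHCleanup(dtorKind));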
1292 
1293   //===--------------------------------------------------------------------===//
1294   //                                  Objective-C
1295   //===--------------------------------------------------------------------===//
1296 
1297   void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1298 
1299   void StartObjCMethod(const ObjCMethodDecl *MD,
1300                        const ObjCContainerDecl *CD,
1301                        SourceLocation StartLoc);
1302 
1303   /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1304   void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1305                           const ObjCPropertyImplDecl *PID);
1306   void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1307                               const ObjCPropertyImplDecl *propImpl,
1308                               llvm::Constant *AtomicHelperFn);
1309 
1310   void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1311                                   ObjCMethodDecl *MD, bool ctor);
1312 
1313   /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1314   /// for the given property.
1315   void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1316                           const ObjCPropertyImplDecl *PID);
1317   void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1318                               const ObjCPropertyImplDecl *propImpl,
1319                               llvm::Constant *AtomicHelperFn);
1320   bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1321   bool IvarTypeWithAggrGCObjects(QualType Ty);
1322 
1323   //===--------------------------------------------------------------------===//
1324   //                                  Block Bits
1325   //===--------------------------------------------------------------------===//
1326 
1327   llvm::Value *EmitBlockLiteral(const BlockExpr *);
1328   llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1329   static void destroyBlockInfos(CGBlockInfo *info);
1330   llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1331                                            const CGBlockInfo &Info,
1332                                            llvm::StructType *,
1333                                            llvm::Constant *BlockVarLayout);
1334 
1335   llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1336                                         const CGBlockInfo &Info,
1337                                         const Decl *OuterFuncDecl,
1338                                         const DeclMapTy &ldm,
1339                                         bool IsLambdaConversionToBlock);
1340 
1341   llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1342   llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1343   llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1344                                              const ObjCPropertyImplDecl *PID);
1345   llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1346                                              const ObjCPropertyImplDecl *PID);
1347   llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1348 
1349   void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1350 
1351   class AutoVarEmission;
1352 
1353   void emitByrefStructureInit(const AutoVarEmission &emission);
1354   void enterByrefCleanup(const AutoVarEmission &emission);
1355 
1356   llvm::Value *LoadBlockStruct() {
1357     assert(BlockPointer && "no block pointer set!");
1358     return BlockPointer;
1359   }
1360 
1361   void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1362   void AllocateBlockDecl(const DeclRefExpr *E);
1363   llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1364   llvm::Type *BuildByRefType(const VarDecl *var);
1365 
1366   void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1367                     const CGFunctionInfo &FnInfo);
1368   void StartFunction(GlobalDecl GD, QualType RetTy,
1369                      llvm::Function *Fn,
1370                      const CGFunctionInfo &FnInfo,
1371                      const FunctionArgList &Args,
1372                      SourceLocation StartLoc);
1373 
1374   void EmitConstructorBody(FunctionArgList &Args);
1375   void EmitDestructorBody(FunctionArgList &Args);
1376   void EmitFunctionBody(FunctionArgList &Args);
1377 
1378   void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
1379                                   CallArgList &CallArgs);
1380   void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1381   void EmitLambdaBlockInvokeBody();
1382   void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1383   void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
1384 
1385   /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1386   /// emission when possible.
1387   void EmitReturnBlock();
1388 
1389   /// FinishFunction - Complete IR generation of the current function. It is
1390   /// legal to call this function even if there is no current insertion point.
1391   void FinishFunction(SourceLocation EndLoc=SourceLocation());
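
  // Illustrative sketch only (the real driver is GenerateCode above): a plain
  // function body is normally pushed through these entry points roughly as
  // follows.  'Loc' and 'EndLoc' stand in for the actual source locations.
  //
  //   StartFunction(GD, RetTy, Fn, FnInfo, Args, Loc);
  //   EmitFunctionBody(Args);
  //   FinishFunction(EndLoc);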
1392 
1393   /// GenerateThunk - Generate a thunk for the given method.
1394   void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1395                      GlobalDecl GD, const ThunkInfo &Thunk);
1396 
1397   void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1398                             GlobalDecl GD, const ThunkInfo &Thunk);
1399 
1400   void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1401                         FunctionArgList &Args);
1402 
1403   void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1404                                ArrayRef<VarDecl *> ArrayIndexes);
1405 
1406   /// InitializeVTablePointer - Initialize the vtable pointer of the given
1407   /// subobject.
1408   ///
1409   void InitializeVTablePointer(BaseSubobject Base,
1410                                const CXXRecordDecl *NearestVBase,
1411                                CharUnits OffsetFromNearestVBase,
1412                                llvm::Constant *VTable,
1413                                const CXXRecordDecl *VTableClass);
1414 
1415   typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1416   void InitializeVTablePointers(BaseSubobject Base,
1417                                 const CXXRecordDecl *NearestVBase,
1418                                 CharUnits OffsetFromNearestVBase,
1419                                 bool BaseIsNonVirtualPrimaryBase,
1420                                 llvm::Constant *VTable,
1421                                 const CXXRecordDecl *VTableClass,
1422                                 VisitedVirtualBasesSetTy& VBases);
1423 
1424   void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1425 
1426   /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1427   /// to by This.
1428   llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1429 
1430   /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1431   /// given phase of destruction for a destructor.  The end result
1432   /// should call destructors on members and base classes in reverse
1433   /// order of their construction.
1434   void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1435 
1436   /// ShouldInstrumentFunction - Return true if the current function should be
1437   /// instrumented with __cyg_profile_func_* calls.
1438   bool ShouldInstrumentFunction();
1439 
1440   /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1441   /// instrumentation function with the current function and the call site, if
1442   /// function instrumentation is enabled.
1443   void EmitFunctionInstrumentation(const char *Fn);
1444 
1445   /// EmitMCountInstrumentation - Emit call to .mcount.
1446   void EmitMCountInstrumentation();
1447 
1448   /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1449   /// arguments for the given function. This is also responsible for naming the
1450   /// LLVM function arguments.
1451   void EmitFunctionProlog(const CGFunctionInfo &FI,
1452                           llvm::Function *Fn,
1453                           const FunctionArgList &Args);
1454 
1455   /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1456   /// given temporary.
1457   void EmitFunctionEpilog(const CGFunctionInfo &FI);
1458 
1459   /// EmitStartEHSpec - Emit the start of the exception spec.
1460   void EmitStartEHSpec(const Decl *D);
1461 
1462   /// EmitEndEHSpec - Emit the end of the exception spec.
1463   void EmitEndEHSpec(const Decl *D);
1464 
1465   /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1466   llvm::BasicBlock *getTerminateLandingPad();
1467 
1468   /// getTerminateHandler - Return a handler (not a landing pad, just
1469   /// a catch handler) that just calls terminate.  This is used when
1470   /// a terminate scope encloses a try.
1471   llvm::BasicBlock *getTerminateHandler();
1472 
1473   llvm::Type *ConvertTypeForMem(QualType T);
1474   llvm::Type *ConvertType(QualType T);
1475   llvm::Type *ConvertType(const TypeDecl *T) {
1476     return ConvertType(getContext().getTypeDeclType(T));
1477   }
1478 
1479   /// LoadObjCSelf - Load the value of self. This function is only valid while
1480   /// generating code for an Objective-C method.
1481   llvm::Value *LoadObjCSelf();
1482 
1483   /// TypeOfSelfObject - Return the type of object that 'self' represents.
1484   QualType TypeOfSelfObject();
1485 
1486   /// hasAggregateLLVMType - Return true if the specified AST type will map into
1487   /// an aggregate LLVM type or is void.
1488   static bool hasAggregateLLVMType(QualType T);
1489 
1490   /// createBasicBlock - Create an LLVM basic block.
1491   llvm::BasicBlock *createBasicBlock(StringRef name = "",
1492                                      llvm::Function *parent = 0,
1493                                      llvm::BasicBlock *before = 0) {
1494 #ifdef NDEBUG
1495     return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1496 #else
1497     return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1498 #endif
1499   }
1500 
1501   /// getJumpDestForLabel - Return the JumpDest that the specified label
1502   /// maps to.
1503   JumpDest getJumpDestForLabel(const LabelDecl *S);
1504 
1505   /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1506   /// another basic block, simplify it. This assumes that no other code could
1507   /// potentially reference the basic block.
1508   void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1509 
1510   /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1511   /// adding a fall-through branch from the current insert block if
1512   /// necessary. It is legal to call this function even if there is no current
1513   /// insertion point.
1514   ///
1515   /// IsFinished - If true, indicates that the caller has finished emitting
1516   /// branches to the given block and does not expect to emit code into it. This
1517   /// means the block can be ignored if it is unreachable.
1518   void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1519 
1520   /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1521   /// near its uses, and leave the insertion point in it.
1522   void EmitBlockAfterUses(llvm::BasicBlock *BB);
1523 
1524   /// EmitBranch - Emit a branch to the specified basic block from the current
1525   /// insert block, taking care to avoid creation of branches from dummy
1526   /// blocks. It is legal to call this function even if there is no current
1527   /// insertion point.
1528   ///
1529   /// This function clears the current insertion point. The caller should follow
1530   /// calls to this function with calls to Emit*Block prior to generating new
1531   /// code.
1532   void EmitBranch(llvm::BasicBlock *Block);
1533 
1534   /// HaveInsertPoint - True if an insertion point is defined. If not, this
1535   /// indicates that the current code being emitted is unreachable.
1536   bool HaveInsertPoint() const {
1537     return Builder.GetInsertBlock() != 0;
1538   }
1539 
1540   /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1541   /// emitted IR has a place to go. Note that by definition, if this function
1542   /// creates a block then that block is unreachable; callers may do better to
1543   /// detect when no insertion point is defined and simply skip IR generation.
1544   void EnsureInsertPoint() {
1545     if (!HaveInsertPoint())
1546       EmitBlock(createBasicBlock());
1547   }
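
  // A minimal usage sketch (illustrative, not normative): the block helpers
  // above are typically combined like this when lowering a simple two-way
  // branch.  The block names and the 'Cond' expression are placeholders.
  //
  //   llvm::BasicBlock *ThenBB = createBasicBlock("then");
  //   llvm::BasicBlock *ContBB = createBasicBlock("cont");
  //   EmitBranchOnBoolExpr(Cond, ThenBB, ContBB);
  //   EmitBlock(ThenBB);
  //   // ... emit the 'then' body here ...
  //   EmitBranch(ContBB);
  //   EmitBlock(ContBB);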
1548 
1549   /// ErrorUnsupported - Print out an error that codegen doesn't support the
1550   /// specified stmt yet.
1551   void ErrorUnsupported(const Stmt *S, const char *Type,
1552                         bool OmitOnError=false);
1553 
1554   //===--------------------------------------------------------------------===//
1555   //                                  Helpers
1556   //===--------------------------------------------------------------------===//
1557 
1558   LValue MakeAddrLValue(llvm::Value *V, QualType T,
1559                         CharUnits Alignment = CharUnits()) {
1560     return LValue::MakeAddr(V, T, Alignment, getContext(),
1561                             CGM.getTBAAInfo(T));
1562   }
1563   LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1564     CharUnits Alignment;
1565     if (!T->isIncompleteType())
1566       Alignment = getContext().getTypeAlignInChars(T);
1567     return LValue::MakeAddr(V, T, Alignment, getContext(),
1568                             CGM.getTBAAInfo(T));
1569   }
1570 
1571   /// CreateTempAlloca - This creates an alloca and inserts it into the entry
1572   /// block. The caller is responsible for setting an appropriate alignment on
1573   /// the alloca.
1574   llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
1575                                      const Twine &Name = "tmp");
1576 
1577   /// InitTempAlloca - Provide an initial value for the given alloca.
1578   void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1579 
1580   /// CreateIRTemp - Create a temporary IR object of the given type, with
1581   /// appropriate alignment. This routine should only be used when a temporary
1582   /// value needs to be stored into an alloca (for example, to avoid explicit
1583   /// PHI construction), but the type is the IR type, not the type appropriate
1584   /// for storing in memory.
1585   llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
1586 
1587   /// CreateMemTemp - Create a temporary memory object of the given type, with
1588   /// appropriate alignment.
1589   llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1590 
1591   /// CreateAggTemp - Create a temporary memory object for the given
1592   /// aggregate type.
1593   AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1594     CharUnits Alignment = getContext().getTypeAlignInChars(T);
1595     return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1596                                  T.getQualifiers(),
1597                                  AggValueSlot::IsNotDestructed,
1598                                  AggValueSlot::DoesNotNeedGCBarriers,
1599                                  AggValueSlot::IsNotAliased,
1600                                  AggValueSlot::IsCompleteObject,
1601                                  AggValueSlot::IsNotZeroed);
1602   }
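
  // Illustrative sketch: the temporary helpers above pair naturally with
  // MakeAddrLValue when a value needs a home in memory.  'Ty' is assumed to
  // be a complete QualType.
  //
  //   llvm::AllocaInst *Tmp = CreateMemTemp(Ty, "example.tmp");
  //   LValue TmpLV = MakeAddrLValue(Tmp, Ty,
  //                                 getContext().getTypeAlignInChars(Ty));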
1603 
1604   /// Emit a cast to void* in the appropriate address space.
1605   llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1606 
1607   /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1608   /// expression and compare the result against zero, returning an Int1Ty value.
1609   llvm::Value *EvaluateExprAsBool(const Expr *E);
1610 
1611   /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1612   void EmitIgnoredExpr(const Expr *E);
1613 
1614   /// EmitAnyExpr - Emit code to compute the specified expression which can have
1615   /// any type.  The result is returned as an RValue struct.  If this is an
1616   /// aggregate expression, the AggSlot argument indicates where the result
1617   /// should be returned.
1618   ///
1619   /// \param IgnoreResult - True if the resulting value isn't used.
1620   RValue EmitAnyExpr(const Expr *E,
1621                      AggValueSlot AggSlot = AggValueSlot::ignored(),
1622                      bool IgnoreResult = false);
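
  // Sketch of a common pattern (essentially what EmitAnyExprToTemp below
  // does): give the expression an aggregate slot only when its type needs one.
  //
  //   AggValueSlot Slot = hasAggregateLLVMType(E->getType())
  //                           ? CreateAggTemp(E->getType(), "any.tmp")
  //                           : AggValueSlot::ignored();
  //   RValue RV = EmitAnyExpr(E, Slot);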
1623 
1624   // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1625   // or the value of the expression, depending on how va_list is defined.
1626   llvm::Value *EmitVAListRef(const Expr *E);
1627 
1628   /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
1629   /// always be accessible even if no aggregate location is provided.
1630   RValue EmitAnyExprToTemp(const Expr *E);
1631 
1632   /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1633   /// arbitrary expression as an initialization of the given memory
1634   /// location.
1635   void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1636                         Qualifiers Quals);
1637 
1638   /// EmitExprAsInit - Emits the code necessary to initialize a
1639   /// location in memory with the given initializer.
1640   void EmitExprAsInit(const Expr *init, const ValueDecl *D,
1641                       LValue lvalue, bool capturedByInit);
1642 
1643   /// EmitAggregateCopy - Emit an aggregate copy.
1644   ///
1645   /// \param isVolatile - True iff either the source or the destination is
1646   /// volatile.
1647   /// \param destIsCompleteObject - True if the destination is known to be
1648   /// a complete object.
1649   void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1650                          QualType EltTy, bool isVolatile=false,
1651                          unsigned alignment = 0,
1652                          bool destIsCompleteObject = false);
1653 
1654   /// StartBlock - Start a new block named N. If the insert block is a dummy
1655   /// block, then reuse it.
1656   void StartBlock(const char *N);
1657 
1658   /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
1659   llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
1660     return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
1661   }
1662 
1663   /// GetAddrOfLocalVar - Return the address of a local variable.
1664   llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1665     llvm::Value *Res = LocalDeclMap[VD];
1666     assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1667     return Res;
1668   }
1669 
1670   /// getOpaqueLValueMapping - Given an opaque value expression (which
1671   /// must be mapped to an l-value), return its mapping.
1672   const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1673     assert(OpaqueValueMapping::shouldBindAsLValue(e));
1674 
1675     llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1676       it = OpaqueLValues.find(e);
1677     assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1678     return it->second;
1679   }
1680 
1681   /// getOpaqueRValueMapping - Given an opaque value expression (which
1682   /// must be mapped to an r-value), return its mapping.
1683   const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1684     assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1685 
1686     llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1687       it = OpaqueRValues.find(e);
1688     assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1689     return it->second;
1690   }
1691 
1692   /// getAccessedFieldNo - Given an encoded value and a result number, return
1693   /// the input field number being accessed.
1694   static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1695 
1696   llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1697   llvm::BasicBlock *GetIndirectGotoBlock();
1698 
1699   /// EmitNullInitialization - Generate code to set a value of the given type to
1700   /// null. If the type contains data member pointers, they will be initialized
1701   /// to -1 in accordance with the Itanium C++ ABI.
1702   void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1703 
1704   // EmitVAArg - Generate code to get an argument from the passed in pointer
1705   // and update it accordingly. The return value is a pointer to the argument.
1706   // FIXME: We should be able to get rid of this method and use the va_arg
1707   // instruction in LLVM instead once it works well enough.
1708   llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1709 
1710   /// emitArrayLength - Compute the length of an array, even if it's a
1711   /// VLA, and drill down to the base element type.
1712   llvm::Value *emitArrayLength(const ArrayType *arrayType,
1713                                QualType &baseType,
1714                                llvm::Value *&addr);
1715 
1716   /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions
1717   /// in the given variably-modified type and store them in the VLASizeMap.
1718   ///
1719   /// This function can be called with a null (unreachable) insert point.
1720   void EmitVariablyModifiedType(QualType Ty);
1721 
1722   /// getVLASize - Returns an LLVM value that corresponds to the size,
1723   /// in non-variably-sized elements, of a variable length array type,
1724   /// plus the largest non-variably-sized element type.  Assumes that
1725   /// the type has already been emitted with EmitVariablyModifiedType.
1726   std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
1727   std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
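
  // Illustrative sketch of the intended usage order for the VLA helpers above
  // ('Ty' is assumed to be a variably-modified type):
  //
  //   EmitVariablyModifiedType(Ty);                // capture the VLA sizes
  //   std::pair<llvm::Value*, QualType> VlaSize = getVLASize(Ty);
  //   llvm::Value *NumElts = VlaSize.first;        // element count
  //   QualType EltTy = VlaSize.second;             // largest non-VLA element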
1728 
1729   /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1730   /// generating code for a C++ member function.
1731   llvm::Value *LoadCXXThis() {
1732     assert(CXXThisValue && "no 'this' value for this function");
1733     return CXXThisValue;
1734   }
1735 
1736   /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors of
1737   /// classes with virtual bases.
1738   llvm::Value *LoadCXXVTT() {
1739     assert(CXXVTTValue && "no VTT value for this function");
1740     return CXXVTTValue;
1741   }
1742 
1743   /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
1744   /// complete class to the given direct base.
1745   llvm::Value *
1746   GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1747                                         const CXXRecordDecl *Derived,
1748                                         const CXXRecordDecl *Base,
1749                                         bool BaseIsVirtual);
1750 
1751   /// GetAddressOfBaseClass - This function will add the necessary delta to the
1752   /// load of 'this' and return the address of the base class.
1753   llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1754                                      const CXXRecordDecl *Derived,
1755                                      CastExpr::path_const_iterator PathBegin,
1756                                      CastExpr::path_const_iterator PathEnd,
1757                                      bool NullCheckValue);
1758 
1759   llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1760                                         const CXXRecordDecl *Derived,
1761                                         CastExpr::path_const_iterator PathBegin,
1762                                         CastExpr::path_const_iterator PathEnd,
1763                                         bool NullCheckValue);
1764 
1765   llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1766                                          const CXXRecordDecl *ClassDecl,
1767                                          const CXXRecordDecl *BaseClassDecl);
1768 
1769   void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1770                                       CXXCtorType CtorType,
1771                                       const FunctionArgList &Args);
1772   // It's important not to confuse this and the previous function. Delegating
1773   // constructors are the C++0x feature. The constructor delegate optimization
1774   // is used to reduce duplication in the base and complete constructors where
1775   // they are substantially the same.
1776   void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1777                                         const FunctionArgList &Args);
1778   void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1779                               bool ForVirtualBase, llvm::Value *This,
1780                               CallExpr::const_arg_iterator ArgBeg,
1781                               CallExpr::const_arg_iterator ArgEnd);
1782 
1783   void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1784                               llvm::Value *This, llvm::Value *Src,
1785                               CallExpr::const_arg_iterator ArgBeg,
1786                               CallExpr::const_arg_iterator ArgEnd);
1787 
1788   void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1789                                   const ConstantArrayType *ArrayTy,
1790                                   llvm::Value *ArrayPtr,
1791                                   CallExpr::const_arg_iterator ArgBeg,
1792                                   CallExpr::const_arg_iterator ArgEnd,
1793                                   bool ZeroInitialization = false);
1794 
1795   void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1796                                   llvm::Value *NumElements,
1797                                   llvm::Value *ArrayPtr,
1798                                   CallExpr::const_arg_iterator ArgBeg,
1799                                   CallExpr::const_arg_iterator ArgEnd,
1800                                   bool ZeroInitialization = false);
1801 
1802   static Destroyer destroyCXXObject;
1803 
1804   void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1805                              bool ForVirtualBase, llvm::Value *This);
1806 
1807   void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
1808                                llvm::Value *NewPtr, llvm::Value *NumElements);
1809 
1810   void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
1811                         llvm::Value *Ptr);
1812 
1813   llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1814   void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1815 
1816   void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1817                       QualType DeleteTy);
1818 
1819   llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1820   llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1821 
1822   void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
1823   void EmitStdInitializerListCleanup(llvm::Value *loc,
1824                                      const InitListExpr *init);
1825 
1826   void EmitCheck(llvm::Value *, unsigned Size);
1827 
1828   llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1829                                        bool isInc, bool isPre);
1830   ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1831                                          bool isInc, bool isPre);
1832   //===--------------------------------------------------------------------===//
1833   //                            Declaration Emission
1834   //===--------------------------------------------------------------------===//
1835 
1836   /// EmitDecl - Emit a declaration.
1837   ///
1838   /// This function can be called with a null (unreachable) insert point.
1839   void EmitDecl(const Decl &D);
1840 
1841   /// EmitVarDecl - Emit a local variable declaration.
1842   ///
1843   /// This function can be called with a null (unreachable) insert point.
1844   void EmitVarDecl(const VarDecl &D);
1845 
1846   void EmitScalarInit(const Expr *init, const ValueDecl *D,
1847                       LValue lvalue, bool capturedByInit);
1848   void EmitScalarInit(llvm::Value *init, LValue lvalue);
1849 
1850   typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1851                              llvm::Value *Address);
1852 
1853   /// EmitAutoVarDecl - Emit an auto variable declaration.
1854   ///
1855   /// This function can be called with a null (unreachable) insert point.
1856   void EmitAutoVarDecl(const VarDecl &D);
1857 
1858   class AutoVarEmission {
1859     friend class CodeGenFunction;
1860 
1861     const VarDecl *Variable;
1862 
1863     /// The alignment of the variable.
1864     CharUnits Alignment;
1865 
1866     /// The address of the alloca.  Null if the variable was emitted
1867     /// as a global constant.
1868     llvm::Value *Address;
1869 
1870     llvm::Value *NRVOFlag;
1871 
1872     /// True if the variable is a __block variable.
1873     bool IsByRef;
1874 
1875     /// True if the variable is of aggregate type and has a constant
1876     /// initializer.
1877     bool IsConstantAggregate;
1878 
1879     struct Invalid {};
1880     AutoVarEmission(Invalid) : Variable(0) {}
1881 
1882     AutoVarEmission(const VarDecl &variable)
1883       : Variable(&variable), Address(0), NRVOFlag(0),
1884         IsByRef(false), IsConstantAggregate(false) {}
1885 
1886     bool wasEmittedAsGlobal() const { return Address == 0; }
1887 
1888   public:
1889     static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
1890 
1891     /// Returns the address of the object within this declaration.
1892     /// Note that this does not chase the forwarding pointer for
1893     /// __block decls.
1894     llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1895       if (!IsByRef) return Address;
1896 
1897       return CGF.Builder.CreateStructGEP(Address,
1898                                          CGF.getByRefValueLLVMField(Variable),
1899                                          Variable->getNameAsString());
1900     }
1901   };
1902   AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
1903   void EmitAutoVarInit(const AutoVarEmission &emission);
1904   void EmitAutoVarCleanups(const AutoVarEmission &emission);
1905   void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1906                               QualType::DestructionKind dtorKind);
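
  // The split entry points above are normally used in this order; this is, in
  // essence, what EmitAutoVarDecl does for a local variable 'D'.
  //
  //   AutoVarEmission emission = EmitAutoVarAlloca(D);
  //   EmitAutoVarInit(emission);
  //   EmitAutoVarCleanups(emission);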
1907 
1908   void EmitStaticVarDecl(const VarDecl &D,
1909                          llvm::GlobalValue::LinkageTypes Linkage);
1910 
1911   /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1912   void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
1913 
1914   /// protectFromPeepholes - Protect a value that we're intending to
1915   /// store to the side, but which will probably be used later, from
1916   /// aggressive peepholing optimizations that might delete it.
1917   ///
1918   /// Pass the result to unprotectFromPeepholes to declare that
1919   /// protection is no longer required.
1920   ///
1921   /// There's no particular reason why this shouldn't apply to
1922   /// l-values, it's just that no existing peepholes work on pointers.
1923   PeepholeProtection protectFromPeepholes(RValue rvalue);
1924   void unprotectFromPeepholes(PeepholeProtection protection);
1925 
1926   //===--------------------------------------------------------------------===//
1927   //                             Statement Emission
1928   //===--------------------------------------------------------------------===//
1929 
1930   /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1931   void EmitStopPoint(const Stmt *S);
1932 
1933   /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1934   /// this function even if there is no current insertion point.
1935   ///
1936   /// This function may clear the current insertion point; callers should use
1937   /// EnsureInsertPoint if they wish to subsequently generate code without first
1938   /// calling EmitBlock, EmitBranch, or EmitStmt.
1939   void EmitStmt(const Stmt *S);
1940 
1941   /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1942   /// necessarily require an insertion point or debug information; typically
1943   /// because the statement amounts to a jump or a container of other
1944   /// statements.
1945   ///
1946   /// \return True if the statement was handled.
1947   bool EmitSimpleStmt(const Stmt *S);
1948 
1949   RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
1950                           AggValueSlot AVS = AggValueSlot::ignored());
1951 
1952   /// EmitLabel - Emit the block for the given label. It is legal to call this
1953   /// function even if there is no current insertion point.
1954   void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
1955 
1956   void EmitLabelStmt(const LabelStmt &S);
1957   void EmitGotoStmt(const GotoStmt &S);
1958   void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
1959   void EmitIfStmt(const IfStmt &S);
1960   void EmitWhileStmt(const WhileStmt &S);
1961   void EmitDoStmt(const DoStmt &S);
1962   void EmitForStmt(const ForStmt &S);
1963   void EmitReturnStmt(const ReturnStmt &S);
1964   void EmitDeclStmt(const DeclStmt &S);
1965   void EmitBreakStmt(const BreakStmt &S);
1966   void EmitContinueStmt(const ContinueStmt &S);
1967   void EmitSwitchStmt(const SwitchStmt &S);
1968   void EmitDefaultStmt(const DefaultStmt &S);
1969   void EmitCaseStmt(const CaseStmt &S);
1970   void EmitCaseStmtRange(const CaseStmt &S);
1971   void EmitAsmStmt(const AsmStmt &S);
1972 
1973   void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
1974   void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
1975   void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
1976   void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
1977   void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
1978 
1979   llvm::Constant *getUnwindResumeFn();
1980   llvm::Constant *getUnwindResumeOrRethrowFn();
1981   void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1982   void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1983 
1984   void EmitCXXTryStmt(const CXXTryStmt &S);
1985   void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
1986 
1987   //===--------------------------------------------------------------------===//
1988   //                         LValue Expression Emission
1989   //===--------------------------------------------------------------------===//
1990 
1991   /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
1992   RValue GetUndefRValue(QualType Ty);
1993 
1994   /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
1995   /// and issue an ErrorUnsupported style diagnostic (using the
1996   /// provided Name).
1997   RValue EmitUnsupportedRValue(const Expr *E,
1998                                const char *Name);
1999 
2000   /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
2001   /// an ErrorUnsupported style diagnostic (using the provided Name).
2002   LValue EmitUnsupportedLValue(const Expr *E,
2003                                const char *Name);
2004 
2005   /// EmitLValue - Emit code to compute a designator that specifies the location
2006   /// of the expression.
2007   ///
2008   /// This can return one of two things: a simple address or a bitfield
2009   /// reference.  In either case, the LLVM Value* in the LValue structure is
2010   /// guaranteed to be an LLVM pointer type.
2011   ///
2012   /// If this returns a bitfield reference, nothing about the pointee type of
2013   /// the LLVM value is known: For example, it may not be a pointer to an
2014   /// integer.
2015   ///
2016   /// If this returns a normal address, and if the lvalue's C type is fixed
2017   /// size, this method guarantees that the returned pointer type will point to
2018   /// an LLVM type of the same size of the lvalue's type.  If the lvalue has a
2019   /// variable length type, this is not possible.
2020   ///
2021   LValue EmitLValue(const Expr *E);
2022 
2023   /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
2024   /// checking code to guard against undefined behavior.  This is only
2025   /// suitable when we know that the address will be used to access the
2026   /// object.
2027   LValue EmitCheckedLValue(const Expr *E);
2028 
2029   /// EmitToMemory - Change a scalar value from its value
2030   /// representation to its in-memory representation.
2031   llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
2032 
2033   /// EmitFromMemory - Change a scalar value from its memory
2034   /// representation to its value representation.
2035   llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
2036 
2037   /// EmitLoadOfScalar - Load a scalar value from an address, taking
2038   /// care to appropriately convert from the memory representation to
2039   /// the LLVM value representation.
2040   llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
2041                                 unsigned Alignment, QualType Ty,
2042                                 llvm::MDNode *TBAAInfo = 0);
2043 
2044   /// EmitLoadOfScalar - Load a scalar value from an address, taking
2045   /// care to appropriately convert from the memory representation to
2046   /// the LLVM value representation.  The l-value must be a simple
2047   /// l-value.
2048   llvm::Value *EmitLoadOfScalar(LValue lvalue);
2049 
2050   /// EmitStoreOfScalar - Store a scalar value to an address, taking
2051   /// care to appropriately convert from the memory representation to
2052   /// the LLVM value representation.
2053   void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
2054                          bool Volatile, unsigned Alignment, QualType Ty,
2055                          llvm::MDNode *TBAAInfo = 0, bool isInit=false);
2056 
2057   /// EmitStoreOfScalar - Store a scalar value to an address, taking
2058   /// care to appropriately convert from the memory representation to
2059   /// the LLVM value representation.  The l-value must be a simple
2060   /// l-value.  The isInit flag indicates whether this is an initialization.
2061   /// If so, atomic qualifiers are ignored and the store is always non-atomic.
2062   void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
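
  // Illustrative only: copying a scalar between two *simple* l-values with the
  // helpers above ('Src' and 'Dst' are assumed to have the same type).
  //
  //   llvm::Value *V = EmitLoadOfScalar(Src);
  //   EmitStoreOfScalar(V, Dst);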
2063 
2064   /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
2065   /// this method emits the address of the lvalue, then loads the result as an
2066   /// rvalue, returning the rvalue.
2067   RValue EmitLoadOfLValue(LValue V);
2068   RValue EmitLoadOfExtVectorElementLValue(LValue V);
2069   RValue EmitLoadOfBitfieldLValue(LValue LV);
2070 
2071   /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2072   /// lvalue, where both are guaranteed to have the same type, and that type
2073   /// is 'Ty'.
2074   void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
2075   void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
2076 
2077   /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same
2078   /// constraints as EmitStoreThroughLValue.
2079   ///
2080   /// \param Result [out] - If non-null, this will be set to a Value* for the
2081   /// bit-field contents after the store, appropriate for use as the result of
2082   /// an assignment to the bit-field.
2083   void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2084                                       llvm::Value **Result=0);
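
  // A minimal sketch of lowering a simple scalar assignment 'LHSExpr = RHSExpr'
  // (no bit-fields or other special l-value kinds) with the routines above:
  //
  //   RValue RHS = RValue::get(EmitScalarExpr(RHSExpr));
  //   LValue LHSLV = EmitLValue(LHSExpr);
  //   EmitStoreThroughLValue(RHS, LHSLV);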
2085 
2086   /// Emit an l-value for an assignment (simple or compound) of complex type.
2087   LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
2088   LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
2089 
2090   // Note: only available for agg return types
2091   LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
2092   LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
2093   // Note: only available for agg return types
2094   LValue EmitCallExprLValue(const CallExpr *E);
2095   // Note: only available for agg return types
2096   LValue EmitVAArgExprLValue(const VAArgExpr *E);
2097   LValue EmitDeclRefLValue(const DeclRefExpr *E);
2098   LValue EmitStringLiteralLValue(const StringLiteral *E);
2099   LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
2100   LValue EmitPredefinedLValue(const PredefinedExpr *E);
2101   LValue EmitUnaryOpLValue(const UnaryOperator *E);
2102   LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
2103   LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
2104   LValue EmitMemberExpr(const MemberExpr *E);
2105   LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
2106   LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
2107   LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
2108   LValue EmitCastLValue(const CastExpr *E);
2109   LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
2110   LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
2111   LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
2112 
2113   class ConstantEmission {
2114     llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
2115     ConstantEmission(llvm::Constant *C, bool isReference)
2116       : ValueAndIsReference(C, isReference) {}
2117   public:
2118     ConstantEmission() {}
2119     static ConstantEmission forReference(llvm::Constant *C) {
2120       return ConstantEmission(C, true);
2121     }
2122     static ConstantEmission forValue(llvm::Constant *C) {
2123       return ConstantEmission(C, false);
2124     }
2125 
2126     operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
2127 
2128     bool isReference() const { return ValueAndIsReference.getInt(); }
2129     LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
2130       assert(isReference());
2131       return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
2132                                             refExpr->getType());
2133     }
2134 
2135     llvm::Constant *getValue() const {
2136       assert(!isReference());
2137       return ValueAndIsReference.getPointer();
2138     }
2139   };
2140 
2141   ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
2142 
2143   RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
2144                                 AggValueSlot slot = AggValueSlot::ignored());
2145   LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
2146 
2147   llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2148                               const ObjCIvarDecl *Ivar);
2149   LValue EmitLValueForAnonRecordField(llvm::Value* Base,
2150                                       const IndirectFieldDecl* Field,
2151                                       unsigned CVRQualifiers);
2152   LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
2153                             unsigned CVRQualifiers);
2154 
2155   /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
2156   /// if the Field is a reference, this will return the address of the reference
2157   /// and not the address of the value stored in the reference.
2158   LValue EmitLValueForFieldInitialization(llvm::Value* Base,
2159                                           const FieldDecl* Field,
2160                                           unsigned CVRQualifiers);
2161 
2162   LValue EmitLValueForIvar(QualType ObjectTy,
2163                            llvm::Value* Base, const ObjCIvarDecl *Ivar,
2164                            unsigned CVRQualifiers);
2165 
2166   LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
2167                                 unsigned CVRQualifiers);
2168 
2169   LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
2170   LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
2171   LValue EmitLambdaLValue(const LambdaExpr *E);
2172   LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
2173 
2174   LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
2175   LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
2176   LValue EmitStmtExprLValue(const StmtExpr *E);
2177   LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
2178   LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
2179   void   EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
2180 
2181   //===--------------------------------------------------------------------===//
2182   //                         Scalar Expression Emission
2183   //===--------------------------------------------------------------------===//
2184 
2185   /// EmitCall - Generate a call of the given function, expecting the given
2186   /// result type, and using the given argument list which specifies both the
2187   /// LLVM arguments and the types they were derived from.
2188   ///
2189   /// \param TargetDecl - If given, the decl of the function in a direct call;
2190   /// used to set attributes on the call (noreturn, etc.).
2191   RValue EmitCall(const CGFunctionInfo &FnInfo,
2192                   llvm::Value *Callee,
2193                   ReturnValueSlot ReturnValue,
2194                   const CallArgList &Args,
2195                   const Decl *TargetDecl = 0,
2196                   llvm::Instruction **callOrInvoke = 0);
2197 
2198   RValue EmitCall(QualType FnType, llvm::Value *Callee,
2199                   ReturnValueSlot ReturnValue,
2200                   CallExpr::const_arg_iterator ArgBeg,
2201                   CallExpr::const_arg_iterator ArgEnd,
2202                   const Decl *TargetDecl = 0);
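
  // Illustrative sketch (callee and argument emission elided): issuing a call
  // through the QualType overload above, where 'FnType' is the callee's
  // function type, 'Callee' the emitted function pointer, and 'CE' the call
  // expression being lowered.
  //
  //   RValue Result = EmitCall(FnType, Callee, ReturnValueSlot(),
  //                            CE->arg_begin(), CE->arg_end());
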
2203   RValue EmitCallExpr(const CallExpr *E,
2204                       ReturnValueSlot ReturnValue = ReturnValueSlot());
2205 
2206   llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2207                                   ArrayRef<llvm::Value *> Args,
2208                                   const Twine &Name = "");
2209   llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2210                                   const Twine &Name = "");
2211 
2212   llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
2213                                 llvm::Type *Ty);
2214   llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
2215                                 llvm::Value *This, llvm::Type *Ty);
2216   llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
2217                                          NestedNameSpecifier *Qual,
2218                                          llvm::Type *Ty);
2219 
2220   llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
2221                                                    CXXDtorType Type,
2222                                                    const CXXRecordDecl *RD);
2223 
2224   RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
2225                            llvm::Value *Callee,
2226                            ReturnValueSlot ReturnValue,
2227                            llvm::Value *This,
2228                            llvm::Value *VTT,
2229                            CallExpr::const_arg_iterator ArgBeg,
2230                            CallExpr::const_arg_iterator ArgEnd);
2231   RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
2232                                ReturnValueSlot ReturnValue);
2233   RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
2234                                       ReturnValueSlot ReturnValue);
2235 
2236   llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2237                                            const CXXMethodDecl *MD,
2238                                            llvm::Value *This);
2239   RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
2240                                        const CXXMethodDecl *MD,
2241                                        ReturnValueSlot ReturnValue);
2242 
2243   RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
2244                                 ReturnValueSlot ReturnValue);
2245 
2247   RValue EmitBuiltinExpr(const FunctionDecl *FD,
2248                          unsigned BuiltinID, const CallExpr *E);
2249 
2250   RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
2251 
2252   /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
2253   /// is unhandled by the current target.
2254   llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2255 
2256   llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2257   llvm::Value *EmitNeonCall(llvm::Function *F,
2258                             SmallVectorImpl<llvm::Value*> &O,
2259                             const char *name,
2260                             unsigned shift = 0, bool rightshift = false);
2261   llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
2262   llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
2263                                    bool negateForRightShift);
2264 
2265   llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
2266   llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2267   llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2268   llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2269 
2270   llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
2271   llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
2272   llvm::Value *EmitObjCNumericLiteral(const ObjCNumericLiteral *E);
2273   llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
2274   llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
2275   llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
2276                                 const ObjCMethodDecl *MethodWithObjects);
2277   llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
2278   RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
2279                              ReturnValueSlot Return = ReturnValueSlot());
2280 
2281   /// Retrieves the default cleanup kind for an ARC cleanup.
2282   /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2283   CleanupKind getARCCleanupKind() {
2284     return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2285              ? NormalAndEHCleanup : NormalCleanup;
2286   }
2287 
2288   // ARC primitives.
2289   void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
2290   void EmitARCDestroyWeak(llvm::Value *addr);
2291   llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
2292   llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
2293   llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
2294                                 bool ignored);
2295   void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
2296   void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
2297   llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
2298   llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
2299   llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
2300                                   bool ignored);
2301   llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
2302                                       bool ignored);
2303   llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
2304   llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
2305   llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
2306   void EmitARCRelease(llvm::Value *value, bool precise);
2307   llvm::Value *EmitARCAutorelease(llvm::Value *value);
2308   llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
2309   llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
2310   llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
2311 
2312   std::pair<LValue,llvm::Value*>
2313   EmitARCStoreAutoreleasing(const BinaryOperator *e);
2314   std::pair<LValue,llvm::Value*>
2315   EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
2316 
2317   llvm::Value *EmitObjCThrowOperand(const Expr *expr);
2318 
2319   llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
2320   llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
2321   llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
2322 
2323   llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
2324   llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
2325   llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
2326 
2327   static Destroyer destroyARCStrongImprecise;
2328   static Destroyer destroyARCStrongPrecise;
2329   static Destroyer destroyARCWeak;
2330 
2331   void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
2332   llvm::Value *EmitObjCAutoreleasePoolPush();
2333   llvm::Value *EmitObjCMRRAutoreleasePoolPush();
2334   void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
2335   void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
2336 
2337   /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
2338   /// expression. Will emit a temporary variable if E is not an LValue.
2339   RValue EmitReferenceBindingToExpr(const Expr* E,
2340                                     const NamedDecl *InitializedDecl);
2341 
2342   //===--------------------------------------------------------------------===//
2343   //                           Expression Emission
2344   //===--------------------------------------------------------------------===//
2345 
2346   // Expressions are broken into three classes: scalar, complex, aggregate.
2347 
2348   /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2349   /// scalar type, returning the result.
2350   llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2351 
2352   /// EmitScalarConversion - Emit a conversion from the specified type to the
2353   /// specified destination type, both of which are LLVM scalar types.
2354   llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2355                                     QualType DstTy);
2356 
2357   /// EmitComplexToScalarConversion - Emit a conversion from the specified
2358   /// complex type to the specified destination type, where the destination type
2359   /// is an LLVM scalar type.
2360   llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2361                                              QualType DstTy);
2362 
2364   /// EmitAggExpr - Emit the computation of the specified expression
2365   /// of aggregate type.  The result is computed into the given slot,
2366   /// which may be null to indicate that the value is not needed.
2367   void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
2368 
2369   /// EmitAggExprToLValue - Emit the computation of the specified expression of
2370   /// aggregate type into a temporary LValue.
2371   LValue EmitAggExprToLValue(const Expr *E);
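
  // Sketch (roughly what EmitAggExprToLValue does): evaluate an aggregate
  // expression into a fresh temporary slot.
  //
  //   AggValueSlot Slot = CreateAggTemp(E->getType(), "agg.tmp");
  //   EmitAggExpr(E, Slot);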
2372 
2373   /// EmitGCMemmoveCollectable - Emit special API for structs with object
2374   /// pointers.
2375   void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2376                                 QualType Ty);
2377 
2378   /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2379   /// make sure it survives garbage collection until this point.
2380   void EmitExtendGCLifetime(llvm::Value *object);
2381 
2382   /// EmitComplexExpr - Emit the computation of the specified expression of
2383   /// complex type, returning the result.
2384   ComplexPairTy EmitComplexExpr(const Expr *E,
2385                                 bool IgnoreReal = false,
2386                                 bool IgnoreImag = false);
2387 
2388   /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
2389   /// of complex type, storing into the specified Value*.
2390   void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
2391                                bool DestIsVolatile);
2392 
2393   /// StoreComplexToAddr - Store a complex number into the specified address.
2394   void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
2395                           bool DestIsVolatile);
2396   /// LoadComplexFromAddr - Load a complex number from the specified address.
2397   ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
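
  // Illustrative only: evaluating a complex expression and spilling it to a
  // temporary with the helpers above.
  //
  //   ComplexPairTy Val = EmitComplexExpr(E);
  //   llvm::Value *Tmp = CreateMemTemp(E->getType(), "complex.tmp");
  //   StoreComplexToAddr(Val, Tmp, /*DestIsVolatile=*/false);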
2398 
2399   /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2400   /// a static local variable.
2401   llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
2402                                             const char *Separator,
2403                                        llvm::GlobalValue::LinkageTypes Linkage);
2404 
2405   /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2406   /// global variable that has already been created for it.  If the initializer
2407   /// has a different type than GV does, this may free GV and return a different
2408   /// one.  Otherwise it just returns GV.
2409   llvm::GlobalVariable *
2410   AddInitializerToStaticVarDecl(const VarDecl &D,
2411                                 llvm::GlobalVariable *GV);
2412 
2414   /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2415   /// variable with global storage.
2416   void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2417                                 bool PerformInit);
2418 
2419   /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
2420   /// with the C++ runtime so that its destructor will be called at exit.
2421   void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
2422                                      llvm::Constant *DeclPtr);
2423 
2424   /// Emit code in this function to perform a guarded variable
2425   /// initialization.  Guarded initializations are used when it's not
2426   /// possible to prove that an initialization will be done exactly
2427   /// once, e.g. with a static local variable or a static data member
2428   /// of a class template.
2429   void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2430                           bool PerformInit);
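
  // A rough, simplified sketch of how the static-local helpers above relate;
  // the linkage, the separator string, and the guarded-vs-constant decision
  // are all placeholders here.
  //
  //   llvm::GlobalVariable *GV =
  //       CreateStaticVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);
  //   if (D.getInit())
  //     EmitCXXGuardedInit(D, GV, /*PerformInit=*/true);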
2431 
2432   /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2433   /// variables.
2434   void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2435                                  llvm::Constant **Decls,
2436                                  unsigned NumDecls);
2437 
2438   /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
2439   /// variables.
2440   void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
2441                                  const std::vector<std::pair<llvm::WeakVH,
2442                                    llvm::Constant*> > &DtorsAndObjects);
2443 
2444   void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2445                                         const VarDecl *D,
2446                                         llvm::GlobalVariable *Addr,
2447                                         bool PerformInit);
2448 
2449   void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2450 
2451   void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
2452                                   const Expr *Exp);
2453 
2454   void enterFullExpression(const ExprWithCleanups *E) {
2455     if (E->getNumObjects() == 0) return;
2456     enterNonTrivialFullExpression(E);
2457   }
2458   void enterNonTrivialFullExpression(const ExprWithCleanups *E);
2459 
2460   void EmitCXXThrowExpr(const CXXThrowExpr *E);
2461 
2462   void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
2463 
2464   RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
2465 
2466   //===--------------------------------------------------------------------===//
2467   //                         Annotations Emission
2468   //===--------------------------------------------------------------------===//
2469 
2470   /// Emit an annotation call (intrinsic or builtin).
2471   llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
2472                                   llvm::Value *AnnotatedVal,
2473                                   llvm::StringRef AnnotationStr,
2474                                   SourceLocation Location);
2475 
2476   /// Emit local annotations for the local variable V, declared by D.
2477   void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
2478 
2479   /// Emit field annotations for the given field & value. Returns the
2480   /// annotation result.
2481   llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
2482 
2483   //===--------------------------------------------------------------------===//
2484   //                             Internal Helpers
2485   //===--------------------------------------------------------------------===//
2486 
2487   /// ContainsLabel - Return true if the statement contains a label in it.  If
2488   /// this statement is not executed normally, and it does not contain a label,
2489   /// then we can just remove the code.
2490   static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
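
  // Example: the 'if (0)' body below cannot be dropped even though it is
  // never executed normally, because the label inside it may be the target
  // of a goto elsewhere in the function:
  //
  //   if (0) { retry: ++attempts; }   // label 'retry' may be reached by goto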

  /// containsBreak - Return true if the statement contains a break out of it.
  /// Breaks that are nested inside an inner switch or loop do not count,
  /// since they cannot break out of the given statement.
  static bool containsBreak(const Stmt *S);
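
  // Example: the while statement below does not contain a break out of
  // itself; the break belongs to the nested switch:
  //
  //   while (more) {
  //     switch (kind) { case 0: break; }  // breaks the switch, not the loop
  //   }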

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false.  If it
  /// constant folds, return true and set the boolean result in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false.  If it
  /// constant folds, return true and set the folded value in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
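
  // Example: a condition like the one below folds to a constant, so the dead
  // arm need not be emitted at all (unless it contains a label):
  //
  //   if (sizeof(void*) == 8) { /* 64-bit-only code */ }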

  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
  /// if statement) to the specified blocks.  Based on the condition, this might
  /// try to simplify the codegen of the conditional based on the branch.
  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
                            llvm::BasicBlock *FalseBlock);
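
  // Example: for 'if (a && b)' this emits the short-circuit control flow
  // directly instead of materializing a boolean value (schematic IR; block
  // names are illustrative):
  //
  //   br i1 %a, label %land.rhs, label %FalseBlock
  // land.rhs:
  //   br i1 %b, label %TrueBlock, label %FalseBlock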

  /// getTrapBB - Create a basic block that will call the trap intrinsic.  We'll
  /// generate a branch around the created basic block as necessary.
  llvm::BasicBlock *getTrapBB();
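
  // The trap block ends the enclosing control flow (schematic IR):
  //
  // trap:
  //   call void @llvm.trap()
  //   unreachable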

  /// EmitCallArg - Emit a single call argument.
  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);

  /// EmitDelegateCallArg - We are performing a delegate call; that
  /// is, the current function is delegating to another one.  Produce
  /// an r-value suitable for passing the given parameter.
  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);

  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
  /// point operation, expressed as the maximum relative error in ulp.
  void SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN,
                     unsigned AccuracyD = 1);
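
  // The accuracy is the rational AccuracyN/AccuracyD, in ulps.  For example,
  // assuming that reading of the parameters, a maximum error of 2.5 ulp
  // would be requested as:
  //
  //   SetFPAccuracy(Val, 5, 2);   // 5/2 = 2.5 ulp
  //   SetFPAccuracy(Val, 3);      // AccuracyD defaults to 1, i.e. 3.0 ulp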

private:
  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
  void EmitReturnOfRValue(RValue RV, QualType Ty);

  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
  ///
  /// \param AI - The first function argument of the expansion.
  /// \return The argument following the last expanded function
  /// argument.
  llvm::Function::arg_iterator
  ExpandTypeFromArgs(QualType Ty, LValue Dst,
                     llvm::Function::arg_iterator AI);

  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
  /// Ty, into individual arguments on the provided vector \arg Args. See
  /// ABIArgInfo::Expand.
  void ExpandTypeToArgs(QualType Ty, RValue Src,
                        SmallVector<llvm::Value*, 16> &Args,
                        llvm::FunctionType *IRFuncTy);
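
  // Illustration of ABIArgInfo::Expand (schematic): a parameter such as
  //
  //   struct Point { int x; float y; };
  //   void move(struct Point p);
  //
  // may be passed as two separate IR arguments (i32, float).
  // ExpandTypeFromArgs reassembles 'p' from those arguments in the callee;
  // ExpandTypeToArgs splits an RValue into them at the call site.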

  llvm::Value* EmitAsmInput(const AsmStmt &S,
                            const TargetInfo::ConstraintInfo &Info,
                            const Expr *InputExpr, std::string &ConstraintStr);

  llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
                                  const TargetInfo::ConstraintInfo &Info,
                                  LValue InputValue, QualType InputType,
                                  std::string &ConstraintStr);

  /// EmitCallArgs - Emit call arguments for a function.
  /// The CallArgTypeInfo parameter is used for iterating over the known
  /// argument types of the function being called.
  template<typename T>
  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
                    CallExpr::const_arg_iterator ArgBeg,
                    CallExpr::const_arg_iterator ArgEnd) {
    CallExpr::const_arg_iterator Arg = ArgBeg;

    // First, use the argument types that the type info knows about.
    if (CallArgTypeInfo) {
      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
        assert(Arg != ArgEnd && "Running over edge of argument list!");
        QualType ArgType = *I;
#ifndef NDEBUG
        QualType ActualArgType = Arg->getType();
        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
          QualType ActualBaseType =
            ActualArgType->getAs<PointerType>()->getPointeeType();
          QualType ArgBaseType =
            ArgType->getAs<PointerType>()->getPointeeType();
          if (ArgBaseType->isVariableArrayType()) {
            if (const VariableArrayType *VAT =
                getContext().getAsVariableArrayType(ActualBaseType)) {
              if (!VAT->getSizeExpr())
                ActualArgType = ArgType;
            }
          }
        }
        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
               getTypePtr() ==
               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
               "type mismatch in call argument!");
#endif
        EmitCallArg(Args, *Arg, ArgType);
      }

      // Either we've emitted all the call args, or we have a call to a
      // variadic function.
      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
             "Extra arguments in non-variadic function!");

    }

    // If we still have any arguments, emit them using their own expression
    // types.
    for (; Arg != ArgEnd; ++Arg)
      EmitCallArg(Args, *Arg, Arg->getType());
  }
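
  // Typical use (sketch): the known parameter types usually come from a
  // FunctionProtoType, and any remaining arguments to a variadic function
  // are emitted with their own expression types:
  //
  //   CallArgList Args;
  //   EmitCallArgs(Args, FnProtoType, CE->arg_begin(), CE->arg_end());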

  const TargetCodeGenInfo &getTargetHooks() const {
    return CGM.getTargetCodeGenInfo();
  }

  void EmitDeclMetadata();

  CodeGenModule::ByrefHelpers *
  buildByrefHelpers(llvm::StructType &byrefType,
                    const AutoVarEmission &emission);

  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);

  /// GetPointeeAlignment - Given an expression with a pointer type, find the
  /// alignment of the type referenced by the pointer.  Skip over implicit
  /// casts.
  unsigned GetPointeeAlignment(const Expr *Addr);

  /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
  /// the alignment of the type referenced by the pointer.  Skip over implicit
  /// casts.  Return the alignment as an llvm::Value.
  llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
};

/// Helper class with most of the code for saving a value for a
/// conditional expression cleanup.
struct DominatingLLVMValue {
  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;

  /// Answer whether the given value needs extra work to be saved.
  static bool needsSaving(llvm::Value *value) {
    // If it's not an instruction, we don't need to save.
    if (!isa<llvm::Instruction>(value)) return false;

    // If it's an instruction in the entry block, we don't need to save.
    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
    return (block != &block->getParent()->getEntryBlock());
  }

  /// Try to save the given value.
  static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
    if (!needsSaving(value)) return saved_type(value, false);

    // Otherwise we need an alloca.
    llvm::Value *alloca =
      CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
    CGF.Builder.CreateStore(value, alloca);

    return saved_type(alloca, true);
  }

  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
    if (!value.getInt()) return value.getPointer();
    return CGF.Builder.CreateLoad(value.getPointer());
  }
};
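
// Why saving is needed (sketch): a value computed in conditionally emitted
// code may not dominate the block where a cleanup is eventually emitted.
// Spilling it to an alloca and reloading it in the cleanup keeps the IR
// well-formed:
//
//   DominatingLLVMValue::saved_type S = DominatingLLVMValue::save(CGF, V);
//   // ... cleanup emission happens later, possibly on another path ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, S);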

/// A partial specialization of DominatingPointer for pointers to
/// llvm::Values that might be llvm::Instructions.
template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
  typedef T *type;
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
  }
};

/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
  typedef RValue type;
  class saved_type {
    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
                AggregateAddress, ComplexAddress };

    llvm::Value *Value;
    Kind K;
    saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}

  public:
    static bool needsSaving(RValue value);
    static saved_type save(CodeGenFunction &CGF, RValue value);
    RValue restore(CodeGenFunction &CGF);

    // implementations in CGExprCXX.cpp
  };

  static bool needsSaving(type value) {
    return saved_type::needsSaving(value);
  }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return saved_type::save(CGF, value);
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return value.restore(CGF);
  }
};

}  // end namespace CodeGen
}  // end namespace clang

#endif