1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the internal per-function state used for llvm translation.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15 #define CLANG_CODEGEN_CODEGENFUNCTION_H
16 
17 #include "clang/AST/Type.h"
18 #include "clang/AST/ExprCXX.h"
19 #include "clang/AST/ExprObjC.h"
20 #include "clang/AST/CharUnits.h"
21 #include "clang/Frontend/CodeGenOptions.h"
22 #include "clang/Basic/ABI.h"
23 #include "clang/Basic/TargetInfo.h"
24 #include "llvm/ADT/ArrayRef.h"
25 #include "llvm/ADT/DenseMap.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/Support/ValueHandle.h"
28 #include "llvm/Support/Debug.h"
29 #include "CodeGenModule.h"
30 #include "CGBuilder.h"
31 #include "CGDebugInfo.h"
32 #include "CGValue.h"
33 
34 namespace llvm {
35   class BasicBlock;
36   class LLVMContext;
37   class MDNode;
38   class Module;
39   class SwitchInst;
40   class Twine;
41   class Value;
42   class CallSite;
43 }
44 
45 namespace clang {
46   class ASTContext;
47   class BlockDecl;
48   class CXXDestructorDecl;
49   class CXXForRangeStmt;
50   class CXXTryStmt;
51   class Decl;
52   class LabelDecl;
53   class EnumConstantDecl;
54   class FunctionDecl;
55   class FunctionProtoType;
56   class LabelStmt;
57   class ObjCContainerDecl;
58   class ObjCInterfaceDecl;
59   class ObjCIvarDecl;
60   class ObjCMethodDecl;
61   class ObjCImplementationDecl;
62   class ObjCPropertyImplDecl;
63   class TargetInfo;
64   class TargetCodeGenInfo;
65   class VarDecl;
66   class ObjCForCollectionStmt;
67   class ObjCAtTryStmt;
68   class ObjCAtThrowStmt;
69   class ObjCAtSynchronizedStmt;
70   class ObjCAutoreleasePoolStmt;
71 
72 namespace CodeGen {
73   class CodeGenTypes;
74   class CGFunctionInfo;
75   class CGRecordLayout;
76   class CGBlockInfo;
77   class CGCXXABI;
78   class BlockFlags;
79   class BlockFieldFlags;
80 
81 /// A branch fixup.  These are required when emitting a goto to a
82 /// label which hasn't been emitted yet.  The goto is optimistically
83 /// emitted as a branch to the basic block for the label, and (if it
84 /// occurs in a scope with non-trivial cleanups) a fixup is added to
85 /// the innermost cleanup.  When a (normal) cleanup is popped, any
86 /// unresolved fixups in that scope are threaded through the cleanup.
87 struct BranchFixup {
88   /// The block containing the terminator which needs to be modified
89   /// into a switch if this fixup is resolved into the current scope.
  /// If null, InitialBranch branches directly to the destination.
91   llvm::BasicBlock *OptimisticBranchBlock;
92 
93   /// The ultimate destination of the branch.
94   ///
95   /// This can be set to null to indicate that this fixup was
96   /// successfully resolved.
97   llvm::BasicBlock *Destination;
98 
99   /// The destination index value.
100   unsigned DestinationIndex;
101 
102   /// The initial branch of the fixup.
103   llvm::BranchInst *InitialBranch;
104 };
105 
106 template <class T> struct InvariantValue {
107   typedef T type;
108   typedef T saved_type;
109   static bool needsSaving(type value) { return false; }
110   static saved_type save(CodeGenFunction &CGF, type value) { return value; }
111   static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
112 };
113 
114 /// A metaprogramming class for ensuring that a value will dominate an
115 /// arbitrary position in a function.
116 template <class T> struct DominatingValue : InvariantValue<T> {};
117 
118 template <class T, bool mightBeInstruction =
119             llvm::is_base_of<llvm::Value, T>::value &&
120             !llvm::is_base_of<llvm::Constant, T>::value &&
121             !llvm::is_base_of<llvm::BasicBlock, T>::value>
122 struct DominatingPointer;
123 template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
124 // template <class T> struct DominatingPointer<T,true> at end of file
125 
126 template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
127 
128 enum CleanupKind {
129   EHCleanup = 0x1,
130   NormalCleanup = 0x2,
131   NormalAndEHCleanup = EHCleanup | NormalCleanup,
132 
133   InactiveCleanup = 0x4,
134   InactiveEHCleanup = EHCleanup | InactiveCleanup,
135   InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
136   InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
137 };
138 
139 /// A stack of scopes which respond to exceptions, including cleanups
140 /// and catch blocks.
141 class EHScopeStack {
142 public:
143   /// A saved depth on the scope stack.  This is necessary because
144   /// pushing scopes onto the stack invalidates iterators.
145   class stable_iterator {
146     friend class EHScopeStack;
147 
148     /// Offset from StartOfData to EndOfBuffer.
149     ptrdiff_t Size;
150 
151     stable_iterator(ptrdiff_t Size) : Size(Size) {}
152 
153   public:
154     static stable_iterator invalid() { return stable_iterator(-1); }
155     stable_iterator() : Size(-1) {}
156 
157     bool isValid() const { return Size >= 0; }
158 
159     /// Returns true if this scope encloses I.
160     /// Returns false if I is invalid.
161     /// This scope must be valid.
162     bool encloses(stable_iterator I) const { return Size <= I.Size; }
163 
164     /// Returns true if this scope strictly encloses I: that is,
165     /// if it encloses I and is not I.
    /// Returns false if I is invalid.
167     /// This scope must be valid.
168     bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
169 
170     friend bool operator==(stable_iterator A, stable_iterator B) {
171       return A.Size == B.Size;
172     }
173     friend bool operator!=(stable_iterator A, stable_iterator B) {
174       return A.Size != B.Size;
175     }
176   };
177 
178   /// Information for lazily generating a cleanup.  Subclasses must be
179   /// POD-like: cleanups will not be destructed, and they will be
180   /// allocated on the cleanup stack and freely copied and moved
181   /// around.
182   ///
183   /// Cleanup implementations should generally be declared in an
184   /// anonymous namespace.
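  ///
  /// For illustration only, a sketch (the 'MyCleanup' class and 'addr' are
  /// hypothetical): a typical cleanup is declared in an anonymous namespace
  /// and pushed by value onto the stack:
  ///     namespace {
  ///       struct MyCleanup : EHScopeStack::Cleanup {
  ///         llvm::Value *Addr;
  ///         MyCleanup(llvm::Value *addr) : Addr(addr) {}
  ///         void Emit(CodeGenFunction &CGF, Flags flags) {
  ///           // ... emit the IR which tears down *Addr ...
  ///         }
  ///       };
  ///     }
  ///     CGF.EHStack.pushCleanup<MyCleanup>(NormalAndEHCleanup, addr);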
185   class Cleanup {
    // Anchor the vtable to this translation unit.
187     virtual void anchor();
188   public:
189     /// Generation flags.
190     class Flags {
191       enum {
192         F_IsForEH             = 0x1,
193         F_IsNormalCleanupKind = 0x2,
194         F_IsEHCleanupKind     = 0x4
195       };
196       unsigned flags;
197 
198     public:
199       Flags() : flags(0) {}
200 
201       /// isForEH - true if the current emission is for an EH cleanup.
202       bool isForEHCleanup() const { return flags & F_IsForEH; }
203       bool isForNormalCleanup() const { return !isForEHCleanup(); }
204       void setIsForEHCleanup() { flags |= F_IsForEH; }
205 
206       bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
207       void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
208 
209       /// isEHCleanupKind - true if the cleanup was pushed as an EH
210       /// cleanup.
211       bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
212       void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
213     };
214 
215     // Provide a virtual destructor to suppress a very common warning
216     // that unfortunately cannot be suppressed without this.  Cleanups
217     // should not rely on this destructor ever being called.
218     virtual ~Cleanup() {}
219 
220     /// Emit the cleanup.  For normal cleanups, this is run in the
221     /// same EH context as when the cleanup was pushed, i.e. the
222     /// immediately-enclosing context of the cleanup scope.  For
223     /// EH cleanups, this is run in a terminate context.
224     ///
    /// \param flags  Indicates whether this emission is for an EH cleanup
    ///   or a normal cleanup.
227     virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
228   };
229 
230   /// ConditionalCleanupN stores the saved form of its N parameters,
231   /// then restores them and performs the cleanup.
232   template <class T, class A0>
233   class ConditionalCleanup1 : public Cleanup {
234     typedef typename DominatingValue<A0>::saved_type A0_saved;
235     A0_saved a0_saved;
236 
237     void Emit(CodeGenFunction &CGF, Flags flags) {
238       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
239       T(a0).Emit(CGF, flags);
240     }
241 
242   public:
243     ConditionalCleanup1(A0_saved a0)
244       : a0_saved(a0) {}
245   };
246 
247   template <class T, class A0, class A1>
248   class ConditionalCleanup2 : public Cleanup {
249     typedef typename DominatingValue<A0>::saved_type A0_saved;
250     typedef typename DominatingValue<A1>::saved_type A1_saved;
251     A0_saved a0_saved;
252     A1_saved a1_saved;
253 
254     void Emit(CodeGenFunction &CGF, Flags flags) {
255       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
256       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
257       T(a0, a1).Emit(CGF, flags);
258     }
259 
260   public:
261     ConditionalCleanup2(A0_saved a0, A1_saved a1)
262       : a0_saved(a0), a1_saved(a1) {}
263   };
264 
265   template <class T, class A0, class A1, class A2>
266   class ConditionalCleanup3 : public Cleanup {
267     typedef typename DominatingValue<A0>::saved_type A0_saved;
268     typedef typename DominatingValue<A1>::saved_type A1_saved;
269     typedef typename DominatingValue<A2>::saved_type A2_saved;
270     A0_saved a0_saved;
271     A1_saved a1_saved;
272     A2_saved a2_saved;
273 
274     void Emit(CodeGenFunction &CGF, Flags flags) {
275       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
276       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
277       A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
278       T(a0, a1, a2).Emit(CGF, flags);
279     }
280 
281   public:
282     ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
283       : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
284   };
285 
286   template <class T, class A0, class A1, class A2, class A3>
287   class ConditionalCleanup4 : public Cleanup {
288     typedef typename DominatingValue<A0>::saved_type A0_saved;
289     typedef typename DominatingValue<A1>::saved_type A1_saved;
290     typedef typename DominatingValue<A2>::saved_type A2_saved;
291     typedef typename DominatingValue<A3>::saved_type A3_saved;
292     A0_saved a0_saved;
293     A1_saved a1_saved;
294     A2_saved a2_saved;
295     A3_saved a3_saved;
296 
297     void Emit(CodeGenFunction &CGF, Flags flags) {
298       A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
299       A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
300       A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
301       A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
302       T(a0, a1, a2, a3).Emit(CGF, flags);
303     }
304 
305   public:
306     ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
307       : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
308   };
309 
310 private:
  // The implementation for this class is in CGCleanup.h and
  // CGCleanup.cpp; the definition is here because it's used as a
313   // member of CodeGenFunction.
314 
315   /// The start of the scope-stack buffer, i.e. the allocated pointer
316   /// for the buffer.  All of these pointers are either simultaneously
317   /// null or simultaneously valid.
318   char *StartOfBuffer;
319 
320   /// The end of the buffer.
321   char *EndOfBuffer;
322 
323   /// The first valid entry in the buffer.
324   char *StartOfData;
325 
326   /// The innermost normal cleanup on the stack.
327   stable_iterator InnermostNormalCleanup;
328 
329   /// The innermost EH scope on the stack.
330   stable_iterator InnermostEHScope;
331 
332   /// The current set of branch fixups.  A branch fixup is a jump to
333   /// an as-yet unemitted label, i.e. a label for which we don't yet
334   /// know the EH stack depth.  Whenever we pop a cleanup, we have
335   /// to thread all the current branch fixups through it.
336   ///
337   /// Fixups are recorded as the Use of the respective branch or
338   /// switch statement.  The use points to the final destination.
339   /// When popping out of a cleanup, these uses are threaded through
340   /// the cleanup and adjusted to point to the new cleanup.
341   ///
342   /// Note that branches are allowed to jump into protected scopes
343   /// in certain situations;  e.g. the following code is legal:
344   ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
345   ///     goto foo;
346   ///     A a;
347   ///    foo:
348   ///     bar();
349   SmallVector<BranchFixup, 8> BranchFixups;
350 
351   char *allocate(size_t Size);
352 
353   void *pushCleanup(CleanupKind K, size_t DataSize);
354 
355 public:
356   EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
357                    InnermostNormalCleanup(stable_end()),
358                    InnermostEHScope(stable_end()) {}
359   ~EHScopeStack() { delete[] StartOfBuffer; }
360 
361   // Variadic templates would make this not terrible.
362 
363   /// Push a lazily-created cleanup on the stack.
364   template <class T>
365   void pushCleanup(CleanupKind Kind) {
366     void *Buffer = pushCleanup(Kind, sizeof(T));
367     Cleanup *Obj = new(Buffer) T();
368     (void) Obj;
369   }
370 
371   /// Push a lazily-created cleanup on the stack.
372   template <class T, class A0>
373   void pushCleanup(CleanupKind Kind, A0 a0) {
374     void *Buffer = pushCleanup(Kind, sizeof(T));
375     Cleanup *Obj = new(Buffer) T(a0);
376     (void) Obj;
377   }
378 
379   /// Push a lazily-created cleanup on the stack.
380   template <class T, class A0, class A1>
381   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
382     void *Buffer = pushCleanup(Kind, sizeof(T));
383     Cleanup *Obj = new(Buffer) T(a0, a1);
384     (void) Obj;
385   }
386 
387   /// Push a lazily-created cleanup on the stack.
388   template <class T, class A0, class A1, class A2>
389   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
390     void *Buffer = pushCleanup(Kind, sizeof(T));
391     Cleanup *Obj = new(Buffer) T(a0, a1, a2);
392     (void) Obj;
393   }
394 
395   /// Push a lazily-created cleanup on the stack.
396   template <class T, class A0, class A1, class A2, class A3>
397   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
398     void *Buffer = pushCleanup(Kind, sizeof(T));
399     Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
400     (void) Obj;
401   }
402 
403   /// Push a lazily-created cleanup on the stack.
404   template <class T, class A0, class A1, class A2, class A3, class A4>
405   void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
406     void *Buffer = pushCleanup(Kind, sizeof(T));
407     Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
408     (void) Obj;
409   }
410 
411   // Feel free to add more variants of the following:
412 
413   /// Push a cleanup with non-constant storage requirements on the
414   /// stack.  The cleanup type must provide an additional static method:
415   ///   static size_t getExtraSize(size_t);
416   /// The argument to this method will be the value N, which will also
417   /// be passed as the first argument to the constructor.
418   ///
419   /// The data stored in the extra storage must obey the same
420   /// restrictions as normal cleanup member data.
421   ///
422   /// The pointer returned from this method is valid until the cleanup
423   /// stack is modified.
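  ///
  /// For illustration only, a hedged sketch (the 'DestroyPtrs' type and its
  /// members are hypothetical):
  ///     struct DestroyPtrs : EHScopeStack::Cleanup {
  ///       size_t NumPtrs;
  ///       DestroyPtrs(size_t n, llvm::Value *a, llvm::Value *b,
  ///                   llvm::Value *c) : NumPtrs(n) { /* fill the array */ }
  ///       llvm::Value **getTrailingPtrs() {
  ///         return reinterpret_cast<llvm::Value**>(this + 1);
  ///       }
  ///       static size_t getExtraSize(size_t N) {
  ///         return N * sizeof(llvm::Value*);
  ///       }
  ///       void Emit(CodeGenFunction &CGF, Flags flags) { /* ... */ }
  ///     };
  ///     DestroyPtrs *cleanup =
  ///       CGF.EHStack.pushCleanupWithExtra<DestroyPtrs>(NormalCleanup, n,
  ///                                                     a, b, c);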
424   template <class T, class A0, class A1, class A2>
425   T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
426     void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
427     return new (Buffer) T(N, a0, a1, a2);
428   }
429 
430   /// Pops a cleanup scope off the stack.  This is private to CGCleanup.cpp.
431   void popCleanup();
432 
433   /// Push a set of catch handlers on the stack.  The catch is
434   /// uninitialized and will need to have the given number of handlers
435   /// set on it.
436   class EHCatchScope *pushCatch(unsigned NumHandlers);
437 
438   /// Pops a catch scope off the stack.  This is private to CGException.cpp.
439   void popCatch();
440 
441   /// Push an exceptions filter on the stack.
442   class EHFilterScope *pushFilter(unsigned NumFilters);
443 
444   /// Pops an exceptions filter off the stack.
445   void popFilter();
446 
447   /// Push a terminate handler on the stack.
448   void pushTerminate();
449 
450   /// Pops a terminate handler off the stack.
451   void popTerminate();
452 
453   /// Determines whether the exception-scopes stack is empty.
454   bool empty() const { return StartOfData == EndOfBuffer; }
455 
456   bool requiresLandingPad() const {
457     return InnermostEHScope != stable_end();
458   }
459 
460   /// Determines whether there are any normal cleanups on the stack.
461   bool hasNormalCleanups() const {
462     return InnermostNormalCleanup != stable_end();
463   }
464 
465   /// Returns the innermost normal cleanup on the stack, or
466   /// stable_end() if there are no normal cleanups.
467   stable_iterator getInnermostNormalCleanup() const {
468     return InnermostNormalCleanup;
469   }
470   stable_iterator getInnermostActiveNormalCleanup() const;
471 
472   stable_iterator getInnermostEHScope() const {
473     return InnermostEHScope;
474   }
475 
476   stable_iterator getInnermostActiveEHScope() const;
477 
478   /// An unstable reference to a scope-stack depth.  Invalidated by
479   /// pushes but not pops.
480   class iterator;
481 
482   /// Returns an iterator pointing to the innermost EH scope.
483   iterator begin() const;
484 
485   /// Returns an iterator pointing to the outermost EH scope.
486   iterator end() const;
487 
488   /// Create a stable reference to the top of the EH stack.  The
489   /// returned reference is valid until that scope is popped off the
490   /// stack.
491   stable_iterator stable_begin() const {
492     return stable_iterator(EndOfBuffer - StartOfData);
493   }
494 
495   /// Create a stable reference to the bottom of the EH stack.
496   static stable_iterator stable_end() {
497     return stable_iterator(0);
498   }
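
  // For illustration only (a sketch): a typical client saves the current
  // depth, lets nested emission push cleanups, and later pops back down to
  // the saved depth via CodeGenFunction::PopCleanupBlocks:
  //     EHScopeStack::stable_iterator Depth = CGF.EHStack.stable_begin();
  //     // ... emit code which may push cleanups ...
  //     CGF.PopCleanupBlocks(Depth);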
499 
500   /// Translates an iterator into a stable_iterator.
501   stable_iterator stabilize(iterator it) const;
502 
  /// Turn a stable reference to a scope depth into an unstable pointer
504   /// to the EH stack.
505   iterator find(stable_iterator save) const;
506 
507   /// Removes the cleanup pointed to by the given stable_iterator.
508   void removeCleanup(stable_iterator save);
509 
510   /// Add a branch fixup to the current cleanup scope.
511   BranchFixup &addBranchFixup() {
512     assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
513     BranchFixups.push_back(BranchFixup());
514     return BranchFixups.back();
515   }
516 
517   unsigned getNumBranchFixups() const { return BranchFixups.size(); }
518   BranchFixup &getBranchFixup(unsigned I) {
519     assert(I < getNumBranchFixups());
520     return BranchFixups[I];
521   }
522 
523   /// Pops lazily-removed fixups from the end of the list.  This
524   /// should only be called by procedures which have just popped a
525   /// cleanup or resolved one or more fixups.
526   void popNullFixups();
527 
528   /// Clears the branch-fixups list.  This should only be called by
529   /// ResolveAllBranchFixups.
530   void clearFixups() { BranchFixups.clear(); }
531 };
532 
533 /// CodeGenFunction - This class organizes the per-function state that is used
534 /// while generating LLVM code.
535 class CodeGenFunction : public CodeGenTypeCache {
536   CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
537   void operator=(const CodeGenFunction&);  // DO NOT IMPLEMENT
538 
539   friend class CGCXXABI;
540 public:
541   /// A jump destination is an abstract label, branching to which may
542   /// require a jump out through normal cleanups.
543   struct JumpDest {
544     JumpDest() : Block(0), ScopeDepth(), Index(0) {}
545     JumpDest(llvm::BasicBlock *Block,
546              EHScopeStack::stable_iterator Depth,
547              unsigned Index)
548       : Block(Block), ScopeDepth(Depth), Index(Index) {}
549 
550     bool isValid() const { return Block != 0; }
551     llvm::BasicBlock *getBlock() const { return Block; }
552     EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
553     unsigned getDestIndex() const { return Index; }
554 
555   private:
556     llvm::BasicBlock *Block;
557     EHScopeStack::stable_iterator ScopeDepth;
558     unsigned Index;
559   };
560 
561   CodeGenModule &CGM;  // Per-module state.
562   const TargetInfo &Target;
563 
564   typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
565   CGBuilderTy Builder;
566 
567   /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
568   /// This excludes BlockDecls.
569   const Decl *CurFuncDecl;
570   /// CurCodeDecl - This is the inner-most code context, which includes blocks.
571   const Decl *CurCodeDecl;
572   const CGFunctionInfo *CurFnInfo;
573   QualType FnRetTy;
574   llvm::Function *CurFn;
575 
576   /// CurGD - The GlobalDecl for the current function being compiled.
577   GlobalDecl CurGD;
578 
579   /// PrologueCleanupDepth - The cleanup depth enclosing all the
580   /// cleanups associated with the parameters.
581   EHScopeStack::stable_iterator PrologueCleanupDepth;
582 
583   /// ReturnBlock - Unified return block.
584   JumpDest ReturnBlock;
585 
586   /// ReturnValue - The temporary alloca to hold the return value. This is null
587   /// iff the function has no return value.
588   llvm::Value *ReturnValue;
589 
590   /// AllocaInsertPoint - This is an instruction in the entry block before which
591   /// we prefer to insert allocas.
592   llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
593 
594   bool CatchUndefined;
595 
596   /// In ARC, whether we should autorelease the return value.
597   bool AutoreleaseResult;
598 
599   const CodeGen::CGBlockInfo *BlockInfo;
600   llvm::Value *BlockPointer;
601 
602   llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
603   FieldDecl *LambdaThisCaptureField;
604 
605   /// \brief A mapping from NRVO variables to the flags used to indicate
606   /// when the NRVO has been applied to this variable.
607   llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
608 
609   EHScopeStack EHStack;
610 
  /// An i32 alloca containing the index of the normal cleanup destination.
612   llvm::AllocaInst *NormalCleanupDest;
613 
614   unsigned NextCleanupDestIndex;
615 
  /// FirstBlockInfo - The head of a singly-linked list of block layouts.
617   CGBlockInfo *FirstBlockInfo;
618 
619   /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
620   llvm::BasicBlock *EHResumeBlock;
621 
622   /// The exception slot.  All landing pads write the current exception pointer
623   /// into this alloca.
624   llvm::Value *ExceptionSlot;
625 
626   /// The selector slot.  Under the MandatoryCleanup model, all landing pads
627   /// write the current selector value into this alloca.
628   llvm::AllocaInst *EHSelectorSlot;
629 
630   /// Emits a landing pad for the current EH stack.
631   llvm::BasicBlock *EmitLandingPad();
632 
633   llvm::BasicBlock *getInvokeDestImpl();
634 
635   template <class T>
636   typename DominatingValue<T>::saved_type saveValueInCond(T value) {
637     return DominatingValue<T>::save(*this, value);
638   }
639 
640 public:
641   /// ObjCEHValueStack - Stack of Objective-C exception values, used for
642   /// rethrows.
643   SmallVector<llvm::Value*, 8> ObjCEHValueStack;
644 
645   /// A class controlling the emission of a finally block.
646   class FinallyInfo {
647     /// Where the catchall's edge through the cleanup should go.
648     JumpDest RethrowDest;
649 
650     /// A function to call to enter the catch.
651     llvm::Constant *BeginCatchFn;
652 
653     /// An i1 variable indicating whether or not the @finally is
654     /// running for an exception.
655     llvm::AllocaInst *ForEHVar;
656 
657     /// An i8* variable into which the exception pointer to rethrow
658     /// has been saved.
659     llvm::AllocaInst *SavedExnVar;
660 
661   public:
662     void enter(CodeGenFunction &CGF, const Stmt *Finally,
663                llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
664                llvm::Constant *rethrowFn);
665     void exit(CodeGenFunction &CGF);
666   };
667 
668   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
669   /// current full-expression.  Safe against the possibility that
670   /// we're currently inside a conditionally-evaluated expression.
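  ///
  /// For illustration only (a sketch; 'ReleaseObject' names a hypothetical
  /// cleanup class taking a single llvm::Value* argument):
  ///     pushFullExprCleanup<ReleaseObject>(EHCleanup, objectAddr);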
671   template <class T, class A0>
672   void pushFullExprCleanup(CleanupKind kind, A0 a0) {
673     // If we're not in a conditional branch, or if none of the
674     // arguments requires saving, then use the unconditional cleanup.
675     if (!isInConditionalBranch())
676       return EHStack.pushCleanup<T>(kind, a0);
677 
678     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
679 
680     typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
681     EHStack.pushCleanup<CleanupType>(kind, a0_saved);
682     initFullExprCleanup();
683   }
684 
685   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
686   /// current full-expression.  Safe against the possibility that
687   /// we're currently inside a conditionally-evaluated expression.
688   template <class T, class A0, class A1>
689   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
690     // If we're not in a conditional branch, or if none of the
691     // arguments requires saving, then use the unconditional cleanup.
692     if (!isInConditionalBranch())
693       return EHStack.pushCleanup<T>(kind, a0, a1);
694 
695     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
696     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
697 
698     typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
699     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
700     initFullExprCleanup();
701   }
702 
703   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
704   /// current full-expression.  Safe against the possibility that
705   /// we're currently inside a conditionally-evaluated expression.
706   template <class T, class A0, class A1, class A2>
707   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
708     // If we're not in a conditional branch, or if none of the
709     // arguments requires saving, then use the unconditional cleanup.
710     if (!isInConditionalBranch()) {
711       return EHStack.pushCleanup<T>(kind, a0, a1, a2);
712     }
713 
714     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
715     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
716     typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
717 
718     typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
719     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
720     initFullExprCleanup();
721   }
722 
723   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
724   /// current full-expression.  Safe against the possibility that
725   /// we're currently inside a conditionally-evaluated expression.
726   template <class T, class A0, class A1, class A2, class A3>
727   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
728     // If we're not in a conditional branch, or if none of the
729     // arguments requires saving, then use the unconditional cleanup.
730     if (!isInConditionalBranch()) {
731       return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
732     }
733 
734     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
735     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
736     typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
737     typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
738 
739     typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
740     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
741                                      a2_saved, a3_saved);
742     initFullExprCleanup();
743   }
744 
  /// Set up the last cleanup that was pushed as a conditional
746   /// full-expression cleanup.
747   void initFullExprCleanup();
748 
749   /// PushDestructorCleanup - Push a cleanup to call the
750   /// complete-object destructor of an object of the given type at the
751   /// given address.  Does nothing if T is not a C++ class type with a
752   /// non-trivial destructor.
753   void PushDestructorCleanup(QualType T, llvm::Value *Addr);
754 
755   /// PushDestructorCleanup - Push a cleanup to call the
756   /// complete-object variant of the given destructor on the object at
757   /// the given address.
758   void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
759                              llvm::Value *Addr);
760 
761   /// PopCleanupBlock - Will pop the cleanup entry on the stack and
762   /// process all branch fixups.
763   void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
764 
765   /// DeactivateCleanupBlock - Deactivates the given cleanup block.
766   /// The block cannot be reactivated.  Pops it if it's the top of the
767   /// stack.
768   ///
769   /// \param DominatingIP - An instruction which is known to
770   ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and the
  ///   point at which the cleanup comes into scope.
773   void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
774                               llvm::Instruction *DominatingIP);
775 
776   /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
777   /// Cannot be used to resurrect a deactivated cleanup.
778   ///
779   /// \param DominatingIP - An instruction which is known to
780   ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and the
  ///   point at which the cleanup comes into scope.
783   void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
784                             llvm::Instruction *DominatingIP);
785 
786   /// \brief Enters a new scope for capturing cleanups, all of which
787   /// will be executed once the scope is exited.
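  ///
  /// For illustration, a sketch of the usual RAII pattern:
  ///     {
  ///       RunCleanupsScope Scope(*this);
  ///       EmitStmt(Body);        // cleanups pushed here ...
  ///     }                        // ... are emitted when Scope is destroyed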
788   class RunCleanupsScope {
789     EHScopeStack::stable_iterator CleanupStackDepth;
790     bool OldDidCallStackSave;
791     bool PerformCleanup;
792 
793     RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
794     RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
795 
796   protected:
797     CodeGenFunction& CGF;
798 
799   public:
800     /// \brief Enter a new cleanup scope.
801     explicit RunCleanupsScope(CodeGenFunction &CGF)
802       : PerformCleanup(true), CGF(CGF)
803     {
804       CleanupStackDepth = CGF.EHStack.stable_begin();
805       OldDidCallStackSave = CGF.DidCallStackSave;
806       CGF.DidCallStackSave = false;
807     }
808 
809     /// \brief Exit this cleanup scope, emitting any accumulated
810     /// cleanups.
811     ~RunCleanupsScope() {
812       if (PerformCleanup) {
813         CGF.DidCallStackSave = OldDidCallStackSave;
814         CGF.PopCleanupBlocks(CleanupStackDepth);
815       }
816     }
817 
818     /// \brief Determine whether this scope requires any cleanups.
819     bool requiresCleanups() const {
820       return CGF.EHStack.stable_begin() != CleanupStackDepth;
821     }
822 
823     /// \brief Force the emission of cleanups now, instead of waiting
824     /// until this object is destroyed.
825     void ForceCleanup() {
826       assert(PerformCleanup && "Already forced cleanup");
827       CGF.DidCallStackSave = OldDidCallStackSave;
828       CGF.PopCleanupBlocks(CleanupStackDepth);
829       PerformCleanup = false;
830     }
831   };
832 
833   class LexicalScope: protected RunCleanupsScope {
834     SourceRange Range;
835     bool PopDebugStack;
836 
837     LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
838     LexicalScope &operator=(const LexicalScope &);
839 
840   public:
841     /// \brief Enter a new cleanup scope.
842     explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
843       : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
844       if (CGDebugInfo *DI = CGF.getDebugInfo())
845         DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
846     }
847 
848     /// \brief Exit this cleanup scope, emitting any accumulated
849     /// cleanups.
850     ~LexicalScope() {
851       if (PopDebugStack) {
852         CGDebugInfo *DI = CGF.getDebugInfo();
853         if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
854       }
855     }
856 
857     /// \brief Force the emission of cleanups now, instead of waiting
858     /// until this object is destroyed.
859     void ForceCleanup() {
860       RunCleanupsScope::ForceCleanup();
861       if (CGDebugInfo *DI = CGF.getDebugInfo()) {
862         DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
863         PopDebugStack = false;
864       }
865     }
866   };
867 
868 
869   /// PopCleanupBlocks - Takes the old cleanup stack size and emits
870   /// the cleanup blocks that have been added.
871   void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
872 
873   void ResolveBranchFixups(llvm::BasicBlock *Target);
874 
875   /// The given basic block lies in the current EH scope, but may be a
876   /// target of a potentially scope-crossing jump; get a stable handle
877   /// to which we can perform this jump later.
878   JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
879     return JumpDest(Target,
880                     EHStack.getInnermostNormalCleanup(),
881                     NextCleanupDestIndex++);
882   }
883 
884   /// The given basic block lies in the current EH scope, but may be a
885   /// target of a potentially scope-crossing jump; get a stable handle
886   /// to which we can perform this jump later.
887   JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
888     return getJumpDestInCurrentScope(createBasicBlock(Name));
889   }
890 
891   /// EmitBranchThroughCleanup - Emit a branch from the current insert
892   /// block through the normal cleanup handling code (if any) and then
893   /// on to \arg Dest.
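  ///
  /// For illustration, roughly how a loop's break target is handled
  /// (a sketch; the block name is arbitrary):
  ///     JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
  ///     // ... while emitting the body, a 'break' becomes ...
  ///     EmitBranchThroughCleanup(LoopExit);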
894   void EmitBranchThroughCleanup(JumpDest Dest);
895 
896   /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
897   /// specified destination obviously has no cleanups to run.  'false' is always
898   /// a conservatively correct answer for this method.
899   bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
900 
901   /// popCatchScope - Pops the catch scope at the top of the EHScope
902   /// stack, emitting any required code (other than the catch handlers
903   /// themselves).
904   void popCatchScope();
905 
906   llvm::BasicBlock *getEHResumeBlock();
907   llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
908 
909   /// An object to manage conditionally-evaluated expressions.
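  ///
  /// For illustration, a sketch of the usual pattern (condition and branch
  /// emission elided):
  ///     ConditionalEvaluation eval(*this);
  ///     // ... emit the branch on the condition ...
  ///     eval.begin(*this);
  ///     // ... emit one conditionally-evaluated operand ...
  ///     eval.end(*this);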
910   class ConditionalEvaluation {
911     llvm::BasicBlock *StartBB;
912 
913   public:
914     ConditionalEvaluation(CodeGenFunction &CGF)
915       : StartBB(CGF.Builder.GetInsertBlock()) {}
916 
917     void begin(CodeGenFunction &CGF) {
918       assert(CGF.OutermostConditional != this);
919       if (!CGF.OutermostConditional)
920         CGF.OutermostConditional = this;
921     }
922 
923     void end(CodeGenFunction &CGF) {
924       assert(CGF.OutermostConditional != 0);
925       if (CGF.OutermostConditional == this)
926         CGF.OutermostConditional = 0;
927     }
928 
929     /// Returns a block which will be executed prior to each
930     /// evaluation of the conditional code.
931     llvm::BasicBlock *getStartingBlock() const {
932       return StartBB;
933     }
934   };
935 
936   /// isInConditionalBranch - Return true if we're currently emitting
937   /// one branch or the other of a conditional expression.
938   bool isInConditionalBranch() const { return OutermostConditional != 0; }
939 
940   void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
941     assert(isInConditionalBranch());
942     llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
943     new llvm::StoreInst(value, addr, &block->back());
944   }
945 
946   /// An RAII object to record that we're evaluating a statement
947   /// expression.
948   class StmtExprEvaluation {
949     CodeGenFunction &CGF;
950 
951     /// We have to save the outermost conditional: cleanups in a
952     /// statement expression aren't conditional just because the
953     /// StmtExpr is.
954     ConditionalEvaluation *SavedOutermostConditional;
955 
956   public:
957     StmtExprEvaluation(CodeGenFunction &CGF)
958       : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
959       CGF.OutermostConditional = 0;
960     }
961 
962     ~StmtExprEvaluation() {
963       CGF.OutermostConditional = SavedOutermostConditional;
964       CGF.EnsureInsertPoint();
965     }
966   };
967 
968   /// An object which temporarily prevents a value from being
969   /// destroyed by aggressive peephole optimizations that assume that
970   /// all uses of a value have been realized in the IR.
971   class PeepholeProtection {
972     llvm::Instruction *Inst;
973     friend class CodeGenFunction;
974 
975   public:
976     PeepholeProtection() : Inst(0) {}
977   };
978 
979   /// A non-RAII class containing all the information about a bound
980   /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
981   /// this which makes individual mappings very simple; using this
982   /// class directly is useful when you have a variable number of
983   /// opaque values or don't want the RAII functionality for some
984   /// reason.
985   class OpaqueValueMappingData {
986     const OpaqueValueExpr *OpaqueValue;
987     bool BoundLValue;
988     CodeGenFunction::PeepholeProtection Protection;
989 
990     OpaqueValueMappingData(const OpaqueValueExpr *ov,
991                            bool boundLValue)
992       : OpaqueValue(ov), BoundLValue(boundLValue) {}
993   public:
994     OpaqueValueMappingData() : OpaqueValue(0) {}
995 
996     static bool shouldBindAsLValue(const Expr *expr) {
997       // gl-values should be bound as l-values for obvious reasons.
998       // Records should be bound as l-values because IR generation
999       // always keeps them in memory.  Expressions of function type
1000       // act exactly like l-values but are formally required to be
1001       // r-values in C.
1002       return expr->isGLValue() ||
1003              expr->getType()->isRecordType() ||
1004              expr->getType()->isFunctionType();
1005     }
1006 
1007     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1008                                        const OpaqueValueExpr *ov,
1009                                        const Expr *e) {
1010       if (shouldBindAsLValue(ov))
1011         return bind(CGF, ov, CGF.EmitLValue(e));
1012       return bind(CGF, ov, CGF.EmitAnyExpr(e));
1013     }
1014 
1015     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1016                                        const OpaqueValueExpr *ov,
1017                                        const LValue &lv) {
1018       assert(shouldBindAsLValue(ov));
1019       CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1020       return OpaqueValueMappingData(ov, true);
1021     }
1022 
1023     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1024                                        const OpaqueValueExpr *ov,
1025                                        const RValue &rv) {
1026       assert(!shouldBindAsLValue(ov));
1027       CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1028 
1029       OpaqueValueMappingData data(ov, false);
1030 
1031       // Work around an extremely aggressive peephole optimization in
1032       // EmitScalarConversion which assumes that all other uses of a
1033       // value are extant.
1034       data.Protection = CGF.protectFromPeepholes(rv);
1035 
1036       return data;
1037     }
1038 
1039     bool isValid() const { return OpaqueValue != 0; }
1040     void clear() { OpaqueValue = 0; }
1041 
1042     void unbind(CodeGenFunction &CGF) {
1043       assert(OpaqueValue && "no data to unbind!");
1044 
1045       if (BoundLValue) {
1046         CGF.OpaqueLValues.erase(OpaqueValue);
1047       } else {
1048         CGF.OpaqueRValues.erase(OpaqueValue);
1049         CGF.unprotectFromPeepholes(Protection);
1050       }
1051     }
1052   };
1053 
1054   /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1055   class OpaqueValueMapping {
1056     CodeGenFunction &CGF;
1057     OpaqueValueMappingData Data;
1058 
1059   public:
1060     static bool shouldBindAsLValue(const Expr *expr) {
1061       return OpaqueValueMappingData::shouldBindAsLValue(expr);
1062     }
1063 
1064     /// Build the opaque value mapping for the given conditional
1065     /// operator if it's the GNU ?: extension.  This is a common
    /// enough pattern that this convenience constructor is really
1067     /// helpful.
1068     ///
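    /// For illustration (a sketch): while emitting 'x ?: y', the shared
    /// operand is bound so that uses of its OpaqueValueExpr in the true
    /// branch find the already-emitted value:
    ///     OpaqueValueMapping binding(CGF, E); // E: the conditional operator
    ///     // ... emit the branches; the mapping is undone on destruction ...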
1069     OpaqueValueMapping(CodeGenFunction &CGF,
1070                        const AbstractConditionalOperator *op) : CGF(CGF) {
1071       if (isa<ConditionalOperator>(op))
1072         // Leave Data empty.
1073         return;
1074 
1075       const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1076       Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1077                                           e->getCommon());
1078     }
1079 
1080     OpaqueValueMapping(CodeGenFunction &CGF,
1081                        const OpaqueValueExpr *opaqueValue,
1082                        LValue lvalue)
1083       : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1084     }
1085 
1086     OpaqueValueMapping(CodeGenFunction &CGF,
1087                        const OpaqueValueExpr *opaqueValue,
1088                        RValue rvalue)
1089       : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1090     }
1091 
1092     void pop() {
1093       Data.unbind(CGF);
1094       Data.clear();
1095     }
1096 
1097     ~OpaqueValueMapping() {
1098       if (Data.isValid()) Data.unbind(CGF);
1099     }
1100   };
1101 
  /// getByRefValueLLVMField - Given a __block variable's declaration, returns
  /// the LLVM field number that holds the value.
1104   unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
1105 
  /// BuildBlockByrefAddress - Computes the address of a variable
  /// declared as __block.
1108   llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
1109                                       const VarDecl *V);
1110 private:
1111   CGDebugInfo *DebugInfo;
1112   bool DisableDebugInfo;
1113 
1114   /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1115   /// calling llvm.stacksave for multiple VLAs in the same scope.
1116   bool DidCallStackSave;
1117 
1118   /// IndirectBranch - The first time an indirect goto is seen we create a block
1119   /// with an indirect branch.  Every time we see the address of a label taken,
1120   /// we add the label to the indirect goto.  Every subsequent indirect goto is
1121   /// codegen'd as a jump to the IndirectBranch's basic block.
1122   llvm::IndirectBrInst *IndirectBranch;
1123 
1124   /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1125   /// decls.
1126   typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
1127   DeclMapTy LocalDeclMap;
1128 
1129   /// LabelMap - This keeps track of the LLVM basic block for each C label.
1130   llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1131 
1132   // BreakContinueStack - This keeps track of where break and continue
1133   // statements should jump to.
1134   struct BreakContinue {
1135     BreakContinue(JumpDest Break, JumpDest Continue)
1136       : BreakBlock(Break), ContinueBlock(Continue) {}
1137 
1138     JumpDest BreakBlock;
1139     JumpDest ContinueBlock;
1140   };
1141   SmallVector<BreakContinue, 8> BreakContinueStack;
1142 
  /// SwitchInsn - The nearest enclosing switch instruction. It is null if the
  /// current context is not within a switch.
1145   llvm::SwitchInst *SwitchInsn;
1146 
  /// CaseRangeBlock - This block holds the condition check for the last
  /// case-statement range in the current switch instruction.
1149   llvm::BasicBlock *CaseRangeBlock;
1150 
1151   /// OpaqueLValues - Keeps track of the current set of opaque value
1152   /// expressions.
1153   llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1154   llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1155 
1156   // VLASizeMap - This keeps track of the associated size for each VLA type.
1157   // We track this by the size expression rather than the type itself because
  // in certain situations, like a const qualifier applied to a VLA typedef,
1159   // multiple VLA types can share the same size expression.
1160   // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1161   // enter/leave scopes.
1162   llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1163 
1164   /// A block containing a single 'unreachable' instruction.  Created
1165   /// lazily by getUnreachableBlock().
1166   llvm::BasicBlock *UnreachableBlock;
1167 
  /// CXXABIThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
1170   ImplicitParamDecl *CXXABIThisDecl;
1171   llvm::Value *CXXABIThisValue;
1172   llvm::Value *CXXThisValue;
1173 
1174   /// CXXVTTDecl - When generating code for a base object constructor or
1175   /// base object destructor with virtual bases, this will hold the implicit
1176   /// VTT parameter.
1177   ImplicitParamDecl *CXXVTTDecl;
1178   llvm::Value *CXXVTTValue;
1179 
1180   /// OutermostConditional - Points to the outermost active
1181   /// conditional control.  This is used so that we know if a
1182   /// temporary should be destroyed conditionally.
1183   ConditionalEvaluation *OutermostConditional;
1184 
1185 
  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
  /// type and the field number that holds the actual data.
1188   llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
1189                                               unsigned> > ByRefValueInfo;
1190 
1191   llvm::BasicBlock *TerminateLandingPad;
1192   llvm::BasicBlock *TerminateHandler;
1193   llvm::BasicBlock *TrapBB;
1194 
1195 public:
1196   CodeGenFunction(CodeGenModule &cgm);
1197   ~CodeGenFunction();
1198 
1199   CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1200   ASTContext &getContext() const { return CGM.getContext(); }
1201   CGDebugInfo *getDebugInfo() {
1202     if (DisableDebugInfo)
1203       return NULL;
1204     return DebugInfo;
1205   }
1206   void disableDebugInfo() { DisableDebugInfo = true; }
1207   void enableDebugInfo() { DisableDebugInfo = false; }
1208 
1209   bool shouldUseFusedARCCalls() {
1210     return CGM.getCodeGenOpts().OptimizationLevel == 0;
1211   }
1212 
1213   const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
1214 
  /// Returns a pointer to the function's exception object slot or selector
  /// slot, each of which is written by every landing pad.
1217   llvm::Value *getExceptionSlot();
1218   llvm::Value *getEHSelectorSlot();
1219 
1220   /// Returns the contents of the function's exception object and selector
1221   /// slots.
1222   llvm::Value *getExceptionFromSlot();
1223   llvm::Value *getSelectorFromSlot();
1224 
1225   llvm::Value *getNormalCleanupDestSlot();
1226 
1227   llvm::BasicBlock *getUnreachableBlock() {
1228     if (!UnreachableBlock) {
1229       UnreachableBlock = createBasicBlock("unreachable");
1230       new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1231     }
1232     return UnreachableBlock;
1233   }
1234 
1235   llvm::BasicBlock *getInvokeDest() {
1236     if (!EHStack.requiresLandingPad()) return 0;
1237     return getInvokeDestImpl();
1238   }
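
  // For illustration only (a sketch of how call emission chooses between
  // 'call' and 'invoke'; argument lists are abbreviated):
  //     if (llvm::BasicBlock *InvokeDest = getInvokeDest())
  //       Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args);
  //     else
  //       Builder.CreateCall(Callee, Args);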
1239 
1240   llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1241 
1242   //===--------------------------------------------------------------------===//
1243   //                                  Cleanups
1244   //===--------------------------------------------------------------------===//
1245 
1246   typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
1247 
1248   void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1249                                         llvm::Value *arrayEndPointer,
1250                                         QualType elementType,
1251                                         Destroyer *destroyer);
1252   void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1253                                       llvm::Value *arrayEnd,
1254                                       QualType elementType,
1255                                       Destroyer *destroyer);
1256 
1257   void pushDestroy(QualType::DestructionKind dtorKind,
1258                    llvm::Value *addr, QualType type);
1259   void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
1260                    Destroyer *destroyer, bool useEHCleanupForArray);
1261   void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
1262                    bool useEHCleanupForArray);
1263   llvm::Function *generateDestroyHelper(llvm::Constant *addr,
1264                                         QualType type,
1265                                         Destroyer *destroyer,
1266                                         bool useEHCleanupForArray);
1267   void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1268                         QualType type, Destroyer *destroyer,
1269                         bool checkZeroLength, bool useEHCleanup);
1270 
1271   Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1272 
1273   /// Determines whether an EH cleanup is required to destroy a type
1274   /// with the given destruction kind.
1275   bool needsEHCleanup(QualType::DestructionKind kind) {
1276     switch (kind) {
1277     case QualType::DK_none:
1278       return false;
1279     case QualType::DK_cxx_destructor:
1280     case QualType::DK_objc_weak_lifetime:
1281       return getLangOptions().Exceptions;
1282     case QualType::DK_objc_strong_lifetime:
1283       return getLangOptions().Exceptions &&
1284              CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1285     }
1286     llvm_unreachable("bad destruction kind");
1287   }
1288 
1289   CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1290     return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1291   }
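
  // For illustration only (a sketch), the usual pattern when scheduling the
  // destruction of a local of type 'type' living at 'addr':
  //     if (QualType::DestructionKind dtorKind = type.isDestructedType())
  //       pushDestroy(dtorKind, addr, type);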
1292 
1293   //===--------------------------------------------------------------------===//
1294   //                                  Objective-C
1295   //===--------------------------------------------------------------------===//
1296 
1297   void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1298 
1299   void StartObjCMethod(const ObjCMethodDecl *MD,
1300                        const ObjCContainerDecl *CD,
1301                        SourceLocation StartLoc);
1302 
1303   /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1304   void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1305                           const ObjCPropertyImplDecl *PID);
1306   void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1307                               const ObjCPropertyImplDecl *propImpl,
1308                               llvm::Constant *AtomicHelperFn);
1309 
1310   void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1311                                   ObjCMethodDecl *MD, bool ctor);
1312 
1313   /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1314   /// for the given property.
1315   void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1316                           const ObjCPropertyImplDecl *PID);
1317   void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1318                               const ObjCPropertyImplDecl *propImpl,
1319                               llvm::Constant *AtomicHelperFn);
1320   bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1321   bool IvarTypeWithAggrGCObjects(QualType Ty);
1322 
1323   //===--------------------------------------------------------------------===//
1324   //                                  Block Bits
1325   //===--------------------------------------------------------------------===//
1326 
1327   llvm::Value *EmitBlockLiteral(const BlockExpr *);
1328   llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1329   static void destroyBlockInfos(CGBlockInfo *info);
1330   llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1331                                            const CGBlockInfo &Info,
1332                                            llvm::StructType *,
1333                                            llvm::Constant *BlockVarLayout);
1334 
1335   llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1336                                         const CGBlockInfo &Info,
1337                                         const Decl *OuterFuncDecl,
1338                                         const DeclMapTy &ldm,
1339                                         bool IsLambdaConversionToBlock);
1340 
1341   llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1342   llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1343   llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1344                                              const ObjCPropertyImplDecl *PID);
1345   llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1346                                              const ObjCPropertyImplDecl *PID);
1347   llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1348 
1349   void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1350 
1351   class AutoVarEmission;
1352 
1353   void emitByrefStructureInit(const AutoVarEmission &emission);
1354   void enterByrefCleanup(const AutoVarEmission &emission);
1355 
1356   llvm::Value *LoadBlockStruct() {
1357     assert(BlockPointer && "no block pointer set!");
1358     return BlockPointer;
1359   }
1360 
1361   void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1362   void AllocateBlockDecl(const BlockDeclRefExpr *E);
1363   llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
1364     return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
1365   }
1366   llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1367   llvm::Type *BuildByRefType(const VarDecl *var);
1368 
1369   void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1370                     const CGFunctionInfo &FnInfo);
1371   void StartFunction(GlobalDecl GD, QualType RetTy,
1372                      llvm::Function *Fn,
1373                      const CGFunctionInfo &FnInfo,
1374                      const FunctionArgList &Args,
1375                      SourceLocation StartLoc);
1376 
1377   void EmitConstructorBody(FunctionArgList &Args);
1378   void EmitDestructorBody(FunctionArgList &Args);
1379   void EmitFunctionBody(FunctionArgList &Args);
1380 
1381   void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
1382                                   CallArgList &CallArgs);
1383   void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1384   void EmitLambdaBlockInvokeBody();
1385   void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1386   void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
1387 
1388   /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1389   /// emission when possible.
1390   void EmitReturnBlock();
1391 
1392   /// FinishFunction - Complete IR generation of the current function. It is
1393   /// legal to call this function even if there is no current insertion point.
1394   void FinishFunction(SourceLocation EndLoc=SourceLocation());
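
  // A minimal sketch of the per-function driver sequence; GenerateCode is
  // expected to wrap roughly this, assuming GD, Fn, FnInfo, the collected
  // FunctionArgList Args, the return type RetTy, and the body's source
  // locations are all in hand:
  //
  //   StartFunction(GD, RetTy, Fn, FnInfo, Args, BodyStartLoc);
  //   EmitFunctionBody(Args);
  //   FinishFunction(BodyEndLoc);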
1395 
1396   /// GenerateThunk - Generate a thunk for the given method.
1397   void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1398                      GlobalDecl GD, const ThunkInfo &Thunk);
1399 
1400   void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1401                             GlobalDecl GD, const ThunkInfo &Thunk);
1402 
1403   void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1404                         FunctionArgList &Args);
1405 
1406   void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1407                                ArrayRef<VarDecl *> ArrayIndexes);
1408 
1409   /// InitializeVTablePointer - Initialize the vtable pointer of the given
1410   /// subobject.
1411   ///
1412   void InitializeVTablePointer(BaseSubobject Base,
1413                                const CXXRecordDecl *NearestVBase,
1414                                CharUnits OffsetFromNearestVBase,
1415                                llvm::Constant *VTable,
1416                                const CXXRecordDecl *VTableClass);
1417 
1418   typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1419   void InitializeVTablePointers(BaseSubobject Base,
1420                                 const CXXRecordDecl *NearestVBase,
1421                                 CharUnits OffsetFromNearestVBase,
1422                                 bool BaseIsNonVirtualPrimaryBase,
1423                                 llvm::Constant *VTable,
1424                                 const CXXRecordDecl *VTableClass,
1425                                 VisitedVirtualBasesSetTy& VBases);
1426 
1427   void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1428 
1429   /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1430   /// to by This.
1431   llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1432 
1433   /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1434   /// given phase of destruction for a destructor.  The end result
1435   /// should call destructors on members and base classes in reverse
1436   /// order of their construction.
1437   void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1438 
1439   /// ShouldInstrumentFunction - Return true if the current function should be
1440   /// instrumented with __cyg_profile_func_* calls
1441   bool ShouldInstrumentFunction();
1442 
1443   /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1444   /// instrumentation function with the current function and the call site, if
1445   /// function instrumentation is enabled.
1446   void EmitFunctionInstrumentation(const char *Fn);
1447 
1448   /// EmitMCountInstrumentation - Emit call to .mcount.
1449   void EmitMCountInstrumentation();
1450 
1451   /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1452   /// arguments for the given function. This is also responsible for naming the
1453   /// LLVM function arguments.
1454   void EmitFunctionProlog(const CGFunctionInfo &FI,
1455                           llvm::Function *Fn,
1456                           const FunctionArgList &Args);
1457 
1458   /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1459   /// given temporary.
1460   void EmitFunctionEpilog(const CGFunctionInfo &FI);
1461 
1462   /// EmitStartEHSpec - Emit the start of the exception spec.
1463   void EmitStartEHSpec(const Decl *D);
1464 
1465   /// EmitEndEHSpec - Emit the end of the exception spec.
1466   void EmitEndEHSpec(const Decl *D);
1467 
1468   /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1469   llvm::BasicBlock *getTerminateLandingPad();
1470 
1471   /// getTerminateHandler - Return a handler (not a landing pad, just
1472   /// a catch handler) that just calls terminate.  This is used when
1473   /// a terminate scope encloses a try.
1474   llvm::BasicBlock *getTerminateHandler();
1475 
1476   llvm::Type *ConvertTypeForMem(QualType T);
1477   llvm::Type *ConvertType(QualType T);
1478   llvm::Type *ConvertType(const TypeDecl *T) {
1479     return ConvertType(getContext().getTypeDeclType(T));
1480   }
1481 
1482   /// LoadObjCSelf - Load the value of self. This function is only valid while
1483   /// generating code for an Objective-C method.
1484   llvm::Value *LoadObjCSelf();
1485 
1486   /// TypeOfSelfObject - Return type of object that this self represents.
1487   QualType TypeOfSelfObject();
1488 
1489   /// hasAggregateLLVMType - Return true if the specified AST type will map into
1490   /// an aggregate LLVM type or is void.
1491   static bool hasAggregateLLVMType(QualType T);
1492 
1493   /// createBasicBlock - Create an LLVM basic block.
1494   llvm::BasicBlock *createBasicBlock(StringRef name = "",
1495                                      llvm::Function *parent = 0,
1496                                      llvm::BasicBlock *before = 0) {
1497 #ifdef NDEBUG
1498     return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1499 #else
1500     return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1501 #endif
1502   }
1503 
  /// getJumpDestForLabel - Return the JumpDest that the specified label
  /// maps to.
1506   JumpDest getJumpDestForLabel(const LabelDecl *S);
1507 
1508   /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1509   /// another basic block, simplify it. This assumes that no other code could
1510   /// potentially reference the basic block.
1511   void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1512 
1513   /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1514   /// adding a fall-through branch from the current insert block if
1515   /// necessary. It is legal to call this function even if there is no current
1516   /// insertion point.
1517   ///
1518   /// IsFinished - If true, indicates that the caller has finished emitting
1519   /// branches to the given block and does not expect to emit code into it. This
1520   /// means the block can be ignored if it is unreachable.
1521   void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1522 
1523   /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1524   /// near its uses, and leave the insertion point in it.
1525   void EmitBlockAfterUses(llvm::BasicBlock *BB);
1526 
1527   /// EmitBranch - Emit a branch to the specified basic block from the current
1528   /// insert block, taking care to avoid creation of branches from dummy
1529   /// blocks. It is legal to call this function even if there is no current
1530   /// insertion point.
1531   ///
1532   /// This function clears the current insertion point. The caller should follow
  /// calls to this function with calls to Emit*Block prior to generating new
1534   /// code.
1535   void EmitBranch(llvm::BasicBlock *Block);
1536 
1537   /// HaveInsertPoint - True if an insertion point is defined. If not, this
1538   /// indicates that the current code being emitted is unreachable.
1539   bool HaveInsertPoint() const {
1540     return Builder.GetInsertBlock() != 0;
1541   }
1542 
1543   /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1544   /// emitted IR has a place to go. Note that by definition, if this function
1545   /// creates a block then that block is unreachable; callers may do better to
1546   /// detect when no insertion point is defined and simply skip IR generation.
1547   void EnsureInsertPoint() {
1548     if (!HaveInsertPoint())
1549       EmitBlock(createBasicBlock());
1550   }
1551 
1552   /// ErrorUnsupported - Print out an error that codegen doesn't support the
1553   /// specified stmt yet.
1554   void ErrorUnsupported(const Stmt *S, const char *Type,
1555                         bool OmitOnError=false);
1556 
1557   //===--------------------------------------------------------------------===//
1558   //                                  Helpers
1559   //===--------------------------------------------------------------------===//
1560 
1561   LValue MakeAddrLValue(llvm::Value *V, QualType T,
1562                         CharUnits Alignment = CharUnits()) {
1563     return LValue::MakeAddr(V, T, Alignment, getContext(),
1564                             CGM.getTBAAInfo(T));
1565   }
1566   LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1567     CharUnits Alignment;
1568     if (!T->isIncompleteType())
1569       Alignment = getContext().getTypeAlignInChars(T);
1570     return LValue::MakeAddr(V, T, Alignment, getContext(),
1571                             CGM.getTBAAInfo(T));
1572   }
1573 
  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
1575   /// block. The caller is responsible for setting an appropriate alignment on
1576   /// the alloca.
1577   llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
1578                                      const Twine &Name = "tmp");
1579 
1580   /// InitTempAlloca - Provide an initial value for the given alloca.
1581   void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1582 
1583   /// CreateIRTemp - Create a temporary IR object of the given type, with
  /// appropriate alignment. This routine should only be used when a temporary
1585   /// value needs to be stored into an alloca (for example, to avoid explicit
1586   /// PHI construction), but the type is the IR type, not the type appropriate
1587   /// for storing in memory.
1588   llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
1589 
1590   /// CreateMemTemp - Create a temporary memory object of the given type, with
1591   /// appropriate alignment.
1592   llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1593 
1594   /// CreateAggTemp - Create a temporary memory object for the given
1595   /// aggregate type.
1596   AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1597     CharUnits Alignment = getContext().getTypeAlignInChars(T);
1598     return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1599                                  T.getQualifiers(),
1600                                  AggValueSlot::IsNotDestructed,
1601                                  AggValueSlot::DoesNotNeedGCBarriers,
1602                                  AggValueSlot::IsNotAliased);
1603   }
1604 
1605   /// Emit a cast to void* in the appropriate address space.
1606   llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1607 
1608   /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1609   /// expression and compare the result against zero, returning an Int1Ty value.
1610   llvm::Value *EvaluateExprAsBool(const Expr *E);
1611 
1612   /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1613   void EmitIgnoredExpr(const Expr *E);
1614 
1615   /// EmitAnyExpr - Emit code to compute the specified expression which can have
1616   /// any type.  The result is returned as an RValue struct.  If this is an
1617   /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
1618   /// the result should be returned.
1619   ///
1620   /// \param IgnoreResult - True if the resulting value isn't used.
1621   RValue EmitAnyExpr(const Expr *E,
1622                      AggValueSlot AggSlot = AggValueSlot::ignored(),
1623                      bool IgnoreResult = false);
1624 
1625   // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1626   // or the value of the expression, depending on how va_list is defined.
1627   llvm::Value *EmitVAListRef(const Expr *E);
1628 
  /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
1630   /// always be accessible even if no aggregate location is provided.
1631   RValue EmitAnyExprToTemp(const Expr *E);
1632 
1633   /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1634   /// arbitrary expression into the given memory location.
1635   void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1636                         Qualifiers Quals, bool IsInitializer);
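
  // A minimal sketch, assuming `E` is an Expr* whose value is needed in
  // memory:
  //
  //   llvm::AllocaInst *mem = CreateMemTemp(E->getType());
  //   EmitAnyExprToMem(E, mem, E->getType().getQualifiers(),
  //                    /*IsInitializer*/ true);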
1637 
1638   /// EmitExprAsInit - Emits the code necessary to initialize a
1639   /// location in memory with the given initializer.
1640   void EmitExprAsInit(const Expr *init, const ValueDecl *D,
1641                       LValue lvalue, bool capturedByInit);
1642 
  /// EmitAggregateCopy - Emit an aggregate copy.
1644   ///
1645   /// \param isVolatile - True iff either the source or the destination is
1646   /// volatile.
1647   void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1648                          QualType EltTy, bool isVolatile=false,
1649                          unsigned Alignment = 0);
1650 
  /// StartBlock - Start a new block named N. If the insert block is a dummy
  /// block, reuse it.
1653   void StartBlock(const char *N);
1654 
1655   /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
1656   llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
1657     return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
1658   }
1659 
1660   /// GetAddrOfLocalVar - Return the address of a local variable.
1661   llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1662     llvm::Value *Res = LocalDeclMap[VD];
1663     assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1664     return Res;
1665   }
1666 
1667   /// getOpaqueLValueMapping - Given an opaque value expression (which
1668   /// must be mapped to an l-value), return its mapping.
1669   const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1670     assert(OpaqueValueMapping::shouldBindAsLValue(e));
1671 
1672     llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1673       it = OpaqueLValues.find(e);
1674     assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1675     return it->second;
1676   }
1677 
1678   /// getOpaqueRValueMapping - Given an opaque value expression (which
1679   /// must be mapped to an r-value), return its mapping.
1680   const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1681     assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1682 
1683     llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1684       it = OpaqueRValues.find(e);
1685     assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1686     return it->second;
1687   }
1688 
1689   /// getAccessedFieldNo - Given an encoded value and a result number, return
1690   /// the input field number being accessed.
1691   static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1692 
1693   llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1694   llvm::BasicBlock *GetIndirectGotoBlock();
1695 
1696   /// EmitNullInitialization - Generate code to set a value of the given type to
  /// null. If the type contains data member pointers, they will be initialized
1698   /// to -1 in accordance with the Itanium C++ ABI.
1699   void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1700 
1701   // EmitVAArg - Generate code to get an argument from the passed in pointer
1702   // and update it accordingly. The return value is a pointer to the argument.
1703   // FIXME: We should be able to get rid of this method and use the va_arg
1704   // instruction in LLVM instead once it works well enough.
1705   llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1706 
1707   /// emitArrayLength - Compute the length of an array, even if it's a
1708   /// VLA, and drill down to the base element type.
1709   llvm::Value *emitArrayLength(const ArrayType *arrayType,
1710                                QualType &baseType,
1711                                llvm::Value *&addr);
1712 
  /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions
  /// in the given variably-modified type and store them in the VLASizeMap.
1715   ///
1716   /// This function can be called with a null (unreachable) insert point.
1717   void EmitVariablyModifiedType(QualType Ty);
1718 
1719   /// getVLASize - Returns an LLVM value that corresponds to the size,
1720   /// in non-variably-sized elements, of a variable length array type,
  /// plus the largest non-variably-sized element type.  Assumes that
1722   /// the type has already been emitted with EmitVariablyModifiedType.
1723   std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
1724   std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
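
  // A minimal sketch, assuming `vlaTy` is a variably-modified QualType whose
  // sizes are live at the current insertion point:
  //
  //   EmitVariablyModifiedType(vlaTy);            // populates VLASizeMap
  //   std::pair<llvm::Value*, QualType> vla = getVLASize(vlaTy);
  //   llvm::Value *numElements = vla.first;       // runtime element count
  //   QualType elementType = vla.second;          // largest fixed-size element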
1725 
1726   /// LoadCXXThis - Load the value of 'this'. This function is only valid while
  /// generating code for a C++ member function.
1728   llvm::Value *LoadCXXThis() {
1729     assert(CXXThisValue && "no 'this' value for this function");
1730     return CXXThisValue;
1731   }
1732 
  /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors
  /// that have virtual bases.
1735   llvm::Value *LoadCXXVTT() {
1736     assert(CXXVTTValue && "no VTT value for this function");
1737     return CXXVTTValue;
1738   }
1739 
  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
  /// complete class to the given direct base.
1742   llvm::Value *
1743   GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1744                                         const CXXRecordDecl *Derived,
1745                                         const CXXRecordDecl *Base,
1746                                         bool BaseIsVirtual);
1747 
  /// GetAddressOfBaseClass - This function adds the necessary delta to the
  /// value of 'this' and returns the address of the base class.
1750   llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1751                                      const CXXRecordDecl *Derived,
1752                                      CastExpr::path_const_iterator PathBegin,
1753                                      CastExpr::path_const_iterator PathEnd,
1754                                      bool NullCheckValue);
1755 
1756   llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1757                                         const CXXRecordDecl *Derived,
1758                                         CastExpr::path_const_iterator PathBegin,
1759                                         CastExpr::path_const_iterator PathEnd,
1760                                         bool NullCheckValue);
1761 
1762   llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1763                                          const CXXRecordDecl *ClassDecl,
1764                                          const CXXRecordDecl *BaseClassDecl);
1765 
1766   void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1767                                       CXXCtorType CtorType,
1768                                       const FunctionArgList &Args);
1769   // It's important not to confuse this and the previous function. Delegating
1770   // constructors are the C++0x feature. The constructor delegate optimization
  // is used to reduce duplication in the base and complete constructors where
1772   // they are substantially the same.
1773   void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1774                                         const FunctionArgList &Args);
1775   void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1776                               bool ForVirtualBase, llvm::Value *This,
1777                               CallExpr::const_arg_iterator ArgBeg,
1778                               CallExpr::const_arg_iterator ArgEnd);
1779 
1780   void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1781                               llvm::Value *This, llvm::Value *Src,
1782                               CallExpr::const_arg_iterator ArgBeg,
1783                               CallExpr::const_arg_iterator ArgEnd);
1784 
1785   void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1786                                   const ConstantArrayType *ArrayTy,
1787                                   llvm::Value *ArrayPtr,
1788                                   CallExpr::const_arg_iterator ArgBeg,
1789                                   CallExpr::const_arg_iterator ArgEnd,
1790                                   bool ZeroInitialization = false);
1791 
1792   void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1793                                   llvm::Value *NumElements,
1794                                   llvm::Value *ArrayPtr,
1795                                   CallExpr::const_arg_iterator ArgBeg,
1796                                   CallExpr::const_arg_iterator ArgEnd,
1797                                   bool ZeroInitialization = false);
1798 
1799   static Destroyer destroyCXXObject;
1800 
1801   void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1802                              bool ForVirtualBase, llvm::Value *This);
1803 
1804   void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
1805                                llvm::Value *NewPtr, llvm::Value *NumElements);
1806 
1807   void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
1808                         llvm::Value *Ptr);
1809 
1810   llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1811   void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1812 
1813   void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1814                       QualType DeleteTy);
1815 
1816   llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1817   llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1818 
1819   void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
1820   void EmitStdInitializerListCleanup(llvm::Value *loc,
1821                                      const InitListExpr *init);
1822 
1823   void EmitCheck(llvm::Value *, unsigned Size);
1824 
1825   llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1826                                        bool isInc, bool isPre);
1827   ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1828                                          bool isInc, bool isPre);
1829   //===--------------------------------------------------------------------===//
1830   //                            Declaration Emission
1831   //===--------------------------------------------------------------------===//
1832 
1833   /// EmitDecl - Emit a declaration.
1834   ///
1835   /// This function can be called with a null (unreachable) insert point.
1836   void EmitDecl(const Decl &D);
1837 
1838   /// EmitVarDecl - Emit a local variable declaration.
1839   ///
1840   /// This function can be called with a null (unreachable) insert point.
1841   void EmitVarDecl(const VarDecl &D);
1842 
1843   void EmitScalarInit(const Expr *init, const ValueDecl *D,
1844                       LValue lvalue, bool capturedByInit);
1845   void EmitScalarInit(llvm::Value *init, LValue lvalue);
1846 
1847   typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1848                              llvm::Value *Address);
1849 
1850   /// EmitAutoVarDecl - Emit an auto variable declaration.
1851   ///
1852   /// This function can be called with a null (unreachable) insert point.
1853   void EmitAutoVarDecl(const VarDecl &D);
1854 
1855   class AutoVarEmission {
1856     friend class CodeGenFunction;
1857 
1858     const VarDecl *Variable;
1859 
1860     /// The alignment of the variable.
1861     CharUnits Alignment;
1862 
1863     /// The address of the alloca.  Null if the variable was emitted
1864     /// as a global constant.
1865     llvm::Value *Address;
1866 
1867     llvm::Value *NRVOFlag;
1868 
1869     /// True if the variable is a __block variable.
1870     bool IsByRef;
1871 
1872     /// True if the variable is of aggregate type and has a constant
1873     /// initializer.
1874     bool IsConstantAggregate;
1875 
1876     struct Invalid {};
1877     AutoVarEmission(Invalid) : Variable(0) {}
1878 
1879     AutoVarEmission(const VarDecl &variable)
1880       : Variable(&variable), Address(0), NRVOFlag(0),
1881         IsByRef(false), IsConstantAggregate(false) {}
1882 
1883     bool wasEmittedAsGlobal() const { return Address == 0; }
1884 
1885   public:
1886     static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
1887 
1888     /// Returns the address of the object within this declaration.
1889     /// Note that this does not chase the forwarding pointer for
1890     /// __block decls.
1891     llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1892       if (!IsByRef) return Address;
1893 
1894       return CGF.Builder.CreateStructGEP(Address,
1895                                          CGF.getByRefValueLLVMField(Variable),
1896                                          Variable->getNameAsString());
1897     }
1898   };
1899   AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
1900   void EmitAutoVarInit(const AutoVarEmission &emission);
1901   void EmitAutoVarCleanups(const AutoVarEmission &emission);
1902   void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1903                               QualType::DestructionKind dtorKind);
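
  // A minimal sketch of one natural composition of the three phases above,
  // assuming `D` is a local VarDecl being emitted:
  //
  //   AutoVarEmission emission = EmitAutoVarAlloca(D);
  //   EmitAutoVarInit(emission);
  //   EmitAutoVarCleanups(emission);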
1904 
1905   void EmitStaticVarDecl(const VarDecl &D,
1906                          llvm::GlobalValue::LinkageTypes Linkage);
1907 
1908   /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1909   void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
1910 
1911   /// protectFromPeepholes - Protect a value that we're intending to
1912   /// store to the side, but which will probably be used later, from
1913   /// aggressive peepholing optimizations that might delete it.
1914   ///
1915   /// Pass the result to unprotectFromPeepholes to declare that
1916   /// protection is no longer required.
1917   ///
1918   /// There's no particular reason why this shouldn't apply to
1919   /// l-values, it's just that no existing peepholes work on pointers.
1920   PeepholeProtection protectFromPeepholes(RValue rvalue);
1921   void unprotectFromPeepholes(PeepholeProtection protection);
1922 
1923   //===--------------------------------------------------------------------===//
1924   //                             Statement Emission
1925   //===--------------------------------------------------------------------===//
1926 
1927   /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1928   void EmitStopPoint(const Stmt *S);
1929 
1930   /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1931   /// this function even if there is no current insertion point.
1932   ///
1933   /// This function may clear the current insertion point; callers should use
1934   /// EnsureInsertPoint if they wish to subsequently generate code without first
1935   /// calling EmitBlock, EmitBranch, or EmitStmt.
1936   void EmitStmt(const Stmt *S);
1937 
1938   /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1939   /// necessarily require an insertion point or debug information; typically
1940   /// because the statement amounts to a jump or a container of other
1941   /// statements.
1942   ///
1943   /// \return True if the statement was handled.
1944   bool EmitSimpleStmt(const Stmt *S);
1945 
1946   RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
1947                           AggValueSlot AVS = AggValueSlot::ignored());
1948 
1949   /// EmitLabel - Emit the block for the given label. It is legal to call this
1950   /// function even if there is no current insertion point.
1951   void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
1952 
1953   void EmitLabelStmt(const LabelStmt &S);
1954   void EmitGotoStmt(const GotoStmt &S);
1955   void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
1956   void EmitIfStmt(const IfStmt &S);
1957   void EmitWhileStmt(const WhileStmt &S);
1958   void EmitDoStmt(const DoStmt &S);
1959   void EmitForStmt(const ForStmt &S);
1960   void EmitReturnStmt(const ReturnStmt &S);
1961   void EmitDeclStmt(const DeclStmt &S);
1962   void EmitBreakStmt(const BreakStmt &S);
1963   void EmitContinueStmt(const ContinueStmt &S);
1964   void EmitSwitchStmt(const SwitchStmt &S);
1965   void EmitDefaultStmt(const DefaultStmt &S);
1966   void EmitCaseStmt(const CaseStmt &S);
1967   void EmitCaseStmtRange(const CaseStmt &S);
1968   void EmitAsmStmt(const AsmStmt &S);
1969 
1970   void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
1971   void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
1972   void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
1973   void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
1974   void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
1975 
1976   llvm::Constant *getUnwindResumeFn();
1977   llvm::Constant *getUnwindResumeOrRethrowFn();
1978   void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1979   void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1980 
1981   void EmitCXXTryStmt(const CXXTryStmt &S);
1982   void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
1983 
1984   //===--------------------------------------------------------------------===//
1985   //                         LValue Expression Emission
1986   //===--------------------------------------------------------------------===//
1987 
1988   /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
1989   RValue GetUndefRValue(QualType Ty);
1990 
1991   /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
1992   /// and issue an ErrorUnsupported style diagnostic (using the
1993   /// provided Name).
1994   RValue EmitUnsupportedRValue(const Expr *E,
1995                                const char *Name);
1996 
1997   /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
1998   /// an ErrorUnsupported style diagnostic (using the provided Name).
1999   LValue EmitUnsupportedLValue(const Expr *E,
2000                                const char *Name);
2001 
2002   /// EmitLValue - Emit code to compute a designator that specifies the location
2003   /// of the expression.
2004   ///
2005   /// This can return one of two things: a simple address or a bitfield
2006   /// reference.  In either case, the LLVM Value* in the LValue structure is
2007   /// guaranteed to be an LLVM pointer type.
2008   ///
2009   /// If this returns a bitfield reference, nothing about the pointee type of
2010   /// the LLVM value is known: For example, it may not be a pointer to an
2011   /// integer.
2012   ///
2013   /// If this returns a normal address, and if the lvalue's C type is fixed
2014   /// size, this method guarantees that the returned pointer type will point to
  /// an LLVM type of the same size as the lvalue's type.  If the lvalue has a
2016   /// variable length type, this is not possible.
2017   ///
2018   LValue EmitLValue(const Expr *E);
2019 
2020   /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
2021   /// checking code to guard against undefined behavior.  This is only
2022   /// suitable when we know that the address will be used to access the
2023   /// object.
2024   LValue EmitCheckedLValue(const Expr *E);
2025 
2026   /// EmitToMemory - Change a scalar value from its value
2027   /// representation to its in-memory representation.
2028   llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
2029 
2030   /// EmitFromMemory - Change a scalar value from its memory
2031   /// representation to its value representation.
2032   llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
2033 
2034   /// EmitLoadOfScalar - Load a scalar value from an address, taking
2035   /// care to appropriately convert from the memory representation to
2036   /// the LLVM value representation.
2037   llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
2038                                 unsigned Alignment, QualType Ty,
2039                                 llvm::MDNode *TBAAInfo = 0);
2040 
2041   /// EmitLoadOfScalar - Load a scalar value from an address, taking
2042   /// care to appropriately convert from the memory representation to
2043   /// the LLVM value representation.  The l-value must be a simple
2044   /// l-value.
2045   llvm::Value *EmitLoadOfScalar(LValue lvalue);
2046 
2047   /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the LLVM value representation to
  /// the memory representation.
2050   void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
2051                          bool Volatile, unsigned Alignment, QualType Ty,
2052                          llvm::MDNode *TBAAInfo = 0, bool isInit=false);
2053 
2054   /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the LLVM value representation to
  /// the memory representation.  The l-value must be a simple
2057   /// l-value.  The isInit flag indicates whether this is an initialization.
2058   /// If so, atomic qualifiers are ignored and the store is always non-atomic.
2059   void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
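
  // A minimal sketch of a scalar load/store round-trip through a simple
  // l-value, assuming `addr` is an llvm::Value* pointer and `ty` the pointee
  // QualType:
  //
  //   LValue lv = MakeNaturalAlignAddrLValue(addr, ty);
  //   llvm::Value *v = EmitLoadOfScalar(lv);
  //   EmitStoreOfScalar(v, lv, /*isInit*/ false);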
2060 
  /// EmitLoadOfLValue - Given an already-emitted lvalue (for example, the
  /// result of EmitLValue), load the referenced value and return it as an
  /// rvalue.
2064   RValue EmitLoadOfLValue(LValue V);
2065   RValue EmitLoadOfExtVectorElementLValue(LValue V);
2066   RValue EmitLoadOfBitfieldLValue(LValue LV);
2067 
2068   /// EmitStoreThroughLValue - Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
2070   /// is 'Ty'.
2071   void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
2072   void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
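
  // A minimal sketch of an l-value based copy, assuming `lhs` and `rhs` are
  // Expr* designating objects of the same non-aggregate type:
  //
  //   LValue dst = EmitLValue(lhs);
  //   RValue src = EmitLoadOfLValue(EmitLValue(rhs));
  //   EmitStoreThroughLValue(src, dst);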
2073 
  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same
  /// constraints as EmitStoreThroughLValue.
2076   ///
2077   /// \param Result [out] - If non-null, this will be set to a Value* for the
2078   /// bit-field contents after the store, appropriate for use as the result of
2079   /// an assignment to the bit-field.
2080   void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2081                                       llvm::Value **Result=0);
2082 
2083   /// Emit an l-value for an assignment (simple or compound) of complex type.
2084   LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
2085   LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
2086 
2087   // Note: only available for agg return types
2088   LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
2089   LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
2090   // Note: only available for agg return types
2091   LValue EmitCallExprLValue(const CallExpr *E);
2092   // Note: only available for agg return types
2093   LValue EmitVAArgExprLValue(const VAArgExpr *E);
2094   LValue EmitDeclRefLValue(const DeclRefExpr *E);
2095   LValue EmitStringLiteralLValue(const StringLiteral *E);
2096   LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
2097   LValue EmitPredefinedLValue(const PredefinedExpr *E);
2098   LValue EmitUnaryOpLValue(const UnaryOperator *E);
2099   LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
2100   LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
2101   LValue EmitMemberExpr(const MemberExpr *E);
2102   LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
2103   LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
2104   LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
2105   LValue EmitCastLValue(const CastExpr *E);
2106   LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
2107   LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
2108   LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
2109 
2110   RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
2111                                 AggValueSlot slot = AggValueSlot::ignored());
2112   LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
2113 
2114   llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2115                               const ObjCIvarDecl *Ivar);
2116   LValue EmitLValueForAnonRecordField(llvm::Value* Base,
2117                                       const IndirectFieldDecl* Field,
2118                                       unsigned CVRQualifiers);
2119   LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
2120                             unsigned CVRQualifiers);
2121 
2122   /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
2123   /// if the Field is a reference, this will return the address of the reference
2124   /// and not the address of the value stored in the reference.
2125   LValue EmitLValueForFieldInitialization(llvm::Value* Base,
2126                                           const FieldDecl* Field,
2127                                           unsigned CVRQualifiers);
2128 
2129   LValue EmitLValueForIvar(QualType ObjectTy,
2130                            llvm::Value* Base, const ObjCIvarDecl *Ivar,
2131                            unsigned CVRQualifiers);
2132 
2133   LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
2134                                 unsigned CVRQualifiers);
2135 
2136   LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
2137 
2138   LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
2139   LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
2140   LValue EmitLambdaLValue(const LambdaExpr *E);
2141   LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
2142 
2143   LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
2144   LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
2145   LValue EmitStmtExprLValue(const StmtExpr *E);
2146   LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
2147   LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
2148   void   EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
2149 
2150   //===--------------------------------------------------------------------===//
2151   //                         Scalar Expression Emission
2152   //===--------------------------------------------------------------------===//
2153 
2154   /// EmitCall - Generate a call of the given function, expecting the given
2155   /// result type, and using the given argument list which specifies both the
2156   /// LLVM arguments and the types they were derived from.
2157   ///
2158   /// \param TargetDecl - If given, the decl of the function in a direct call;
2159   /// used to set attributes on the call (noreturn, etc.).
2160   RValue EmitCall(const CGFunctionInfo &FnInfo,
2161                   llvm::Value *Callee,
2162                   ReturnValueSlot ReturnValue,
2163                   const CallArgList &Args,
2164                   const Decl *TargetDecl = 0,
2165                   llvm::Instruction **callOrInvoke = 0);
2166 
2167   RValue EmitCall(QualType FnType, llvm::Value *Callee,
2168                   ReturnValueSlot ReturnValue,
2169                   CallExpr::const_arg_iterator ArgBeg,
2170                   CallExpr::const_arg_iterator ArgEnd,
2171                   const Decl *TargetDecl = 0);
2172   RValue EmitCallExpr(const CallExpr *E,
2173                       ReturnValueSlot ReturnValue = ReturnValueSlot());
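
  // A minimal sketch of the QualType-based overload, assuming `CE` is a
  // const CallExpr* and `calleeV` the already-emitted llvm::Value* callee:
  //
  //   RValue result = EmitCall(CE->getCallee()->getType(), calleeV,
  //                            ReturnValueSlot(),
  //                            CE->arg_begin(), CE->arg_end());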
2174 
2175   llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2176                                   ArrayRef<llvm::Value *> Args,
2177                                   const Twine &Name = "");
2178   llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2179                                   const Twine &Name = "");
2180 
2181   llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
2182                                 llvm::Type *Ty);
2183   llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
2184                                 llvm::Value *This, llvm::Type *Ty);
2185   llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
2186                                          NestedNameSpecifier *Qual,
2187                                          llvm::Type *Ty);
2188 
2189   llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
2190                                                    CXXDtorType Type,
2191                                                    const CXXRecordDecl *RD);
2192 
2193   RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
2194                            llvm::Value *Callee,
2195                            ReturnValueSlot ReturnValue,
2196                            llvm::Value *This,
2197                            llvm::Value *VTT,
2198                            CallExpr::const_arg_iterator ArgBeg,
2199                            CallExpr::const_arg_iterator ArgEnd);
2200   RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
2201                                ReturnValueSlot ReturnValue);
2202   RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
2203                                       ReturnValueSlot ReturnValue);
2204 
2205   llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2206                                            const CXXMethodDecl *MD,
2207                                            llvm::Value *This);
2208   RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
2209                                        const CXXMethodDecl *MD,
2210                                        ReturnValueSlot ReturnValue);
2211 
2212   RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
2213                                 ReturnValueSlot ReturnValue);
2214 
2215 
2216   RValue EmitBuiltinExpr(const FunctionDecl *FD,
2217                          unsigned BuiltinID, const CallExpr *E);
2218 
2219   RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
2220 
2221   /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
2222   /// is unhandled by the current target.
2223   llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2224 
2225   llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2226   llvm::Value *EmitNeonCall(llvm::Function *F,
2227                             SmallVectorImpl<llvm::Value*> &O,
2228                             const char *name,
2229                             unsigned shift = 0, bool rightshift = false);
2230   llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
2231   llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
2232                                    bool negateForRightShift);
2233 
2234   llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
2235   llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2236   llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2237   llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2238 
2239   llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
2240   llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
2241   llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
2242   RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
2243                              ReturnValueSlot Return = ReturnValueSlot());
2244 
2245   /// Retrieves the default cleanup kind for an ARC cleanup.
2246   /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2247   CleanupKind getARCCleanupKind() {
2248     return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2249              ? NormalAndEHCleanup : NormalCleanup;
2250   }
2251 
2252   // ARC primitives.
2253   void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
2254   void EmitARCDestroyWeak(llvm::Value *addr);
2255   llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
2256   llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
2257   llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
2258                                 bool ignored);
2259   void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
2260   void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
2261   llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
2262   llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
2263   llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
2264                                   bool ignored);
2265   llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
2266                                       bool ignored);
2267   llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
2268   llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
2269   llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
2270   void EmitARCRelease(llvm::Value *value, bool precise);
2271   llvm::Value *EmitARCAutorelease(llvm::Value *value);
2272   llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
2273   llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
2274   llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
2275 
2276   std::pair<LValue,llvm::Value*>
2277   EmitARCStoreAutoreleasing(const BinaryOperator *e);
2278   std::pair<LValue,llvm::Value*>
2279   EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
2280 
2281   llvm::Value *EmitObjCThrowOperand(const Expr *expr);
2282 
2283   llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
2284   llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
2285   llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
2286 
2287   llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
2288   llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
2289   llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
2290 
2291   static Destroyer destroyARCStrongImprecise;
2292   static Destroyer destroyARCStrongPrecise;
2293   static Destroyer destroyARCWeak;
2294 
2295   void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
2296   llvm::Value *EmitObjCAutoreleasePoolPush();
2297   llvm::Value *EmitObjCMRRAutoreleasePoolPush();
2298   void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
2299   void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
2300 
2301   /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
2302   /// expression. Will emit a temporary variable if E is not an LValue.
2303   RValue EmitReferenceBindingToExpr(const Expr* E,
2304                                     const NamedDecl *InitializedDecl);
2305 
2306   //===--------------------------------------------------------------------===//
2307   //                           Expression Emission
2308   //===--------------------------------------------------------------------===//
2309 
2310   // Expressions are broken into three classes: scalar, complex, aggregate.
2311 
2312   /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2313   /// scalar type, returning the result.
2314   llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2315 
2316   /// EmitScalarConversion - Emit a conversion from the specified type to the
2317   /// specified destination type, both of which are LLVM scalar types.
2318   llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2319                                     QualType DstTy);
2320 
2321   /// EmitComplexToScalarConversion - Emit a conversion from the specified
2322   /// complex type to the specified destination type, where the destination type
2323   /// is an LLVM scalar type.
2324   llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2325                                              QualType DstTy);
2326 
2327 
2328   /// EmitAggExpr - Emit the computation of the specified expression
2329   /// of aggregate type.  The result is computed into the given slot,
2330   /// which may be null to indicate that the value is not needed.
2331   void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
2332 
2333   /// EmitAggExprToLValue - Emit the computation of the specified expression of
2334   /// aggregate type into a temporary LValue.
2335   LValue EmitAggExprToLValue(const Expr *E);
2336 
  /// EmitGCMemmoveCollectable - Emit a call to the special memmove API used
  /// for structs that contain object pointers under Objective-C garbage
  /// collection.
2339   void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2340                                 QualType Ty);
2341 
2342   /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2343   /// make sure it survives garbage collection until this point.
2344   void EmitExtendGCLifetime(llvm::Value *object);
2345 
2346   /// EmitComplexExpr - Emit the computation of the specified expression of
2347   /// complex type, returning the result.
2348   ComplexPairTy EmitComplexExpr(const Expr *E,
2349                                 bool IgnoreReal = false,
2350                                 bool IgnoreImag = false);
2351 
2352   /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
2353   /// of complex type, storing into the specified Value*.
2354   void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
2355                                bool DestIsVolatile);
2356 
2357   /// StoreComplexToAddr - Store a complex number into the specified address.
2358   void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
2359                           bool DestIsVolatile);
2360   /// LoadComplexFromAddr - Load a complex number from the specified address.
2361   ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
2362 
2363   /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2364   /// a static local variable.
2365   llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
2366                                             const char *Separator,
2367                                        llvm::GlobalValue::LinkageTypes Linkage);
2368 
2369   /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2370   /// global variable that has already been created for it.  If the initializer
2371   /// has a different type than GV does, this may free GV and return a different
2372   /// one.  Otherwise it just returns GV.
2373   llvm::GlobalVariable *
2374   AddInitializerToStaticVarDecl(const VarDecl &D,
2375                                 llvm::GlobalVariable *GV);
2376 
2377 
2378   /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2379   /// variable with global storage.
2380   void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2381                                 bool PerformInit);
2382 
2383   /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
2384   /// with the C++ runtime so that its destructor will be called at exit.
2385   void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
2386                                      llvm::Constant *DeclPtr);
2387 
2388   /// Emit code in this function to perform a guarded variable
2389   /// initialization.  Guarded initializations are used when it's not
2390   /// possible to prove that an initialization will be done exactly
2391   /// once, e.g. with a static local variable or a static data member
2392   /// of a class template.
2393   void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2394                           bool PerformInit);
2395 
2396   /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2397   /// variables.
2398   void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2399                                  llvm::Constant **Decls,
2400                                  unsigned NumDecls);
2401 
2402   /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
2403   /// variables.
2404   void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
2405                                  const std::vector<std::pair<llvm::WeakVH,
2406                                    llvm::Constant*> > &DtorsAndObjects);
2407 
2408   void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2409                                         const VarDecl *D,
2410                                         llvm::GlobalVariable *Addr,
2411                                         bool PerformInit);
2412 
2413   void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2414 
2415   void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
2416                                   const Expr *Exp);
2417 
2418   void enterFullExpression(const ExprWithCleanups *E) {
2419     if (E->getNumObjects() == 0) return;
2420     enterNonTrivialFullExpression(E);
2421   }
2422   void enterNonTrivialFullExpression(const ExprWithCleanups *E);
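
  // A minimal sketch, assuming `cleanups` is a const ExprWithCleanups* being
  // emitted inside an enclosing cleanup scope:
  //
  //   enterFullExpression(cleanups);
  //   RValue RV = EmitAnyExpr(cleanups->getSubExpr());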
2423 
2424   void EmitCXXThrowExpr(const CXXThrowExpr *E);
2425 
2426   void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
2427 
2428   RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
2429 
2430   //===--------------------------------------------------------------------===//
2431   //                         Annotations Emission
2432   //===--------------------------------------------------------------------===//
2433 
2434   /// Emit an annotation call (intrinsic or builtin).
2435   llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
2436                                   llvm::Value *AnnotatedVal,
2437                                   llvm::StringRef AnnotationStr,
2438                                   SourceLocation Location);
2439 
2440   /// Emit local annotations for the local variable V, declared by D.
2441   void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
2442 
2443   /// Emit field annotations for the given field & value. Returns the
2444   /// annotation result.
2445   llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
2446 
2447   //===--------------------------------------------------------------------===//
2448   //                             Internal Helpers
2449   //===--------------------------------------------------------------------===//
2450 
2451   /// ContainsLabel - Return true if the statement contains a label in it.  If
  /// this statement is not executed normally, the absence of a label means
  /// that we can just remove the code.
2454   static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
2455 
2456   /// containsBreak - Return true if the statement contains a break out of it.
2457   /// If the statement (recursively) contains a switch or loop with a break
2458   /// inside of it, this is fine.
2459   static bool containsBreak(const Stmt *S);
2460 
2461   /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2462   /// to a constant, or if it does but contains a label, return false.  If it
  /// constant folds, return true and set the boolean result in Result.
2464   bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
2465 
2466   /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2467   /// to a constant, or if it does but contains a label, return false.  If it
  /// constant folds, return true and set the folded value.
2469   bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
2470 
2471   /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2472   /// if statement) to the specified blocks.  Based on the condition, this might
2473   /// try to simplify the codegen of the conditional based on the branch.
2474   void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2475                             llvm::BasicBlock *FalseBlock);
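
  // A minimal sketch of condition emission for an `if`, assuming `Cond` is
  // the condition Expr* and `thenBB`/`elseBB` were made with createBasicBlock:
  //
  //   bool condConstant;
  //   if (ConstantFoldsToSimpleInteger(Cond, condConstant)) {
  //     // Emit only the statically taken arm (the dead arm still needs a
  //     // ContainsLabel check before it can be dropped entirely).
  //   } else {
  //     EmitBranchOnBoolExpr(Cond, thenBB, elseBB);
  //   }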

  /// getTrapBB - Create a basic block that will call the trap intrinsic.  We'll
  /// generate a branch around the created basic block as necessary.
  llvm::BasicBlock *getTrapBB();

  /// EmitCallArg - Emit a single call argument.
  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);

  /// EmitDelegateCallArg - We are performing a delegate call; that
  /// is, the current function is delegating to another one.  Produce
  /// an r-value suitable for passing the given parameter.
  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);

  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
  /// point operation, expressed as the maximum relative error in ulp.
  void SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN,
                     unsigned AccuracyD = 1);
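
  // A sketch of the intended encoding, reading the two operands as the
  // rational AccuracyN / AccuracyD: a 2.5 ulp bound (e.g. the OpenCL
  // single-precision divide requirement) would be requested as
  //
  //   SetFPAccuracy(Val, 5, 2);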

private:
  void EmitReturnOfRValue(RValue RV, QualType Ty);

  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
  ///
  /// \param AI - The first function argument of the expansion.
  /// \return The argument following the last expanded function
  /// argument.
  llvm::Function::arg_iterator
  ExpandTypeFromArgs(QualType Ty, LValue Dst,
                     llvm::Function::arg_iterator AI);

  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
  /// Ty, into individual arguments on the provided vector \arg Args. See
  /// ABIArgInfo::Expand.
  void ExpandTypeToArgs(QualType Ty, RValue Src,
                        SmallVector<llvm::Value*, 16> &Args,
                        llvm::FunctionType *IRFuncTy);
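
  // A sketch of what "Expand" means here, for a hypothetical argument type:
  //
  //   struct Pt { int x; float y; };
  //   void f(struct Pt p);
  //
  // The aggregate may be passed as two separate IR arguments (an i32 and a
  // float); ExpandTypeFromArgs stitches them back into an LValue for the
  // parameter, and ExpandTypeToArgs flattens an RValue out into Args.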

  llvm::Value *EmitAsmInput(const AsmStmt &S,
                            const TargetInfo::ConstraintInfo &Info,
                            const Expr *InputExpr, std::string &ConstraintStr);

  llvm::Value *EmitAsmInputLValue(const AsmStmt &S,
                                  const TargetInfo::ConstraintInfo &Info,
                                  LValue InputValue, QualType InputType,
                                  std::string &ConstraintStr);

  /// EmitCallArgs - Emit call arguments for a function.
  /// The CallArgTypeInfo parameter is used for iterating over the known
  /// argument types of the function being called.
  template<typename T>
  void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
                    CallExpr::const_arg_iterator ArgBeg,
                    CallExpr::const_arg_iterator ArgEnd) {
    CallExpr::const_arg_iterator Arg = ArgBeg;

    // First, use the argument types that the type info knows about.
    if (CallArgTypeInfo) {
      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
        assert(Arg != ArgEnd && "Running over edge of argument list!");
        QualType ArgType = *I;
#ifndef NDEBUG
        QualType ActualArgType = Arg->getType();
        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
          QualType ActualBaseType =
            ActualArgType->getAs<PointerType>()->getPointeeType();
          QualType ArgBaseType =
            ArgType->getAs<PointerType>()->getPointeeType();
          if (ArgBaseType->isVariableArrayType()) {
            if (const VariableArrayType *VAT =
                getContext().getAsVariableArrayType(ActualBaseType)) {
              if (!VAT->getSizeExpr())
                ActualArgType = ArgType;
            }
          }
        }
        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
               getTypePtr() ==
               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
               "type mismatch in call argument!");
#endif
        EmitCallArg(Args, *Arg, ArgType);
      }

      // Either we've emitted all the call args, or we have a call to a
      // variadic function.
      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
             "Extra arguments in non-variadic function!");
    }

    // If we still have any arguments, emit them using the type of the argument.
    for (; Arg != ArgEnd; ++Arg)
      EmitCallArg(Args, *Arg, Arg->getType());
  }
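
  // A sketch of a typical instantiation, assuming a prototyped callee and a
  // CallExpr CE:
  //
  //   CallArgList Args;
  //   const FunctionProtoType *FPT = ...;
  //   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());
  //
  // Known parameter types come from the prototype; any trailing variadic
  // arguments fall through to the final loop and use their own types.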

  const TargetCodeGenInfo &getTargetHooks() const {
    return CGM.getTargetCodeGenInfo();
  }

  void EmitDeclMetadata();

  CodeGenModule::ByrefHelpers *
  buildByrefHelpers(llvm::StructType &byrefType,
                    const AutoVarEmission &emission);

  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
};

/// Helper class with most of the code for saving a value for a
/// conditional expression cleanup.
struct DominatingLLVMValue {
  typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;

  /// Answer whether the given value needs extra work to be saved.
  static bool needsSaving(llvm::Value *value) {
    // If it's not an instruction, we don't need to save.
    if (!isa<llvm::Instruction>(value)) return false;

    // If it's an instruction in the entry block, we don't need to save.
    llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
    return (block != &block->getParent()->getEntryBlock());
  }

  /// Try to save the given value.
  static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
    if (!needsSaving(value)) return saved_type(value, false);

    // Otherwise we need an alloca.
    llvm::Value *alloca =
      CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
    CGF.Builder.CreateStore(value, alloca);

    return saved_type(alloca, true);
  }

  static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
    if (!value.getInt()) return value.getPointer();
    return CGF.Builder.CreateLoad(value.getPointer());
  }
};
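
// A note on why the spill above is needed: a value computed in a
// conditionally emitted block does not dominate the cleanup code that may
// run later, so save() stores it to a "cond-cleanup.save" alloca and
// restore() reloads it where the cleanup actually uses it.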

/// A partial specialization of DominatingPointer for pointers to
/// llvm::Values that might be llvm::Instructions.
template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
  typedef T *type;
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
  }
};

/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
  typedef RValue type;
  class saved_type {
    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
                AggregateAddress, ComplexAddress };

    llvm::Value *Value;
    Kind K;
    saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}

  public:
    static bool needsSaving(RValue value);
    static saved_type save(CodeGenFunction &CGF, RValue value);
    RValue restore(CodeGenFunction &CGF);

    // implementations in CGExprCXX.cpp
  };

  static bool needsSaving(type value) {
    return saved_type::needsSaving(value);
  }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return saved_type::save(CGF, value);
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return value.restore(CGF);
  }
};

}  // end namespace CodeGen
}  // end namespace clang

#endif