1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the internal per-function state used for llvm translation.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15 #define CLANG_CODEGEN_CODEGENFUNCTION_H
16 
17 #include "CGBuilder.h"
18 #include "CGDebugInfo.h"
19 #include "CGValue.h"
20 #include "CodeGenModule.h"
21 #include "clang/AST/CharUnits.h"
22 #include "clang/AST/ExprCXX.h"
23 #include "clang/AST/ExprObjC.h"
24 #include "clang/AST/Type.h"
25 #include "clang/Basic/ABI.h"
26 #include "clang/Basic/TargetInfo.h"
27 #include "clang/Frontend/CodeGenOptions.h"
28 #include "llvm/ADT/ArrayRef.h"
29 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ValueHandle.h"
33 
34 namespace llvm {
35   class BasicBlock;
36   class LLVMContext;
37   class MDNode;
38   class Module;
39   class SwitchInst;
40   class Twine;
41   class Value;
42   class CallSite;
43 }
44 
45 namespace clang {
46   class ASTContext;
47   class BlockDecl;
48   class CXXDestructorDecl;
49   class CXXForRangeStmt;
50   class CXXTryStmt;
51   class Decl;
52   class LabelDecl;
53   class EnumConstantDecl;
54   class FunctionDecl;
55   class FunctionProtoType;
56   class LabelStmt;
57   class ObjCContainerDecl;
58   class ObjCInterfaceDecl;
59   class ObjCIvarDecl;
60   class ObjCMethodDecl;
61   class ObjCImplementationDecl;
62   class ObjCPropertyImplDecl;
63   class TargetInfo;
64   class TargetCodeGenInfo;
65   class VarDecl;
66   class ObjCForCollectionStmt;
67   class ObjCAtTryStmt;
68   class ObjCAtThrowStmt;
69   class ObjCAtSynchronizedStmt;
70   class ObjCAutoreleasePoolStmt;
71 
72 namespace CodeGen {
73   class CodeGenTypes;
74   class CGFunctionInfo;
75   class CGRecordLayout;
76   class CGBlockInfo;
77   class CGCXXABI;
78   class BlockFlags;
79   class BlockFieldFlags;
80 
81 /// A branch fixup.  These are required when emitting a goto to a
82 /// label which hasn't been emitted yet.  The goto is optimistically
83 /// emitted as a branch to the basic block for the label, and (if it
84 /// occurs in a scope with non-trivial cleanups) a fixup is added to
85 /// the innermost cleanup.  When a (normal) cleanup is popped, any
86 /// unresolved fixups in that scope are threaded through the cleanup.
/// A branch fixup.  These are required when emitting a goto to a
/// label which hasn't been emitted yet.  The goto is optimistically
/// emitted as a branch to the basic block for the label, and (if it
/// occurs in a scope with non-trivial cleanups) a fixup is added to
/// the innermost cleanup.  When a (normal) cleanup is popped, any
/// unresolved fixups in that scope are threaded through the cleanup.
struct BranchFixup {
  /// The block containing the terminator which needs to be modified
  /// into a switch if this fixup is resolved into the current scope.
  /// If null, InitialBranch points directly to the destination.
  llvm::BasicBlock *OptimisticBranchBlock;

  /// The ultimate destination of the branch.
  ///
  /// This can be set to null to indicate that this fixup was
  /// successfully resolved.
  llvm::BasicBlock *Destination;

  /// The destination index value.
  unsigned DestinationIndex;

  /// The initial branch of the fixup.
  llvm::BranchInst *InitialBranch;
};
105 
/// A value-saving policy for values which are trivially invariant:
/// they never need saving, and save/restore are the identity.
/// Used as the default base for DominatingValue below.
template <class T> struct InvariantValue {
  typedef T type;
  typedef T saved_type;
  static bool needsSaving(type value) { return false; }
  static saved_type save(CodeGenFunction &CGF, type value) { return value; }
  static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
};
113 
/// A metaprogramming class for ensuring that a value will dominate an
/// arbitrary position in a function.  By default values are assumed
/// invariant; specializations below handle pointers specially.
template <class T> struct DominatingValue : InvariantValue<T> {};

// Pointers to instructions might not dominate an arbitrary position,
// so select a different policy for them.  Constants and basic blocks
// always dominate and remain invariant.
template <class T, bool mightBeInstruction =
            llvm::is_base_of<llvm::Value, T>::value &&
            !llvm::is_base_of<llvm::Constant, T>::value &&
            !llvm::is_base_of<llvm::BasicBlock, T>::value>
struct DominatingPointer;
template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
// template <class T> struct DominatingPointer<T,true> at end of file

template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
127 
/// The kind of cleanup being pushed: whether it fires on exceptional
/// exits, normal exits, or both, and whether it starts out inactive.
enum CleanupKind {
  EHCleanup = 0x1,           // run only on exceptional (EH) exits
  NormalCleanup = 0x2,       // run only on normal control-flow exits
  NormalAndEHCleanup = EHCleanup | NormalCleanup,

  InactiveCleanup = 0x4,     // pushed deactivated; must be activated later
  InactiveEHCleanup = EHCleanup | InactiveCleanup,
  InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
  InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
};
138 
139 /// A stack of scopes which respond to exceptions, including cleanups
140 /// and catch blocks.
/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
public:
  /// A saved depth on the scope stack.  This is necessary because
  /// pushing scopes onto the stack invalidates iterators.
  class stable_iterator {
    friend class EHScopeStack;

    /// Offset from StartOfData to EndOfBuffer.
    ptrdiff_t Size;

    stable_iterator(ptrdiff_t Size) : Size(Size) {}

  public:
    static stable_iterator invalid() { return stable_iterator(-1); }
    stable_iterator() : Size(-1) {}

    bool isValid() const { return Size >= 0; }

    /// Returns true if this scope encloses I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool encloses(stable_iterator I) const { return Size <= I.Size; }

    /// Returns true if this scope strictly encloses I: that is,
    /// if it encloses I and is not I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }

    friend bool operator==(stable_iterator A, stable_iterator B) {
      return A.Size == B.Size;
    }
    friend bool operator!=(stable_iterator A, stable_iterator B) {
      return A.Size != B.Size;
    }
  };

  /// Information for lazily generating a cleanup.  Subclasses must be
  /// POD-like: cleanups will not be destructed, and they will be
  /// allocated on the cleanup stack and freely copied and moved
  /// around.
  ///
  /// Cleanup implementations should generally be declared in an
  /// anonymous namespace.
  class Cleanup {
    // Anchor the construction vtable.
    virtual void anchor();
  public:
    /// Generation flags.
    class Flags {
      enum {
        F_IsForEH             = 0x1,
        F_IsNormalCleanupKind = 0x2,
        F_IsEHCleanupKind     = 0x4
      };
      unsigned flags;

    public:
      Flags() : flags(0) {}

      /// isForEH - true if the current emission is for an EH cleanup.
      bool isForEHCleanup() const { return flags & F_IsForEH; }
      bool isForNormalCleanup() const { return !isForEHCleanup(); }
      void setIsForEHCleanup() { flags |= F_IsForEH; }

      bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }

      /// isEHCleanupKind - true if the cleanup was pushed as an EH
      /// cleanup.
      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
    };

    // Provide a virtual destructor to suppress a very common warning
    // that unfortunately cannot be suppressed without this.  Cleanups
    // should not rely on this destructor ever being called.
    virtual ~Cleanup() {}

    /// Emit the cleanup.  For normal cleanups, this is run in the
    /// same EH context as when the cleanup was pushed, i.e. the
    /// immediately-enclosing context of the cleanup scope.  For
    /// EH cleanups, this is run in a terminate context.
    ///
    /// \param flags cleanup kind.
    virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
  };

  /// ConditionalCleanupN stores the saved form of its N parameters,
  /// then restores them and performs the cleanup.
  template <class T, class A0>
  class ConditionalCleanup1 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    A0_saved a0_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      T(a0).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup1(A0_saved a0)
      : a0_saved(a0) {}
  };

  template <class T, class A0, class A1>
  class ConditionalCleanup2 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    typedef typename DominatingValue<A1>::saved_type A1_saved;
    A0_saved a0_saved;
    A1_saved a1_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
      T(a0, a1).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup2(A0_saved a0, A1_saved a1)
      : a0_saved(a0), a1_saved(a1) {}
  };

  template <class T, class A0, class A1, class A2>
  class ConditionalCleanup3 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    typedef typename DominatingValue<A1>::saved_type A1_saved;
    typedef typename DominatingValue<A2>::saved_type A2_saved;
    A0_saved a0_saved;
    A1_saved a1_saved;
    A2_saved a2_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
      T(a0, a1, a2).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
      : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
  };

  template <class T, class A0, class A1, class A2, class A3>
  class ConditionalCleanup4 : public Cleanup {
    typedef typename DominatingValue<A0>::saved_type A0_saved;
    typedef typename DominatingValue<A1>::saved_type A1_saved;
    typedef typename DominatingValue<A2>::saved_type A2_saved;
    typedef typename DominatingValue<A3>::saved_type A3_saved;
    A0_saved a0_saved;
    A1_saved a1_saved;
    A2_saved a2_saved;
    A3_saved a3_saved;

    void Emit(CodeGenFunction &CGF, Flags flags) {
      A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
      A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
      A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
      A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
      T(a0, a1, a2, a3).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
      : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
  };

private:
  // The implementation for this class is in CGException.h and
  // CGException.cpp; the definition is here because it's used as a
  // member of CodeGenFunction.

  /// The start of the scope-stack buffer, i.e. the allocated pointer
  /// for the buffer.  All of these pointers are either simultaneously
  /// null or simultaneously valid.
  char *StartOfBuffer;

  /// The end of the buffer.
  char *EndOfBuffer;

  /// The first valid entry in the buffer.
  char *StartOfData;

  /// The innermost normal cleanup on the stack.
  stable_iterator InnermostNormalCleanup;

  /// The innermost EH scope on the stack.
  stable_iterator InnermostEHScope;

  /// The current set of branch fixups.  A branch fixup is a jump to
  /// an as-yet unemitted label, i.e. a label for which we don't yet
  /// know the EH stack depth.  Whenever we pop a cleanup, we have
  /// to thread all the current branch fixups through it.
  ///
  /// Fixups are recorded as the Use of the respective branch or
  /// switch statement.  The use points to the final destination.
  /// When popping out of a cleanup, these uses are threaded through
  /// the cleanup and adjusted to point to the new cleanup.
  ///
  /// Note that branches are allowed to jump into protected scopes
  /// in certain situations;  e.g. the following code is legal:
  ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
  ///     goto foo;
  ///     A a;
  ///    foo:
  ///     bar();
  SmallVector<BranchFixup, 8> BranchFixups;

  /// Allocates Size bytes of storage in the scope-stack buffer.
  char *allocate(size_t Size);

  /// Allocates and links a new scope record of the given kind; returns
  /// the DataSize bytes of storage into which the templated pushCleanup
  /// overloads placement-new the Cleanup object.
  void *pushCleanup(CleanupKind K, size_t DataSize);

public:
  EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
                   InnermostNormalCleanup(stable_end()),
                   InnermostEHScope(stable_end()) {}
  ~EHScopeStack() { delete[] StartOfBuffer; }

  // Variadic templates would make this not terrible.

  /// Push a lazily-created cleanup on the stack.
  template <class T>
  void pushCleanup(CleanupKind Kind) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T();
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0>
  void pushCleanup(CleanupKind Kind, A0 a0) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2, class A3>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
    (void) Obj;
  }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class A0, class A1, class A2, class A3, class A4>
  void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
    (void) Obj;
  }

  // Feel free to add more variants of the following:

  /// Push a cleanup with non-constant storage requirements on the
  /// stack.  The cleanup type must provide an additional static method:
  ///   static size_t getExtraSize(size_t);
  /// The argument to this method will be the value N, which will also
  /// be passed as the first argument to the constructor.
  ///
  /// The data stored in the extra storage must obey the same
  /// restrictions as normal cleanup member data.
  ///
  /// The pointer returned from this method is valid until the cleanup
  /// stack is modified.
  template <class T, class A0, class A1, class A2>
  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
    return new (Buffer) T(N, a0, a1, a2);
  }

  /// Pops a cleanup scope off the stack.  This is private to CGCleanup.cpp.
  void popCleanup();

  /// Push a set of catch handlers on the stack.  The catch is
  /// uninitialized and will need to have the given number of handlers
  /// set on it.
  class EHCatchScope *pushCatch(unsigned NumHandlers);

  /// Pops a catch scope off the stack.  This is private to CGException.cpp.
  void popCatch();

  /// Push an exceptions filter on the stack.
  class EHFilterScope *pushFilter(unsigned NumFilters);

  /// Pops an exceptions filter off the stack.
  void popFilter();

  /// Push a terminate handler on the stack.
  void pushTerminate();

  /// Pops a terminate handler off the stack.
  void popTerminate();

  /// Determines whether the exception-scopes stack is empty.
  bool empty() const { return StartOfData == EndOfBuffer; }

  bool requiresLandingPad() const {
    return InnermostEHScope != stable_end();
  }

  /// Determines whether there are any normal cleanups on the stack.
  bool hasNormalCleanups() const {
    return InnermostNormalCleanup != stable_end();
  }

  /// Returns the innermost normal cleanup on the stack, or
  /// stable_end() if there are no normal cleanups.
  stable_iterator getInnermostNormalCleanup() const {
    return InnermostNormalCleanup;
  }
  stable_iterator getInnermostActiveNormalCleanup() const;

  stable_iterator getInnermostEHScope() const {
    return InnermostEHScope;
  }

  stable_iterator getInnermostActiveEHScope() const;

  /// An unstable reference to a scope-stack depth.  Invalidated by
  /// pushes but not pops.
  class iterator;

  /// Returns an iterator pointing to the innermost EH scope.
  iterator begin() const;

  /// Returns an iterator pointing to the outermost EH scope.
  iterator end() const;

  /// Create a stable reference to the top of the EH stack.  The
  /// returned reference is valid until that scope is popped off the
  /// stack.
  stable_iterator stable_begin() const {
    return stable_iterator(EndOfBuffer - StartOfData);
  }

  /// Create a stable reference to the bottom of the EH stack.
  static stable_iterator stable_end() {
    return stable_iterator(0);
  }

  /// Translates an iterator into a stable_iterator.
  stable_iterator stabilize(iterator it) const;

  /// Turn a stable reference to a scope depth into an unstable pointer
  /// to the EH stack.
  iterator find(stable_iterator save) const;

  /// Removes the cleanup pointed to by the given stable_iterator.
  void removeCleanup(stable_iterator save);

  /// Add a branch fixup to the current cleanup scope.
  BranchFixup &addBranchFixup() {
    assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
    BranchFixups.push_back(BranchFixup());
    return BranchFixups.back();
  }

  unsigned getNumBranchFixups() const { return BranchFixups.size(); }
  BranchFixup &getBranchFixup(unsigned I) {
    assert(I < getNumBranchFixups());
    return BranchFixups[I];
  }

  /// Pops lazily-removed fixups from the end of the list.  This
  /// should only be called by procedures which have just popped a
  /// cleanup or resolved one or more fixups.
  void popNullFixups();

  /// Clears the branch-fixups list.  This should only be called by
  /// ResolveAllBranchFixups.
  void clearFixups() { BranchFixups.clear(); }
};
531 
532 /// CodeGenFunction - This class organizes the per-function state that is used
533 /// while generating LLVM code.
534 class CodeGenFunction : public CodeGenTypeCache {
535   CodeGenFunction(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
536   void operator=(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
537 
538   friend class CGCXXABI;
539 public:
  /// A jump destination is an abstract label, branching to which may
  /// require a jump out through normal cleanups.
  struct JumpDest {
    JumpDest() : Block(0), ScopeDepth(), Index(0) {}
    JumpDest(llvm::BasicBlock *Block,
             EHScopeStack::stable_iterator Depth,
             unsigned Index)
      : Block(Block), ScopeDepth(Depth), Index(Index) {}

    // A default-constructed JumpDest (null Block) is invalid.
    bool isValid() const { return Block != 0; }
    llvm::BasicBlock *getBlock() const { return Block; }
    EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
    unsigned getDestIndex() const { return Index; }

  private:
    /// The basic block this destination ultimately branches to.
    llvm::BasicBlock *Block;
    /// The EH-stack depth at which this destination was created.
    EHScopeStack::stable_iterator ScopeDepth;
    /// The cleanup-destination index for this label.
    unsigned Index;
  };
559 
  CodeGenModule &CGM;  // Per-module state.
  const TargetInfo &Target;  // Target description for the module.

  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
  /// The IR builder used for all instruction emission in this function.
  CGBuilderTy Builder;

  /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
  /// This excludes BlockDecls.
  const Decl *CurFuncDecl;
  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
  const Decl *CurCodeDecl;
  /// ABI-level information about the current function's signature.
  const CGFunctionInfo *CurFnInfo;
  /// The return type of the current function.
  QualType FnRetTy;
  /// The LLVM function currently being emitted.
  llvm::Function *CurFn;

  /// CurGD - The GlobalDecl for the current function being compiled.
  GlobalDecl CurGD;

  /// PrologueCleanupDepth - The cleanup depth enclosing all the
  /// cleanups associated with the parameters.
  EHScopeStack::stable_iterator PrologueCleanupDepth;

  /// ReturnBlock - Unified return block.
  JumpDest ReturnBlock;

  /// ReturnValue - The temporary alloca to hold the return value. This is null
  /// iff the function has no return value.
  llvm::Value *ReturnValue;

  /// AllocaInsertPoint - This is an instruction in the entry block before which
  /// we prefer to insert allocas.
  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;

  /// BoundsChecking - Emit run-time bounds checks. Higher values mean
  /// potentially higher performance penalties.
  unsigned char BoundsChecking;

  /// \brief Whether any type-checking sanitizers are enabled. If \c false,
  /// calls to EmitTypeCheck can be skipped.
  bool SanitizePerformTypeCheck;

  /// \brief Sanitizer options to use for this function.
  const SanitizerOptions *SanOpts;

  /// In ARC, whether we should autorelease the return value.
  bool AutoreleaseResult;

  /// Layout information for the block currently being emitted, if any.
  const CodeGen::CGBlockInfo *BlockInfo;
  /// The value of the block literal pointer within a block function.
  llvm::Value *BlockPointer;

  /// Maps captured variables of the enclosing lambda to the fields of
  /// its closure type.
  llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
  FieldDecl *LambdaThisCaptureField;

  /// \brief A mapping from NRVO variables to the flags used to indicate
  /// when the NRVO has been applied to this variable.
  llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;

  /// The stack of EH-responding scopes (cleanups, catches, filters).
  EHScopeStack EHStack;

  /// i32s containing the indexes of the cleanup destinations.
  llvm::AllocaInst *NormalCleanupDest;

  /// The next unused cleanup-destination index.
  unsigned NextCleanupDestIndex;

  /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
  CGBlockInfo *FirstBlockInfo;

  /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
  llvm::BasicBlock *EHResumeBlock;

  /// The exception slot.  All landing pads write the current exception pointer
  /// into this alloca.
  llvm::Value *ExceptionSlot;

  /// The selector slot.  Under the MandatoryCleanup model, all landing pads
  /// write the current selector value into this alloca.
  llvm::AllocaInst *EHSelectorSlot;

  /// Emits a landing pad for the current EH stack.
  llvm::BasicBlock *EmitLandingPad();

  // Out-of-line worker for getInvokeDest (declared elsewhere in this class).
  llvm::BasicBlock *getInvokeDestImpl();

  /// Saves a value in a form that will dominate the rest of the
  /// function, for use inside conditionally-evaluated expressions.
  template <class T>
  typename DominatingValue<T>::saved_type saveValueInCond(T value) {
    return DominatingValue<T>::save(*this, value);
  }

public:
  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
  /// rethrows.
  SmallVector<llvm::Value*, 8> ObjCEHValueStack;
652 
  /// A class controlling the emission of a finally block.
  class FinallyInfo {
    /// Where the catchall's edge through the cleanup should go.
    JumpDest RethrowDest;

    /// A function to call to enter the catch.
    llvm::Constant *BeginCatchFn;

    /// An i1 variable indicating whether or not the @finally is
    /// running for an exception.
    llvm::AllocaInst *ForEHVar;

    /// An i8* variable into which the exception pointer to rethrow
    /// has been saved.
    llvm::AllocaInst *SavedExnVar;

  public:
    /// Begins emission of the @finally; pairs with exit() below.
    void enter(CodeGenFunction &CGF, const Stmt *Finally,
               llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
               llvm::Constant *rethrowFn);
    /// Finishes emission of the @finally begun by enter().
    void exit(CodeGenFunction &CGF);
  };
675 
676   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
677   /// current full-expression.  Safe against the possibility that
678   /// we're currently inside a conditionally-evaluated expression.
679   template <class T, class A0>
680   void pushFullExprCleanup(CleanupKind kind, A0 a0) {
681     // If we're not in a conditional branch, or if none of the
682     // arguments requires saving, then use the unconditional cleanup.
683     if (!isInConditionalBranch())
684       return EHStack.pushCleanup<T>(kind, a0);
685 
686     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
687 
688     typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
689     EHStack.pushCleanup<CleanupType>(kind, a0_saved);
690     initFullExprCleanup();
691   }
692 
693   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
694   /// current full-expression.  Safe against the possibility that
695   /// we're currently inside a conditionally-evaluated expression.
696   template <class T, class A0, class A1>
697   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
698     // If we're not in a conditional branch, or if none of the
699     // arguments requires saving, then use the unconditional cleanup.
700     if (!isInConditionalBranch())
701       return EHStack.pushCleanup<T>(kind, a0, a1);
702 
703     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
704     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
705 
706     typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
707     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
708     initFullExprCleanup();
709   }
710 
711   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
712   /// current full-expression.  Safe against the possibility that
713   /// we're currently inside a conditionally-evaluated expression.
714   template <class T, class A0, class A1, class A2>
715   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
716     // If we're not in a conditional branch, or if none of the
717     // arguments requires saving, then use the unconditional cleanup.
718     if (!isInConditionalBranch()) {
719       return EHStack.pushCleanup<T>(kind, a0, a1, a2);
720     }
721 
722     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
723     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
724     typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
725 
726     typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
727     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
728     initFullExprCleanup();
729   }
730 
731   /// pushFullExprCleanup - Push a cleanup to be run at the end of the
732   /// current full-expression.  Safe against the possibility that
733   /// we're currently inside a conditionally-evaluated expression.
734   template <class T, class A0, class A1, class A2, class A3>
735   void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
736     // If we're not in a conditional branch, or if none of the
737     // arguments requires saving, then use the unconditional cleanup.
738     if (!isInConditionalBranch()) {
739       return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
740     }
741 
742     typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
743     typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
744     typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
745     typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
746 
747     typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
748     EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
749                                      a2_saved, a3_saved);
750     initFullExprCleanup();
751   }
752 
  /// Set up the last cleanup that was pushed as a conditional
  /// full-expression cleanup.
  void initFullExprCleanup();

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object destructor of an object of the given type at the
  /// given address.  Does nothing if T is not a C++ class type with a
  /// non-trivial destructor.
  void PushDestructorCleanup(QualType T, llvm::Value *Addr);

  /// PushDestructorCleanup - Push a cleanup to call the
  /// complete-object variant of the given destructor on the object at
  /// the given address.
  void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
                             llvm::Value *Addr);

  /// PopCleanupBlock - Will pop the cleanup entry on the stack and
  /// process all branch fixups.
  void PopCleanupBlock(bool FallThroughIsBranchThrough = false);

  /// DeactivateCleanupBlock - Deactivates the given cleanup block.
  /// The block cannot be reactivated.  Pops it if it's the top of the
  /// stack.
  ///
  /// \param DominatingIP - An instruction which is known to
  ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
  ///   the point at which the cleanup comes into scope.
  void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                              llvm::Instruction *DominatingIP);

  /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
  /// Cannot be used to resurrect a deactivated cleanup.
  ///
  /// \param DominatingIP - An instruction which is known to
  ///   dominate the current IP (if set) and which lies along
  ///   all paths of execution between the current IP and
  ///   the point at which the cleanup comes into scope.
  void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
                            llvm::Instruction *DominatingIP);
793 
  /// \brief Enters a new scope for capturing cleanups, all of which
  /// will be executed once the scope is exited.
  class RunCleanupsScope {
    /// The EH-stack depth at scope entry; everything pushed above this
    /// depth is popped (and emitted) when the scope exits.
    EHScopeStack::stable_iterator CleanupStackDepth;
    /// Saved value of CGF.DidCallStackSave, restored on exit.
    bool OldDidCallStackSave;
    /// Whether the destructor should still emit the cleanups (false
    /// after ForceCleanup has already done so).
    bool PerformCleanup;

    RunCleanupsScope(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
    void operator=(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;

  protected:
    CodeGenFunction& CGF;

  public:
    /// \brief Enter a new cleanup scope.
    explicit RunCleanupsScope(CodeGenFunction &CGF)
      : PerformCleanup(true), CGF(CGF)
    {
      CleanupStackDepth = CGF.EHStack.stable_begin();
      OldDidCallStackSave = CGF.DidCallStackSave;
      CGF.DidCallStackSave = false;
    }

    /// \brief Exit this cleanup scope, emitting any accumulated
    /// cleanups.
    ~RunCleanupsScope() {
      if (PerformCleanup) {
        CGF.DidCallStackSave = OldDidCallStackSave;
        CGF.PopCleanupBlocks(CleanupStackDepth);
      }
    }

    /// \brief Determine whether this scope requires any cleanups.
    bool requiresCleanups() const {
      return CGF.EHStack.stable_begin() != CleanupStackDepth;
    }

    /// \brief Force the emission of cleanups now, instead of waiting
    /// until this object is destroyed.
    void ForceCleanup() {
      assert(PerformCleanup && "Already forced cleanup");
      CGF.DidCallStackSave = OldDidCallStackSave;
      CGF.PopCleanupBlocks(CleanupStackDepth);
      PerformCleanup = false;
    }
  };
840 
841   class LexicalScope: protected RunCleanupsScope {
842     SourceRange Range;
843     bool PopDebugStack;
844 
845     LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
846     void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;
847 
848   public:
849     /// \brief Enter a new cleanup scope.
850     explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
851       : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
852       if (CGDebugInfo *DI = CGF.getDebugInfo())
853         DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
854     }
855 
856     /// \brief Exit this cleanup scope, emitting any accumulated
857     /// cleanups.
858     ~LexicalScope() {
859       if (PopDebugStack) {
860         CGDebugInfo *DI = CGF.getDebugInfo();
861         if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
862       }
863     }
864 
865     /// \brief Force the emission of cleanups now, instead of waiting
866     /// until this object is destroyed.
867     void ForceCleanup() {
868       RunCleanupsScope::ForceCleanup();
869       if (CGDebugInfo *DI = CGF.getDebugInfo()) {
870         DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
871         PopDebugStack = false;
872       }
873     }
874   };
875 
876 
877   /// PopCleanupBlocks - Takes the old cleanup stack size and emits
878   /// the cleanup blocks that have been added.
879   void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
880 
881   void ResolveBranchFixups(llvm::BasicBlock *Target);
882 
883   /// The given basic block lies in the current EH scope, but may be a
884   /// target of a potentially scope-crossing jump; get a stable handle
885   /// to which we can perform this jump later.
886   JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
887     return JumpDest(Target,
888                     EHStack.getInnermostNormalCleanup(),
889                     NextCleanupDestIndex++);
890   }
891 
892   /// The given basic block lies in the current EH scope, but may be a
893   /// target of a potentially scope-crossing jump; get a stable handle
894   /// to which we can perform this jump later.
895   JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
896     return getJumpDestInCurrentScope(createBasicBlock(Name));
897   }
898 
899   /// EmitBranchThroughCleanup - Emit a branch from the current insert
900   /// block through the normal cleanup handling code (if any) and then
901   /// on to \arg Dest.
902   void EmitBranchThroughCleanup(JumpDest Dest);
903 
904   /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
905   /// specified destination obviously has no cleanups to run.  'false' is always
906   /// a conservatively correct answer for this method.
907   bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
908 
909   /// popCatchScope - Pops the catch scope at the top of the EHScope
910   /// stack, emitting any required code (other than the catch handlers
911   /// themselves).
912   void popCatchScope();
913 
914   llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
915   llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
916 
917   /// An object to manage conditionally-evaluated expressions.
918   class ConditionalEvaluation {
919     llvm::BasicBlock *StartBB;
920 
921   public:
922     ConditionalEvaluation(CodeGenFunction &CGF)
923       : StartBB(CGF.Builder.GetInsertBlock()) {}
924 
925     void begin(CodeGenFunction &CGF) {
926       assert(CGF.OutermostConditional != this);
927       if (!CGF.OutermostConditional)
928         CGF.OutermostConditional = this;
929     }
930 
931     void end(CodeGenFunction &CGF) {
932       assert(CGF.OutermostConditional != 0);
933       if (CGF.OutermostConditional == this)
934         CGF.OutermostConditional = 0;
935     }
936 
937     /// Returns a block which will be executed prior to each
938     /// evaluation of the conditional code.
939     llvm::BasicBlock *getStartingBlock() const {
940       return StartBB;
941     }
942   };
943 
944   /// isInConditionalBranch - Return true if we're currently emitting
945   /// one branch or the other of a conditional expression.
946   bool isInConditionalBranch() const { return OutermostConditional != 0; }
947 
  /// setBeforeOutermostConditional - Store \p value to \p addr at a
  /// point that executes unconditionally: just before the last
  /// instruction of the block in which the outermost active
  /// conditional evaluation began.  Only valid while inside a
  /// conditional branch.
  void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
    assert(isInConditionalBranch());
    llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
    // Insert the store before the starting block's final instruction.
    new llvm::StoreInst(value, addr, &block->back());
  }
953 
  /// An RAII object to record that we're evaluating a statement
  /// expression.
  class StmtExprEvaluation {
    CodeGenFunction &CGF;

    /// We have to save the outermost conditional: cleanups in a
    /// statement expression aren't conditional just because the
    /// StmtExpr is.
    ConditionalEvaluation *SavedOutermostConditional;

  public:
    /// Begin evaluating a statement expression: save the current
    /// outermost-conditional state and clear it for the duration.
    StmtExprEvaluation(CodeGenFunction &CGF)
      : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
      CGF.OutermostConditional = 0;
    }

    /// Restore the saved conditional state, and make sure an
    /// insertion point exists so emission can continue.
    ~StmtExprEvaluation() {
      CGF.OutermostConditional = SavedOutermostConditional;
      CGF.EnsureInsertPoint();
    }
  };
975 
  /// An object which temporarily prevents a value from being
  /// destroyed by aggressive peephole optimizations that assume that
  /// all uses of a value have been realized in the IR.
  class PeepholeProtection {
    // The protecting instruction, if any.  Managed exclusively by
    // CodeGenFunction (see protectFromPeepholes / unprotectFromPeepholes).
    llvm::Instruction *Inst;
    friend class CodeGenFunction;

  public:
    // Default state: no protection in place.
    PeepholeProtection() : Inst(0) {}
  };
986 
987   /// A non-RAII class containing all the information about a bound
988   /// opaque value.  OpaqueValueMapping, below, is a RAII wrapper for
989   /// this which makes individual mappings very simple; using this
990   /// class directly is useful when you have a variable number of
991   /// opaque values or don't want the RAII functionality for some
992   /// reason.
993   class OpaqueValueMappingData {
994     const OpaqueValueExpr *OpaqueValue;
995     bool BoundLValue;
996     CodeGenFunction::PeepholeProtection Protection;
997 
998     OpaqueValueMappingData(const OpaqueValueExpr *ov,
999                            bool boundLValue)
1000       : OpaqueValue(ov), BoundLValue(boundLValue) {}
1001   public:
1002     OpaqueValueMappingData() : OpaqueValue(0) {}
1003 
1004     static bool shouldBindAsLValue(const Expr *expr) {
1005       // gl-values should be bound as l-values for obvious reasons.
1006       // Records should be bound as l-values because IR generation
1007       // always keeps them in memory.  Expressions of function type
1008       // act exactly like l-values but are formally required to be
1009       // r-values in C.
1010       return expr->isGLValue() ||
1011              expr->getType()->isRecordType() ||
1012              expr->getType()->isFunctionType();
1013     }
1014 
1015     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1016                                        const OpaqueValueExpr *ov,
1017                                        const Expr *e) {
1018       if (shouldBindAsLValue(ov))
1019         return bind(CGF, ov, CGF.EmitLValue(e));
1020       return bind(CGF, ov, CGF.EmitAnyExpr(e));
1021     }
1022 
1023     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1024                                        const OpaqueValueExpr *ov,
1025                                        const LValue &lv) {
1026       assert(shouldBindAsLValue(ov));
1027       CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
1028       return OpaqueValueMappingData(ov, true);
1029     }
1030 
1031     static OpaqueValueMappingData bind(CodeGenFunction &CGF,
1032                                        const OpaqueValueExpr *ov,
1033                                        const RValue &rv) {
1034       assert(!shouldBindAsLValue(ov));
1035       CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
1036 
1037       OpaqueValueMappingData data(ov, false);
1038 
1039       // Work around an extremely aggressive peephole optimization in
1040       // EmitScalarConversion which assumes that all other uses of a
1041       // value are extant.
1042       data.Protection = CGF.protectFromPeepholes(rv);
1043 
1044       return data;
1045     }
1046 
1047     bool isValid() const { return OpaqueValue != 0; }
1048     void clear() { OpaqueValue = 0; }
1049 
1050     void unbind(CodeGenFunction &CGF) {
1051       assert(OpaqueValue && "no data to unbind!");
1052 
1053       if (BoundLValue) {
1054         CGF.OpaqueLValues.erase(OpaqueValue);
1055       } else {
1056         CGF.OpaqueRValues.erase(OpaqueValue);
1057         CGF.unprotectFromPeepholes(Protection);
1058       }
1059     }
1060   };
1061 
1062   /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
1063   class OpaqueValueMapping {
1064     CodeGenFunction &CGF;
1065     OpaqueValueMappingData Data;
1066 
1067   public:
1068     static bool shouldBindAsLValue(const Expr *expr) {
1069       return OpaqueValueMappingData::shouldBindAsLValue(expr);
1070     }
1071 
1072     /// Build the opaque value mapping for the given conditional
1073     /// operator if it's the GNU ?: extension.  This is a common
1074     /// enough pattern that the convenience operator is really
1075     /// helpful.
1076     ///
1077     OpaqueValueMapping(CodeGenFunction &CGF,
1078                        const AbstractConditionalOperator *op) : CGF(CGF) {
1079       if (isa<ConditionalOperator>(op))
1080         // Leave Data empty.
1081         return;
1082 
1083       const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
1084       Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
1085                                           e->getCommon());
1086     }
1087 
1088     OpaqueValueMapping(CodeGenFunction &CGF,
1089                        const OpaqueValueExpr *opaqueValue,
1090                        LValue lvalue)
1091       : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
1092     }
1093 
1094     OpaqueValueMapping(CodeGenFunction &CGF,
1095                        const OpaqueValueExpr *opaqueValue,
1096                        RValue rvalue)
1097       : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
1098     }
1099 
1100     void pop() {
1101       Data.unbind(CGF);
1102       Data.clear();
1103     }
1104 
1105     ~OpaqueValueMapping() {
1106       if (Data.isValid()) Data.unbind(CGF);
1107     }
1108   };
1109 
  /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
  /// number that holds the value.
1112   unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
1113 
1114   /// BuildBlockByrefAddress - Computes address location of the
1115   /// variable which is declared as __block.
1116   llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
1117                                       const VarDecl *V);
1118 private:
1119   CGDebugInfo *DebugInfo;
1120   bool DisableDebugInfo;
1121 
1122   /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
1123   /// calling llvm.stacksave for multiple VLAs in the same scope.
1124   bool DidCallStackSave;
1125 
1126   /// IndirectBranch - The first time an indirect goto is seen we create a block
1127   /// with an indirect branch.  Every time we see the address of a label taken,
1128   /// we add the label to the indirect goto.  Every subsequent indirect goto is
1129   /// codegen'd as a jump to the IndirectBranch's basic block.
1130   llvm::IndirectBrInst *IndirectBranch;
1131 
1132   /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
1133   /// decls.
1134   typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
1135   DeclMapTy LocalDeclMap;
1136 
1137   /// LabelMap - This keeps track of the LLVM basic block for each C label.
1138   llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
1139 
1140   // BreakContinueStack - This keeps track of where break and continue
1141   // statements should jump to.
1142   struct BreakContinue {
1143     BreakContinue(JumpDest Break, JumpDest Continue)
1144       : BreakBlock(Break), ContinueBlock(Continue) {}
1145 
1146     JumpDest BreakBlock;
1147     JumpDest ContinueBlock;
1148   };
1149   SmallVector<BreakContinue, 8> BreakContinueStack;
1150 
1151   /// SwitchInsn - This is nearest current switch instruction. It is null if
1152   /// current context is not in a switch.
1153   llvm::SwitchInst *SwitchInsn;
1154 
1155   /// CaseRangeBlock - This block holds if condition check for last case
1156   /// statement range in current switch instruction.
1157   llvm::BasicBlock *CaseRangeBlock;
1158 
1159   /// OpaqueLValues - Keeps track of the current set of opaque value
1160   /// expressions.
1161   llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
1162   llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
1163 
1164   // VLASizeMap - This keeps track of the associated size for each VLA type.
1165   // We track this by the size expression rather than the type itself because
1166   // in certain situations, like a const qualifier applied to an VLA typedef,
1167   // multiple VLA types can share the same size expression.
1168   // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
1169   // enter/leave scopes.
1170   llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
1171 
1172   /// A block containing a single 'unreachable' instruction.  Created
1173   /// lazily by getUnreachableBlock().
1174   llvm::BasicBlock *UnreachableBlock;
1175 
  /// CXXABIThisDecl - When generating code for a C++ member function,
  /// this will hold the implicit 'this' declaration.
1178   ImplicitParamDecl *CXXABIThisDecl;
1179   llvm::Value *CXXABIThisValue;
1180   llvm::Value *CXXThisValue;
1181 
1182   /// CXXVTTDecl - When generating code for a base object constructor or
1183   /// base object destructor with virtual bases, this will hold the implicit
1184   /// VTT parameter.
1185   ImplicitParamDecl *CXXVTTDecl;
1186   llvm::Value *CXXVTTValue;
1187 
1188   /// OutermostConditional - Points to the outermost active
1189   /// conditional control.  This is used so that we know if a
1190   /// temporary should be destroyed conditionally.
1191   ConditionalEvaluation *OutermostConditional;
1192 
1193 
  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
  /// type as well as the field number that contains the actual data.
1196   llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
1197                                               unsigned> > ByRefValueInfo;
1198 
1199   llvm::BasicBlock *TerminateLandingPad;
1200   llvm::BasicBlock *TerminateHandler;
1201   llvm::BasicBlock *TrapBB;
1202 
1203   /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
1204   /// In the kernel metadata node, reference the kernel function and metadata
1205   /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
1206   /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
1207   ///   "work_group_size_hint", and three 32-bit integers X, Y and Z.
1208   /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
1209   ///   "reqd_work_group_size", and three 32-bit integers X, Y and Z.
1210   void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
1211                                 llvm::Function *Fn);
1212 
1213 public:
1214   CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1215   ~CodeGenFunction();
1216 
1217   CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1218   ASTContext &getContext() const { return CGM.getContext(); }
1219   /// Returns true if DebugInfo is actually initialized.
1220   bool maybeInitializeDebugInfo() {
1221     if (CGM.getModuleDebugInfo()) {
1222       DebugInfo = CGM.getModuleDebugInfo();
1223       return true;
1224     }
1225     return false;
1226   }
1227   CGDebugInfo *getDebugInfo() {
1228     if (DisableDebugInfo)
1229       return NULL;
1230     return DebugInfo;
1231   }
1232   void disableDebugInfo() { DisableDebugInfo = true; }
1233   void enableDebugInfo() { DisableDebugInfo = false; }
1234 
1235   bool shouldUseFusedARCCalls() {
1236     return CGM.getCodeGenOpts().OptimizationLevel == 0;
1237   }
1238 
1239   const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1240 
1241   /// Returns a pointer to the function's exception object and selector slot,
1242   /// which is assigned in every landing pad.
1243   llvm::Value *getExceptionSlot();
1244   llvm::Value *getEHSelectorSlot();
1245 
1246   /// Returns the contents of the function's exception object and selector
1247   /// slots.
1248   llvm::Value *getExceptionFromSlot();
1249   llvm::Value *getSelectorFromSlot();
1250 
1251   llvm::Value *getNormalCleanupDestSlot();
1252 
1253   llvm::BasicBlock *getUnreachableBlock() {
1254     if (!UnreachableBlock) {
1255       UnreachableBlock = createBasicBlock("unreachable");
1256       new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1257     }
1258     return UnreachableBlock;
1259   }
1260 
1261   llvm::BasicBlock *getInvokeDest() {
1262     if (!EHStack.requiresLandingPad()) return 0;
1263     return getInvokeDestImpl();
1264   }
1265 
1266   llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1267 
1268   //===--------------------------------------------------------------------===//
1269   //                                  Cleanups
1270   //===--------------------------------------------------------------------===//
1271 
1272   typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
1273 
1274   void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1275                                         llvm::Value *arrayEndPointer,
1276                                         QualType elementType,
1277                                         Destroyer *destroyer);
1278   void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1279                                       llvm::Value *arrayEnd,
1280                                       QualType elementType,
1281                                       Destroyer *destroyer);
1282 
1283   void pushDestroy(QualType::DestructionKind dtorKind,
1284                    llvm::Value *addr, QualType type);
1285   void pushEHDestroy(QualType::DestructionKind dtorKind,
1286                      llvm::Value *addr, QualType type);
1287   void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
1288                    Destroyer *destroyer, bool useEHCleanupForArray);
1289   void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
1290                    bool useEHCleanupForArray);
1291   llvm::Function *generateDestroyHelper(llvm::Constant *addr,
1292                                         QualType type,
1293                                         Destroyer *destroyer,
1294                                         bool useEHCleanupForArray);
1295   void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1296                         QualType type, Destroyer *destroyer,
1297                         bool checkZeroLength, bool useEHCleanup);
1298 
1299   Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1300 
  /// Determines whether an EH cleanup is required to destroy a type
  /// with the given destruction kind.
  bool needsEHCleanup(QualType::DestructionKind kind) {
    switch (kind) {
    case QualType::DK_none:
      // Nothing to destroy, so no cleanup of any kind.
      return false;
    case QualType::DK_cxx_destructor:
    case QualType::DK_objc_weak_lifetime:
      // Only needed when exceptions are enabled at all.
      return getLangOpts().Exceptions;
    case QualType::DK_objc_strong_lifetime:
      // ARC strong lifetime additionally requires the
      // ObjCAutoRefCountExceptions codegen option.
      return getLangOpts().Exceptions &&
             CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
    }
    llvm_unreachable("bad destruction kind");
  }
1316 
1317   CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1318     return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1319   }
1320 
1321   //===--------------------------------------------------------------------===//
1322   //                                  Objective-C
1323   //===--------------------------------------------------------------------===//
1324 
1325   void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1326 
1327   void StartObjCMethod(const ObjCMethodDecl *MD,
1328                        const ObjCContainerDecl *CD,
1329                        SourceLocation StartLoc);
1330 
1331   /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1332   void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1333                           const ObjCPropertyImplDecl *PID);
1334   void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1335                               const ObjCPropertyImplDecl *propImpl,
1336                               const ObjCMethodDecl *GetterMothodDecl,
1337                               llvm::Constant *AtomicHelperFn);
1338 
1339   void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1340                                   ObjCMethodDecl *MD, bool ctor);
1341 
1342   /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1343   /// for the given property.
1344   void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1345                           const ObjCPropertyImplDecl *PID);
1346   void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1347                               const ObjCPropertyImplDecl *propImpl,
1348                               llvm::Constant *AtomicHelperFn);
1349   bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1350   bool IvarTypeWithAggrGCObjects(QualType Ty);
1351 
1352   //===--------------------------------------------------------------------===//
1353   //                                  Block Bits
1354   //===--------------------------------------------------------------------===//
1355 
1356   llvm::Value *EmitBlockLiteral(const BlockExpr *);
1357   llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1358   static void destroyBlockInfos(CGBlockInfo *info);
1359   llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1360                                            const CGBlockInfo &Info,
1361                                            llvm::StructType *,
1362                                            llvm::Constant *BlockVarLayout);
1363 
1364   llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1365                                         const CGBlockInfo &Info,
1366                                         const Decl *OuterFuncDecl,
1367                                         const DeclMapTy &ldm,
1368                                         bool IsLambdaConversionToBlock);
1369 
1370   llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1371   llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1372   llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1373                                              const ObjCPropertyImplDecl *PID);
1374   llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1375                                              const ObjCPropertyImplDecl *PID);
1376   llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1377 
1378   void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1379 
1380   class AutoVarEmission;
1381 
1382   void emitByrefStructureInit(const AutoVarEmission &emission);
1383   void enterByrefCleanup(const AutoVarEmission &emission);
1384 
1385   llvm::Value *LoadBlockStruct() {
1386     assert(BlockPointer && "no block pointer set!");
1387     return BlockPointer;
1388   }
1389 
1390   void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1391   void AllocateBlockDecl(const DeclRefExpr *E);
1392   llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1393   llvm::Type *BuildByRefType(const VarDecl *var);
1394 
1395   void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1396                     const CGFunctionInfo &FnInfo);
1397   void StartFunction(GlobalDecl GD, QualType RetTy,
1398                      llvm::Function *Fn,
1399                      const CGFunctionInfo &FnInfo,
1400                      const FunctionArgList &Args,
1401                      SourceLocation StartLoc);
1402 
1403   void EmitConstructorBody(FunctionArgList &Args);
1404   void EmitDestructorBody(FunctionArgList &Args);
1405   void EmitFunctionBody(FunctionArgList &Args);
1406 
1407   void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
1408                                   CallArgList &CallArgs);
1409   void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1410   void EmitLambdaBlockInvokeBody();
1411   void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1412   void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
1413 
1414   /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1415   /// emission when possible.
1416   void EmitReturnBlock();
1417 
1418   /// FinishFunction - Complete IR generation of the current function. It is
1419   /// legal to call this function even if there is no current insertion point.
1420   void FinishFunction(SourceLocation EndLoc=SourceLocation());
1421 
1422   /// GenerateThunk - Generate a thunk for the given method.
1423   void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1424                      GlobalDecl GD, const ThunkInfo &Thunk);
1425 
1426   void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1427                             GlobalDecl GD, const ThunkInfo &Thunk);
1428 
1429   void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1430                         FunctionArgList &Args);
1431 
1432   void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1433                                ArrayRef<VarDecl *> ArrayIndexes);
1434 
1435   /// InitializeVTablePointer - Initialize the vtable pointer of the given
1436   /// subobject.
1437   ///
1438   void InitializeVTablePointer(BaseSubobject Base,
1439                                const CXXRecordDecl *NearestVBase,
1440                                CharUnits OffsetFromNearestVBase,
1441                                llvm::Constant *VTable,
1442                                const CXXRecordDecl *VTableClass);
1443 
1444   typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1445   void InitializeVTablePointers(BaseSubobject Base,
1446                                 const CXXRecordDecl *NearestVBase,
1447                                 CharUnits OffsetFromNearestVBase,
1448                                 bool BaseIsNonVirtualPrimaryBase,
1449                                 llvm::Constant *VTable,
1450                                 const CXXRecordDecl *VTableClass,
1451                                 VisitedVirtualBasesSetTy& VBases);
1452 
1453   void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1454 
1455   /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1456   /// to by This.
1457   llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1458 
1459   /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1460   /// given phase of destruction for a destructor.  The end result
1461   /// should call destructors on members and base classes in reverse
1462   /// order of their construction.
1463   void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1464 
1465   /// ShouldInstrumentFunction - Return true if the current function should be
1466   /// instrumented with __cyg_profile_func_* calls
1467   bool ShouldInstrumentFunction();
1468 
1469   /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1470   /// instrumentation function with the current function and the call site, if
1471   /// function instrumentation is enabled.
1472   void EmitFunctionInstrumentation(const char *Fn);
1473 
1474   /// EmitMCountInstrumentation - Emit call to .mcount.
1475   void EmitMCountInstrumentation();
1476 
1477   /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1478   /// arguments for the given function. This is also responsible for naming the
1479   /// LLVM function arguments.
1480   void EmitFunctionProlog(const CGFunctionInfo &FI,
1481                           llvm::Function *Fn,
1482                           const FunctionArgList &Args);
1483 
1484   /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1485   /// given temporary.
1486   void EmitFunctionEpilog(const CGFunctionInfo &FI);
1487 
1488   /// EmitStartEHSpec - Emit the start of the exception spec.
1489   void EmitStartEHSpec(const Decl *D);
1490 
1491   /// EmitEndEHSpec - Emit the end of the exception spec.
1492   void EmitEndEHSpec(const Decl *D);
1493 
1494   /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1495   llvm::BasicBlock *getTerminateLandingPad();
1496 
1497   /// getTerminateHandler - Return a handler (not a landing pad, just
1498   /// a catch handler) that just calls terminate.  This is used when
1499   /// a terminate scope encloses a try.
1500   llvm::BasicBlock *getTerminateHandler();
1501 
1502   llvm::Type *ConvertTypeForMem(QualType T);
1503   llvm::Type *ConvertType(QualType T);
1504   llvm::Type *ConvertType(const TypeDecl *T) {
1505     return ConvertType(getContext().getTypeDeclType(T));
1506   }
1507 
1508   /// LoadObjCSelf - Load the value of self. This function is only valid while
1509   /// generating code for an Objective-C method.
1510   llvm::Value *LoadObjCSelf();
1511 
1512   /// TypeOfSelfObject - Return type of object that this self represents.
1513   QualType TypeOfSelfObject();
1514 
1515   /// hasAggregateLLVMType - Return true if the specified AST type will map into
1516   /// an aggregate LLVM type or is void.
1517   static bool hasAggregateLLVMType(QualType T);
1518 
  /// createBasicBlock - Create an LLVM basic block, optionally
  /// inserted into \p parent (before \p before, if given).
  llvm::BasicBlock *createBasicBlock(const Twine &name = "",
                                     llvm::Function *parent = 0,
                                     llvm::BasicBlock *before = 0) {
#ifdef NDEBUG
    // Release builds discard the suggested block name.
    return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
#else
    return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
#endif
  }
1529 
  /// getJumpDestForLabel - Return the JumpDest that the specified label
  /// maps to.
  JumpDest getJumpDestForLabel(const LabelDecl *S);

  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
  /// another basic block, simplify it. This assumes that no other code could
  /// potentially reference the basic block.
  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);

  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
  /// adding a fall-through branch from the current insert block if
  /// necessary. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// IsFinished - If true, indicates that the caller has finished emitting
  /// branches to the given block and does not expect to emit code into it. This
  /// means the block can be ignored if it is unreachable.
  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);

  /// EmitBlockAfterUses - Emit the given block somewhere hopefully
  /// near its uses, and leave the insertion point in it.
  void EmitBlockAfterUses(llvm::BasicBlock *BB);

  /// EmitBranch - Emit a branch to the specified basic block from the current
  /// insert block, taking care to avoid creation of branches from dummy
  /// blocks. It is legal to call this function even if there is no current
  /// insertion point.
  ///
  /// This function clears the current insertion point. The caller should follow
  /// calls to this function with calls to Emit*Block prior to generation new
  /// code.
  void EmitBranch(llvm::BasicBlock *Block);
1562 
1563   /// HaveInsertPoint - True if an insertion point is defined. If not, this
1564   /// indicates that the current code being emitted is unreachable.
1565   bool HaveInsertPoint() const {
1566     return Builder.GetInsertBlock() != 0;
1567   }
1568 
1569   /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1570   /// emitted IR has a place to go. Note that by definition, if this function
1571   /// creates a block then that block is unreachable; callers may do better to
1572   /// detect when no insertion point is defined and simply skip IR generation.
1573   void EnsureInsertPoint() {
1574     if (!HaveInsertPoint())
1575       EmitBlock(createBasicBlock());
1576   }
1577 
  /// ErrorUnsupported - Print out an error that codegen doesn't support the
  /// specified stmt yet.
  /// \param OmitOnError - presumably suppresses the diagnostic when errors
  /// have already been reported -- verify against the implementation.
  void ErrorUnsupported(const Stmt *S, const char *Type,
                        bool OmitOnError=false);

  //===--------------------------------------------------------------------===//
  //                                  Helpers
  //===--------------------------------------------------------------------===//
1586 
1587   LValue MakeAddrLValue(llvm::Value *V, QualType T,
1588                         CharUnits Alignment = CharUnits()) {
1589     return LValue::MakeAddr(V, T, Alignment, getContext(),
1590                             CGM.getTBAAInfo(T));
1591   }
1592 
1593   LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1594     CharUnits Alignment;
1595     if (!T->isIncompleteType())
1596       Alignment = getContext().getTypeAlignInChars(T);
1597     return LValue::MakeAddr(V, T, Alignment, getContext(),
1598                             CGM.getTBAAInfo(T));
1599   }
1600 
  /// CreateTempAlloca - This creates an alloca and inserts it into the entry
  /// block. The caller is responsible for setting an appropriate alignment on
  /// the alloca.
  llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
                                     const Twine &Name = "tmp");

  /// InitTempAlloca - Provide an initial value for the given alloca.
  void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);

  /// CreateIRTemp - Create a temporary IR object of the given type, with
  /// appropriate alignment. This routine should only be used when a temporary
  /// value needs to be stored into an alloca (for example, to avoid explicit
  /// PHI construction), but the type is the IR type, not the type appropriate
  /// for storing in memory.
  llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");

  /// CreateMemTemp - Create a temporary memory object of the given type, with
  /// appropriate alignment.
  llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1620 
1621   /// CreateAggTemp - Create a temporary memory object for the given
1622   /// aggregate type.
1623   AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1624     CharUnits Alignment = getContext().getTypeAlignInChars(T);
1625     return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1626                                  T.getQualifiers(),
1627                                  AggValueSlot::IsNotDestructed,
1628                                  AggValueSlot::DoesNotNeedGCBarriers,
1629                                  AggValueSlot::IsNotAliased);
1630   }
1631 
  /// Emit a cast to void* in the appropriate address space.
  llvm::Value *EmitCastToVoidPtr(llvm::Value *value);

  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
  /// expression and compare the result against zero, returning an Int1Ty value.
  llvm::Value *EvaluateExprAsBool(const Expr *E);

  /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
  void EmitIgnoredExpr(const Expr *E);

  /// EmitAnyExpr - Emit code to compute the specified expression which can have
  /// any type.  The result is returned as an RValue struct.  If this is an
  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
  /// the result should be returned.
  ///
  /// \param ignoreResult True if the resulting value isn't used.
  RValue EmitAnyExpr(const Expr *E,
                     AggValueSlot aggSlot = AggValueSlot::ignored(),
                     bool ignoreResult = false);

  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
  // or the value of the expression, depending on how va_list is defined.
  llvm::Value *EmitVAListRef(const Expr *E);

  /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
  /// always be accessible even if no aggregate location is provided.
  RValue EmitAnyExprToTemp(const Expr *E);

  /// EmitAnyExprToMem - Emits the code necessary to evaluate an
  /// arbitrary expression into the given memory location.
  void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
                        Qualifiers Quals, bool IsInitializer);

  /// EmitExprAsInit - Emits the code necessary to initialize a
  /// location in memory with the given initializer.
  /// \param capturedByInit - presumably true when the variable is captured by
  /// its own initializer (e.g. a __block variable) -- verify against callers.
  void EmitExprAsInit(const Expr *init, const ValueDecl *D,
                      LValue lvalue, bool capturedByInit);
1669 
1670   /// hasVolatileMember - returns true if aggregate type has a volatile
1671   /// member.
1672   bool hasVolatileMember(QualType T) {
1673     if (const RecordType *RT = T->getAs<RecordType>()) {
1674       const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
1675       return RD->hasVolatileMember();
1676     }
1677     return false;
1678   }
1679   /// EmitAggregateCopy - Emit an aggregate assignment.
1680   ///
1681   /// The difference to EmitAggregateCopy is that tail padding is not copied.
1682   /// This is required for correctness when assigning non-POD structures in C++.
1683   void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1684                            QualType EltTy) {
1685     bool IsVolatile = hasVolatileMember(EltTy);
1686     EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, CharUnits::Zero(),
1687                       true);
1688   }
1689 
  /// EmitAggregateCopy - Emit an aggregate copy.
  ///
  /// \param isVolatile - True iff either the source or the destination is
  /// volatile.
  /// \param isAssignment - If false, allow padding to be copied.  This often
  /// yields more efficient code.
  void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
                         QualType EltTy, bool isVolatile=false,
                         CharUnits Alignment = CharUnits::Zero(),
                         bool isAssignment = false);

  /// StartBlock - Start new block named N. If insert block is a dummy block
  /// then reuse it.
  void StartBlock(const char *N);
1704 
1705   /// GetAddrOfLocalVar - Return the address of a local variable.
1706   llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1707     llvm::Value *Res = LocalDeclMap[VD];
1708     assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1709     return Res;
1710   }
1711 
1712   /// getOpaqueLValueMapping - Given an opaque value expression (which
1713   /// must be mapped to an l-value), return its mapping.
1714   const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1715     assert(OpaqueValueMapping::shouldBindAsLValue(e));
1716 
1717     llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1718       it = OpaqueLValues.find(e);
1719     assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1720     return it->second;
1721   }
1722 
1723   /// getOpaqueRValueMapping - Given an opaque value expression (which
1724   /// must be mapped to an r-value), return its mapping.
1725   const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1726     assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1727 
1728     llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1729       it = OpaqueRValues.find(e);
1730     assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1731     return it->second;
1732   }
1733 
  /// getAccessedFieldNo - Given an encoded value and a result number, return
  /// the input field number being accessed.
  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);

  llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
  llvm::BasicBlock *GetIndirectGotoBlock();

  /// EmitNullInitialization - Generate code to set a value of the given type to
  /// null. If the type contains data member pointers, they will be initialized
  /// to -1 in accordance with the Itanium C++ ABI.
  void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);

  // EmitVAArg - Generate code to get an argument from the passed in pointer
  // and update it accordingly. The return value is a pointer to the argument.
  // FIXME: We should be able to get rid of this method and use the va_arg
  // instruction in LLVM instead once it works well enough.
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);

  /// emitArrayLength - Compute the length of an array, even if it's a
  /// VLA, and drill down to the base element type.
  llvm::Value *emitArrayLength(const ArrayType *arrayType,
                               QualType &baseType,
                               llvm::Value *&addr);

  /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions
  /// in the given variably-modified type and store them in the VLASizeMap.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitVariablyModifiedType(QualType Ty);

  /// getVLASize - Returns an LLVM value that corresponds to the size,
  /// in non-variably-sized elements, of a variable length array type,
  /// plus that largest non-variably-sized element type.  Assumes that
  /// the type has already been emitted with EmitVariablyModifiedType.
  std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
  std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
1770 
  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
  /// generating code for a C++ member function.
  llvm::Value *LoadCXXThis() {
    assert(CXXThisValue && "no 'this' value for this function");
    return CXXThisValue;
  }
1777 
  /// LoadCXXVTT - Load the VTT parameter; only valid for base
  /// constructors/destructors of classes that have virtual bases.
  llvm::Value *LoadCXXVTT() {
    assert(CXXVTTValue && "no VTT value for this function");
    return CXXVTTValue;
  }
1784 
  /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
  /// complete class to the given direct base.
  llvm::Value *
  GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
                                        const CXXRecordDecl *Derived,
                                        const CXXRecordDecl *Base,
                                        bool BaseIsVirtual);

  /// GetAddressOfBaseClass - This function will add the necessary delta to the
  /// load of 'this' and returns address of the base class.
  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
                                     const CXXRecordDecl *Derived,
                                     CastExpr::path_const_iterator PathBegin,
                                     CastExpr::path_const_iterator PathEnd,
                                     bool NullCheckValue);

  /// GetAddressOfDerivedClass - Adjust a base-class pointer to the given
  /// derived class (the inverse direction of GetAddressOfBaseClass).
  llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
                                        const CXXRecordDecl *Derived,
                                        CastExpr::path_const_iterator PathBegin,
                                        CastExpr::path_const_iterator PathEnd,
                                        bool NullCheckValue);

  /// GetVirtualBaseClassOffset - Return the offset of the given virtual base
  /// of ClassDecl -- presumably loaded from the vtable; verify against the
  /// implementation.
  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl);

  void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                      CXXCtorType CtorType,
                                      const FunctionArgList &Args);
  // It's important not to confuse this and the previous function. Delegating
  // constructors are the C++0x feature. The constructor delegate optimization
  // is used to reduce duplication in the base and complete constructors where
  // they are substantially the same.
  void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
                                        const FunctionArgList &Args);
  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
                              bool ForVirtualBase, bool Delegating,
                              llvm::Value *This,
                              CallExpr::const_arg_iterator ArgBeg,
                              CallExpr::const_arg_iterator ArgEnd);
1825 
  /// EmitSynthesizedCXXCopyCtorCall - Emit a call to the copy constructor
  /// \p D, copying from \p Src into \p This.
  void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
                              llvm::Value *This, llvm::Value *Src,
                              CallExpr::const_arg_iterator ArgBeg,
                              CallExpr::const_arg_iterator ArgEnd);

  /// EmitCXXAggrConstructorCall - Emit construction of the elements of the
  /// given array with the given constructor.
  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                  const ConstantArrayType *ArrayTy,
                                  llvm::Value *ArrayPtr,
                                  CallExpr::const_arg_iterator ArgBeg,
                                  CallExpr::const_arg_iterator ArgEnd,
                                  bool ZeroInitialization = false);

  /// Overload taking a dynamic element count instead of a constant array
  /// type.
  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                  llvm::Value *NumElements,
                                  llvm::Value *ArrayPtr,
                                  CallExpr::const_arg_iterator ArgBeg,
                                  CallExpr::const_arg_iterator ArgEnd,
                                  bool ZeroInitialization = false);

  /// destroyCXXObject - A Destroyer that presumably runs the destructor of a
  /// C++ object -- verify against the Destroyer typedef and its uses.
  static Destroyer destroyCXXObject;

  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
                             bool ForVirtualBase, bool Delegating,
                             llvm::Value *This);

  void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
                               llvm::Value *NewPtr, llvm::Value *NumElements);

  void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
                        llvm::Value *Ptr);

  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);

  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
                      QualType DeleteTy);

  llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
  llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
  llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);

  void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
  void EmitStdInitializerListCleanup(llvm::Value *loc,
                                     const InitListExpr *init);
1870 
  /// \brief Situations in which we might emit a check for the suitability of a
  ///        pointer or glvalue.
  enum TypeCheckKind {
    /// Checking the operand of a load. Must be suitably sized and aligned.
    TCK_Load,
    /// Checking the destination of a store. Must be suitably sized and aligned.
    TCK_Store,
    /// Checking the bound value in a reference binding. Must be suitably sized
    /// and aligned, but is not required to refer to an object (until the
    /// reference is used), per core issue 453.
    TCK_ReferenceBinding,
    /// Checking the object expression in a non-static data member access. Must
    /// be an object within its lifetime.
    TCK_MemberAccess,
    /// Checking the 'this' pointer for a call to a non-static member function.
    /// Must be an object within its lifetime.
    TCK_MemberCall,
    /// Checking the 'this' pointer for a constructor call.
    TCK_ConstructorCall
  };

  /// \brief Emit a check that \p V is the address of storage of the
  /// appropriate size and alignment for an object of type \p Type.
  void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
                     QualType Type, CharUnits Alignment = CharUnits::Zero());

  /// EmitScalarPrePostIncDec - Emit a scalar ++/-- on the given l-value;
  /// \p isInc selects increment vs decrement, \p isPre prefix vs postfix.
  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);
  /// EmitComplexPrePostIncDec - As above, for _Complex operands.
  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                                         bool isInc, bool isPre);
  //===--------------------------------------------------------------------===//
  //                            Declaration Emission
  //===--------------------------------------------------------------------===//

  /// EmitDecl - Emit a declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitDecl(const Decl &D);

  /// EmitVarDecl - Emit a local variable declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitVarDecl(const VarDecl &D);

  /// EmitScalarInit - Emit the scalar initializer \p init into \p lvalue.
  void EmitScalarInit(const Expr *init, const ValueDecl *D,
                      LValue lvalue, bool capturedByInit);
  void EmitScalarInit(llvm::Value *init, LValue lvalue);

  /// SpecialInitFn - Callback type, presumably for variables that need
  /// custom initialization logic -- verify against its users.
  typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
                             llvm::Value *Address);

  /// EmitAutoVarDecl - Emit an auto variable declaration.
  ///
  /// This function can be called with a null (unreachable) insert point.
  void EmitAutoVarDecl(const VarDecl &D);
1926 
1927   class AutoVarEmission {
1928     friend class CodeGenFunction;
1929 
1930     const VarDecl *Variable;
1931 
1932     /// The alignment of the variable.
1933     CharUnits Alignment;
1934 
1935     /// The address of the alloca.  Null if the variable was emitted
1936     /// as a global constant.
1937     llvm::Value *Address;
1938 
1939     llvm::Value *NRVOFlag;
1940 
1941     /// True if the variable is a __block variable.
1942     bool IsByRef;
1943 
1944     /// True if the variable is of aggregate type and has a constant
1945     /// initializer.
1946     bool IsConstantAggregate;
1947 
1948     struct Invalid {};
1949     AutoVarEmission(Invalid) : Variable(0) {}
1950 
1951     AutoVarEmission(const VarDecl &variable)
1952       : Variable(&variable), Address(0), NRVOFlag(0),
1953         IsByRef(false), IsConstantAggregate(false) {}
1954 
1955     bool wasEmittedAsGlobal() const { return Address == 0; }
1956 
1957   public:
1958     static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
1959 
1960     /// Returns the address of the object within this declaration.
1961     /// Note that this does not chase the forwarding pointer for
1962     /// __block decls.
1963     llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1964       if (!IsByRef) return Address;
1965 
1966       return CGF.Builder.CreateStructGEP(Address,
1967                                          CGF.getByRefValueLLVMField(Variable),
1968                                          Variable->getNameAsString());
1969     }
1970   };
  // Three-phase emission of an automatic variable: allocate storage, emit
  // the initializer, then push any required cleanups.
  AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
  void EmitAutoVarInit(const AutoVarEmission &emission);
  void EmitAutoVarCleanups(const AutoVarEmission &emission);
  void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
                              QualType::DestructionKind dtorKind);

  void EmitStaticVarDecl(const VarDecl &D,
                         llvm::GlobalValue::LinkageTypes Linkage);

  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);

  /// protectFromPeepholes - Protect a value that we're intending to
  /// store to the side, but which will probably be used later, from
  /// aggressive peepholing optimizations that might delete it.
  ///
  /// Pass the result to unprotectFromPeepholes to declare that
  /// protection is no longer required.
  ///
  /// There's no particular reason why this shouldn't apply to
  /// l-values, it's just that no existing peepholes work on pointers.
  PeepholeProtection protectFromPeepholes(RValue rvalue);
  void unprotectFromPeepholes(PeepholeProtection protection);
1994 
  //===--------------------------------------------------------------------===//
  //                             Statement Emission
  //===--------------------------------------------------------------------===//

  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
  void EmitStopPoint(const Stmt *S);

  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
  /// this function even if there is no current insertion point.
  ///
  /// This function may clear the current insertion point; callers should use
  /// EnsureInsertPoint if they wish to subsequently generate code without first
  /// calling EmitBlock, EmitBranch, or EmitStmt.
  void EmitStmt(const Stmt *S);

  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
  /// necessarily require an insertion point or debug information; typically
  /// because the statement amounts to a jump or a container of other
  /// statements.
  ///
  /// \return True if the statement was handled.
  bool EmitSimpleStmt(const Stmt *S);

  /// EmitCompoundStmt - Emit a compound statement.  If \p GetLast is true,
  /// the value of the last statement is returned -- presumably for GNU
  /// statement-expressions; verify against callers.
  RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
                          AggValueSlot AVS = AggValueSlot::ignored());
  /// EmitCompoundStmtWithoutScope - Variant of EmitCompoundStmt that does not
  /// introduce a lexical scope for the statement (TODO: confirm cleanup
  /// semantics against the implementation).
  RValue EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                      bool GetLast = false, AggValueSlot AVS =
                                          AggValueSlot::ignored());

  /// EmitLabel - Emit the block for the given label. It is legal to call this
  /// function even if there is no current insertion point.
  void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.

  // Per-statement-kind emitters, dispatched from EmitStmt/EmitSimpleStmt.
  void EmitLabelStmt(const LabelStmt &S);
  void EmitAttributedStmt(const AttributedStmt &S);
  void EmitGotoStmt(const GotoStmt &S);
  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
  void EmitIfStmt(const IfStmt &S);
  void EmitWhileStmt(const WhileStmt &S);
  void EmitDoStmt(const DoStmt &S);
  void EmitForStmt(const ForStmt &S);
  void EmitReturnStmt(const ReturnStmt &S);
  void EmitDeclStmt(const DeclStmt &S);
  void EmitBreakStmt(const BreakStmt &S);
  void EmitContinueStmt(const ContinueStmt &S);
  void EmitSwitchStmt(const SwitchStmt &S);
  void EmitDefaultStmt(const DefaultStmt &S);
  void EmitCaseStmt(const CaseStmt &S);
  void EmitCaseStmtRange(const CaseStmt &S);
  void EmitAsmStmt(const AsmStmt &S);

  // Objective-C statement emitters.
  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
  void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);

  // Declarations of the unwind runtime entry points -- presumably
  // _Unwind_Resume and friends; verify against CGException.
  llvm::Constant *getUnwindResumeFn();
  llvm::Constant *getUnwindResumeOrRethrowFn();
  /// Enter/ExitCXXTryStmt - Bracket emission of a C++ try body;
  /// \p IsFnTryBlock is presumably true for function-try-blocks.
  void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
  void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);

  void EmitCXXTryStmt(const CXXTryStmt &S);
  void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
2059 
  //===--------------------------------------------------------------------===//
  //                         LValue Expression Emission
  //===--------------------------------------------------------------------===//

  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
  RValue GetUndefRValue(QualType Ty);

  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
  /// and issue an ErrorUnsupported style diagnostic (using the
  /// provided Name).
  RValue EmitUnsupportedRValue(const Expr *E,
                               const char *Name);

  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
  /// an ErrorUnsupported style diagnostic (using the provided Name).
  LValue EmitUnsupportedLValue(const Expr *E,
                               const char *Name);

  /// EmitLValue - Emit code to compute a designator that specifies the location
  /// of the expression.
  ///
  /// This can return one of two things: a simple address or a bitfield
  /// reference.  In either case, the LLVM Value* in the LValue structure is
  /// guaranteed to be an LLVM pointer type.
  ///
  /// If this returns a bitfield reference, nothing about the pointee type of
  /// the LLVM value is known: For example, it may not be a pointer to an
  /// integer.
  ///
  /// If this returns a normal address, and if the lvalue's C type is fixed
  /// size, this method guarantees that the returned pointer type will point to
  /// an LLVM type of the same size of the lvalue's type.  If the lvalue has a
  /// variable length type, this is not possible.
  ///
  LValue EmitLValue(const Expr *E);

  /// \brief Same as EmitLValue but additionally we generate checking code to
  /// guard against undefined behavior.  This is only suitable when we know
  /// that the address will be used to access the object.
  LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);

  /// EmitToMemory - Change a scalar value from its value
  /// representation to its in-memory representation.
  llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);

  /// EmitFromMemory - Change a scalar value from its memory
  /// representation to its value representation.
  llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.  (An Alignment of 0 presumably means
  /// "use the natural alignment" -- verify against the implementation.)
  llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                unsigned Alignment, QualType Ty,
                                llvm::MDNode *TBAAInfo = 0);

  /// EmitLoadOfScalar - Load a scalar value from an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.  The l-value must be a simple
  /// l-value.
  llvm::Value *EmitLoadOfScalar(LValue lvalue);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.
  void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                         bool Volatile, unsigned Alignment, QualType Ty,
                         llvm::MDNode *TBAAInfo = 0, bool isInit=false);

  /// EmitStoreOfScalar - Store a scalar value to an address, taking
  /// care to appropriately convert from the memory representation to
  /// the LLVM value representation.  The l-value must be a simple
  /// l-value.  The isInit flag indicates whether this is an initialization.
  /// If so, atomic qualifiers are ignored and the store is always non-atomic.
  void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
2135 
  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result as an
  /// rvalue, returning the rvalue.
  RValue EmitLoadOfLValue(LValue V);
  RValue EmitLoadOfExtVectorElementLValue(LValue V);
  RValue EmitLoadOfBitfieldLValue(LValue LV);

  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
  /// lvalue, where both are guaranteed to have the same type, and that type
  /// is 'Ty'.
  void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);

  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
  /// as EmitStoreThroughLValue.
  ///
  /// \param Result [out] - If non-null, this will be set to a Value* for the
  /// bit-field contents after the store, appropriate for use as the result of
  /// an assignment to the bit-field.
  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                      llvm::Value **Result=0);

  /// Emit an l-value for an assignment (simple or compound) of complex type.
  LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
  LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);

  // Per-expression-kind l-value emitters, dispatched from EmitLValue.
  // Note: only available for agg return types
  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
  LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
  // Note: only available for agg return types
  LValue EmitCallExprLValue(const CallExpr *E);
  // Note: only available for agg return types
  LValue EmitVAArgExprLValue(const VAArgExpr *E);
  LValue EmitDeclRefLValue(const DeclRefExpr *E);
  LValue EmitStringLiteralLValue(const StringLiteral *E);
  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
  LValue EmitPredefinedLValue(const PredefinedExpr *E);
  LValue EmitUnaryOpLValue(const UnaryOperator *E);
  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
  LValue EmitMemberExpr(const MemberExpr *E);
  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
  LValue EmitInitListLValue(const InitListExpr *E);
  LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
  LValue EmitCastLValue(const CastExpr *E);
  LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
  LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
  LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);

  RValue EmitRValueForField(LValue LV, const FieldDecl *FD);
2187 
2188   class ConstantEmission {
2189     llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
2190     ConstantEmission(llvm::Constant *C, bool isReference)
2191       : ValueAndIsReference(C, isReference) {}
2192   public:
2193     ConstantEmission() {}
2194     static ConstantEmission forReference(llvm::Constant *C) {
2195       return ConstantEmission(C, true);
2196     }
2197     static ConstantEmission forValue(llvm::Constant *C) {
2198       return ConstantEmission(C, false);
2199     }
2200 
2201     operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
2202 
2203     bool isReference() const { return ValueAndIsReference.getInt(); }
2204     LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
2205       assert(isReference());
2206       return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
2207                                             refExpr->getType());
2208     }
2209 
2210     llvm::Constant *getValue() const {
2211       assert(!isReference());
2212       return ValueAndIsReference.getPointer();
2213     }
2214   };
2215 
  /// Try to constant-fold the given DeclRefExpr; see ConstantEmission
  /// for the possible results.
  ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);

  // Pseudo-object expression emission.
  RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
                                AggValueSlot slot = AggValueSlot::ignored());
  LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);

  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                              const ObjCIvarDecl *Ivar);
  LValue EmitLValueForField(LValue Base, const FieldDecl* Field);

  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
  /// if the Field is a reference, this will return the address of the reference
  /// and not the address of the value stored in the reference.
  LValue EmitLValueForFieldInitialization(LValue Base,
                                          const FieldDecl* Field);

  LValue EmitLValueForIvar(QualType ObjectTy,
                           llvm::Value* Base, const ObjCIvarDecl *Ivar,
                           unsigned CVRQualifiers);

  // L-value emission for C++ expression kinds.
  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
  LValue EmitLambdaLValue(const LambdaExpr *E);
  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
  LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);

  // L-value emission for Objective-C and statement expressions.
  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
  LValue EmitStmtExprLValue(const StmtExpr *E);
  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
  LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
  void   EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);

  //===--------------------------------------------------------------------===//
  //                         Scalar Expression Emission
  //===--------------------------------------------------------------------===//

  /// EmitCall - Generate a call of the given function, expecting the given
  /// result type, and using the given argument list which specifies both the
  /// LLVM arguments and the types they were derived from.
  ///
  /// \param TargetDecl - If given, the decl of the function in a direct call;
  /// used to set attributes on the call (noreturn, etc.).
  RValue EmitCall(const CGFunctionInfo &FnInfo,
                  llvm::Value *Callee,
                  ReturnValueSlot ReturnValue,
                  const CallArgList &Args,
                  const Decl *TargetDecl = 0,
                  llvm::Instruction **callOrInvoke = 0);

  /// Overload that takes the callee's Clang function type and the
  /// source-level argument range instead of a prebuilt CallArgList.
  RValue EmitCall(QualType FnType, llvm::Value *Callee,
                  ReturnValueSlot ReturnValue,
                  CallExpr::const_arg_iterator ArgBeg,
                  CallExpr::const_arg_iterator ArgEnd,
                  const Decl *TargetDecl = 0);
  RValue EmitCallExpr(const CallExpr *E,
                      ReturnValueSlot ReturnValue = ReturnValueSlot());

  // Emit Callee as either a 'call' or an 'invoke' instruction and return
  // the resulting call site.
  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name = "");
  llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name = "");

  // Virtual-call helpers, including the Apple kext vtable dispatch variants.
  llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
                                llvm::Type *Ty);
  llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
                                llvm::Value *This, llvm::Type *Ty);
  llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
                                         NestedNameSpecifier *Qual,
                                         llvm::Type *Ty);

  llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
                                                   CXXDtorType Type,
                                                   const CXXRecordDecl *RD);

  // C++ member-call emission.
  RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
                           SourceLocation CallLoc,
                           llvm::Value *Callee,
                           ReturnValueSlot ReturnValue,
                           llvm::Value *This,
                           llvm::Value *VTT,
                           CallExpr::const_arg_iterator ArgBeg,
                           CallExpr::const_arg_iterator ArgEnd);
  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
                               ReturnValueSlot ReturnValue);
  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                      ReturnValueSlot ReturnValue);

  llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
                                           const CXXMethodDecl *MD,
                                           llvm::Value *This);
  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                       const CXXMethodDecl *MD,
                                       ReturnValueSlot ReturnValue);

  RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
                                ReturnValueSlot ReturnValue);


  RValue EmitBuiltinExpr(const FunctionDecl *FD,
                         unsigned BuiltinID, const CallExpr *E);

  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);

  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
  /// is unhandled by the current target.
  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  // Per-target builtin emitters and their helpers.
  llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitNeonCall(llvm::Function *F,
                            SmallVectorImpl<llvm::Value*> &O,
                            const char *name,
                            unsigned shift = 0, bool rightshift = false);
  llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
  llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
                                   bool negateForRightShift);

  llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);

  // Objective-C expression emission: literals, selectors, messages.
  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
  llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
  llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
  llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
  llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
                                const ObjCMethodDecl *MethodWithObjects);
  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
                             ReturnValueSlot Return = ReturnValueSlot());
2348 
2349   /// Retrieves the default cleanup kind for an ARC cleanup.
2350   /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2351   CleanupKind getARCCleanupKind() {
2352     return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2353              ? NormalAndEHCleanup : NormalCleanup;
2354   }
2355 
  // ARC primitives.
  // Operations on __weak references.
  void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
  void EmitARCDestroyWeak(llvm::Value *addr);
  llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
  llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
  llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
                                bool ignored);
  void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
  void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
  // Retain/release/autorelease operations on strong references.
  llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
  llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
                                  bool ignored);
  llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
                                      bool ignored);
  llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
  llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
  llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
  void EmitARCDestroyStrong(llvm::Value *addr, bool precise);
  void EmitARCRelease(llvm::Value *value, bool precise);
  llvm::Value *EmitARCAutorelease(llvm::Value *value);
  llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
  llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);

  // NOTE(review): these appear to emit an ARC assignment, returning both the
  // destination l-value and the stored value — confirm in CGObjC.cpp.
  std::pair<LValue,llvm::Value*>
  EmitARCStoreAutoreleasing(const BinaryOperator *e);
  std::pair<LValue,llvm::Value*>
  EmitARCStoreStrong(const BinaryOperator *e, bool ignored);

  llvm::Value *EmitObjCThrowOperand(const Expr *expr);

  llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
  llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
  llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);

  llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
  llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
  llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);

  // Destroyer implementations for the ARC ownership qualifiers.
  static Destroyer destroyARCStrongImprecise;
  static Destroyer destroyARCStrongPrecise;
  static Destroyer destroyARCWeak;

  // Autorelease-pool push/pop operations (plain and MRR variants).
  void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
  llvm::Value *EmitObjCAutoreleasePoolPush();
  llvm::Value *EmitObjCMRRAutoreleasePoolPush();
  void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
  void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);

  /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
  /// expression. Will emit a temporary variable if E is not an LValue.
  RValue EmitReferenceBindingToExpr(const Expr* E,
                                    const NamedDecl *InitializedDecl);
2410 
2411   //===--------------------------------------------------------------------===//
2412   //                           Expression Emission
2413   //===--------------------------------------------------------------------===//
2414 
2415   // Expressions are broken into three classes: scalar, complex, aggregate.
2416 
2417   /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2418   /// scalar type, returning the result.
2419   llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2420 
2421   /// EmitScalarConversion - Emit a conversion from the specified type to the
2422   /// specified destination type, both of which are LLVM scalar types.
2423   llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2424                                     QualType DstTy);
2425 
2426   /// EmitComplexToScalarConversion - Emit a conversion from the specified
2427   /// complex type to the specified destination type, where the destination type
2428   /// is an LLVM scalar type.
2429   llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2430                                              QualType DstTy);
2431 
2432 
2433   /// EmitAggExpr - Emit the computation of the specified expression
2434   /// of aggregate type.  The result is computed into the given slot,
2435   /// which may be null to indicate that the value is not needed.
2436   void EmitAggExpr(const Expr *E, AggValueSlot AS);
2437 
2438   /// EmitAggExprToLValue - Emit the computation of the specified expression of
2439   /// aggregate type into a temporary LValue.
2440   LValue EmitAggExprToLValue(const Expr *E);
2441 
2442   /// EmitGCMemmoveCollectable - Emit special API for structs with object
2443   /// pointers.
2444   void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2445                                 QualType Ty);
2446 
2447   /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2448   /// make sure it survives garbage collection until this point.
2449   void EmitExtendGCLifetime(llvm::Value *object);
2450 
2451   /// EmitComplexExpr - Emit the computation of the specified expression of
2452   /// complex type, returning the result.
2453   ComplexPairTy EmitComplexExpr(const Expr *E,
2454                                 bool IgnoreReal = false,
2455                                 bool IgnoreImag = false);
2456 
2457   /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
2458   /// of complex type, storing into the specified Value*.
2459   void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
2460                                bool DestIsVolatile);
2461 
2462   /// StoreComplexToAddr - Store a complex number into the specified address.
2463   void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
2464                           bool DestIsVolatile);
2465   /// LoadComplexFromAddr - Load a complex number from the specified address.
2466   ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
2467 
2468   /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2469   /// a static local variable.
2470   llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
2471                                             const char *Separator,
2472                                        llvm::GlobalValue::LinkageTypes Linkage);
2473 
2474   /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2475   /// global variable that has already been created for it.  If the initializer
2476   /// has a different type than GV does, this may free GV and return a different
2477   /// one.  Otherwise it just returns GV.
2478   llvm::GlobalVariable *
2479   AddInitializerToStaticVarDecl(const VarDecl &D,
2480                                 llvm::GlobalVariable *GV);
2481 
2482 
2483   /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2484   /// variable with global storage.
2485   void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2486                                 bool PerformInit);
2487 
2488   /// Call atexit() with a function that passes the given argument to
2489   /// the given function.
2490   void registerGlobalDtorWithAtExit(llvm::Constant *fn, llvm::Constant *addr);
2491 
2492   /// Emit code in this function to perform a guarded variable
2493   /// initialization.  Guarded initializations are used when it's not
2494   /// possible to prove that an initialization will be done exactly
2495   /// once, e.g. with a static local variable or a static data member
2496   /// of a class template.
2497   void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2498                           bool PerformInit);
2499 
2500   /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2501   /// variables.
2502   void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2503                                  llvm::Constant **Decls,
2504                                  unsigned NumDecls);
2505 
2506   /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
2507   /// variables.
2508   void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
2509                                   const std::vector<std::pair<llvm::WeakVH,
2510                                   llvm::Constant*> > &DtorsAndObjects);
2511 
2512   void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2513                                         const VarDecl *D,
2514                                         llvm::GlobalVariable *Addr,
2515                                         bool PerformInit);
2516 
2517   void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2518 
2519   void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
2520                                   const Expr *Exp);
2521 
2522   void enterFullExpression(const ExprWithCleanups *E) {
2523     if (E->getNumObjects() == 0) return;
2524     enterNonTrivialFullExpression(E);
2525   }
2526   void enterNonTrivialFullExpression(const ExprWithCleanups *E);
2527 
  /// Emit a C++ 'throw' expression.
  void EmitCXXThrowExpr(const CXXThrowExpr *E);

  /// Emit a lambda expression, constructing the closure into Dest.
  void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);

  // NOTE(review): Dest, when non-null, presumably receives the result of
  // the atomic operation — confirm in the implementation.
  RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);

  //===--------------------------------------------------------------------===//
  //                         Annotations Emission
  //===--------------------------------------------------------------------===//

  /// Emit an annotation call (intrinsic or builtin).
  llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
                                  llvm::Value *AnnotatedVal,
                                  StringRef AnnotationStr,
                                  SourceLocation Location);

  /// Emit local annotations for the local variable V, declared by D.
  void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);

  /// Emit field annotations for the given field & value. Returns the
  /// annotation result.
  llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);

  //===--------------------------------------------------------------------===//
  //                             Internal Helpers
  //===--------------------------------------------------------------------===//

  /// ContainsLabel - Return true if the statement contains a label in it.  If
  /// this statement is not executed normally, it not containing a label means
  /// that we can just remove the code.
  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);

  /// containsBreak - Return true if the statement contains a break out of it.
  /// If the statement (recursively) contains a switch or loop with a break
  /// inside of it, this is fine.
  static bool containsBreak(const Stmt *S);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false.  If it
  /// constant folds return true and set the boolean result in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);

  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
  /// to a constant, or if it does but contains a label, return false.  If it
  /// constant folds return true and set the folded value in Result.
  bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);

  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
  /// if statement) to the specified blocks.  Based on the condition, this might
  /// try to simplify the codegen of the conditional based on the branch.
  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
                            llvm::BasicBlock *FalseBlock);

  /// \brief Emit a description of a type in a format suitable for passing to
  /// a runtime sanitizer handler.
  llvm::Constant *EmitCheckTypeDescriptor(QualType T);

  /// \brief Convert a value into a format suitable for passing to a runtime
  /// sanitizer handler.
  llvm::Value *EmitCheckValue(llvm::Value *V);

  /// \brief Emit a description of a source location in a format suitable for
  /// passing to a runtime sanitizer handler.
  llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);

  /// \brief Specify under what conditions this check can be recovered
  enum CheckRecoverableKind {
    /// Always terminate program execution if this check fails
    CRK_Unrecoverable,
    /// Check supports recovering, allows user to specify which
    CRK_Recoverable,
    /// Runtime conditionally aborts, always need to support recovery.
    CRK_AlwaysRecoverable
  };

  /// \brief Create a basic block that will call a handler function in a
  /// sanitizer runtime with the provided arguments, and create a conditional
  /// branch to it.
  void EmitCheck(llvm::Value *Checked, StringRef CheckName,
                 ArrayRef<llvm::Constant *> StaticArgs,
                 ArrayRef<llvm::Value *> DynamicArgs,
                 CheckRecoverableKind Recoverable);

  /// \brief Create a basic block that will call the trap intrinsic, and emit a
  /// conditional branch to it, for the -ftrapv checks.
  void EmitTrapCheck(llvm::Value *Checked);

  /// EmitCallArg - Emit a single call argument.
  void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);

  /// EmitDelegateCallArg - We are performing a delegate call; that
  /// is, the current function is delegating to another one.  Produce
  /// a r-value suitable for passing the given parameter.
  void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);

  /// SetFPAccuracy - Set the minimum required accuracy of the given floating
  /// point operation, expressed as the maximum relative error in ulp.
  void SetFPAccuracy(llvm::Value *Val, float Accuracy);
2626 
private:
  // NOTE(review): presumably returns !range metadata for a load of type Ty,
  // or null if no range applies — confirm in the implementation.
  llvm::MDNode *getRangeForLoadFromType(QualType Ty);
  void EmitReturnOfRValue(RValue RV, QualType Ty);

  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
  ///
  /// \param AI - The first function argument of the expansion.
  /// \return The argument following the last expanded function
  /// argument.
  llvm::Function::arg_iterator
  ExpandTypeFromArgs(QualType Ty, LValue Dst,
                     llvm::Function::arg_iterator AI);

  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
  /// Ty, into individual arguments on the provided vector \arg Args. See
  /// ABIArgInfo::Expand.
  void ExpandTypeToArgs(QualType Ty, RValue Src,
                        SmallVector<llvm::Value*, 16> &Args,
                        llvm::FunctionType *IRFuncTy);

  // Helpers for lowering inline-asm operands.  ConstraintStr is passed by
  // mutable reference and may be amended while the operand is processed.
  llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
                            const Expr *InputExpr, std::string &ConstraintStr);

  llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                  LValue InputValue, QualType InputType,
                                  std::string &ConstraintStr);
2654 
  /// EmitCallArgs - Emit call arguments for a function.
  /// The CallArgTypeInfo parameter is used for iterating over the known
  /// argument types of the function being called.  Any written arguments
  /// beyond the known parameter types (i.e. variadic arguments) are emitted
  /// with the type of the argument expression itself.
  template<typename T>
  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
                    CallExpr::const_arg_iterator ArgBeg,
                    CallExpr::const_arg_iterator ArgEnd) {
      CallExpr::const_arg_iterator Arg = ArgBeg;

    // First, use the argument types that the type info knows about
    if (CallArgTypeInfo) {
      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
        assert(Arg != ArgEnd && "Running over edge of argument list!");
        QualType ArgType = *I;
#ifndef NDEBUG
        // Debug-only sanity check: the declared parameter type must match
        // the type of the written argument expression.
        QualType ActualArgType = Arg->getType();
        if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
          // A pointer to a variable array whose size expression is absent
          // cannot be compared structurally below; fall back to treating it
          // as the declared parameter type.
          QualType ActualBaseType =
            ActualArgType->getAs<PointerType>()->getPointeeType();
          QualType ArgBaseType =
            ArgType->getAs<PointerType>()->getPointeeType();
          if (ArgBaseType->isVariableArrayType()) {
            if (const VariableArrayType *VAT =
                getContext().getAsVariableArrayType(ActualBaseType)) {
              if (!VAT->getSizeExpr())
                ActualArgType = ArgType;
            }
          }
        }
        // Compare canonical types so sugar (typedefs, references) does not
        // produce spurious mismatches.
        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
               getTypePtr() ==
               getContext().getCanonicalType(ActualArgType).getTypePtr() &&
               "type mismatch in call argument!");
#endif
        EmitCallArg(Args, *Arg, ArgType);
      }

      // Either we've emitted all the call args, or we have a call to a
      // variadic function.
      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
             "Extra arguments in non-variadic function!");

    }

    // If we still have any arguments, emit them using the type of the argument.
    for (; Arg != ArgEnd; ++Arg)
      EmitCallArg(Args, *Arg, Arg->getType());
  }
2704 
  /// Convenience accessor for the module's target-specific code-gen hooks.
  const TargetCodeGenInfo &getTargetHooks() const {
    return CGM.getTargetCodeGenInfo();
  }

  void EmitDeclMetadata();

  CodeGenModule::ByrefHelpers *
  buildByrefHelpers(llvm::StructType &byrefType,
                    const AutoVarEmission &emission);

  void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);

  /// EmitPointerWithAlignment - Given an expression with a pointer type, emit
  /// the value and compute our best estimate of the alignment of the pointee.
  std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
2720 };
2721 
2722 /// Helper class with most of the code for saving a value for a
2723 /// conditional expression cleanup.
2724 struct DominatingLLVMValue {
2725   typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
2726 
2727   /// Answer whether the given value needs extra work to be saved.
2728   static bool needsSaving(llvm::Value *value) {
2729     // If it's not an instruction, we don't need to save.
2730     if (!isa<llvm::Instruction>(value)) return false;
2731 
2732     // If it's an instruction in the entry block, we don't need to save.
2733     llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
2734     return (block != &block->getParent()->getEntryBlock());
2735   }
2736 
2737   /// Try to save the given value.
2738   static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
2739     if (!needsSaving(value)) return saved_type(value, false);
2740 
2741     // Otherwise we need an alloca.
2742     llvm::Value *alloca =
2743       CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
2744     CGF.Builder.CreateStore(value, alloca);
2745 
2746     return saved_type(alloca, true);
2747   }
2748 
2749   static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
2750     if (!value.getInt()) return value.getPointer();
2751     return CGF.Builder.CreateLoad(value.getPointer());
2752   }
2753 };
2754 
2755 /// A partial specialization of DominatingValue for llvm::Values that
2756 /// might be llvm::Instructions.
2757 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
2758   typedef T *type;
2759   static type restore(CodeGenFunction &CGF, saved_type value) {
2760     return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
2761   }
2762 };
2763 
/// A specialization of DominatingValue for RValue.
template <> struct DominatingValue<RValue> {
  typedef RValue type;
  class saved_type {
    // Discriminator for how Value should be interpreted when restoring.
    enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
                AggregateAddress, ComplexAddress };

    llvm::Value *Value;
    Kind K;
    saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}

  public:
    static bool needsSaving(RValue value);
    static saved_type save(CodeGenFunction &CGF, RValue value);
    RValue restore(CodeGenFunction &CGF);

    // implementations in CGExprCXX.cpp
  };

  // The DominatingValue interface simply forwards to saved_type.
  static bool needsSaving(type value) {
    return saved_type::needsSaving(value);
  }
  static saved_type save(CodeGenFunction &CGF, type value) {
    return saved_type::save(CGF, value);
  }
  static type restore(CodeGenFunction &CGF, saved_type value) {
    return value.restore(CGF);
  }
};
2793 
2794 }  // end namespace CodeGen
2795 }  // end namespace clang
2796 
2797 #endif
2798