//===---- CGObjC.cpp - Emit LLVM Code for Objective-C --------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Objective-C code as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGDebugInfo.h"
15 #include "CGObjCRuntime.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/ASTContext.h"
20 #include "clang/AST/DeclObjC.h"
21 #include "clang/AST/StmtObjC.h"
22 #include "clang/Basic/Diagnostic.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/Target/TargetData.h"
25 #include "llvm/InlineAsm.h"
26 using namespace clang;
27 using namespace CodeGen;
28 
29 typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
30 static TryEmitResult
31 tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
32 
33 /// Given the address of a variable of pointer type, find the correct
34 /// null to store into it.
35 static llvm::Constant *getNullForVariable(llvm::Value *addr) {
36   llvm::Type *type =
37     cast<llvm::PointerType>(addr->getType())->getElementType();
38   return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
39 }
40 
41 /// Emits an instance of NSConstantString representing the object.
42 llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
43 {
44   llvm::Constant *C =
45       CGM.getObjCRuntime().GenerateConstantString(E->getString());
46   // FIXME: This bitcast should just be made an invariant on the Runtime.
47   return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
48 }
49 
50 /// Emit a selector.
51 llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
52   // Untyped selector.
53   // Note that this implementation allows for non-constant strings to be passed
54   // as arguments to @selector().  Currently, the only thing preventing this
55   // behaviour is the type checking in the front end.
56   return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
57 }
58 
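/// Emit a reference to an Objective-C protocol object (@protocol expression).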
59 llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
60   // FIXME: This should pass the Decl not the name.
61   return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
62 }
63 
64 /// \brief Adjust the type of the result of an Objective-C message send
65 /// expression when the method has a related result type.
66 static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
67                                       const Expr *E,
68                                       const ObjCMethodDecl *Method,
69                                       RValue Result) {
70   if (!Method)
71     return Result;
72 
73   if (!Method->hasRelatedResultType() ||
74       CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
75       !Result.isScalar())
76     return Result;
77 
78   // We have applied a related result type. Cast the rvalue appropriately.
79   return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
80                                                CGF.ConvertType(E->getType())));
81 }
82 
83 /// Decide whether to extend the lifetime of the receiver of a
84 /// returns-inner-pointer message.
85 static bool
86 shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
87   switch (message->getReceiverKind()) {
88 
89   // For a normal instance message, we should extend unless the
90   // receiver is loaded from a variable with precise lifetime.
91   case ObjCMessageExpr::Instance: {
92     const Expr *receiver = message->getInstanceReceiver();
93     const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
94     if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
95     receiver = ice->getSubExpr()->IgnoreParens();
96 
97     // Only __strong variables.
98     if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
99       return true;
100 
101     // All ivars and fields have precise lifetime.
102     if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
103       return false;
104 
105     // Otherwise, check for variables.
106     const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
107     if (!declRef) return true;
108     const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
109     if (!var) return true;
110 
111     // All variables have precise lifetime except local variables with
112     // automatic storage duration that aren't specially marked.
113     return (var->hasLocalStorage() &&
114             !var->hasAttr<ObjCPreciseLifetimeAttr>());
115   }
116 
117   case ObjCMessageExpr::Class:
118   case ObjCMessageExpr::SuperClass:
119     // It's never necessary for class objects.
120     return false;
121 
122   case ObjCMessageExpr::SuperInstance:
123     // We generally assume that 'self' lives throughout a method call.
124     return false;
125   }
126 
127   llvm_unreachable("invalid receiver kind");
128 }
129 
130 RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
131                                             ReturnValueSlot Return) {
132   // Only the lookup mechanism and first two arguments of the method
133   // implementation vary between runtimes.  We can get the receiver and
134   // arguments in generic code.
135 
136   bool isDelegateInit = E->isDelegateInitCall();
137 
138   const ObjCMethodDecl *method = E->getMethodDecl();
139 
140   // We don't retain the receiver in delegate init calls, and this is
141   // safe because the receiver value is always loaded from 'self',
142   // which we zero out.  We don't want to Block_copy block receivers,
143   // though.
144   bool retainSelf =
145     (!isDelegateInit &&
146      CGM.getLangOptions().ObjCAutoRefCount &&
147      method &&
148      method->hasAttr<NSConsumesSelfAttr>());
149 
150   CGObjCRuntime &Runtime = CGM.getObjCRuntime();
151   bool isSuperMessage = false;
152   bool isClassMessage = false;
153   ObjCInterfaceDecl *OID = 0;
154   // Find the receiver
155   QualType ReceiverType;
156   llvm::Value *Receiver = 0;
157   switch (E->getReceiverKind()) {
158   case ObjCMessageExpr::Instance:
159     ReceiverType = E->getInstanceReceiver()->getType();
160     if (retainSelf) {
161       TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
162                                                    E->getInstanceReceiver());
163       Receiver = ter.getPointer();
164       if (ter.getInt()) retainSelf = false;
165     } else
166       Receiver = EmitScalarExpr(E->getInstanceReceiver());
167     break;
168 
169   case ObjCMessageExpr::Class: {
170     ReceiverType = E->getClassReceiver();
171     const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
172     assert(ObjTy && "Invalid Objective-C class message send");
173     OID = ObjTy->getInterface();
174     assert(OID && "Invalid Objective-C class message send");
175     Receiver = Runtime.GetClass(Builder, OID);
176     isClassMessage = true;
177     break;
178   }
179 
180   case ObjCMessageExpr::SuperInstance:
181     ReceiverType = E->getSuperType();
182     Receiver = LoadObjCSelf();
183     isSuperMessage = true;
184     break;
185 
186   case ObjCMessageExpr::SuperClass:
187     ReceiverType = E->getSuperType();
188     Receiver = LoadObjCSelf();
189     isSuperMessage = true;
190     isClassMessage = true;
191     break;
192   }
193 
194   if (retainSelf)
195     Receiver = EmitARCRetainNonBlock(Receiver);
196 
197   // In ARC, we sometimes want to "extend the lifetime"
198   // (i.e. retain+autorelease) of receivers of returns-inner-pointer
199   // messages.
200   if (getLangOptions().ObjCAutoRefCount && method &&
201       method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
202       shouldExtendReceiverForInnerPointerMessage(E))
203     Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
204 
205   QualType ResultType =
206     method ? method->getResultType() : E->getType();
207 
208   CallArgList Args;
209   EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());
210 
211   // For delegate init calls in ARC, do an unsafe store of null into
212   // self.  This represents the call taking direct ownership of that
213   // value.  We have to do this after emitting the other call
214   // arguments because they might also reference self, but we don't
215   // have to worry about any of them modifying self because that would
216   // be an undefined read and write of an object in unordered
217   // expressions.
218   if (isDelegateInit) {
219     assert(getLangOptions().ObjCAutoRefCount &&
220            "delegate init calls should only be marked in ARC");
221 
222     // Do an unsafe store of null into self.
223     llvm::Value *selfAddr =
224       LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
225     assert(selfAddr && "no self entry for a delegate init call?");
226 
227     Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
228   }
229 
230   RValue result;
231   if (isSuperMessage) {
232     // super is only valid in an Objective-C method
233     const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
234     bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
235     result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
236                                               E->getSelector(),
237                                               OMD->getClassInterface(),
238                                               isCategoryImpl,
239                                               Receiver,
240                                               isClassMessage,
241                                               Args,
242                                               method);
243   } else {
244     result = Runtime.GenerateMessageSend(*this, Return, ResultType,
245                                          E->getSelector(),
246                                          Receiver, Args, OID,
247                                          method);
248   }
249 
250   // For delegate init calls in ARC, implicitly store the result of
251   // the call back into self.  This takes ownership of the value.
252   if (isDelegateInit) {
253     llvm::Value *selfAddr =
254       LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
255     llvm::Value *newSelf = result.getScalarVal();
256 
257     // The delegate return type isn't necessarily a matching type; in
258     // fact, it's quite likely to be 'id'.
259     llvm::Type *selfTy =
260       cast<llvm::PointerType>(selfAddr->getType())->getElementType();
261     newSelf = Builder.CreateBitCast(newSelf, selfTy);
262 
263     Builder.CreateStore(newSelf, selfAddr);
264   }
265 
266   return AdjustRelatedResultType(*this, E, method, result);
267 }
268 
269 namespace {
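/// A cleanup that sends [super dealloc] when an ARC -dealloc method exits;
/// under ARC the user's code is not permitted to send that message itself.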
270 struct FinishARCDealloc : EHScopeStack::Cleanup {
271   void Emit(CodeGenFunction &CGF, Flags flags) {
272     const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
273 
274     const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
275     const ObjCInterfaceDecl *iface = impl->getClassInterface();
276     if (!iface->getSuperClass()) return;
277 
278     bool isCategory = isa<ObjCCategoryImplDecl>(impl);
279 
280     // Call [super dealloc] if we have a superclass.
281     llvm::Value *self = CGF.LoadObjCSelf();
282 
283     CallArgList args;
284     CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
285                                                       CGF.getContext().VoidTy,
286                                                       method->getSelector(),
287                                                       iface,
288                                                       isCategory,
289                                                       self,
290                                                       /*is class msg*/ false,
291                                                       args,
292                                                       method);
293   }
294 };
295 }
296 
297 /// StartObjCMethod - Begin emission of an ObjCMethod. This generates
298 /// the LLVM function and sets the other context used by
299 /// CodeGenFunction.
300 void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
301                                       const ObjCContainerDecl *CD,
302                                       SourceLocation StartLoc) {
303   FunctionArgList args;
304   // Check if we should generate debug info for this method.
305   if (CGM.getModuleDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
306     DebugInfo = CGM.getModuleDebugInfo();
307 
308   llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
309 
310   const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
311   CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
312 
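  // The implicit 'self' and '_cmd' arguments come before the method's
  // formal parameters.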
313   args.push_back(OMD->getSelfDecl());
314   args.push_back(OMD->getCmdDecl());
315 
316   for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
317        E = OMD->param_end(); PI != E; ++PI)
318     args.push_back(*PI);
319 
320   CurGD = OMD;
321 
322   StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
323 
324   // In ARC, certain methods get an extra cleanup.
325   if (CGM.getLangOptions().ObjCAutoRefCount &&
326       OMD->isInstanceMethod() &&
327       OMD->getSelector().isUnarySelector()) {
328     const IdentifierInfo *ident =
329       OMD->getSelector().getIdentifierInfoForSlot(0);
330     if (ident->isStr("dealloc"))
331       EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
332   }
333 }
334 
335 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
336                                               LValue lvalue, QualType type);
337 
338 /// Generate an Objective-C method.  An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
340 void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
341   StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
342   EmitStmt(OMD->getBody());
343   FinishFunction(OMD->getBodyRBrace());
344 }
345 
346 /// emitStructGetterCall - Call the runtime function to load a property
347 /// into the return value slot.
348 static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
349                                  bool isAtomic, bool hasStrong) {
350   ASTContext &Context = CGF.getContext();
351 
352   llvm::Value *src =
353     CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
354                           ivar, 0).getAddress();
355 
356   // objc_copyStruct (ReturnValue, &structIvar,
357   //                  sizeof (Type of Ivar), isAtomic, false);
358   CallArgList args;
359 
360   llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
361   args.add(RValue::get(dest), Context.VoidPtrTy);
362 
363   src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
364   args.add(RValue::get(src), Context.VoidPtrTy);
365 
366   CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
367   args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
368   args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
369   args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
370 
371   llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
372   CGF.EmitCall(CGF.getTypes().getFunctionInfo(Context.VoidTy, args,
373                                               FunctionType::ExtInfo()),
374                fn, ReturnValueSlot(), args);
375 }
376 
377 /// Determine whether the given architecture supports unaligned atomic
378 /// accesses.  They don't have to be fast, just faster than a function
379 /// call and a mutex.
380 static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
381   // FIXME: Allow unaligned atomic load/store on x86.  (It is not
382   // currently supported by the backend.)
  return false;
384 }
385 
386 /// Return the maximum size that permits atomic accesses for the given
387 /// architecture.
388 static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
389                                         llvm::Triple::ArchType arch) {
390   // ARM has 8-byte atomic accesses, but it's not clear whether we
391   // want to rely on them here.
392 
393   // In the default case, just assume that any size up to a pointer is
394   // fine given adequate alignment.
395   return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
396 }
397 
398 namespace {
399   class PropertyImplStrategy {
400   public:
401     enum StrategyKind {
402       /// The 'native' strategy is to use the architecture's provided
403       /// reads and writes.
404       Native,
405 
406       /// Use objc_setProperty and objc_getProperty.
407       GetSetProperty,
408 
409       /// Use objc_setProperty for the setter, but use expression
410       /// evaluation for the getter.
411       SetPropertyAndExpressionGet,
412 
413       /// Use objc_copyStruct.
414       CopyStruct,
415 
416       /// The 'expression' strategy is to emit normal assignment or
417       /// lvalue-to-rvalue expressions.
418       Expression
419     };
420 
421     StrategyKind getKind() const { return StrategyKind(Kind); }
422 
423     bool hasStrongMember() const { return HasStrong; }
424     bool isAtomic() const { return IsAtomic; }
425     bool isCopy() const { return IsCopy; }
426 
427     CharUnits getIvarSize() const { return IvarSize; }
428     CharUnits getIvarAlignment() const { return IvarAlignment; }
429 
430     PropertyImplStrategy(CodeGenModule &CGM,
431                          const ObjCPropertyImplDecl *propImpl);
432 
433   private:
434     unsigned Kind : 8;
435     unsigned IsAtomic : 1;
436     unsigned IsCopy : 1;
437     unsigned HasStrong : 1;
438 
439     CharUnits IvarSize;
440     CharUnits IvarAlignment;
441   };
442 }
443 
/// Pick an implementation strategy for the given property synthesis.
445 PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
446                                      const ObjCPropertyImplDecl *propImpl) {
447   const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
448   ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
449 
450   IsCopy = (setterKind == ObjCPropertyDecl::Copy);
451   IsAtomic = prop->isAtomic();
452   HasStrong = false; // doesn't matter here.
453 
454   // Evaluate the ivar's size and alignment.
455   ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
456   QualType ivarType = ivar->getType();
457   llvm::tie(IvarSize, IvarAlignment)
458     = CGM.getContext().getTypeInfoInChars(ivarType);
459 
460   // If we have a copy property, we always have to use getProperty/setProperty.
461   // TODO: we could actually use setProperty and an expression for non-atomics.
462   if (IsCopy) {
463     Kind = GetSetProperty;
464     return;
465   }
466 
467   // Handle retain.
468   if (setterKind == ObjCPropertyDecl::Retain) {
469     // In GC-only, there's nothing special that needs to be done.
470     if (CGM.getLangOptions().getGC() == LangOptions::GCOnly) {
471       // fallthrough
472 
473     // In ARC, if the property is non-atomic, use expression emission,
474     // which translates to objc_storeStrong.  This isn't required, but
475     // it's slightly nicer.
476     } else if (CGM.getLangOptions().ObjCAutoRefCount && !IsAtomic) {
477       Kind = Expression;
478       return;
479 
480     // Otherwise, we need to at least use setProperty.  However, if
481     // the property isn't atomic, we can use normal expression
482     // emission for the getter.
483     } else if (!IsAtomic) {
484       Kind = SetPropertyAndExpressionGet;
485       return;
486 
487     // Otherwise, we have to use both setProperty and getProperty.
488     } else {
489       Kind = GetSetProperty;
490       return;
491     }
492   }
493 
494   // If we're not atomic, just use expression accesses.
495   if (!IsAtomic) {
496     Kind = Expression;
497     return;
498   }
499 
500   // Properties on bitfield ivars need to be emitted using expression
501   // accesses even if they're nominally atomic.
502   if (ivar->isBitField()) {
503     Kind = Expression;
504     return;
505   }
506 
507   // GC-qualified or ARC-qualified ivars need to be emitted as
508   // expressions.  This actually works out to being atomic anyway,
509   // except for ARC __strong, but that should trigger the above code.
510   if (ivarType.hasNonTrivialObjCLifetime() ||
511       (CGM.getLangOptions().getGC() &&
512        CGM.getContext().getObjCGCAttrKind(ivarType))) {
513     Kind = Expression;
514     return;
515   }
516 
517   // Compute whether the ivar has strong members.
518   if (CGM.getLangOptions().getGC())
519     if (const RecordType *recordType = ivarType->getAs<RecordType>())
520       HasStrong = recordType->getDecl()->hasObjectMember();
521 
522   // We can never access structs with object members with a native
523   // access, because we need to use write barriers.  This is what
524   // objc_copyStruct is for.
525   if (HasStrong) {
526     Kind = CopyStruct;
527     return;
528   }
529 
530   // Otherwise, this is target-dependent and based on the size and
531   // alignment of the ivar.
532 
533   // If the size of the ivar is not a power of two, give up.  We don't
534   // want to get into the business of doing compare-and-swaps.
535   if (!IvarSize.isPowerOfTwo()) {
536     Kind = CopyStruct;
537     return;
538   }
539 
540   llvm::Triple::ArchType arch =
541     CGM.getContext().getTargetInfo().getTriple().getArch();
542 
543   // Most architectures require memory to fit within a single cache
544   // line, so the alignment has to be at least the size of the access.
545   // Otherwise we have to grab a lock.
546   if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
547     Kind = CopyStruct;
548     return;
549   }
550 
551   // If the ivar's size exceeds the architecture's maximum atomic
552   // access size, we have to use CopyStruct.
553   if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
554     Kind = CopyStruct;
555     return;
556   }
557 
558   // Otherwise, we can use native loads and stores.
559   Kind = Native;
560 }
561 
562 /// GenerateObjCGetter - Generate an Objective-C property getter
563 /// function. The given Decl must be an ObjCImplementationDecl. @synthesize
564 /// is illegal within a category.
565 void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
566                                          const ObjCPropertyImplDecl *PID) {
567   llvm::Constant *AtomicHelperFn =
568     GenerateObjCAtomicGetterCopyHelperFunction(PID);
569   const ObjCPropertyDecl *PD = PID->getPropertyDecl();
570   ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
571   assert(OMD && "Invalid call to generate getter (empty method)");
572   StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
573 
574   generateObjCGetterBody(IMP, PID, AtomicHelperFn);
575 
576   FinishFunction();
577 }
578 
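/// Return true if the getter for this property implementation does not need
/// to run non-trivial C++ code to copy the ivar into the return slot.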
579 static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
580   const Expr *getter = propImpl->getGetterCXXConstructor();
581   if (!getter) return true;
582 
  // Sema only makes one of these when the ivar has a C++ class type,
584   // so the form is pretty constrained.
585 
586   // If the property has a reference type, we might just be binding a
587   // reference, in which case the result will be a gl-value.  We should
588   // treat this as a non-trivial operation.
589   if (getter->isGLValue())
590     return false;
591 
592   // If we selected a trivial copy-constructor, we're okay.
593   if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
594     return (construct->getConstructor()->isTrivial());
595 
596   // The constructor might require cleanups (in which case it's never
597   // trivial).
598   assert(isa<ExprWithCleanups>(getter));
599   return false;
600 }
601 
602 /// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
604 static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
605                                           llvm::Value *returnAddr,
606                                           ObjCIvarDecl *ivar,
607                                           llvm::Constant *AtomicHelperFn) {
608   // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
609   //                           AtomicHelperFn);
610   CallArgList args;
611 
  // The first argument is the return slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the ivar.
616   llvm::Value *ivarAddr =
617   CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
618                         CGF.LoadObjCSelf(), ivar, 0).getAddress();
619   ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
620   args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
621 
  // The third argument is the helper function.
623   args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
624 
625   llvm::Value *copyCppAtomicObjectFn =
626   CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
627   CGF.EmitCall(CGF.getTypes().getFunctionInfo(CGF.getContext().VoidTy, args,
628                                               FunctionType::ExtInfo()),
629                copyCppAtomicObjectFn, ReturnValueSlot(), args);
630 }
631 
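/// Emit the body of a synthesized property getter using the strategy chosen
/// by PropertyImplStrategy.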
632 void
633 CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
634                                         const ObjCPropertyImplDecl *propImpl,
635                                         llvm::Constant *AtomicHelperFn) {
636   // If there's a non-trivial 'get' expression, we just have to emit that.
637   if (!hasTrivialGetExpr(propImpl)) {
638     if (!AtomicHelperFn) {
639       ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
640                      /*nrvo*/ 0);
641       EmitReturnStmt(ret);
642     }
643     else {
644       ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
645       emitCPPObjectAtomicGetterCall(*this, ReturnValue,
646                                     ivar, AtomicHelperFn);
647     }
648     return;
649   }
650 
651   const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
652   QualType propType = prop->getType();
653   ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
654 
655   ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
656 
657   // Pick an implementation strategy.
658   PropertyImplStrategy strategy(CGM, propImpl);
659   switch (strategy.getKind()) {
660   case PropertyImplStrategy::Native: {
661     LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
662 
663     // Currently, all atomic accesses have to be through integer
664     // types, so there's no point in trying to pick a prettier type.
665     llvm::Type *bitcastType =
666       llvm::Type::getIntNTy(getLLVMContext(),
667                             getContext().toBits(strategy.getIvarSize()));
668     bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
669 
670     // Perform an atomic load.  This does not impose ordering constraints.
671     llvm::Value *ivarAddr = LV.getAddress();
672     ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
673     llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
674     load->setAlignment(strategy.getIvarAlignment().getQuantity());
675     load->setAtomic(llvm::Unordered);
676 
677     // Store that value into the return address.  Doing this with a
678     // bitcast is likely to produce some pretty ugly IR, but it's not
679     // the *most* terrible thing in the world.
680     Builder.CreateStore(load, Builder.CreateBitCast(ReturnValue, bitcastType));
681 
682     // Make sure we don't do an autorelease.
683     AutoreleaseResult = false;
684     return;
685   }
686 
687   case PropertyImplStrategy::GetSetProperty: {
688     llvm::Value *getPropertyFn =
689       CGM.getObjCRuntime().GetPropertyGetFunction();
690     if (!getPropertyFn) {
691       CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
692       return;
693     }
694 
695     // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
696     // FIXME: Can't this be simpler? This might even be worse than the
697     // corresponding gcc code.
698     llvm::Value *cmd =
699       Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
700     llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
701     llvm::Value *ivarOffset =
702       EmitIvarOffset(classImpl->getClassInterface(), ivar);
703 
704     CallArgList args;
705     args.add(RValue::get(self), getContext().getObjCIdType());
706     args.add(RValue::get(cmd), getContext().getObjCSelType());
707     args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
708     args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
709              getContext().BoolTy);
710 
711     // FIXME: We shouldn't need to get the function info here, the
712     // runtime already should have computed it to build the function.
713     RValue RV = EmitCall(getTypes().getFunctionInfo(propType, args,
714                                                     FunctionType::ExtInfo()),
715                          getPropertyFn, ReturnValueSlot(), args);
716 
717     // We need to fix the type here. Ivars with copy & retain are
718     // always objects so we don't need to worry about complex or
719     // aggregates.
720     RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
721                                            getTypes().ConvertType(propType)));
722 
723     EmitReturnOfRValue(RV, propType);
724 
725     // objc_getProperty does an autorelease, so we should suppress ours.
726     AutoreleaseResult = false;
727 
728     return;
729   }
730 
731   case PropertyImplStrategy::CopyStruct:
732     emitStructGetterCall(*this, ivar, strategy.isAtomic(),
733                          strategy.hasStrongMember());
734     return;
735 
736   case PropertyImplStrategy::Expression:
737   case PropertyImplStrategy::SetPropertyAndExpressionGet: {
738     LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
739 
740     QualType ivarType = ivar->getType();
741     if (ivarType->isAnyComplexType()) {
742       ComplexPairTy pair = LoadComplexFromAddr(LV.getAddress(),
743                                                LV.isVolatileQualified());
744       StoreComplexToAddr(pair, ReturnValue, LV.isVolatileQualified());
745     } else if (hasAggregateLLVMType(ivarType)) {
746       // The return value slot is guaranteed to not be aliased, but
747       // that's not necessarily the same as "on the stack", so
748       // we still potentially need objc_memmove_collectable.
749       EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
750     } else {
751       llvm::Value *value;
752       if (propType->isReferenceType()) {
753         value = LV.getAddress();
754       } else {
755         // We want to load and autoreleaseReturnValue ARC __weak ivars.
756         if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
757           value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
758 
759         // Otherwise we want to do a simple load, suppressing the
760         // final autorelease.
761         } else {
762           value = EmitLoadOfLValue(LV).getScalarVal();
763           AutoreleaseResult = false;
764         }
765 
766         value = Builder.CreateBitCast(value, ConvertType(propType));
767       }
768 
769       EmitReturnOfRValue(RValue::get(value), propType);
770     }
771     return;
772   }
773 
774   }
775   llvm_unreachable("bad @property implementation strategy!");
776 }
777 
778 /// emitStructSetterCall - Call the runtime function to store the value
779 /// from the first formal parameter into the given ivar.
780 static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
781                                  ObjCIvarDecl *ivar) {
782   // objc_copyStruct (&structIvar, &Arg,
783   //                  sizeof (struct something), true, false);
784   CallArgList args;
785 
786   // The first argument is the address of the ivar.
787   llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
788                                                 CGF.LoadObjCSelf(), ivar, 0)
789     .getAddress();
790   ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
791   args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
792 
793   // The second argument is the address of the parameter variable.
794   ParmVarDecl *argVar = *OMD->param_begin();
795   DeclRefExpr argRef(argVar, argVar->getType().getNonReferenceType(),
796                      VK_LValue, SourceLocation());
797   llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
798   argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
799   args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
800 
  // The third argument is the size of the ivar's type.
802   llvm::Value *size =
803     CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
804   args.add(RValue::get(size), CGF.getContext().getSizeType());
805 
806   // The fourth argument is the 'isAtomic' flag.
807   args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
808 
809   // The fifth argument is the 'hasStrong' flag.
810   // FIXME: should this really always be false?
811   args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
812 
813   llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
814   CGF.EmitCall(CGF.getTypes().getFunctionInfo(CGF.getContext().VoidTy, args,
815                                               FunctionType::ExtInfo()),
816                copyStructFn, ReturnValueSlot(), args);
817 }
818 
819 /// emitCPPObjectAtomicSetterCall - Call the runtime function to store
820 /// the value from the first formal parameter into the given ivar, using
821 /// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
822 static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
823                                           ObjCMethodDecl *OMD,
824                                           ObjCIvarDecl *ivar,
825                                           llvm::Constant *AtomicHelperFn) {
826   // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
827   //                           AtomicHelperFn);
828   CallArgList args;
829 
830   // The first argument is the address of the ivar.
831   llvm::Value *ivarAddr =
832     CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
833                           CGF.LoadObjCSelf(), ivar, 0).getAddress();
834   ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
835   args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
836 
837   // The second argument is the address of the parameter variable.
838   ParmVarDecl *argVar = *OMD->param_begin();
839   DeclRefExpr argRef(argVar, argVar->getType().getNonReferenceType(),
840                      VK_LValue, SourceLocation());
841   llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
842   argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
843   args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
844 
  // The third argument is the helper function.
846   args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
847 
848   llvm::Value *copyCppAtomicObjectFn =
849     CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
850   CGF.EmitCall(CGF.getTypes().getFunctionInfo(CGF.getContext().VoidTy, args,
851                                               FunctionType::ExtInfo()),
852                copyCppAtomicObjectFn, ReturnValueSlot(), args);
}
857 
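/// Return true if the setter for this property implementation does not need
/// to run non-trivial C++ code to assign the new value into the ivar.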
858 static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
859   Expr *setter = PID->getSetterCXXAssignment();
860   if (!setter) return true;
861 
  // Sema only makes one of these when the ivar has a C++ class type,
863   // so the form is pretty constrained.
864 
865   // An operator call is trivial if the function it calls is trivial.
866   // This also implies that there's nothing non-trivial going on with
867   // the arguments, because operator= can only be trivial if it's a
868   // synthesized assignment operator and therefore both parameters are
869   // references.
870   if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
871     if (const FunctionDecl *callee
872           = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
873       if (callee->isTrivial())
874         return true;
875     return false;
876   }
877 
878   assert(isa<ExprWithCleanups>(setter));
879   return false;
880 }
881 
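/// Emit the body of a synthesized property setter using the strategy chosen
/// by PropertyImplStrategy.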
882 void
883 CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
884                                         const ObjCPropertyImplDecl *propImpl,
885                                         llvm::Constant *AtomicHelperFn) {
886   const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
887   ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
888   ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
889 
890   // Just use the setter expression if Sema gave us one and it's
891   // non-trivial.
892   if (!hasTrivialSetExpr(propImpl)) {
893     if (!AtomicHelperFn)
894       // If non-atomic, assignment is called directly.
895       EmitStmt(propImpl->getSetterCXXAssignment());
896     else
      // If atomic, assignment is called via a locking API.
898       emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
899                                     AtomicHelperFn);
900     return;
901   }
902 
903   PropertyImplStrategy strategy(CGM, propImpl);
904   switch (strategy.getKind()) {
905   case PropertyImplStrategy::Native: {
906     llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
907 
908     LValue ivarLValue =
909       EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
910     llvm::Value *ivarAddr = ivarLValue.getAddress();
911 
912     // Currently, all atomic accesses have to be through integer
913     // types, so there's no point in trying to pick a prettier type.
914     llvm::Type *bitcastType =
915       llvm::Type::getIntNTy(getLLVMContext(),
916                             getContext().toBits(strategy.getIvarSize()));
917     bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
918 
919     // Cast both arguments to the chosen operation type.
920     argAddr = Builder.CreateBitCast(argAddr, bitcastType);
921     ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
922 
923     // This bitcast load is likely to cause some nasty IR.
924     llvm::Value *load = Builder.CreateLoad(argAddr);
925 
926     // Perform an atomic store.  There are no memory ordering requirements.
927     llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
928     store->setAlignment(strategy.getIvarAlignment().getQuantity());
929     store->setAtomic(llvm::Unordered);
930     return;
931   }
932 
933   case PropertyImplStrategy::GetSetProperty:
934   case PropertyImplStrategy::SetPropertyAndExpressionGet: {
935     llvm::Value *setPropertyFn =
936       CGM.getObjCRuntime().GetPropertySetFunction();
937     if (!setPropertyFn) {
938       CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
939       return;
940     }
941 
942     // Emit objc_setProperty((id) self, _cmd, offset, arg,
943     //                       <is-atomic>, <is-copy>).
944     llvm::Value *cmd =
945       Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
946     llvm::Value *self =
947       Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
948     llvm::Value *ivarOffset =
949       EmitIvarOffset(classImpl->getClassInterface(), ivar);
950     llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
951     arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);
952 
953     CallArgList args;
954     args.add(RValue::get(self), getContext().getObjCIdType());
955     args.add(RValue::get(cmd), getContext().getObjCSelType());
956     args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
957     args.add(RValue::get(arg), getContext().getObjCIdType());
958     args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
959              getContext().BoolTy);
960     args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
961              getContext().BoolTy);
962     // FIXME: We shouldn't need to get the function info here, the runtime
963     // already should have computed it to build the function.
964     EmitCall(getTypes().getFunctionInfo(getContext().VoidTy, args,
965                                         FunctionType::ExtInfo()),
966              setPropertyFn, ReturnValueSlot(), args);
967     return;
968   }
969 
970   case PropertyImplStrategy::CopyStruct:
971     emitStructSetterCall(*this, setterMethod, ivar);
972     return;
973 
974   case PropertyImplStrategy::Expression:
975     break;
976   }
977 
978   // Otherwise, fake up some ASTs and emit a normal assignment.
979   ValueDecl *selfDecl = setterMethod->getSelfDecl();
980   DeclRefExpr self(selfDecl, selfDecl->getType(), VK_LValue, SourceLocation());
981   ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
982                             selfDecl->getType(), CK_LValueToRValue, &self,
983                             VK_RValue);
984   ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
985                           SourceLocation(), &selfLoad, true, true);
986 
987   ParmVarDecl *argDecl = *setterMethod->param_begin();
988   QualType argType = argDecl->getType().getNonReferenceType();
989   DeclRefExpr arg(argDecl, argType, VK_LValue, SourceLocation());
990   ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
991                            argType.getUnqualifiedType(), CK_LValueToRValue,
992                            &arg, VK_RValue);
993 
  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bit cast the RHS in these cases.
996   // The following absurdity is just to ensure well-formed IR.
997   CastKind argCK = CK_NoOp;
998   if (ivarRef.getType()->isObjCObjectPointerType()) {
999     if (argLoad.getType()->isObjCObjectPointerType())
1000       argCK = CK_BitCast;
1001     else if (argLoad.getType()->isBlockPointerType())
1002       argCK = CK_BlockPointerToObjCPointerCast;
1003     else
1004       argCK = CK_CPointerToObjCPointerCast;
1005   } else if (ivarRef.getType()->isBlockPointerType()) {
1006      if (argLoad.getType()->isBlockPointerType())
1007       argCK = CK_BitCast;
1008     else
1009       argCK = CK_AnyPointerToBlockPointerCast;
1010   } else if (ivarRef.getType()->isPointerType()) {
1011     argCK = CK_BitCast;
1012   }
1013   ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
1014                            ivarRef.getType(), argCK, &argLoad,
1015                            VK_RValue);
1016   Expr *finalArg = &argLoad;
1017   if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
1018                                            argLoad.getType()))
    finalArg = &argCast;

1022   BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
1023                         ivarRef.getType(), VK_RValue, OK_Ordinary,
1024                         SourceLocation());
1025   EmitStmt(&assign);
1026 }
1027 
1028 /// GenerateObjCSetter - Generate an Objective-C property setter
1029 /// function. The given Decl must be an ObjCImplementationDecl. @synthesize
1030 /// is illegal within a category.
1031 void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
1032                                          const ObjCPropertyImplDecl *PID) {
1033   llvm::Constant *AtomicHelperFn =
1034     GenerateObjCAtomicSetterCopyHelperFunction(PID);
1035   const ObjCPropertyDecl *PD = PID->getPropertyDecl();
1036   ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
1037   assert(OMD && "Invalid call to generate setter (empty method)");
1038   StartObjCMethod(OMD, IMP->getClassInterface(), PID->getLocStart());
1039 
1040   generateObjCSetterBody(IMP, PID, AtomicHelperFn);
1041 
1042   FinishFunction();
1043 }
1044 
1045 namespace {
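  /// A cleanup that destroys a single ivar of 'self' with the given destroyer.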
1046   struct DestroyIvar : EHScopeStack::Cleanup {
1047   private:
1048     llvm::Value *addr;
1049     const ObjCIvarDecl *ivar;
1050     CodeGenFunction::Destroyer *destroyer;
1051     bool useEHCleanupForArray;
1052   public:
1053     DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
1054                 CodeGenFunction::Destroyer *destroyer,
1055                 bool useEHCleanupForArray)
1056       : addr(addr), ivar(ivar), destroyer(destroyer),
1057         useEHCleanupForArray(useEHCleanupForArray) {}
1058 
1059     void Emit(CodeGenFunction &CGF, Flags flags) {
1060       LValue lvalue
1061         = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
1062       CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
1063                       flags.isForNormalCleanup() && useEHCleanupForArray);
1064     }
1065   };
1066 }
1067 
1068 /// Like CodeGenFunction::destroyARCStrong, but do it with a call.
1069 static void destroyARCStrongWithStore(CodeGenFunction &CGF,
1070                                       llvm::Value *addr,
1071                                       QualType type) {
1072   llvm::Value *null = getNullForVariable(addr);
1073   CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
1074 }
1075 
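/// Emit the body of a .cxx_destruct method: push a cleanup for every ivar
/// whose type requires non-trivial destruction.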
1076 static void emitCXXDestructMethod(CodeGenFunction &CGF,
1077                                   ObjCImplementationDecl *impl) {
1078   CodeGenFunction::RunCleanupsScope scope(CGF);
1079 
1080   llvm::Value *self = CGF.LoadObjCSelf();
1081 
1082   const ObjCInterfaceDecl *iface = impl->getClassInterface();
1083   for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
1084        ivar; ivar = ivar->getNextIvar()) {
1085     QualType type = ivar->getType();
1086 
1087     // Check whether the ivar is a destructible type.
1088     QualType::DestructionKind dtorKind = type.isDestructedType();
1089     if (!dtorKind) continue;
1090 
1091     CodeGenFunction::Destroyer *destroyer = 0;
1092 
1093     // Use a call to objc_storeStrong to destroy strong ivars, for the
1094     // general benefit of the tools.
1095     if (dtorKind == QualType::DK_objc_strong_lifetime) {
1096       destroyer = destroyARCStrongWithStore;
1097 
1098     // Otherwise use the default for the destruction kind.
1099     } else {
1100       destroyer = CGF.getDestroyer(dtorKind);
1101     }
1102 
1103     CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
1104 
1105     CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
1106                                          cleanupKind & EHCleanup);
1107   }
1108 
1109   assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
1110 }
1111 
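/// Generate the implicit .cxx_construct (ctor == true) or .cxx_destruct
/// (ctor == false) method, which runs the C++ constructors or destructors
/// for the class's ivars.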
1112 void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1113                                                  ObjCMethodDecl *MD,
1114                                                  bool ctor) {
1115   MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
1116   StartObjCMethod(MD, IMP->getClassInterface(), MD->getLocStart());
1117 
1118   // Emit .cxx_construct.
1119   if (ctor) {
1120     // Suppress the final autorelease in ARC.
1121     AutoreleaseResult = false;
1122 
1123     SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
1124     for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
1125            E = IMP->init_end(); B != E; ++B) {
1126       CXXCtorInitializer *IvarInit = (*B);
1127       FieldDecl *Field = IvarInit->getAnyMember();
1128       ObjCIvarDecl  *Ivar = cast<ObjCIvarDecl>(Field);
1129       LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
1130                                     LoadObjCSelf(), Ivar, 0);
1131       EmitAggExpr(IvarInit->getInit(),
1132                   AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
1133                                           AggValueSlot::DoesNotNeedGCBarriers,
1134                                           AggValueSlot::IsNotAliased));
1135     }
1136     // constructor returns 'self'.
1137     CodeGenTypes &Types = CGM.getTypes();
1138     QualType IdTy(CGM.getContext().getObjCIdType());
1139     llvm::Value *SelfAsId =
1140       Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
1141     EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
1142 
1143   // Emit .cxx_destruct.
1144   } else {
1145     emitCXXDestructMethod(*this, IMP);
1146   }
1147   FinishFunction();
1148 }
1149 
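/// Return true if the value argument of a property setter is passed
/// indirectly according to the given function's ABI information.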
1150 bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
1151   CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
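  // Skip the implicit 'self' and '_cmd' arguments.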
1152   it++; it++;
1153   const ABIArgInfo &AI = it->info;
  // FIXME: Is this a sufficient check?
1155   return (AI.getKind() == ABIArgInfo::Indirect);
1156 }
1157 
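/// Return true if, under GC, the given ivar type is a record that contains
/// Objective-C object pointer members.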
1158 bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
1159   if (CGM.getLangOptions().getGC() == LangOptions::NonGC)
1160     return false;
1161   if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
1162     return FDTTy->getDecl()->hasObjectMember();
1163   return false;
1164 }
1165 
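/// Load the value of the 'self' argument of the current Objective-C method.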
1166 llvm::Value *CodeGenFunction::LoadObjCSelf() {
1167   const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
1168   return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
1169 }
1170 
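/// Return the type of the object that 'self' points to in the current
/// Objective-C method.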
1171 QualType CodeGenFunction::TypeOfSelfObject() {
1172   const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
1173   ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
1174   const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
1175     getContext().getCanonicalType(selfDecl->getType()));
1176   return PTy->getPointeeType();
1177 }
1178 
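/// Emit an Objective-C fast enumeration (for..in) statement.  The collection
/// is asked for batches of elements via
/// countByEnumeratingWithState:objects:count:, the loop body is run once per
/// element, and the collection's mutation count is checked on every pass.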
1179 void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
1180   llvm::Constant *EnumerationMutationFn =
1181     CGM.getObjCRuntime().EnumerationMutationFunction();
1182 
1183   if (!EnumerationMutationFn) {
1184     CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
1185     return;
1186   }
1187 
1188   CGDebugInfo *DI = getDebugInfo();
1189   if (DI)
1190     DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
1191 
1192   // The local variable comes into scope immediately.
1193   AutoVarEmission variable = AutoVarEmission::invalid();
1194   if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
1195     variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
1196 
1197   JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
1198 
1199   // Fast enumeration state.
1200   QualType StateTy = CGM.getObjCFastEnumerationStateType();
1201   llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
1202   EmitNullInitialization(StatePtr, StateTy);
1203 
1204   // Number of elements in the items array.
1205   static const unsigned NumItems = 16;
1206 
1207   // Fetch the countByEnumeratingWithState:objects:count: selector.
1208   IdentifierInfo *II[] = {
1209     &CGM.getContext().Idents.get("countByEnumeratingWithState"),
1210     &CGM.getContext().Idents.get("objects"),
1211     &CGM.getContext().Idents.get("count")
1212   };
1213   Selector FastEnumSel =
1214     CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
1215 
1216   QualType ItemsTy =
1217     getContext().getConstantArrayType(getContext().getObjCIdType(),
1218                                       llvm::APInt(32, NumItems),
1219                                       ArrayType::Normal, 0);
1220   llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
1221 
1222   // Emit the collection pointer.  In ARC, we do a retain.
1223   llvm::Value *Collection;
1224   if (getLangOptions().ObjCAutoRefCount) {
1225     Collection = EmitARCRetainScalarExpr(S.getCollection());
1226 
1227     // Enter a cleanup to do the release.
1228     EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
1229   } else {
1230     Collection = EmitScalarExpr(S.getCollection());
1231   }
1232 
1233   // The 'continue' label needs to appear within the cleanup for the
1234   // collection object.
1235   JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
1236 
1237   // Send it our message:
1238   CallArgList Args;
1239 
1240   // The first argument is a temporary of the enumeration-state type.
1241   Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
1242 
1243   // The second argument is a temporary array with space for NumItems
1244   // pointers.  We'll actually be loading elements from the array
1245   // pointer written into the control state; this buffer is so that
1246   // collections that *aren't* backed by arrays can still queue up
1247   // batches of elements.
1248   Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
1249 
1250   // The third argument is the capacity of that temporary array.
1251   llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
1252   llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
1253   Args.add(RValue::get(Count), getContext().UnsignedLongTy);
1254 
1255   // Start the enumeration.
1256   RValue CountRV =
1257     CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1258                                              getContext().UnsignedLongTy,
1259                                              FastEnumSel,
1260                                              Collection, Args);
1261 
1262   // The initial number of objects that were returned in the buffer.
1263   llvm::Value *initialBufferLimit = CountRV.getScalarVal();
1264 
1265   llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
1266   llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
1267 
1268   llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
1269 
  // If the limit was zero to begin with, the collection is
1271   // empty; skip all this.
1272   Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"),
1273                        EmptyBB, LoopInitBB);
1274 
1275   // Otherwise, initialize the loop.
1276   EmitBlock(LoopInitBB);
1277 
1278   // Save the initial mutations value.  This is the value at an
1279   // address that was written into the state object by
1280   // countByEnumeratingWithState:objects:count:.
1281   llvm::Value *StateMutationsPtrPtr =
1282     Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
1283   llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
1284                                                       "mutationsptr");
1285 
1286   llvm::Value *initialMutations =
1287     Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
1288 
1289   // Start looping.  This is the point we return to whenever we have a
1290   // fresh, non-empty batch of objects.
1291   llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
1292   EmitBlock(LoopBodyBB);
1293 
1294   // The current index into the buffer.
1295   llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
1296   index->addIncoming(zero, LoopInitBB);
1297 
1298   // The current buffer size.
1299   llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
1300   count->addIncoming(initialBufferLimit, LoopInitBB);
1301 
1302   // Check whether the mutations value has changed from where it was
1303   // at start.  StateMutationsPtr should actually be invariant between
1304   // refreshes.
1305   StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
1306   llvm::Value *currentMutations
1307     = Builder.CreateLoad(StateMutationsPtr, "statemutations");
1308 
1309   llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
1310   llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
1311 
1312   Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
1313                        WasNotMutatedBB, WasMutatedBB);
1314 
1315   // If so, call the enumeration-mutation function.
1316   EmitBlock(WasMutatedBB);
1317   llvm::Value *V =
1318     Builder.CreateBitCast(Collection,
1319                           ConvertType(getContext().getObjCIdType()));
1320   CallArgList Args2;
1321   Args2.add(RValue::get(V), getContext().getObjCIdType());
1322   // FIXME: We shouldn't need to get the function info here; the runtime
1323   // should already have computed it in order to build the function.
1324   EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
1325                                           FunctionType::ExtInfo()),
1326            EnumerationMutationFn, ReturnValueSlot(), Args2);
1327 
1328   // Otherwise, or if the mutation function returns, just continue.
1329   EmitBlock(WasNotMutatedBB);
1330 
1331   // Initialize the element variable.
1332   RunCleanupsScope elementVariableScope(*this);
1333   bool elementIsVariable;
1334   LValue elementLValue;
1335   QualType elementType;
1336   if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
1337     // Initialize the variable, in case it's a __block variable or something.
1338     EmitAutoVarInit(variable);
1339 
1340     const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
1341     DeclRefExpr tempDRE(const_cast<VarDecl*>(D), D->getType(),
1342                         VK_LValue, SourceLocation());
1343     elementLValue = EmitLValue(&tempDRE);
1344     elementType = D->getType();
1345     elementIsVariable = true;
1346 
1347     if (D->isARCPseudoStrong())
1348       elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
1349   } else {
1350     elementLValue = LValue(); // suppress warning
1351     elementType = cast<Expr>(S.getElement())->getType();
1352     elementIsVariable = false;
1353   }
1354   llvm::Type *convertedElementType = ConvertType(elementType);
1355 
1356   // Fetch the buffer out of the enumeration state.
1357   // TODO: this pointer should actually be invariant between
1358   // refreshes, which would help us do certain loop optimizations.
1359   llvm::Value *StateItemsPtr =
1360     Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
1361   llvm::Value *EnumStateItems =
1362     Builder.CreateLoad(StateItemsPtr, "stateitems");
1363 
1364   // Fetch the value at the current index from the buffer.
1365   llvm::Value *CurrentItemPtr =
1366     Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
1367   llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
1368 
1369   // Cast that value to the right type.
1370   CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
1371                                       "currentitem");
1372 
1373   // Make sure we have an l-value.  Yes, this gets evaluated every
1374   // time through the loop.
1375   if (!elementIsVariable) {
1376     elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1377     EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
1378   } else {
1379     EmitScalarInit(CurrentItem, elementLValue);
1380   }
1381 
1382   // If we do have an element variable, this assignment is the end of
1383   // its initialization.
1384   if (elementIsVariable)
1385     EmitAutoVarCleanups(variable);
1386 
1387   // Perform the loop body, setting up break and continue labels.
1388   BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
1389   {
1390     RunCleanupsScope Scope(*this);
1391     EmitStmt(S.getBody());
1392   }
1393   BreakContinueStack.pop_back();
1394 
1395   // Destroy the element variable now.
1396   elementVariableScope.ForceCleanup();
1397 
1398   // Check whether there are more elements.
1399   EmitBlock(AfterBody.getBlock());
1400 
1401   llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
1402 
1403   // First we check in the local buffer.
1404   llvm::Value *indexPlusOne
1405     = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
1406 
1407   // If we haven't overrun the buffer yet, we can continue.
1408   Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count),
1409                        LoopBodyBB, FetchMoreBB);
1410 
1411   index->addIncoming(indexPlusOne, AfterBody.getBlock());
1412   count->addIncoming(count, AfterBody.getBlock());
1413 
1414   // Otherwise, we have to fetch more elements.
1415   EmitBlock(FetchMoreBB);
1416 
1417   CountRV =
1418     CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1419                                              getContext().UnsignedLongTy,
1420                                              FastEnumSel,
1421                                              Collection, Args);
1422 
1423   // If we got a zero count, we're done.
1424   llvm::Value *refetchCount = CountRV.getScalarVal();
1425 
1426   // (note that the message send might split FetchMoreBB)
1427   index->addIncoming(zero, Builder.GetInsertBlock());
1428   count->addIncoming(refetchCount, Builder.GetInsertBlock());
1429 
1430   Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
1431                        EmptyBB, LoopBodyBB);
1432 
1433   // No more elements.
1434   EmitBlock(EmptyBB);
1435 
1436   if (!elementIsVariable) {
1437     // If the element was not a declaration, set it to be null.
1438 
1439     llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
1440     elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1441     EmitStoreThroughLValue(RValue::get(null), elementLValue);
1442   }
1443 
1444   if (DI)
1445     DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
1446 
1447   // Leave the cleanup we entered in ARC.
1448   if (getLangOptions().ObjCAutoRefCount)
1449     PopCleanupBlock();
1450 
1451   EmitBlock(LoopEnd.getBlock());
1452 }
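
// Illustrative shape of the code emitted above for a statement such as
// "for (id x in collection) { ... }" (block names match those created above;
// the exact IR depends on the runtime, the element declaration, and the
// optimization level):
//
//   forcoll.loopinit:   save the initial mutations value
//   forcoll.loopbody:   reload the mutations value; branch to forcoll.mutated
//                       (which calls the enumeration-mutation function) if it
//                       changed, else to forcoll.notmutated
//   forcoll.notmutated: load buffer[index], bind the element, run the body
//   (after the body):   if ++index < count, branch back to forcoll.loopbody;
//                       otherwise forcoll.refetch re-sends
//                       countByEnumeratingWithState:objects:count: and keeps
//                       looping unless it returns zero
//   forcoll.empty:      if the element wasn't a declaration, store null into
//                       its l-value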
1453 
1454 void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
1455   CGM.getObjCRuntime().EmitTryStmt(*this, S);
1456 }
1457 
1458 void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
1459   CGM.getObjCRuntime().EmitThrowStmt(*this, S);
1460 }
1461 
1462 void CodeGenFunction::EmitObjCAtSynchronizedStmt(
1463                                               const ObjCAtSynchronizedStmt &S) {
1464   CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
1465 }
1466 
1467 /// Produce the code for a CK_ARCProduceObject.  Just does a
1468 /// primitive retain.
1469 llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
1470                                                     llvm::Value *value) {
1471   return EmitARCRetain(type, value);
1472 }
1473 
1474 namespace {
1475   struct CallObjCRelease : EHScopeStack::Cleanup {
1476     CallObjCRelease(llvm::Value *object) : object(object) {}
1477     llvm::Value *object;
1478 
1479     void Emit(CodeGenFunction &CGF, Flags flags) {
1480       CGF.EmitARCRelease(object, /*precise*/ true);
1481     }
1482   };
1483 }
1484 
1485 /// Produce the code for a CK_ARCConsumeObject.  Does a primitive
1486 /// release at the end of the full-expression.
1487 llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
1488                                                     llvm::Value *object) {
1489   // If we're in a conditional branch, we need to make the cleanup
1490   // conditional.
1491   pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
1492   return object;
1493 }
1494 
1495 llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
1496                                                            llvm::Value *value) {
1497   return EmitARCRetainAutorelease(type, value);
1498 }
1499 
1500 
1501 static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
1502                                                 llvm::FunctionType *type,
1503                                                 StringRef fnName) {
1504   llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
1505 
1506   // If the target's Objective-C runtime does not provide the ARC entrypoints
1507   // natively, emit weak references to the runtime support library.
1508   if (!CGM.getCodeGenOpts().ObjCRuntimeHasARC)
1509     if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
1510       f->setLinkage(llvm::Function::ExternalWeakLinkage);
1511 
1512   return fn;
1513 }
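
// For example (schematically), if the deployment target's runtime lacks
// native ARC support, the first use of objc_retain ends up creating a
// declaration along the lines of:
//   declare extern_weak i8* @objc_retain(i8*)
// so the program links against a separate ARC support library instead.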
1514 
1515 /// Perform an operation having the signature
1516 ///   i8* (i8*)
1517 /// where a null input causes a no-op and returns null.
1518 static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
1519                                           llvm::Value *value,
1520                                           llvm::Constant *&fn,
1521                                           StringRef fnName) {
1522   if (isa<llvm::ConstantPointerNull>(value)) return value;
1523 
1524   if (!fn) {
1525     std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
1526     llvm::FunctionType *fnType =
1527       llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
1528     fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1529   }
1530 
1531   // Cast the argument to 'id'.
1532   llvm::Type *origType = value->getType();
1533   value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
1534 
1535   // Call the function.
1536   llvm::CallInst *call = CGF.Builder.CreateCall(fn, value);
1537   call->setDoesNotThrow();
1538 
1539   // Cast the result back to the original type.
1540   return CGF.Builder.CreateBitCast(call, origType);
1541 }
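
// Schematically, for a value of some Objective-C pointer type (written here
// as %T*, a placeholder), a call through this helper, say for objc_retain,
// produces:
//   %0 = bitcast %T* %value to i8*
//   %1 = call i8* @objc_retain(i8* %0)
//   %2 = bitcast i8* %1 to %T*
// with the call marked nounwind; the bitcasts fold away when the value
// already has type i8*.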
1542 
1543 /// Perform an operation having the following signature:
1544 ///   i8* (i8**)
1545 static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
1546                                          llvm::Value *addr,
1547                                          llvm::Constant *&fn,
1548                                          StringRef fnName) {
1549   if (!fn) {
1550     std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
1551     llvm::FunctionType *fnType =
1552       llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
1553     fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1554   }
1555 
1556   // Cast the argument to 'id*'.
1557   llvm::Type *origType = addr->getType();
1558   addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
1559 
1560   // Call the function.
1561   llvm::CallInst *call = CGF.Builder.CreateCall(fn, addr);
1562   call->setDoesNotThrow();
1563 
1564   // Cast the result back to a dereference of the original type.
1565   llvm::Value *result = call;
1566   if (origType != CGF.Int8PtrPtrTy)
1567     result = CGF.Builder.CreateBitCast(result,
1568                         cast<llvm::PointerType>(origType)->getElementType());
1569 
1570   return result;
1571 }
1572 
1573 /// Perform an operation having the following signature:
1574 ///   i8* (i8**, i8*)
1575 static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
1576                                           llvm::Value *addr,
1577                                           llvm::Value *value,
1578                                           llvm::Constant *&fn,
1579                                           StringRef fnName,
1580                                           bool ignored) {
1581   assert(cast<llvm::PointerType>(addr->getType())->getElementType()
1582            == value->getType());
1583 
1584   if (!fn) {
1585     llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
1586 
1587     llvm::FunctionType *fnType
1588       = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
1589     fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1590   }
1591 
1592   llvm::Type *origType = value->getType();
1593 
1594   addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
1595   value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
1596 
1597   llvm::CallInst *result = CGF.Builder.CreateCall2(fn, addr, value);
1598   result->setDoesNotThrow();
1599 
1600   if (ignored) return 0;
1601 
1602   return CGF.Builder.CreateBitCast(result, origType);
1603 }
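
// For example (schematically, with placeholder names), storing into a __weak
// variable through EmitARCStoreWeak below comes out as:
//   %0 = call i8* @objc_storeWeak(i8** %weakvar, i8* %newval)
// where the result is the stored value, or is dropped entirely when the
// caller passes ignored=true.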
1604 
1605 /// Perform an operation having the following signature:
1606 ///   void (i8**, i8**)
1607 static void emitARCCopyOperation(CodeGenFunction &CGF,
1608                                  llvm::Value *dst,
1609                                  llvm::Value *src,
1610                                  llvm::Constant *&fn,
1611                                  StringRef fnName) {
1612   assert(dst->getType() == src->getType());
1613 
1614   if (!fn) {
1615     std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
1616     llvm::FunctionType *fnType
1617       = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
1618     fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1619   }
1620 
1621   dst = CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy);
1622   src = CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy);
1623 
1624   llvm::CallInst *result = CGF.Builder.CreateCall2(fn, dst, src);
1625   result->setDoesNotThrow();
1626 }
1627 
1628 /// Produce the code to do a retain.  Based on the type, calls one of:
1629 ///   call i8* @objc_retain(i8* %value)
1630 ///   call i8* @objc_retainBlock(i8* %value)
1631 llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
1632   if (type->isBlockPointerType())
1633     return EmitARCRetainBlock(value, /*mandatory*/ false);
1634   else
1635     return EmitARCRetainNonBlock(value);
1636 }
1637 
1638 /// Retain the given object, with normal retain semantics.
1639 ///   call i8* @objc_retain(i8* %value)
1640 llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
1641   return emitARCValueOperation(*this, value,
1642                                CGM.getARCEntrypoints().objc_retain,
1643                                "objc_retain");
1644 }
1645 
1646 /// Retain the given block, with _Block_copy semantics.
1647 ///   call i8* @objc_retainBlock(i8* %value)
1648 ///
1649 /// \param mandatory - If false, emit the call with metadata
1650 /// indicating that it's okay for the optimizer to eliminate this call
1651 /// if it can prove that the block never escapes except down the stack.
1652 llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
1653                                                  bool mandatory) {
1654   llvm::Value *result
1655     = emitARCValueOperation(*this, value,
1656                             CGM.getARCEntrypoints().objc_retainBlock,
1657                             "objc_retainBlock");
1658 
1659   // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
1660   // tell the optimizer that it doesn't need to do this copy if the
1661   // block doesn't escape, where being passed as an argument doesn't
1662   // count as escaping.
1663   if (!mandatory && isa<llvm::Instruction>(result)) {
1664     llvm::CallInst *call
1665       = cast<llvm::CallInst>(result->stripPointerCasts());
1666     assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
1667 
1668     SmallVector<llvm::Value*,1> args;
1669     call->setMetadata("clang.arc.copy_on_escape",
1670                       llvm::MDNode::get(Builder.getContext(), args));
1671   }
1672 
1673   return result;
1674 }
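
// When the copy is not mandatory, the emitted call is tagged with an empty
// !clang.arc.copy_on_escape metadata node, roughly:
//   %0 = call i8* @objc_retainBlock(i8* %block), !clang.arc.copy_on_escape !0
// letting the ARC optimizer drop the copy for blocks that never escape
// except down the stack.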
1675 
1676 /// Retain the given object which is the result of a function call.
1677 ///   call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
1678 ///
1679 /// Yes, this function name is one character away from a different
1680 /// call with completely different semantics.
1681 llvm::Value *
1682 CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
1683   // Fetch the void(void) inline asm which marks that we're going to
1684   // retain the autoreleased return value.
1685   llvm::InlineAsm *&marker
1686     = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
1687   if (!marker) {
1688     StringRef assembly
1689       = CGM.getTargetCodeGenInfo()
1690            .getARCRetainAutoreleasedReturnValueMarker();
1691 
1692     // If we have an empty assembly string, there's nothing to do.
1693     if (assembly.empty()) {
1694 
1695     // Otherwise, at -O0, build an inline asm that we're going to call
1696     // in a moment.
1697     } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
1698       llvm::FunctionType *type =
1699         llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
1700                                 /*variadic*/ false);
1701 
1702       marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
1703 
1704     // If we're at -O1 and above, we don't want to litter the code
1705     // with this marker yet, so leave a breadcrumb for the ARC
1706     // optimizer to pick up.
1707     } else {
1708       llvm::NamedMDNode *metadata =
1709         CGM.getModule().getOrInsertNamedMetadata(
1710                             "clang.arc.retainAutoreleasedReturnValueMarker");
1711       assert(metadata->getNumOperands() <= 1);
1712       if (metadata->getNumOperands() == 0) {
1713         llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
1714         metadata->addOperand(llvm::MDNode::get(getLLVMContext(), string));
1715       }
1716     }
1717   }
1718 
1719   // Call the marker asm if we made one, which we do only at -O0.
1720   if (marker) Builder.CreateCall(marker);
1721 
1722   return emitARCValueOperation(*this, value,
1723                      CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
1724                                "objc_retainAutoreleasedReturnValue");
1725 }
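
// Schematically, at -O0 on a target that defines a marker string, the result
// of a call (@foo here is a placeholder) is retained like so, with the asm
// text being target-specific:
//   %call = call i8* @foo()
//   call void asm sideeffect "<marker>", ""()
//   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
// At -O1 and above, only the clang.arc.retainAutoreleasedReturnValueMarker
// named metadata is emitted here; the ARC passes can reintroduce the marker
// later where it is still needed.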
1726 
1727 /// Release the given object.
1728 ///   call void @objc_release(i8* %value)
1729 void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
1730   if (isa<llvm::ConstantPointerNull>(value)) return;
1731 
1732   llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
1733   if (!fn) {
1734     std::vector<llvm::Type*> args(1, Int8PtrTy);
1735     llvm::FunctionType *fnType =
1736       llvm::FunctionType::get(Builder.getVoidTy(), args, false);
1737     fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
1738   }
1739 
1740   // Cast the argument to 'id'.
1741   value = Builder.CreateBitCast(value, Int8PtrTy);
1742 
1743   // Call objc_release.
1744   llvm::CallInst *call = Builder.CreateCall(fn, value);
1745   call->setDoesNotThrow();
1746 
1747   if (!precise) {
1748     SmallVector<llvm::Value*,1> args;
1749     call->setMetadata("clang.imprecise_release",
1750                       llvm::MDNode::get(Builder.getContext(), args));
1751   }
1752 }
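
// For a non-precise release the call is annotated, roughly:
//   call void @objc_release(i8* %value), !clang.imprecise_release !0
// which gives the ARC optimizer more freedom to move or eliminate the
// release when it can pair it with a retain.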
1753 
1754 /// Store into a strong object.  Always calls this:
1755 ///   call void @objc_storeStrong(i8** %addr, i8* %value)
1756 llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
1757                                                      llvm::Value *value,
1758                                                      bool ignored) {
1759   assert(cast<llvm::PointerType>(addr->getType())->getElementType()
1760            == value->getType());
1761 
1762   llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
1763   if (!fn) {
1764     llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
1765     llvm::FunctionType *fnType
1766       = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
1767     fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
1768   }
1769 
1770   addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
1771   llvm::Value *castValue = Builder.CreateBitCast(value, Int8PtrTy);
1772 
1773   Builder.CreateCall2(fn, addr, castValue)->setDoesNotThrow();
1774 
1775   if (ignored) return 0;
1776   return value;
1777 }
1778 
1779 /// Store into a strong object.  Sometimes calls this:
1780 ///   call void @objc_storeStrong(i8** %addr, i8* %value)
1781 /// Other times, breaks it down into components.
1782 llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
1783                                                  llvm::Value *newValue,
1784                                                  bool ignored) {
1785   QualType type = dst.getType();
1786   bool isBlock = type->isBlockPointerType();
1787 
1788   // Use the fused objc_storeStrong call at -O0 unless this is a block type
1789   // or the l-value is inadequately aligned.
1790   if (shouldUseFusedARCCalls() &&
1791       !isBlock &&
1792       (dst.getAlignment().isZero() ||
1793        dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
1794     return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
1795   }
1796 
1797   // Otherwise, split it out.
1798 
1799   // Retain the new value.
1800   newValue = EmitARCRetain(type, newValue);
1801 
1802   // Read the old value.
1803   llvm::Value *oldValue = EmitLoadOfScalar(dst);
1804 
1805   // Store.  We do this before the release so that any deallocs won't
1806   // see the old value.
1807   EmitStoreOfScalar(newValue, dst);
1808 
1809   // Finally, release the old value.
1810   EmitARCRelease(oldValue, /*precise*/ false);
1811 
1812   return newValue;
1813 }
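
// Illustrative lowering of a __strong store such as "x = y;" under ARC:
//   fused form (typically at -O0):
//     call void @objc_storeStrong(i8** %x, i8* %y)
//   expanded form:
//     %new = call i8* @objc_retain(i8* %y)
//     %old = load i8** %x
//     store i8* %new, i8** %x
//     call void @objc_release(i8* %old)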
1814 
1815 /// Autorelease the given object.
1816 ///   call i8* @objc_autorelease(i8* %value)
1817 llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
1818   return emitARCValueOperation(*this, value,
1819                                CGM.getARCEntrypoints().objc_autorelease,
1820                                "objc_autorelease");
1821 }
1822 
1823 /// Autorelease the given object.
1824 ///   call i8* @objc_autoreleaseReturnValue(i8* %value)
1825 llvm::Value *
1826 CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
1827   return emitARCValueOperation(*this, value,
1828                             CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
1829                                "objc_autoreleaseReturnValue");
1830 }
1831 
1832 /// Do a fused retain/autorelease of the given object.
1833 ///   call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
1834 llvm::Value *
1835 CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
1836   return emitARCValueOperation(*this, value,
1837                      CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
1838                                "objc_retainAutoreleaseReturnValue");
1839 }
1840 
1841 /// Do a fused retain/autorelease of the given object.
1842 ///   call i8* @objc_retainAutorelease(i8* %value)
1843 /// or
1844 ///   %retain = call i8* @objc_retainBlock(i8* %value)
1845 ///   call i8* @objc_autorelease(i8* %retain)
1846 llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
1847                                                        llvm::Value *value) {
1848   if (!type->isBlockPointerType())
1849     return EmitARCRetainAutoreleaseNonBlock(value);
1850 
1851   if (isa<llvm::ConstantPointerNull>(value)) return value;
1852 
1853   llvm::Type *origType = value->getType();
1854   value = Builder.CreateBitCast(value, Int8PtrTy);
1855   value = EmitARCRetainBlock(value, /*mandatory*/ true);
1856   value = EmitARCAutorelease(value);
1857   return Builder.CreateBitCast(value, origType);
1858 }
1859 
1860 /// Do a fused retain/autorelease of the given object.
1861 ///   call i8* @objc_retainAutorelease(i8* %value)
1862 llvm::Value *
1863 CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
1864   return emitARCValueOperation(*this, value,
1865                                CGM.getARCEntrypoints().objc_retainAutorelease,
1866                                "objc_retainAutorelease");
1867 }
1868 
1869 /// i8* @objc_loadWeak(i8** %addr)
1870 /// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
1871 llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
1872   return emitARCLoadOperation(*this, addr,
1873                               CGM.getARCEntrypoints().objc_loadWeak,
1874                               "objc_loadWeak");
1875 }
1876 
1877 /// i8* @objc_loadWeakRetained(i8** %addr)
1878 llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
1879   return emitARCLoadOperation(*this, addr,
1880                               CGM.getARCEntrypoints().objc_loadWeakRetained,
1881                               "objc_loadWeakRetained");
1882 }
1883 
1884 /// i8* @objc_storeWeak(i8** %addr, i8* %value)
1885 /// Returns %value.
1886 llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
1887                                                llvm::Value *value,
1888                                                bool ignored) {
1889   return emitARCStoreOperation(*this, addr, value,
1890                                CGM.getARCEntrypoints().objc_storeWeak,
1891                                "objc_storeWeak", ignored);
1892 }
1893 
1894 /// i8* @objc_initWeak(i8** %addr, i8* %value)
1895 /// Returns %value.  %addr is known to not have a current weak entry.
1896 /// Essentially equivalent to:
1897 ///   *addr = nil; objc_storeWeak(addr, value);
1898 void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
1899   // If we're initializing to null, just write null to memory; no need
1900   // to get the runtime involved.  But don't do this if optimization
1901   // is enabled, because accounting for this would make the optimizer
1902   // much more complicated.
1903   if (isa<llvm::ConstantPointerNull>(value) &&
1904       CGM.getCodeGenOpts().OptimizationLevel == 0) {
1905     Builder.CreateStore(value, addr);
1906     return;
1907   }
1908 
1909   emitARCStoreOperation(*this, addr, value,
1910                         CGM.getARCEntrypoints().objc_initWeak,
1911                         "objc_initWeak", /*ignored*/ true);
1912 }
1913 
1914 /// void @objc_destroyWeak(i8** %addr)
1915 /// Essentially objc_storeWeak(addr, nil).
1916 void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
1917   llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
1918   if (!fn) {
1919     std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
1920     llvm::FunctionType *fnType =
1921       llvm::FunctionType::get(Builder.getVoidTy(), args, false);
1922     fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
1923   }
1924 
1925   // Cast the argument to 'id*'.
1926   addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
1927 
1928   llvm::CallInst *call = Builder.CreateCall(fn, addr);
1929   call->setDoesNotThrow();
1930 }
1931 
1932 /// void @objc_moveWeak(i8** %dest, i8** %src)
1933 /// Disregards the current value in %dest.  Leaves %src pointing to nothing.
1934 /// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
1935 void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
1936   emitARCCopyOperation(*this, dst, src,
1937                        CGM.getARCEntrypoints().objc_moveWeak,
1938                        "objc_moveWeak");
1939 }
1940 
1941 /// void @objc_copyWeak(i8** %dest, i8** %src)
1942 /// Disregards the current value in %dest.  Essentially
1943 ///   objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
1944 void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
1945   emitARCCopyOperation(*this, dst, src,
1946                        CGM.getARCEntrypoints().objc_copyWeak,
1947                        "objc_copyWeak");
1948 }
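
// Taken together, the weak entrypoints above give a lowering along these
// lines for a __weak local (illustrative only; w, x, and y are placeholders):
//   __weak id w = x;   // call i8* @objc_initWeak(i8** %w, i8* %x)
//   use(w);            // call i8* @objc_loadWeakRetained(i8** %w) or
//                      //   @objc_loadWeak(i8** %w), depending on context
//   w = y;             // call i8* @objc_storeWeak(i8** %w, i8* %y)
//                      // call void @objc_destroyWeak(i8** %w) at scope exit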
1949 
1950 /// Produce the code to do an objc_autoreleasePoolPush:
1951 ///   call i8* @objc_autoreleasePoolPush()
1952 llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
1953   llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
1954   if (!fn) {
1955     llvm::FunctionType *fnType =
1956       llvm::FunctionType::get(Int8PtrTy, false);
1957     fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
1958   }
1959 
1960   llvm::CallInst *call = Builder.CreateCall(fn);
1961   call->setDoesNotThrow();
1962 
1963   return call;
1964 }
1965 
1966 /// Produce the code to pop an autorelease pool:
1967 ///   call void @objc_autoreleasePoolPop(i8* %ptr)
1968 void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
1969   assert(value->getType() == Int8PtrTy);
1970 
1971   llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
1972   if (!fn) {
1973     std::vector<llvm::Type*> args(1, Int8PtrTy);
1974     llvm::FunctionType *fnType =
1975       llvm::FunctionType::get(Builder.getVoidTy(), args, false);
1976 
1977     // We don't want to use a weak import here; instead we should not
1978     // fall into this path.
1979     fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
1980   }
1981 
1982   llvm::CallInst *call = Builder.CreateCall(fn, value);
1983   call->setDoesNotThrow();
1984 }
1985 
1986 /// Produce the code for the MRR version of an autorelease-pool push,
1987 /// which is: [[NSAutoreleasePool alloc] init];
1988 /// where alloc is declared as + (id)alloc; on the NSAutoreleasePool class
1989 /// and init is declared as - (id)init; on its NSObject superclass.
1990 ///
1991 llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
1992   CGObjCRuntime &Runtime = CGM.getObjCRuntime();
1993   llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(Builder);
1994   // [NSAutoreleasePool alloc]
1995   IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
1996   Selector AllocSel = getContext().Selectors.getSelector(0, &II);
1997   CallArgList Args;
1998   RValue AllocRV =
1999     Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2000                                 getContext().getObjCIdType(),
2001                                 AllocSel, Receiver, Args);
2002 
2003   // [Receiver init]
2004   Receiver = AllocRV.getScalarVal();
2005   II = &CGM.getContext().Idents.get("init");
2006   Selector InitSel = getContext().Selectors.getSelector(0, &II);
2007   RValue InitRV =
2008     Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2009                                 getContext().getObjCIdType(),
2010                                 InitSel, Receiver, Args);
2011   return InitRV.getScalarVal();
2012 }
2013 
2014 /// Produce the code to pop an MRR autorelease pool via:
2015 /// [tmp drain];
2016 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
2017   IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
2018   Selector DrainSel = getContext().Selectors.getSelector(0, &II);
2019   CallArgList Args;
2020   CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
2021                               getContext().VoidTy, DrainSel, Arg, Args);
2022 }
2023 
2024 void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
2025                                               llvm::Value *addr,
2026                                               QualType type) {
2027   llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
2028   CGF.EmitARCRelease(ptr, /*precise*/ true);
2029 }
2030 
2031 void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
2032                                                 llvm::Value *addr,
2033                                                 QualType type) {
2034   llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
2035   CGF.EmitARCRelease(ptr, /*precise*/ false);
2036 }
2037 
2038 void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
2039                                      llvm::Value *addr,
2040                                      QualType type) {
2041   CGF.EmitARCDestroyWeak(addr);
2042 }
2043 
2044 namespace {
2045   struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
2046     llvm::Value *Token;
2047 
2048     CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
2049 
2050     void Emit(CodeGenFunction &CGF, Flags flags) {
2051       CGF.EmitObjCAutoreleasePoolPop(Token);
2052     }
2053   };
2054   struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
2055     llvm::Value *Token;
2056 
2057     CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
2058 
2059     void Emit(CodeGenFunction &CGF, Flags flags) {
2060       CGF.EmitObjCMRRAutoreleasePoolPop(Token);
2061     }
2062   };
2063 }
2064 
2065 void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
2066   if (CGM.getLangOptions().ObjCAutoRefCount)
2067     EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
2068   else
2069     EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
2070 }
2071 
2072 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2073                                                   LValue lvalue,
2074                                                   QualType type) {
2075   switch (type.getObjCLifetime()) {
2076   case Qualifiers::OCL_None:
2077   case Qualifiers::OCL_ExplicitNone:
2078   case Qualifiers::OCL_Strong:
2079   case Qualifiers::OCL_Autoreleasing:
2080     return TryEmitResult(CGF.EmitLoadOfLValue(lvalue).getScalarVal(),
2081                          false);
2082 
2083   case Qualifiers::OCL_Weak:
2084     return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
2085                          true);
2086   }
2087 
2088   llvm_unreachable("impossible lifetime!");
2089 }
2090 
2091 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2092                                                   const Expr *e) {
2093   e = e->IgnoreParens();
2094   QualType type = e->getType();
2095 
2096   // If we're loading retained from a __strong xvalue, we can avoid
2097   // an extra retain/release pair by zeroing out the source of this
2098   // "move" operation.
2099   if (e->isXValue() &&
2100       !type.isConstQualified() &&
2101       type.getObjCLifetime() == Qualifiers::OCL_Strong) {
2102     // Emit the lvalue.
2103     LValue lv = CGF.EmitLValue(e);
2104 
2105     // Load the object pointer.
2106     llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
2107 
2108     // Set the source pointer to NULL.
2109     CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
2110 
2111     return TryEmitResult(result, true);
2112   }
2113 
2114   // As a very special optimization, in ARC++, if the l-value is the
2115   // result of a non-volatile assignment, do a simple retain of the
2116   // result of the call to objc_storeWeak instead of reloading.
2117   if (CGF.getLangOptions().CPlusPlus &&
2118       !type.isVolatileQualified() &&
2119       type.getObjCLifetime() == Qualifiers::OCL_Weak &&
2120       isa<BinaryOperator>(e) &&
2121       cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
2122     return TryEmitResult(CGF.EmitScalarExpr(e), false);
2123 
2124   return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
2125 }
2126 
2127 static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
2128                                            llvm::Value *value);
2129 
2130 /// Given an expression that is some sort of call (which does not
2131 /// return retained), emit a retain following it.
2132 static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
2133   llvm::Value *value = CGF.EmitScalarExpr(e);
2134   return emitARCRetainAfterCall(CGF, value);
2135 }
2136 
2137 static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
2138                                            llvm::Value *value) {
2139   if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
2140     CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
2141 
2142     // Place the retain immediately following the call.
2143     CGF.Builder.SetInsertPoint(call->getParent(),
2144                                ++llvm::BasicBlock::iterator(call));
2145     value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
2146 
2147     CGF.Builder.restoreIP(ip);
2148     return value;
2149   } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
2150     CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
2151 
2152     // Place the retain at the beginning of the normal destination block.
2153     llvm::BasicBlock *BB = invoke->getNormalDest();
2154     CGF.Builder.SetInsertPoint(BB, BB->begin());
2155     value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
2156 
2157     CGF.Builder.restoreIP(ip);
2158     return value;
2159 
2160   // Bitcasts can arise because of related-result returns.  Rewrite
2161   // the operand.
2162   } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
2163     llvm::Value *operand = bitcast->getOperand(0);
2164     operand = emitARCRetainAfterCall(CGF, operand);
2165     bitcast->setOperand(0, operand);
2166     return bitcast;
2167 
2168   // Generic fall-back case.
2169   } else {
2170     // Retain using the non-block variant: we never need to do a copy
2171     // of a block that's been returned to us.
2172     return CGF.EmitARCRetainNonBlock(value);
2173   }
2174 }
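
// The insert-point juggling above exists because
// objc_retainAutoreleasedReturnValue works best when it immediately follows
// the call whose result it adopts (again, @foo is a placeholder):
//   %call = call i8* @foo()
//   %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
// For an invoke, the retain is placed at the top of the normal destination
// block instead.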
2175 
2176 /// Determine whether it might be important to emit a separate
2177 /// objc_retainBlock on the result of the given expression, or
2178 /// whether it's okay to just emit it in a +1 context.
2179 static bool shouldEmitSeparateBlockRetain(const Expr *e) {
2180   assert(e->getType()->isBlockPointerType());
2181   e = e->IgnoreParens();
2182 
2183   // For future goodness, emit block expressions directly in +1
2184   // contexts if we can.
2185   if (isa<BlockExpr>(e))
2186     return false;
2187 
2188   if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
2189     switch (cast->getCastKind()) {
2190     // Emitting these operations in +1 contexts is goodness.
2191     case CK_LValueToRValue:
2192     case CK_ARCReclaimReturnedObject:
2193     case CK_ARCConsumeObject:
2194     case CK_ARCProduceObject:
2195       return false;
2196 
2197     // These operations preserve a block type.
2198     case CK_NoOp:
2199     case CK_BitCast:
2200       return shouldEmitSeparateBlockRetain(cast->getSubExpr());
2201 
2202     // These operations are known to be bad (or haven't been considered).
2203     case CK_AnyPointerToBlockPointerCast:
2204     default:
2205       return true;
2206     }
2207   }
2208 
2209   return true;
2210 }
2211 
2212 /// Try to emit a PseudoObjectExpr at +1.
2213 ///
2214 /// This massively duplicates emitPseudoObjectRValue.
2215 static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
2216                                                   const PseudoObjectExpr *E) {
2217   llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
2218 
2219   // Find the result expression.
2220   const Expr *resultExpr = E->getResultExpr();
2221   assert(resultExpr);
2222   TryEmitResult result;
2223 
2224   for (PseudoObjectExpr::const_semantics_iterator
2225          i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
2226     const Expr *semantic = *i;
2227 
2228     // If this semantic expression is an opaque value, bind it
2229     // to the result of its source expression.
2230     if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
2231       typedef CodeGenFunction::OpaqueValueMappingData OVMA;
2232       OVMA opaqueData;
2233 
2234       // If this semantic is the result of the pseudo-object
2235       // expression, try to evaluate the source as +1.
2236       if (ov == resultExpr) {
2237         assert(!OVMA::shouldBindAsLValue(ov));
2238         result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
2239         opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
2240 
2241       // Otherwise, just bind it.
2242       } else {
2243         opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
2244       }
2245       opaques.push_back(opaqueData);
2246 
2247     // Otherwise, if the expression is the result, evaluate it
2248     // and remember the result.
2249     } else if (semantic == resultExpr) {
2250       result = tryEmitARCRetainScalarExpr(CGF, semantic);
2251 
2252     // Otherwise, evaluate the expression in an ignored context.
2253     } else {
2254       CGF.EmitIgnoredExpr(semantic);
2255     }
2256   }
2257 
2258   // Unbind all the opaques now.
2259   for (unsigned i = 0, e = opaques.size(); i != e; ++i)
2260     opaques[i].unbind(CGF);
2261 
2262   return result;
2263 }
2264 
2265 static TryEmitResult
2266 tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
2267   // Look through cleanups.
2268   if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
2269     CGF.enterFullExpression(cleanups);
2270     CodeGenFunction::RunCleanupsScope scope(CGF);
2271     return tryEmitARCRetainScalarExpr(CGF, cleanups->getSubExpr());
2272   }
2273 
2274   // The desired result type, if it differs from the type of the
2275   // ultimate opaque expression.
2276   llvm::Type *resultType = 0;
2277 
2278   while (true) {
2279     e = e->IgnoreParens();
2280 
2281     // There's a break at the end of this if-chain;  anything
2282     // that wants to keep looping has to explicitly continue.
2283     if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
2284       switch (ce->getCastKind()) {
2285       // No-op casts don't change the type, so we just ignore them.
2286       case CK_NoOp:
2287         e = ce->getSubExpr();
2288         continue;
2289 
2290       case CK_LValueToRValue: {
2291         TryEmitResult loadResult
2292           = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
2293         if (resultType) {
2294           llvm::Value *value = loadResult.getPointer();
2295           value = CGF.Builder.CreateBitCast(value, resultType);
2296           loadResult.setPointer(value);
2297         }
2298         return loadResult;
2299       }
2300 
2301       // These casts can change the type, so remember that and
2302       // soldier on.  We only need to remember the outermost such
2303       // cast, though.
2304       case CK_CPointerToObjCPointerCast:
2305       case CK_BlockPointerToObjCPointerCast:
2306       case CK_AnyPointerToBlockPointerCast:
2307       case CK_BitCast:
2308         if (!resultType)
2309           resultType = CGF.ConvertType(ce->getType());
2310         e = ce->getSubExpr();
2311         assert(e->getType()->hasPointerRepresentation());
2312         continue;
2313 
2314       // For consumptions, just emit the subexpression and thus elide
2315       // the retain/release pair.
2316       case CK_ARCConsumeObject: {
2317         llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
2318         if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2319         return TryEmitResult(result, true);
2320       }
2321 
2322       // Block extends are net +0.  Naively, we could just recurse on
2323       // the subexpression, but actually we need to ensure that the
2324       // value is copied as a block, so there's a little filter here.
2325       case CK_ARCExtendBlockObject: {
2326         llvm::Value *result; // will be a +0 value
2327 
2328         // If we can't safely assume the sub-expression will produce a
2329         // block-copied value, emit the sub-expression at +0.
2330         if (shouldEmitSeparateBlockRetain(ce->getSubExpr())) {
2331           result = CGF.EmitScalarExpr(ce->getSubExpr());
2332 
2333         // Otherwise, try to emit the sub-expression at +1 recursively.
2334         } else {
2335           TryEmitResult subresult
2336             = tryEmitARCRetainScalarExpr(CGF, ce->getSubExpr());
2337           result = subresult.getPointer();
2338 
2339           // If that produced a retained value, just use that,
2340           // possibly casting down.
2341           if (subresult.getInt()) {
2342             if (resultType)
2343               result = CGF.Builder.CreateBitCast(result, resultType);
2344             return TryEmitResult(result, true);
2345           }
2346 
2347           // Otherwise it's +0.
2348         }
2349 
2350         // Retain the object as a block, then cast down.
2351         result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
2352         if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2353         return TryEmitResult(result, true);
2354       }
2355 
2356       // For reclaims, emit the subexpression as a retained call and
2357       // skip the consumption.
2358       case CK_ARCReclaimReturnedObject: {
2359         llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
2360         if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2361         return TryEmitResult(result, true);
2362       }
2363 
2364       default:
2365         break;
2366       }
2367 
2368     // Skip __extension__.
2369     } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
2370       if (op->getOpcode() == UO_Extension) {
2371         e = op->getSubExpr();
2372         continue;
2373       }
2374 
2375     // For calls and message sends, use the retained-call logic.
2376     // Delegate inits are a special case in that they're the only
2377     // returns-retained expression that *isn't* surrounded by
2378     // a consume.
2379     } else if (isa<CallExpr>(e) ||
2380                (isa<ObjCMessageExpr>(e) &&
2381                 !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
2382       llvm::Value *result = emitARCRetainCall(CGF, e);
2383       if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2384       return TryEmitResult(result, true);
2385 
2386     // Look through pseudo-object expressions.
2387     } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
2388       TryEmitResult result
2389         = tryEmitARCRetainPseudoObject(CGF, pseudo);
2390       if (resultType) {
2391         llvm::Value *value = result.getPointer();
2392         value = CGF.Builder.CreateBitCast(value, resultType);
2393         result.setPointer(value);
2394       }
2395       return result;
2396     }
2397 
2398     // Conservatively halt the search at any other expression kind.
2399     break;
2400   }
2401 
2402   // We didn't find an obvious production, so emit what we've got and
2403   // tell the caller that we didn't manage to retain.
2404   llvm::Value *result = CGF.EmitScalarExpr(e);
2405   if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2406   return TryEmitResult(result, false);
2407 }
2408 
2409 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2410                                                 LValue lvalue,
2411                                                 QualType type) {
2412   TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
2413   llvm::Value *value = result.getPointer();
2414   if (!result.getInt())
2415     value = CGF.EmitARCRetain(type, value);
2416   return value;
2417 }
2418 
2419 /// EmitARCRetainScalarExpr - Semantically equivalent to
2420 /// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
2421 /// best-effort attempt to peephole expressions that naturally produce
2422 /// retained objects.
2423 llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
2424   TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
2425   llvm::Value *value = result.getPointer();
2426   if (!result.getInt())
2427     value = EmitARCRetain(e->getType(), value);
2428   return value;
2429 }
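
// For example, initializing a __strong variable with "id x = [obj m];" does
// not emit a separate objc_retain of the message result here: the peephole
// above recognizes the message send and retains it with
// objc_retainAutoreleasedReturnValue immediately after the call instead
// (see emitARCRetainCall).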
2430 
2431 llvm::Value *
2432 CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
2433   TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
2434   llvm::Value *value = result.getPointer();
2435   if (result.getInt())
2436     value = EmitARCAutorelease(value);
2437   else
2438     value = EmitARCRetainAutorelease(e->getType(), value);
2439   return value;
2440 }
2441 
2442 llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
2443   llvm::Value *result;
2444   bool doRetain;
2445 
2446   if (shouldEmitSeparateBlockRetain(e)) {
2447     result = EmitScalarExpr(e);
2448     doRetain = true;
2449   } else {
2450     TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
2451     result = subresult.getPointer();
2452     doRetain = !subresult.getInt();
2453   }
2454 
2455   if (doRetain)
2456     result = EmitARCRetainBlock(result, /*mandatory*/ true);
2457   return EmitObjCConsumeObject(e->getType(), result);
2458 }
2459 
2460 llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
2461   // In ARC, retain and autorelease the expression.
2462   if (getLangOptions().ObjCAutoRefCount) {
2463     // Do so before running any cleanups for the full-expression.
2464     // tryEmitARCRetainScalarExpr does make an effort to do things
2465     // inside cleanups, but there are crazy cases like
2466     //   @throw A().foo;
2467     // where a full retain+autorelease is required and would
2468     // otherwise happen after the destructor for the temporary.
2469     if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr)) {
2470       enterFullExpression(ewc);
2471       expr = ewc->getSubExpr();
2472     }
2473 
2474     CodeGenFunction::RunCleanupsScope cleanups(*this);
2475     return EmitARCRetainAutoreleaseScalarExpr(expr);
2476   }
2477 
2478   // Otherwise, use the normal scalar-expression emission.  The
2479   // exception machinery doesn't do anything special with the exception,
2480   // such as retaining it, so there's no safety gained by running cleanups
2481   // only after the throw has started, and doing so tends to produce
2482   // substantially inferior code when it matters.
2483   return EmitScalarExpr(expr);
2484 }
2485 
2486 std::pair<LValue,llvm::Value*>
2487 CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
2488                                     bool ignored) {
2489   // Evaluate the RHS first.
2490   TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
2491   llvm::Value *value = result.getPointer();
2492 
2493   bool hasImmediateRetain = result.getInt();
2494 
2495   // If we didn't emit a retained object, and the l-value is of block
2496   // type, then we need to emit the block-retain immediately in case
2497   // it invalidates the l-value.
2498   if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
2499     value = EmitARCRetainBlock(value, /*mandatory*/ false);
2500     hasImmediateRetain = true;
2501   }
2502 
2503   LValue lvalue = EmitLValue(e->getLHS());
2504 
2505   // If the RHS was emitted retained, expand this.
2506   if (hasImmediateRetain) {
2507     llvm::Value *oldValue =
2508       EmitLoadOfScalar(lvalue);
2509     EmitStoreOfScalar(value, lvalue);
2510     EmitARCRelease(oldValue, /*precise*/ false);
2511   } else {
2512     value = EmitARCStoreStrong(lvalue, value, ignored);
2513   }
2514 
2515   return std::pair<LValue,llvm::Value*>(lvalue, value);
2516 }
2517 
2518 std::pair<LValue,llvm::Value*>
2519 CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
2520   llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
2521   LValue lvalue = EmitLValue(e->getLHS());
2522 
2523   EmitStoreOfScalar(value, lvalue);
2524 
2525   return std::pair<LValue,llvm::Value*>(lvalue, value);
2526 }
2527 
2528 void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
2529                                              const ObjCAutoreleasePoolStmt &ARPS) {
2530   const Stmt *subStmt = ARPS.getSubStmt();
2531   const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
2532 
2533   CGDebugInfo *DI = getDebugInfo();
2534   if (DI)
2535     DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
2536 
2537   // Keep track of the current cleanup stack depth.
2538   RunCleanupsScope Scope(*this);
2539   if (CGM.getCodeGenOpts().ObjCRuntimeHasARC) {
2540     llvm::Value *token = EmitObjCAutoreleasePoolPush();
2541     EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
2542   } else {
2543     llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
2544     EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
2545   }
2546 
2547   for (CompoundStmt::const_body_iterator I = S.body_begin(),
2548        E = S.body_end(); I != E; ++I)
2549     EmitStmt(*I);
2550 
2551   if (DI)
2552     DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
2553 }
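
// Illustrative lowering of "@autoreleasepool { body }":
//   with an ARC-aware runtime:
//     %token = call i8* @objc_autoreleasePoolPush()
//     ... body ...
//     call void @objc_autoreleasePoolPop(i8* %token)    ; from the cleanup
//   otherwise (MRR fallback), the equivalent message sends:
//     tmp = [[NSAutoreleasePool alloc] init];
//     ... body ...
//     [tmp drain];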
2554 
2555 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2556 /// make sure it survives garbage collection until this point.
2557 void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
2558   // We just emit an empty inline asm that takes the object as an operand.
2559   llvm::FunctionType *extenderType
2560     = llvm::FunctionType::get(VoidTy, VoidPtrTy, /*variadic*/ false);
2561   llvm::Value *extender
2562     = llvm::InlineAsm::get(extenderType,
2563                            /* assembly */ "",
2564                            /* constraints */ "r",
2565                            /* side effects */ true);
2566 
2567   object = Builder.CreateBitCast(object, VoidPtrTy);
2568   Builder.CreateCall(extender, object)->setDoesNotThrow();
2569 }
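
// The emitted marker is just an opaque use of the pointer, roughly:
//   call void asm sideeffect "", "r"(i8* %object)
// which keeps the object conservatively alive up to this point without
// producing any actual instructions.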
2570 
2571 /// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
2572 /// a non-trivial copy-assignment operator, produce the following helper:
2573 ///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
2574 ///
2575 llvm::Constant *
2576 CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
2577                                         const ObjCPropertyImplDecl *PID) {
2578   // FIXME: This API is for the NeXT runtime only for now.
2579   if (!getLangOptions().CPlusPlus || !getLangOptions().NeXTRuntime)
2580     return 0;
2581   QualType Ty = PID->getPropertyIvarDecl()->getType();
2582   if (!Ty->isRecordType())
2583     return 0;
2584   const ObjCPropertyDecl *PD = PID->getPropertyDecl();
2585   if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
2586     return 0;
2587   llvm::Constant *HelperFn = 0;
2588   if (hasTrivialSetExpr(PID))
2589     return 0;
2590   assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
2591   if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
2592     return HelperFn;
2593 
2594   ASTContext &C = getContext();
2595   IdentifierInfo *II
2596     = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
2597   FunctionDecl *FD = FunctionDecl::Create(C,
2598                                           C.getTranslationUnitDecl(),
2599                                           SourceLocation(),
2600                                           SourceLocation(), II, C.VoidTy, 0,
2601                                           SC_Static,
2602                                           SC_None,
2603                                           false,
2604                                           true);
2605 
2606   QualType DestTy = C.getPointerType(Ty);
2607   QualType SrcTy = Ty;
2608   SrcTy.addConst();
2609   SrcTy = C.getPointerType(SrcTy);
2610 
2611   FunctionArgList args;
2612   ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
2613   args.push_back(&dstDecl);
2614   ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
2615   args.push_back(&srcDecl);
2616 
2617   const CGFunctionInfo &FI =
2618     CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
2619 
2620   llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
2621 
2622   llvm::Function *Fn =
2623     llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
2624                            "__assign_helper_atomic_property_", &CGM.getModule());
2625 
2626   if (CGM.getModuleDebugInfo())
2627     DebugInfo = CGM.getModuleDebugInfo();
2628 
2629 
2630   StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
2631 
2632   DeclRefExpr *DstExpr =
2633     new (C) DeclRefExpr(&dstDecl, DestTy,
2634                               VK_RValue, SourceLocation());
2635 
2636   Expr* DST = new (C) UnaryOperator(DstExpr, UO_Deref, DestTy->getPointeeType(),
2637                                     VK_LValue, OK_Ordinary, SourceLocation());
2638 
2639   DeclRefExpr *SrcExpr =
2640     new (C) DeclRefExpr(&srcDecl, SrcTy,
2641                         VK_RValue, SourceLocation());
2642 
2643   Expr* SRC = new (C) UnaryOperator(SrcExpr, UO_Deref, SrcTy->getPointeeType(),
2644                                     VK_LValue, OK_Ordinary, SourceLocation());
2645 
2646   Expr *Args[2] = { DST, SRC };
2647   CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
2648   CXXOperatorCallExpr *TheCall =
2649     new (C) CXXOperatorCallExpr(C, OO_Equal, CalleeExp->getCallee(),
2650                                 Args, 2, DestTy->getPointeeType(),
2651                                 VK_LValue, SourceLocation());
2652 
2653   EmitStmt(TheCall);
2654 
2655   FinishFunction();
2656   HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
2657   CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
2658   return HelperFn;
2659 }
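
// Illustrative trigger for the helper above (type and property names are
// placeholders): an atomic property of Objective-C++ class type whose
// copy-assignment operator is non-trivial, e.g.
//   struct Payload { Payload &operator=(const Payload &other); };
//   @property (atomic, assign) Payload payload;
// causes the synthesized setter to assign through the generated
// __assign_helper_atomic_property_ function.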
2660 
2661 llvm::Constant *
2662 CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
2663                                             const ObjCPropertyImplDecl *PID) {
2664   // FIXME: This API is for the NeXT runtime only for now.
2665   if (!getLangOptions().CPlusPlus || !getLangOptions().NeXTRuntime)
2666     return 0;
2667   const ObjCPropertyDecl *PD = PID->getPropertyDecl();
2668   QualType Ty = PD->getType();
2669   if (!Ty->isRecordType())
2670     return 0;
2671   if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
2672     return 0;
2673   llvm::Constant *HelperFn = 0;
2674 
2675   if (hasTrivialGetExpr(PID))
2676     return 0;
2677   assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
2678   if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
2679     return HelperFn;
2680
2682   ASTContext &C = getContext();
2683   IdentifierInfo *II
2684     = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
2685   FunctionDecl *FD = FunctionDecl::Create(C,
2686                                           C.getTranslationUnitDecl(),
2687                                           SourceLocation(),
2688                                           SourceLocation(), II, C.VoidTy, 0,
2689                                           SC_Static,
2690                                           SC_None,
2691                                           false,
2692                                           true);
2693 
2694   QualType DestTy = C.getPointerType(Ty);
2695   QualType SrcTy = Ty;
2696   SrcTy.addConst();
2697   SrcTy = C.getPointerType(SrcTy);
2698 
2699   FunctionArgList args;
2700   ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
2701   args.push_back(&dstDecl);
2702   ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
2703   args.push_back(&srcDecl);
2704 
2705   const CGFunctionInfo &FI =
2706     CGM.getTypes().getFunctionInfo(C.VoidTy, args, FunctionType::ExtInfo());
2707 
2708   llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI, false);
2709 
2710   llvm::Function *Fn =
2711     llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
2712                            "__copy_helper_atomic_property_", &CGM.getModule());
2713 
2714   if (CGM.getModuleDebugInfo())
2715     DebugInfo = CGM.getModuleDebugInfo();
2716
2718   StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
2719 
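       // Dereference the source parameter; '*src' is the object being copied.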
2720   DeclRefExpr *SrcExpr =
2721     new (C) DeclRefExpr(&srcDecl, SrcTy,
2722                         VK_RValue, SourceLocation());
2723 
2724   Expr *SRC = new (C) UnaryOperator(SrcExpr, UO_Deref, SrcTy->getPointeeType(),
2725                                     VK_LValue, OK_Ordinary, SourceLocation());
2726 
2727   CXXConstructExpr *CXXConstExpr =
2728     cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
2729 
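       // Rebuild the getter's copy-construction with '*src' substituted for
       // its original first argument; the remaining arguments are reused
       // unchanged.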
2730   SmallVector<Expr*, 4> ConstructorArgs;
2731   ConstructorArgs.push_back(SRC);
2732   CXXConstructExpr::arg_iterator A = CXXConstExpr->arg_begin();
2733   ++A;
2734 
2735   for (CXXConstructExpr::arg_iterator AEnd = CXXConstExpr->arg_end();
2736        A != AEnd; ++A)
2737     ConstructorArgs.push_back(*A);
2738 
2739   CXXConstructExpr *TheCXXConstructExpr =
2740     CXXConstructExpr::Create(C, Ty, SourceLocation(),
2741                              CXXConstExpr->getConstructor(),
2742                              CXXConstExpr->isElidable(),
2743                              &ConstructorArgs[0], ConstructorArgs.size(),
2744                              CXXConstExpr->hadMultipleCandidates(),
2745                              CXXConstExpr->requiresZeroInitialization(),
2746                              CXXConstExpr->getConstructionKind(), SourceRange());
2747 
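       // Evaluate the destination pointer and construct the copy directly
       // into the memory it points to.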
2748   DeclRefExpr *DstExpr =
2749     new (C) DeclRefExpr(&dstDecl, DestTy,
2750                         VK_RValue, SourceLocation());
2751 
2752   RValue DV = EmitAnyExpr(DstExpr);
2753   CharUnits Alignment = C.getTypeAlignInChars(TheCXXConstructExpr->getType());
2754   EmitAggExpr(TheCXXConstructExpr,
2755               AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
2756                                     AggValueSlot::IsDestructed,
2757                                     AggValueSlot::DoesNotNeedGCBarriers,
2758                                     AggValueSlot::IsNotAliased));
2759 
2760   FinishFunction();
2761   HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
2762   CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
2763   return HelperFn;
2764 }
2765
2767 CGObjCRuntime::~CGObjCRuntime() {}
2768