1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGCall.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/ConvertUTF.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/LLVMContext.h"
28 #include "llvm/MDBuilder.h"
29 #include "llvm/Target/TargetData.h"
30 using namespace clang;
31 using namespace CodeGen;
32 
33 //===--------------------------------------------------------------------===//
34 //                        Miscellaneous Helper Methods
35 //===--------------------------------------------------------------------===//
36 
37 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
38   unsigned addressSpace =
39     cast<llvm::PointerType>(value->getType())->getAddressSpace();
40 
41   llvm::PointerType *destType = Int8PtrTy;
42   if (addressSpace)
43     destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
44 
45   if (value->getType() == destType) return value;
46   return Builder.CreateBitCast(value, destType);
47 }
48 
49 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
50 /// block.
51 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
52                                                     const Twine &Name) {
53   if (!Builder.isNamePreserving())
54     return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
55   return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
56 }
57 
58 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
59                                      llvm::Value *Init) {
60   llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
61   llvm::BasicBlock *Block = AllocaInsertPt->getParent();
62   Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
63 }
64 
65 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
66                                                 const Twine &Name) {
67   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
68   // FIXME: Should we prefer the preferred type alignment here?
69   CharUnits Align = getContext().getTypeAlignInChars(Ty);
70   Alloc->setAlignment(Align.getQuantity());
71   return Alloc;
72 }
73 
74 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
75                                                  const Twine &Name) {
76   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
77   // FIXME: Should we prefer the preferred type alignment here?
78   CharUnits Align = getContext().getTypeAlignInChars(Ty);
79   Alloc->setAlignment(Align.getQuantity());
80   return Alloc;
81 }
82 
83 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
84 /// expression and compare the result against zero, returning an Int1Ty value.
85 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
86   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
87     llvm::Value *MemPtr = EmitScalarExpr(E);
88     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
89   }
90 
91   QualType BoolTy = getContext().BoolTy;
92   if (!E->getType()->isAnyComplexType())
93     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
94 
95   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
96 }
97 
98 /// EmitIgnoredExpr - Emit code to compute the specified expression,
99 /// ignoring the result.
100 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
101   if (E->isRValue())
102     return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
103 
104   // Just emit it as an l-value and drop the result.
105   EmitLValue(E);
106 }
107 
108 /// EmitAnyExpr - Emit code to compute the specified expression which
109 /// can have any type.  The result is returned as an RValue struct.
110 /// If this is an aggregate expression, AggSlot indicates where the
111 /// result should be returned.
112 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
113                                     AggValueSlot aggSlot,
114                                     bool ignoreResult) {
115   if (!hasAggregateLLVMType(E->getType()))
116     return RValue::get(EmitScalarExpr(E, ignoreResult));
117   else if (E->getType()->isAnyComplexType())
118     return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
119 
120   if (!ignoreResult && aggSlot.isIgnored())
121     aggSlot = CreateAggTemp(E->getType(), "agg-temp");
122   EmitAggExpr(E, aggSlot);
123   return aggSlot.asRValue();
124 }
125 
126 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
127 /// always be accessible even if no aggregate location is provided.
128 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
129   AggValueSlot AggSlot = AggValueSlot::ignored();
130 
131   if (hasAggregateLLVMType(E->getType()) &&
132       !E->getType()->isAnyComplexType())
133     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
134   return EmitAnyExpr(E, AggSlot);
135 }
136 
137 /// EmitAnyExprToMem - Evaluate an expression into a given memory
138 /// location.
139 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
140                                        llvm::Value *Location,
141                                        Qualifiers Quals,
142                                        bool IsInit) {
143   // FIXME: This function should take an LValue as an argument.
144   if (E->getType()->isAnyComplexType()) {
145     EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
146   } else if (hasAggregateLLVMType(E->getType())) {
147     CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
148     EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
149                                          AggValueSlot::IsDestructed_t(IsInit),
150                                          AggValueSlot::DoesNotNeedGCBarriers,
151                                          AggValueSlot::IsAliased_t(!IsInit)));
152   } else {
153     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
154     LValue LV = MakeAddrLValue(Location, E->getType());
155     EmitStoreThroughLValue(RV, LV);
156   }
157 }
158 
namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
  struct SubobjectAdjustment {
    // Discriminator for the union below: which kind of subobject access
    // this adjustment performs.
    enum {
      DerivedToBaseAdjustment,
      FieldAdjustment,
      MemberPointerAdjustment
    } Kind;

    union {
      // Kind == DerivedToBaseAdjustment: convert from DerivedClass to a
      // base class along the inheritance path recorded on the cast.
      struct {
        const CastExpr *BasePath;
        const CXXRecordDecl *DerivedClass;
      } DerivedToBase;

      // Kind == FieldAdjustment: access a non-static data member of the
      // temporary.
      FieldDecl *Field;

      // Kind == MemberPointerAdjustment: apply the pointer-to-data-member
      // value Ptr (of member-pointer type MPT) to the temporary.
      struct {
        const MemberPointerType *MPT;
        llvm::Value *Ptr;
      } Ptr;
    };

    // Construct a derived-to-base adjustment.
    SubobjectAdjustment(const CastExpr *BasePath,
                        const CXXRecordDecl *DerivedClass)
      : Kind(DerivedToBaseAdjustment) {
      DerivedToBase.BasePath = BasePath;
      DerivedToBase.DerivedClass = DerivedClass;
    }

    // Construct a field-access adjustment.
    SubobjectAdjustment(FieldDecl *Field)
      : Kind(FieldAdjustment) {
      this->Field = Field;
    }

    // Construct a member-pointer adjustment.
    SubobjectAdjustment(const MemberPointerType *MPT, llvm::Value *Ptr)
      : Kind(MemberPointerAdjustment) {
      this->Ptr.MPT = MPT;
      this->Ptr.Ptr = Ptr;
    }
  };
}
202 
203 static llvm::Value *
204 CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
205                          const NamedDecl *InitializedDecl) {
206   if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
207     if (VD->hasGlobalStorage()) {
208       SmallString<256> Name;
209       llvm::raw_svector_ostream Out(Name);
210       CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
211       Out.flush();
212 
213       llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
214 
215       // Create the reference temporary.
216       llvm::GlobalValue *RefTemp =
217         new llvm::GlobalVariable(CGF.CGM.getModule(),
218                                  RefTempTy, /*isConstant=*/false,
219                                  llvm::GlobalValue::InternalLinkage,
220                                  llvm::Constant::getNullValue(RefTempTy),
221                                  Name.str());
222       return RefTemp;
223     }
224   }
225 
226   return CGF.CreateMemTemp(Type, "ref.tmp");
227 }
228 
/// EmitExprForReferenceBinding - Emit the expression E, to which a reference
/// is being bound, and return the address the reference should bind to.
/// Out-parameters: ReferenceTemporary receives any temporary storage that
/// was created here; ReferenceTemporaryDtor the destructor that temporary
/// requires (if any); ObjCARCReferenceLifetimeType the type whose ARC
/// ownership the caller must still clean up (set only when the cleanup is
/// not already pushed in this function).
static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through single-element init lists that claim to be lvalues. They're
  // just syntactic wrappers in this case.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits() == 1 && ILE->isGLValue())
      E = ILE->getInit(0);
  }

  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  // Default arguments are emitted as the underlying default expression.
  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    // Enter the full-expression's cleanup scope before recursing so that
    // temporaries created while emitting the subexpression are destroyed at
    // the right point.
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    // A simple lvalue already has an address we can bind to directly.
    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      // ARC path: initialize a temporary with the proper ownership
      // semantics, then decide whether this binding extends its lifetime.
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);


      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      // The lifetime is extended when binding to a variable whose
      // declaration extends temporaries, or to a field.
      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        // The cleanup was pushed above; tell the caller no ARC cleanup
        // remains outstanding.
        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    // Strip casts and member accesses that select a subobject of the
    // temporary, recording each step so the address can be adjusted after
    // the complete object has been emitted.
    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
        if (BO->isPtrMemOp()) {
          assert(BO->getLHS()->isRValue());
          E = BO->getLHS();
          const MemberPointerType *MPT =
              BO->getRHS()->getType()->getAs<MemberPointerType>();
          llvm::Value *Ptr = CGF.EmitScalarExpr(BO->getRHS());
          Adjustments.push_back(SubobjectAdjustment(MPT, Ptr));
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if need to perform derived-to-base casts and/or field accesses, to
    // get from the temporary object we created (and, potentially, for which we
    // extended the lifetime) to the subobject we're binding the reference to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      // Replay the recorded adjustments in reverse of the order they were
      // pushed (they were recorded while stripping outermost-first).
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV = CGF.MakeAddrLValue(Object, E->getType());
          LV = CGF.EmitLValueForField(LV, Adjustment.Field);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        case SubobjectAdjustment::MemberPointerAdjustment: {
          Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
                        CGF, Object, Adjustment.Ptr.Ptr, Adjustment.Ptr.MPT);
          break;
        }
        }
      }

      return Object;
    }
  }

  // The aggregate case already has storage with a usable address.
  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);


  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}
478 
/// EmitReferenceBindingToExpr - Emit E so that its address can be bound to
/// a reference, and register any cleanup the created temporary requires:
/// a global destructor for globals, or a scoped (possibly ARC) cleanup for
/// locals.
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (CatchUndefined && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitCheck(CT_ReferenceBinding, Value, Ty);
  }
  // Fast path: no temporary needing destruction or ARC cleanup was created.
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    // Global reference: register the destructor with the ABI's global-dtor
    // mechanism rather than pushing a function-local cleanup.
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  // Local temporary: push either a C++ destructor cleanup or an
  // ARC-ownership cleanup.
  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}
551 
552 
553 /// getAccessedFieldNo - Given an encoded value and a result number, return the
554 /// input field number being accessed.
555 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
556                                              const llvm::Constant *Elts) {
557   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
558       ->getZExtValue();
559 }
560 
/// EmitCheck - Under -fcatch-undefined-behavior, emit a run-time check that
/// the glvalue at Address is valid for an access of type Ty: non-null
/// (skipped for plain loads/stores), large enough, and suitably aligned.
/// On failure, control branches to the trap block.  A zero Alignment means
/// "use the type's ABI alignment".
void CodeGenFunction::EmitCheck(CheckType CT, llvm::Value *Address, QualType Ty,
                                CharUnits Alignment) {
  if (!CatchUndefined)
    return;

  llvm::Value *Cond = 0;

  if (CT != CT_Load && CT != CT_Store) {
    // The glvalue must not be an empty glvalue. Don't bother checking this for
    // loads and stores, because we will get a segfault anyway (if the operation
    // isn't optimized out).
    Cond = Builder.CreateICmpNE(
        Address, llvm::Constant::getNullValue(Address->getType()));
  }

  // Size and alignment checks need a complete type.
  if (!Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
    uint64_t AlignVal = Alignment.getQuantity();
    if (!AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // This needs to be to the standard address space.
    Address = Builder.CreateBitCast(Address, Int8PtrTy);

    // The glvalue must refer to a large enough storage region.
    // FIXME: If -faddress-sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *LargeEnough =
        Builder.CreateICmpUGE(Builder.CreateCall2(F, Address, Min),
                              llvm::ConstantInt::get(IntPtrTy, Size));
    Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;

    // The glvalue must be suitably aligned: the address ANDed with
    // (AlignVal - 1) must be zero.
    llvm::Value *Align =
        Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
                          llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
    Cond = Builder.CreateAnd(Cond,
        Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)));
  }

  // If any check was emitted, branch to the trap block when it fails.
  if (Cond) {
    llvm::BasicBlock *Cont = createBasicBlock();
    Builder.CreateCondBr(Cond, Cont, getTrapBB());
    EmitBlock(Cont);
  }
}
609 
610 
611 CodeGenFunction::ComplexPairTy CodeGenFunction::
612 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
613                          bool isInc, bool isPre) {
614   ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
615                                             LV.isVolatileQualified());
616 
617   llvm::Value *NextVal;
618   if (isa<llvm::IntegerType>(InVal.first->getType())) {
619     uint64_t AmountVal = isInc ? 1 : -1;
620     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
621 
622     // Add the inc/dec to the real part.
623     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
624   } else {
625     QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
626     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
627     if (!isInc)
628       FVal.changeSign();
629     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
630 
631     // Add the inc/dec to the real part.
632     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
633   }
634 
635   ComplexPairTy IncVal(NextVal, InVal.second);
636 
637   // Store the updated result through the lvalue.
638   StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
639 
640   // If this is a postinc, return the value read from memory, otherwise use the
641   // updated value.
642   return isPre ? IncVal : InVal;
643 }
644 
645 
646 //===----------------------------------------------------------------------===//
647 //                         LValue Expression Emission
648 //===----------------------------------------------------------------------===//
649 
650 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
651   if (Ty->isVoidType())
652     return RValue::get(0);
653 
654   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
655     llvm::Type *EltTy = ConvertType(CTy->getElementType());
656     llvm::Value *U = llvm::UndefValue::get(EltTy);
657     return RValue::getComplex(std::make_pair(U, U));
658   }
659 
660   // If this is a use of an undefined aggregate type, the aggregate must have an
661   // identifiable address.  Just because the contents of the value are undefined
662   // doesn't mean that the address can't be taken and compared.
663   if (hasAggregateLLVMType(Ty)) {
664     llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
665     return RValue::getAggregate(DestPtr);
666   }
667 
668   return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
669 }
670 
671 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
672                                               const char *Name) {
673   ErrorUnsupported(E, Name);
674   return GetUndefRValue(E->getType());
675 }
676 
677 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
678                                               const char *Name) {
679   ErrorUnsupported(E, Name);
680   llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
681   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
682 }
683 
684 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, CheckType CT) {
685   LValue LV = EmitLValue(E);
686   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
687     EmitCheck(CT, LV.getAddress(), E->getType(), LV.getAlignment());
688   return LV;
689 }
690 
691 /// EmitLValue - Emit code to compute a designator that specifies the location
692 /// of the expression.
693 ///
694 /// This can return one of two things: a simple address or a bitfield reference.
695 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
696 /// an LLVM pointer type.
697 ///
698 /// If this returns a bitfield reference, nothing about the pointee type of the
699 /// LLVM value is known: For example, it may not be a pointer to an integer.
700 ///
701 /// If this returns a normal address, and if the lvalue's C type is fixed size,
702 /// this method guarantees that the returned pointer type will point to an LLVM
703 /// type of the same size of the lvalue's type.  If the lvalue has a variable
704 /// length type, this is not possible.
705 ///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  // Dispatch on the dynamic class of the expression; each case forwards to
  // a dedicated Emit*LValue helper that knows how to produce an address
  // for that expression form.
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  // Property references are only ever emitted as part of an enclosing
  // pseudo-object expression (see PseudoObjectExprClass below).
  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
  return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  // Compound assignments to complex types need the complex-specific path;
  // everything else goes through the generic compound-assignment emitter.
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  // Parens, generic selections, chosen sub-expressions, etc. are
  // transparent: just recurse into the underlying expression.
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  // Full-expressions push their cleanups, emit the sub-expression while the
  // cleanup scope is active, and pop the scope on exit.
  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  // All the cast forms share a single emitter that inspects the cast kind.
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}
808 
809 /// Given an object of the given canonical type, can we safely copy a
810 /// value out of it based on its initializer?
811 static bool isConstantEmittableObjectType(QualType type) {
812   assert(type.isCanonical());
813   assert(!type->isReferenceType());
814 
815   // Must be const-qualified but non-volatile.
816   Qualifiers qs = type.getLocalQualifiers();
817   if (!qs.hasConst() || qs.hasVolatile()) return false;
818 
819   // Otherwise, all object types satisfy this except C++ classes with
820   // mutable subobjects or non-trivial copy/destroy behavior.
821   if (const RecordType *RT = dyn_cast<RecordType>(type))
822     if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
823       if (RD->hasMutableFields() || !RD->isTrivial())
824         return false;
825 
826   return true;
827 }
828 
829 /// Can we constant-emit a load of a reference to a variable of the
830 /// given type?  This is different from predicates like
831 /// Decl::isUsableInConstantExpressions because we do want it to apply
832 /// in situations that don't necessarily satisfy the language's rules
833 /// for this (e.g. C++'s ODR-use rules).  For example, we want to able
834 /// to do this with const float variables even if those variables
835 /// aren't marked 'constexpr'.
836 enum ConstantEmissionKind {
837   CEK_None,
838   CEK_AsReferenceOnly,
839   CEK_AsValueOrReference,
840   CEK_AsValueOnly
841 };
842 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
843   type = type.getCanonicalType();
844   if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
845     if (isConstantEmittableObjectType(ref->getPointeeType()))
846       return CEK_AsValueOrReference;
847     return CEK_AsReferenceOnly;
848   }
849   if (isConstantEmittableObjectType(type))
850     return CEK_AsValueOnly;
851   return CEK_None;
852 }
853 
/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
///
/// Returns an invalid ConstantEmission when constant emission is not
/// possible; otherwise returns either a value constant or a reference
/// constant that the caller must dereference.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  // Parameters are never constant-emittable; enum constants always are.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  // Note the result type comes from the declaration, not the expression.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // NOTE(review): the original comment here trailed off mid-sentence
  // ("This should probably fire even for"); presumably the debug value
  // should also be emitted in additional cases — confirm before relying
  // on the exact set of declarations that get debug references.
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}
920 
921 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
922   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
923                           lvalue.getAlignment().getQuantity(),
924                           lvalue.getType(), lvalue.getTBAAInfo());
925 }
926 
927 static bool hasBooleanRepresentation(QualType Ty) {
928   if (Ty->isBooleanType())
929     return true;
930 
931   if (const EnumType *ET = Ty->getAs<EnumType>())
932     return ET->getDecl()->getIntegerType()->isBooleanType();
933 
934   if (const AtomicType *AT = Ty->getAs<AtomicType>())
935     return hasBooleanRepresentation(AT->getValueType());
936 
937   return false;
938 }
939 
/// Compute !range metadata describing the values a load of the given type
/// can legitimately produce, or NULL if no useful range is known.  A range
/// is emitted for bool-represented types and, under -fstrict-enums in C++,
/// for enums without a fixed underlying type.
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  const EnumType *ET = Ty->getAs<EnumType>();
  // Fixed-underlying-type enums may legally hold any value of that type,
  // so only unfixed C++ enums get a range.
  bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
                                 CGM.getCodeGenOpts().StrictEnums &&
                                 !ET->getDecl()->isFixed());
  bool IsBool = hasBooleanRepresentation(Ty);
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return NULL;

  // The range is [Min, End), i.e. End is one past the largest valid value.
  llvm::APInt Min;
  llvm::APInt End;
  if (IsBool) {
    // Bool is stored as i8 in memory and may only hold 0 or 1.
    Min = llvm::APInt(8, 0);
    End = llvm::APInt(8, 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      // Signed case: the valid values fit in NumBits bits, so the range is
      // [-2^(NumBits-1), 2^(NumBits-1)).
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      // Unsigned case: range is [0, 2^NumPositiveBits).
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}
976 
977 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
978                                               unsigned Alignment, QualType Ty,
979                                               llvm::MDNode *TBAAInfo) {
980 
981   // For better performance, handle vector loads differently.
982   if (Ty->isVectorType()) {
983     llvm::Value *V;
984     const llvm::Type *EltTy =
985     cast<llvm::PointerType>(Addr->getType())->getElementType();
986 
987     const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);
988 
989     // Handle vectors of size 3, like size 4 for better performance.
990     if (VTy->getNumElements() == 3) {
991 
992       // Bitcast to vec4 type.
993       llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
994                                                          4);
995       llvm::PointerType *ptVec4Ty =
996       llvm::PointerType::get(vec4Ty,
997                              (cast<llvm::PointerType>(
998                                       Addr->getType()))->getAddressSpace());
999       llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
1000                                                 "castToVec4");
1001       // Now load value.
1002       llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1003 
1004       // Shuffle vector to get vec3.
1005       llvm::SmallVector<llvm::Constant*, 3> Mask;
1006       Mask.push_back(llvm::ConstantInt::get(
1007                                     llvm::Type::getInt32Ty(getLLVMContext()),
1008                                             0));
1009       Mask.push_back(llvm::ConstantInt::get(
1010                                     llvm::Type::getInt32Ty(getLLVMContext()),
1011                                             1));
1012       Mask.push_back(llvm::ConstantInt::get(
1013                                      llvm::Type::getInt32Ty(getLLVMContext()),
1014                                             2));
1015 
1016       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1017       V = Builder.CreateShuffleVector(LoadVal,
1018                                       llvm::UndefValue::get(vec4Ty),
1019                                       MaskV, "extractVec");
1020       return EmitFromMemory(V, Ty);
1021     }
1022   }
1023 
1024   llvm::LoadInst *Load = Builder.CreateLoad(Addr);
1025   if (Volatile)
1026     Load->setVolatile(true);
1027   if (Alignment)
1028     Load->setAlignment(Alignment);
1029   if (TBAAInfo)
1030     CGM.DecorateInstruction(Load, TBAAInfo);
1031   // If this is an atomic type, all normal reads must be atomic
1032   if (Ty->isAtomicType())
1033     Load->setAtomic(llvm::SequentiallyConsistent);
1034 
1035   if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1036     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1037       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1038 
1039   return EmitFromMemory(Load, Ty);
1040 }
1041 
1042 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1043   // Bool has a different representation in memory than in registers.
1044   if (hasBooleanRepresentation(Ty)) {
1045     // This should really always be an i1, but sometimes it's already
1046     // an i8, and it's awkward to track those cases down.
1047     if (Value->getType()->isIntegerTy(1))
1048       return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
1049     assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
1050   }
1051 
1052   return Value;
1053 }
1054 
1055 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1056   // Bool has a different representation in memory than in registers.
1057   if (hasBooleanRepresentation(Ty)) {
1058     assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
1059     return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1060   }
1061 
1062   return Value;
1063 }
1064 
1065 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
1066                                         bool Volatile, unsigned Alignment,
1067                                         QualType Ty,
1068                                         llvm::MDNode *TBAAInfo,
1069                                         bool isInit) {
1070 
1071   // Handle vectors differently to get better performance.
1072   if (Ty->isVectorType()) {
1073     llvm::Type *SrcTy = Value->getType();
1074     llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
1075     // Handle vec3 special.
1076     if (VecTy->getNumElements() == 3) {
1077       llvm::LLVMContext &VMContext = getLLVMContext();
1078 
1079       // Our source is a vec3, do a shuffle vector to make it a vec4.
1080       llvm::SmallVector<llvm::Constant*, 4> Mask;
1081       Mask.push_back(llvm::ConstantInt::get(
1082                                             llvm::Type::getInt32Ty(VMContext),
1083                                             0));
1084       Mask.push_back(llvm::ConstantInt::get(
1085                                             llvm::Type::getInt32Ty(VMContext),
1086                                             1));
1087       Mask.push_back(llvm::ConstantInt::get(
1088                                             llvm::Type::getInt32Ty(VMContext),
1089                                             2));
1090       Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
1091 
1092       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1093       Value = Builder.CreateShuffleVector(Value,
1094                                           llvm::UndefValue::get(VecTy),
1095                                           MaskV, "extractVec");
1096       SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
1097     }
1098     llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
1099     if (DstPtr->getElementType() != SrcTy) {
1100       llvm::Type *MemTy =
1101       llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
1102       Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
1103     }
1104   }
1105 
1106   Value = EmitToMemory(Value, Ty);
1107 
1108   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1109   if (Alignment)
1110     Store->setAlignment(Alignment);
1111   if (TBAAInfo)
1112     CGM.DecorateInstruction(Store, TBAAInfo);
1113   if (!isInit && Ty->isAtomicType())
1114     Store->setAtomic(llvm::SequentiallyConsistent);
1115 }
1116 
1117 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1118     bool isInit) {
1119   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
1120                     lvalue.getAlignment().getQuantity(), lvalue.getType(),
1121                     lvalue.getTBAAInfo(), isInit);
1122 }
1123 
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  // GC'ed __weak objects must be read through the ObjC runtime.
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  // ARC __weak l-values likewise need a runtime-assisted load.
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  // A single vector element: load the whole vector, then extract.
  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  // The only remaining l-value kind is a bit-field.
  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}
1160 
/// Load the value of a bit-field l-value.  The bit-field may span several
/// storage accesses (described by CGBitFieldInfo components); each access is
/// loaded, shifted, and masked, and the pieces are OR'd together into the
/// result, which is sign-extended if the bit-field is signed.
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
    // The access may not be more aligned than the l-value itself.
    CharUnits AccessAlignment = AI.AccessAlignment;
    if (!LV.getAlignment().isZero())
      AccessAlignment = std::min(AccessAlignment, LV.getAlignment());

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
                       CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    Load->setAlignment(AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    // shl + ashr pair replicates the bit-field's sign bit across the
    // unused high bits of the result.
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}
1233 
1234 // If this is a reference to a subset of the elements of a vector, create an
1235 // appropriate shufflevector.
1236 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1237   llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
1238                                             LV.isVolatileQualified());
1239   Load->setAlignment(LV.getAlignment().getQuantity());
1240   llvm::Value *Vec = Load;
1241 
1242   const llvm::Constant *Elts = LV.getExtVectorElts();
1243 
1244   // If the result of the expression is a non-vector type, we must be extracting
1245   // a single element.  Just codegen as an extractelement.
1246   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1247   if (!ExprVT) {
1248     unsigned InIdx = getAccessedFieldNo(0, Elts);
1249     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1250     return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1251   }
1252 
1253   // Always use shuffle vector to try to retain the original program structure
1254   unsigned NumResultElts = ExprVT->getNumElements();
1255 
1256   SmallVector<llvm::Constant*, 4> Mask;
1257   for (unsigned i = 0; i != NumResultElts; ++i)
1258     Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
1259 
1260   llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1261   Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1262                                     MaskV);
1263   return RValue::get(Vec);
1264 }
1265 
1266 
1267 
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
  // Non-simple l-values (vector element, ext-vector subset, bit-field)
  // each need their own read/modify/write sequence.
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      // Strong assignment releases the old value and retains the new one.
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      // Weak assignment must go through the runtime.
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      // Autorelease the value being stored, then do a normal store.
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  // GC'ed __weak destinations (unless suppressed by NonGC) also go through
  // the ObjC runtime's write barrier.
  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
     CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  // GC'ed __strong destinations pick the appropriate write barrier based
  // on whether the store targets an ivar, a global, or an arbitrary slot.
  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      // Compute the byte offset of the ivar within the object so the
      // runtime can locate the slot being written.
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
1357 
/// Store the given rvalue into a bit-field l-value.  The bit-field may span
/// several storage accesses (CGBitFieldInfo components); each access is
/// written separately, loading and preserving any surrounding bits that are
/// not part of the bit-field.  If Result is non-null, it receives the value
/// of the bit-field after the store (truncated and, for signed bit-fields,
/// sign-extended back to the result type).
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Bools are widened from i1 to the memory type first.
  if (hasBooleanRepresentation(Dst.getType()))
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
    // The access may not be more aligned than the l-value itself.
    CharUnits AccessAlignment = AI.AccessAlignment;
    if (!Dst.getAlignment().isZero())
      AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access, limited
    // to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      Load->setAlignment(AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    Store->setAlignment(AccessAlignment.getQuantity());
  }
}
1463 
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                            Dst.isVolatileQualified());
  Load->setAlignment(Dst.getAlignment().getQuantity());
  llvm::Value *Vec = Load;
  // Elts maps each accessed component position to the lane of the in-memory
  // vector it refers to (e.g. for V.yx, Elts is {1, 0}).
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
       cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shufflevector if the src and destination have the same number of
      // elements, and restore the vector mask since it is on the side it
      // will be stored: Mask[memory lane] = source lane.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // Build an identity mask that initially keeps every lane of the
      // currently loaded value.
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // Overwrite the accessed lanes so they come from the (extended) source
      // operand, whose lanes are numbered NumDstElts..NumDstElts+NumSrcElts-1
      // in the two-operand shuffle below.
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector: Sema rejects storing a wider
      // vector into a narrower component selection.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  // Write the merged vector back to memory.
  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
                                               Dst.isVolatileQualified());
  Store->setAlignment(Dst.getAlignment().getQuantity());
}
1531 
// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating write-barrier API calls. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  // Only relevant when Objective-C garbage collection is enabled.
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  // References to global variables get the global write-barrier treatment.
  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  // For the remaining expression kinds, classify based on the relevant
  // subexpression.
  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}
1631 
1632 static llvm::Value *
1633 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1634                                 llvm::Value *V, llvm::Type *IRType,
1635                                 StringRef Name = StringRef()) {
1636   unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1637   return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1638 }
1639 
1640 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1641                                       const Expr *E, const VarDecl *VD) {
1642   assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1643          "Var decl must have external storage or be a file var decl!");
1644 
1645   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1646   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1647   V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1648   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1649   QualType T = E->getType();
1650   LValue LV;
1651   if (VD->getType()->isReferenceType()) {
1652     llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1653     LI->setAlignment(Alignment.getQuantity());
1654     V = LI;
1655     LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1656   } else {
1657     LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1658   }
1659   setObjCGCLValueClass(CGF.getContext(), E, LV);
1660   return LV;
1661 }
1662 
1663 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1664                                      const Expr *E, const FunctionDecl *FD) {
1665   llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1666   if (!FD->hasPrototype()) {
1667     if (const FunctionProtoType *Proto =
1668             FD->getType()->getAs<FunctionProtoType>()) {
1669       // Ugly case: for a K&R-style definition, the type of the definition
1670       // isn't the same as the type of a use.  Correct for this with a
1671       // bitcast.
1672       QualType NoProtoType =
1673           CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1674       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1675       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
1676     }
1677   }
1678   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
1679   return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1680 }
1681 
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  CharUnits Alignment = getContext().getDeclAlign(ND);
  QualType T = E->getType();

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  // A weakref decl is emitted as a reference to its aliasee.
  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool isBlockVariable = VD->hasAttr<BlocksAttr>();

    // Plain locals (not references, not __block) can never be subject to
    // GC write-barriers; remember that so the barrier is suppressed below.
    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !isBlockVariable;

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);

    // Use special handling for lambdas.
    if (!V) {
      // A variable captured by a lambda is accessed as a field of the
      // closure object through the lambda's 'this' pointer.
      if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
        QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
        LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
                                                     LambdaTagType);
        return EmitLValueForField(LambdaLV, FD);
      }

      // Otherwise, the only way the variable can be missing from
      // LocalDeclMap is a block referencing a local of an enclosing scope.
      assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
      CharUnits alignment = getContext().getDeclAlign(VD);
      return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
                            E->getType(), alignment);
    }

    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    // __block variables live inside a byref structure; compute the address
    // of the actual storage slot.
    if (isBlockVariable)
      V = BuildBlockByrefAddress(V, VD);

    LValue LV;
    if (VD->getType()->isReferenceType()) {
      // A reference holds the address of its referent; load through it.
      llvm::LoadInst *LI = Builder.CreateLoad(V);
      LI->setAlignment(Alignment.getQuantity());
      V = LI;
      LV = MakeNaturalAlignAddrLValue(V, T);
    } else {
      LV = MakeAddrLValue(V, T, Alignment);
    }

    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");
}
1758 
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    // *ptr: the l-value is the pointee at the pointer's value.
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOpts().ObjC1 &&
        getContext().getLangOpts().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // __real is valid on scalars.  This is a faster way of testing that.
    // __imag can only produce an rvalue on scalars.
    if (E->getOpcode() == UO_Real &&
        !cast<llvm::PointerType>(Addr->getType())
           ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    // For a _Complex operand, project the real (index 0) or imaginary
    // (index 1) component out of the {real, imag} pair with a struct GEP.
    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    // ++x / --x: perform the update, then the l-value is the operand itself.
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}
1819 
1820 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1821   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1822                         E->getType());
1823 }
1824 
1825 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1826   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1827                         E->getType());
1828 }
1829 
1830 static llvm::Constant*
1831 GetAddrOfConstantWideString(StringRef Str,
1832                             const char *GlobalName,
1833                             ASTContext &Context,
1834                             QualType Ty, SourceLocation Loc,
1835                             CodeGenModule &CGM) {
1836 
1837   StringLiteral *SL = StringLiteral::Create(Context,
1838                                             Str,
1839                                             StringLiteral::Wide,
1840                                             /*Pascal = */false,
1841                                             Ty, Loc);
1842   llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
1843   llvm::GlobalVariable *GV =
1844     new llvm::GlobalVariable(CGM.getModule(), C->getType(),
1845                              !CGM.getLangOpts().WritableStrings,
1846                              llvm::GlobalValue::PrivateLinkage,
1847                              C, GlobalName);
1848   const unsigned WideAlignment =
1849     Context.getTypeAlignInChars(Ty).getQuantity();
1850   GV->setAlignment(WideAlignment);
1851   return GV;
1852 }
1853 
1854 static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
1855                                     SmallString<32>& Target) {
1856   Target.resize(CharByteWidth * (Source.size() + 1));
1857   char* ResultPtr = &Target[0];
1858   bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr);
1859   (void)success;
1860   assert(success);
1861   Target.resize(ResultPtr - &Target[0]);
1862 }
1863 
1864 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1865   switch (E->getIdentType()) {
1866   default:
1867     return EmitUnsupportedLValue(E, "predefined expression");
1868 
1869   case PredefinedExpr::Func:
1870   case PredefinedExpr::Function:
1871   case PredefinedExpr::LFunction:
1872   case PredefinedExpr::PrettyFunction: {
1873     unsigned IdentType = E->getIdentType();
1874     std::string GlobalVarName;
1875 
1876     switch (IdentType) {
1877     default: llvm_unreachable("Invalid type");
1878     case PredefinedExpr::Func:
1879       GlobalVarName = "__func__.";
1880       break;
1881     case PredefinedExpr::Function:
1882       GlobalVarName = "__FUNCTION__.";
1883       break;
1884     case PredefinedExpr::LFunction:
1885       GlobalVarName = "L__FUNCTION__.";
1886       break;
1887     case PredefinedExpr::PrettyFunction:
1888       GlobalVarName = "__PRETTY_FUNCTION__.";
1889       break;
1890     }
1891 
1892     StringRef FnName = CurFn->getName();
1893     if (FnName.startswith("\01"))
1894       FnName = FnName.substr(1);
1895     GlobalVarName += FnName;
1896 
1897     const Decl *CurDecl = CurCodeDecl;
1898     if (CurDecl == 0)
1899       CurDecl = getContext().getTranslationUnitDecl();
1900 
1901     std::string FunctionName =
1902         (isa<BlockDecl>(CurDecl)
1903          ? FnName.str()
1904          : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
1905                                        CurDecl));
1906 
1907     const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
1908     llvm::Constant *C;
1909     if (ElemType->isWideCharType()) {
1910       SmallString<32> RawChars;
1911       ConvertUTF8ToWideString(
1912           getContext().getTypeSizeInChars(ElemType).getQuantity(),
1913           FunctionName, RawChars);
1914       C = GetAddrOfConstantWideString(RawChars,
1915                                       GlobalVarName.c_str(),
1916                                       getContext(),
1917                                       E->getType(),
1918                                       E->getLocation(),
1919                                       CGM);
1920     } else {
1921       C = CGM.GetAddrOfConstantCString(FunctionName,
1922                                        GlobalVarName.c_str(),
1923                                        1);
1924     }
1925     return MakeAddrLValue(C, E->getType());
1926   }
1927   }
1928 }
1929 
1930 llvm::BasicBlock *CodeGenFunction::getTrapBB() {
1931   const CodeGenOptions &GCO = CGM.getCodeGenOpts();
1932 
1933   // If we are not optimzing, don't collapse all calls to trap in the function
1934   // to the same call, that way, in the debugger they can see which operation
1935   // did in fact fail.  If we are optimizing, we collapse all calls to trap down
1936   // to just one per function to save on codesize.
1937   if (GCO.OptimizationLevel && TrapBB)
1938     return TrapBB;
1939 
1940   llvm::BasicBlock *Cont = 0;
1941   if (HaveInsertPoint()) {
1942     Cont = createBasicBlock("cont");
1943     EmitBranch(Cont);
1944   }
1945   TrapBB = createBasicBlock("trap");
1946   EmitBlock(TrapBB);
1947 
1948   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
1949   llvm::CallInst *TrapCall = Builder.CreateCall(F);
1950   TrapCall->setDoesNotReturn();
1951   TrapCall->setDoesNotThrow();
1952   Builder.CreateUnreachable();
1953 
1954   if (Cont)
1955     EmitBlock(Cont);
1956   return TrapBB;
1957 }
1958 
1959 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
1960 /// array to pointer, return the array subexpression.
1961 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
1962   // If this isn't just an array->pointer decay, bail out.
1963   const CastExpr *CE = dyn_cast<CastExpr>(E);
1964   if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
1965     return 0;
1966 
1967   // If this is a decay from variable width array, bail out.
1968   const Expr *SubExpr = CE->getSubExpr();
1969   if (SubExpr->getType()->isVariableArrayType())
1970     return 0;
1971 
1972   return SubExpr;
1973 }
1974 
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy  = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType(), LHS.getAlignment());
  }

  // Extend or truncate the index type to 32 or 64-bits.
  if (Idx->getType() != IntPtrTy)
    Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  CharUnits ArrayAlignment;
  if (const VariableArrayType *vla =
        getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address = EmitScalarExpr(E->getBase());

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).first;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
      Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
      Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
    }
  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"
    // Scale the index by the interface's size and do byte-wise pointer
    // arithmetic through an i8*.
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
          getContext().getTypeSizeInChars(OIT).getQuantity());

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    // The base must be a pointer, which is not an aggregate.  Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    Address = EmitCastToVoidPtr(Base);
    Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV = EmitLValue(Array);
    llvm::Value *ArrayPtr = ArrayLV.getAddress();
    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
    llvm::Value *Args[] = { Zero, Idx };

    // Propagate the alignment from the array itself to the result.
    ArrayAlignment = ArrayLV.getAlignment();

    if (getContext().getLangOpts().isSignedOverflowDefined())
      Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
  } else {
    // The base must be a pointer, which is not an aggregate.  Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    if (getContext().getLangOpts().isSignedOverflowDefined())
      Address = Builder.CreateGEP(Base, Idx, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");


  // Limit the alignment to that of the result type.
  LValue LV;
  if (!ArrayAlignment.isZero()) {
    CharUnits Align = getContext().getTypeAlignInChars(T);
    ArrayAlignment = std::min(Align, ArrayAlignment);
    LV = MakeAddrLValue(Address, T, ArrayAlignment);
  } else {
    LV = MakeNaturalAlignAddrLValue(Address, T);
  }

  LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());

  // Under Objective-C GC, classify the l-value so the right write-barrier
  // (if any) is emitted on stores through it.
  if (getContext().getLangOpts().ObjC1 &&
      getContext().getLangOpts().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}
2086 
2087 static
2088 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
2089                                        SmallVector<unsigned, 4> &Elts) {
2090   SmallVector<llvm::Constant*, 4> CElts;
2091   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
2092     CElts.push_back(Builder.getInt32(Elts[i]));
2093 
2094   return llvm::ConstantVector::get(CElts);
2095 }
2096 
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType());
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType());
  }

  // The element access imposes the base's cv-qualifiers on the result type.
  QualType type =
    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getAlignment());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  // Accessing elements of an element access (e.g. V.zw.y): compose the
  // selections by mapping our indices through the base's element list.
  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
                                  Base.getAlignment());
}
2150 
2151 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
2152   Expr *BaseExpr = E->getBase();
2153 
2154   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
2155   LValue BaseLV;
2156   if (E->isArrow()) {
2157     llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
2158     QualType PtrTy = BaseExpr->getType()->getPointeeType();
2159     EmitCheck(CT_MemberAccess, Ptr, PtrTy);
2160     BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
2161   } else
2162     BaseLV = EmitCheckedLValue(BaseExpr, CT_MemberAccess);
2163 
2164   NamedDecl *ND = E->getMemberDecl();
2165   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
2166     LValue LV = EmitLValueForField(BaseLV, Field);
2167     setObjCGCLValueClass(getContext(), E, LV);
2168     return LV;
2169   }
2170 
2171   if (VarDecl *VD = dyn_cast<VarDecl>(ND))
2172     return EmitGlobalVarDeclLValue(*this, E, VD);
2173 
2174   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
2175     return EmitFunctionDeclLValue(*this, E, FD);
2176 
2177   llvm_unreachable("Unhandled member declaration!");
2178 }
2179 
/// Emit an l-value for the given field of 'base'.  Handles bit-fields,
/// unions (no pointer adjustment), reference members (loaded through
/// immediately), alignment propagation, TBAA decoration, and the
/// may_alias attribute.
LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  // Bit-fields get a dedicated l-value kind carrying CGBitFieldInfo;
  // no address arithmetic is done here.
  if (field->isBitField()) {
    const CGRecordLayout &RL =
      CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    // Propagate volatile/restrict (but not const) from the base l-value.
    QualType fieldType =
      field->getType().withCVRQualifiers(base.getVRQualifiers());
    return LValue::MakeBitfield(base.getAddress(), Info, fieldType,
                                base.getAlignment());
  }

  const RecordDecl *rec = field->getParent();
  QualType type = field->getType();
  CharUnits alignment = getContext().getDeclAlign(field);

  // FIXME: It should be impossible to have an LValue without alignment for a
  // complete type.
  if (!base.getAlignment().isZero())
    alignment = std::min(alignment, base.getAlignment());

  bool mayAlias = rec->hasAttr<MayAliasAttr>();

  llvm::Value *addr = base.getAddress();
  unsigned cvr = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    assert(!type->isReferenceType() && "union has reference member");
  } else {
    // For structs, we GEP to the field that the record layout suggests.
    unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
    addr = Builder.CreateStructGEP(addr, idx, field->getName());

    // If this is a reference field, load the reference right now.
    if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
      llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
      if (cvr & Qualifiers::Volatile) load->setVolatile(true);
      load->setAlignment(alignment.getQuantity());

      // Decorate the load of the reference itself with TBAA metadata;
      // fields of may_alias records are treated as 'char'.
      if (CGM.shouldUseTBAA()) {
        llvm::MDNode *tbaa;
        if (mayAlias)
          tbaa = CGM.getTBAAInfo(getContext().CharTy);
        else
          tbaa = CGM.getTBAAInfo(type);
        CGM.DecorateInstruction(load, tbaa);
      }

      // From here on we are working with the referenced object, so
      // recompute address, type, alignment, and qualifiers accordingly.
      addr = load;
      mayAlias = false;
      type = refType->getPointeeType();
      if (type->isIncompleteType())
        alignment = CharUnits();
      else
        alignment = getContext().getTypeAlignInChars(type);
      cvr = 0; // qualifiers don't recursively apply to referencee
    }
  }

  // Make sure that the address is pointing to the right type.  This is critical
  // for both unions and structs.  A union needs a bitcast, a struct element
  // will need a bitcast if the LLVM type laid out doesn't match the desired
  // type.
  addr = EmitBitCastOfLValueToProperType(*this, addr,
                                         CGM.getTypes().ConvertTypeForMem(type),
                                         field->getName());

  // Emit annotation intrinsics for fields carrying the annotate attribute.
  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, type, alignment);
  LV.getQuals().addCVRQualifiers(cvr);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  // Fields of may_alias structs act like 'char' for TBAA purposes.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  if (mayAlias && LV.getTBAAInfo())
    LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));

  return LV;
}
2265 
2266 LValue
2267 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
2268                                                   const FieldDecl *Field) {
2269   QualType FieldType = Field->getType();
2270 
2271   if (!FieldType->isReferenceType())
2272     return EmitLValueForField(Base, Field);
2273 
2274   const CGRecordLayout &RL =
2275     CGM.getTypes().getCGRecordLayout(Field->getParent());
2276   unsigned idx = RL.getLLVMFieldNo(Field);
2277   llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
2278   assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
2279 
2280   // Make sure that the address is pointing to the right type.  This is critical
2281   // for both unions and structs.  A union needs a bitcast, a struct element
2282   // will need a bitcast if the LLVM type laid out doesn't match the desired
2283   // type.
2284   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
2285   V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
2286 
2287   CharUnits Alignment = getContext().getDeclAlign(Field);
2288 
2289   // FIXME: It should be impossible to have an LValue without alignment for a
2290   // complete type.
2291   if (!Base.getAlignment().isZero())
2292     Alignment = std::min(Alignment, Base.getAlignment());
2293 
2294   return MakeAddrLValue(V, FieldType, Alignment);
2295 }
2296 
2297 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
2298   if (E->isFileScope()) {
2299     llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
2300     return MakeAddrLValue(GlobalPtr, E->getType());
2301   }
2302   if (E->getType()->isVariablyModifiedType())
2303     // make sure to emit the VLA size.
2304     EmitVariablyModifiedType(E->getType());
2305 
2306   llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
2307   const Expr *InitExpr = E->getInitializer();
2308   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
2309 
2310   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
2311                    /*Init*/ true);
2312 
2313   return Result;
2314 }
2315 
2316 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
2317   if (!E->isGLValue())
2318     // Initializing an aggregate temporary in C++11: T{...}.
2319     return EmitAggExprToLValue(E);
2320 
2321   // An lvalue initializer list must be initializing a reference.
2322   assert(E->getNumInits() == 1 && "reference init with multiple values");
2323   return EmitLValue(E->getInit(0));
2324 }
2325 
/// Emit an l-value for a conditional operator.  A glvalue conditional
/// is emitted as a branch with the two arm addresses merged by a phi;
/// an rvalue conditional must be an aggregate and is emitted into a
/// temporary instead.
LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert((hasAggregateLLVMType(expr->getType()) &&
            !expr->getType()->isAnyComplexType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  // Bind the common expression of a BinaryConditionalOperator, if any.
  OpaqueValueMapping binding(*this, expr);

  // If the condition constant-folds, emit only the live arm -- unless
  // the dead arm contains a label that could still be jumped to.
  const Expr *condExpr = expr->getCond();
  bool CondExprBool;
  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
    if (!CondExprBool) std::swap(live, dead);

    if (!ContainsLabel(dead))
      return EmitLValue(live);
  }

  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");

  ConditionalEvaluation eval(*this);
  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);

  // Any temporaries created here are conditional.
  EmitBlock(lhsBlock);
  eval.begin(*this);
  LValue lhs = EmitLValue(expr->getTrueExpr());
  eval.end(*this);

  // Only simple (address-carrying) l-values can be merged with a phi.
  if (!lhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");

  // Re-fetch the insert block: emitting the arm may have changed it.
  lhsBlock = Builder.GetInsertBlock();
  Builder.CreateBr(contBlock);

  // Any temporaries created here are conditional.
  EmitBlock(rhsBlock);
  eval.begin(*this);
  LValue rhs = EmitLValue(expr->getFalseExpr());
  eval.end(*this);
  if (!rhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");
  rhsBlock = Builder.GetInsertBlock();

  EmitBlock(contBlock);

  // Merge the two arm addresses in the continuation block.
  llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
                                         "cond-lvalue");
  phi->addIncoming(lhs.getAddress(), lhsBlock);
  phi->addIncoming(rhs.getAddress(), rhsBlock);
  return MakeAddrLValue(phi, expr->getType());
}
2384 
/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members.  This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These two casts are currently treated as no-ops, although they could
  // potentially be real operations depending on the target's ABI.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:

  case CK_NoOp:
  case CK_LValueToRValue:
    // If the operand is not a pure rvalue (or is a record rvalue, which
    // is emitted in memory anyway), just emit it as an l-value directly.
    if (!E->getSubExpr()->Classify(getContext()).isPRValue()
        || E->getType()->isRecordType())
      return EmitLValue(E->getSubExpr());
    // Fall through to synthesize a temporary.

  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject: {
    // These casts only produce lvalues when we're binding a reference to a
    // temporary realized from a (converted) pure rvalue. Emit the expression
    // as a value, copy it into a temporary, and return an lvalue referring to
    // that temporary.
    llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
    EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
    return MakeAddrLValue(V, E->getType());
  }

  case CK_Dynamic: {
    // dynamic_cast to a glvalue: emit the operand's address, then
    // perform the checked conversion on it.
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
    // These conversions don't change the address; emit the operand.
    return EmitLValue(E->getSubExpr());

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *This = LV.getAddress();

    // Perform the derived-to-base conversion
    llvm::Value *Base =
      GetAddressOfBaseClass(This, DerivedClassDecl,
                            E->path_begin(), E->path_end(),
                            /*NullCheckValue=*/false);

    return MakeAddrLValue(Base, E->getType());
  }
  case CK_ToUnion:
    // Scalar-to-union: emit the operand into a union-typed temporary.
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                               E->path_begin(), E->path_end(),
                               /*NullCheckValue=*/false);

    return MakeAddrLValue(Derived, E->getType());
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return MakeAddrLValue(V, E->getType());
  }
  case CK_ObjCObjectLValueCast: {
    // Reinterpret the operand's address as a pointer to the target
    // ObjC object type.
    LValue LV = EmitLValue(E->getSubExpr());
    QualType ToType = getContext().getLValueReferenceType(E->getType());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(ToType));
    return MakeAddrLValue(V, E->getType());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}
2527 
2528 LValue CodeGenFunction::EmitNullInitializationLValue(
2529                                               const CXXScalarValueInitExpr *E) {
2530   QualType Ty = E->getType();
2531   LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2532   EmitNullInitialization(LV.getAddress(), Ty);
2533   return LV;
2534 }
2535 
/// Emit an l-value for an opaque value expression by looking up the
/// binding established by an enclosing OpaqueValueMapping.
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOpaqueLValueMapping(e);
}
2540 
2541 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2542                                            const MaterializeTemporaryExpr *E) {
2543   RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
2544   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2545 }
2546 
2547 RValue CodeGenFunction::EmitRValueForField(LValue LV,
2548                                            const FieldDecl *FD) {
2549   QualType FT = FD->getType();
2550   LValue FieldLV = EmitLValueForField(LV, FD);
2551   if (FT->isAnyComplexType())
2552     return RValue::getComplex(
2553         LoadComplexFromAddr(FieldLV.getAddress(),
2554                             FieldLV.isVolatileQualified()));
2555   else if (CodeGenFunction::hasAggregateLLVMType(FT))
2556     return FieldLV.asAggregateRValue();
2557 
2558   return EmitLoadOfLValue(FieldLV);
2559 }
2560 
2561 //===--------------------------------------------------------------------===//
2562 //                             Expression Emission
2563 //===--------------------------------------------------------------------===//
2564 
/// Emit a call expression as an r-value, dispatching to the specialized
/// emitters (blocks, C++ member/operator calls, CUDA kernel calls,
/// builtins, pseudo-destructors) before falling back to an ordinary
/// call through a function pointer.
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, E->getLocStart());

  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  // Calls whose callee is a known builtin get special emission.
  const Decl *TargetDecl = E->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (unsigned builtinID = FD->getBuiltinID())
      return EmitBuiltinExpr(FD, builtinID, E);
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (const CXXPseudoDestructorExpr *PseudoDtor
          = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    QualType DestroyedType = PseudoDtor->getDestroyedType();
    if (getContext().getLangOpts().ObjCAutoRefCount &&
        DestroyedType->isObjCLifetimeType() &&
        (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
         DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
      // Automatic Reference Counting:
      //   If the pseudo-expression names a retainable object with weak or
      //   strong lifetime, the object shall be released.
      Expr *BaseExpr = PseudoDtor->getBase();
      llvm::Value *BaseValue = NULL;
      Qualifiers BaseQuals;

      // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
      if (PseudoDtor->isArrow()) {
        BaseValue = EmitScalarExpr(BaseExpr);
        const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
        BaseQuals = PTy->getPointeeType().getQualifiers();
      } else {
        LValue BaseLV = EmitLValue(BaseExpr);
        BaseValue = BaseLV.getAddress();
        QualType BaseTy = BaseExpr->getType();
        BaseQuals = BaseTy.getQualifiers();
      }

      // Release the object according to its lifetime qualifier.
      switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        break;

      case Qualifiers::OCL_Strong:
        EmitARCRelease(Builder.CreateLoad(BaseValue,
                          PseudoDtor->getDestroyedType().isVolatileQualified()),
                       /*precise*/ true);
        break;

      case Qualifiers::OCL_Weak:
        EmitARCDestroyWeak(BaseValue);
        break;
      }
    } else {
      // C++ [expr.pseudo]p1:
      //   The result shall only be used as the operand for the function call
      //   operator (), and the result of such a call has type void. The only
      //   effect is the evaluation of the postfix-expression before the dot or
      //   arrow.
      EmitScalarExpr(E->getCallee());
    }

    return RValue::get(0);
  }

  // Ordinary call: evaluate the callee to a function pointer and emit
  // through the common call path.
  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}
2648 
/// Emit an l-value for a binary operator.  Only comma, pointer-to-member
/// access, and assignment can appear here.
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  if (!hasAggregateLLVMType(E->getType())) {
    // Scalar assignment: ARC-qualified LHS types may need a special
    // store sequence.
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    // RHS before LHS (see the __block note above).
    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitLValue(E->getLHS());
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  if (E->getType()->isAnyComplexType())
    return EmitComplexAssignmentLValue(E);

  return EmitAggExprToLValue(E);
}
2692 
2693 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
2694   RValue RV = EmitCallExpr(E);
2695 
2696   if (!RV.isScalar())
2697     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2698 
2699   assert(E->getCallReturnType()->isReferenceType() &&
2700          "Can't have a scalar return unless the return type is a "
2701          "reference type!");
2702 
2703   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2704 }
2705 
/// Emit an l-value for a va_arg expression by evaluating it into a
/// temporary aggregate.
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}
2710 
2711 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
2712   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
2713          && "binding l-value to type which needs a temporary");
2714   AggValueSlot Slot = CreateAggTemp(E->getType());
2715   EmitCXXConstructExpr(E, Slot);
2716   return MakeAddrLValue(Slot.getAddr(), E->getType());
2717 }
2718 
/// Emit an l-value for a typeid expression; the result is the address
/// of the type_info object.
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}
2723 
2724 LValue
2725 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
2726   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2727   Slot.setExternallyDestructed();
2728   EmitAggExpr(E->getSubExpr(), Slot);
2729   EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
2730   return MakeAddrLValue(Slot.getAddr(), E->getType());
2731 }
2732 
2733 LValue
2734 CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
2735   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2736   EmitLambdaExpr(E, Slot);
2737   return MakeAddrLValue(Slot.getAddr(), E->getType());
2738 }
2739 
2740 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
2741   RValue RV = EmitObjCMessageExpr(E);
2742 
2743   if (!RV.isScalar())
2744     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2745 
2746   assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
2747          "Can't have a scalar return unless the return type is a "
2748          "reference type!");
2749 
2750   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2751 }
2752 
2753 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
2754   llvm::Value *V =
2755     CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
2756   return MakeAddrLValue(V, E->getType());
2757 }
2758 
/// Compute the byte offset of the given ivar, delegating to the
/// configured ObjC runtime.
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}
2763 
/// Form an l-value for the given ivar of an object, delegating the
/// address computation to the configured ObjC runtime.
LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}
2771 
2772 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
2773   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
2774   llvm::Value *BaseValue = 0;
2775   const Expr *BaseExpr = E->getBase();
2776   Qualifiers BaseQuals;
2777   QualType ObjectTy;
2778   if (E->isArrow()) {
2779     BaseValue = EmitScalarExpr(BaseExpr);
2780     ObjectTy = BaseExpr->getType()->getPointeeType();
2781     BaseQuals = ObjectTy.getQualifiers();
2782   } else {
2783     LValue BaseLV = EmitLValue(BaseExpr);
2784     // FIXME: this isn't right for bitfields.
2785     BaseValue = BaseLV.getAddress();
2786     ObjectTy = BaseExpr->getType();
2787     BaseQuals = ObjectTy.getQualifiers();
2788   }
2789 
2790   LValue LV =
2791     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
2792                       BaseQuals.getCVRQualifiers());
2793   setObjCGCLValueClass(getContext(), E, LV);
2794   return LV;
2795 }
2796 
/// Emit an l-value for a GNU statement expression by evaluating it into
/// a temporary and returning the temporary's address.
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}
2802 
/// Emit a call through a function pointer of type \p CalleeType.
/// Arranges the argument list and, for unprototyped (K&R) callees,
/// casts the callee to the exact type of the promoted arguments.
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  // Evaluate the arguments; the prototype (if any) drives conversions.
  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call.  The way we make this work is to cast to the exact type
  // of the promoted arguments.
  if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}
2849 
2850 LValue CodeGenFunction::
2851 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
2852   llvm::Value *BaseV;
2853   if (E->getOpcode() == BO_PtrMemI)
2854     BaseV = EmitScalarExpr(E->getLHS());
2855   else
2856     BaseV = EmitLValue(E->getLHS()).getAddress();
2857 
2858   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
2859 
2860   const MemberPointerType *MPT
2861     = E->getRHS()->getType()->getAs<MemberPointerType>();
2862 
2863   llvm::Value *AddV =
2864     CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
2865 
2866   return MakeAddrLValue(AddV, MPT->getPointeeType());
2867 }
2868 
/// Emit a single atomic operation for an AtomicExpr.  \p Ptr is the
/// atomic object, \p Val1/\p Val2 are temporaries holding the operand
/// value(s), \p Dest receives the result (where the operation produces
/// one), and \p Order is the (already-resolved) memory ordering.
static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    // Write the old value back into Val1 (the builtin's expected slot),
    // and store the success flag into Dest.
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    // Atomic load: load from Ptr and copy the value into Dest.
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    // Atomic store: load the operand from its temporary and store it
    // atomically into Ptr.
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  // Everything below is an atomic read-modify-write; map the builtin to
  // an AtomicRMWInst opcode, with an optional post-op for the *_fetch
  // (op-then-fetch) forms.
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    // nand_fetch is handled by applying And below plus the final Not.
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
2995 
2996 // This function emits any expression (scalar, complex, or aggregate)
2997 // into a temporary alloca.
2998 static llvm::Value *
2999 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
3000   llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
3001   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
3002                        /*Init*/ true);
3003   return DeclPtr;
3004 }
3005 
3006 static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
3007                                   llvm::Value *Dest) {
3008   if (Ty->isAnyComplexType())
3009     return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
3010   if (CGF.hasAggregateLLVMType(Ty))
3011     return RValue::getAggregate(Dest);
3012   return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
3013 }
3014 
/// EmitAtomicExpr - Emit an atomic expression: __c11_atomic_* and GNU
/// __atomic_* builtins.
///
/// If the object's size does not match its alignment, or exceeds the
/// target's maximum inline atomic width, the operation is lowered to a call
/// into the atomic support library; otherwise native atomic instructions
/// are emitted.
///
/// \param E    the atomic expression to emit.
/// \param Dest optional address to receive the result; if null and the
///             expression produces a value, a temporary is created.
/// \returns the operation's result as an RValue, or RValue::get(0) for
///          void-typed expressions.
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  // For _Atomic(T), the underlying memory type is T.
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidth =
      getContext().getTargetInfo().getMaxAtomicInlineWidth();
  // Misaligned or over-wide objects cannot use native atomic instructions.
  bool UseLibcall = (Size != Align || Size > MaxInlineWidth);



  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  // __c11_atomic_init is not actually atomic: it is an ordinary
  // initializing store of the appropriate kind (scalar/complex/aggregate).
  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                        AtomicTy.getQualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  // Evaluate the remaining operands.  Operands that must be passed by
  // address (for the rmw/cmpxchg lowerings and libcalls) are materialized
  // into temporaries via EmitValToTemp.
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    // GNU __atomic_load returns its result through the second argument.
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    // GNU __atomic_store reads the new value through the second argument.
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    // Val1 is the address of 'expected'; Val2 holds 'desired' (already an
    // address for __atomic_compare_exchange, otherwise spilled to a temp).
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  // If the expression produces a value and the caller didn't supply a
  // destination, allocate a temporary to hold the result.
  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {

    llvm::SmallVector<QualType, 5> Params;
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare an exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      // The success ordering was just passed; the failure ordering becomes
      // the trailing 'order' argument added below.
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers.  It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    // cmpxchg returns its bool result directly from the call; everything
    // else wrote the result (if any) into Dest.
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }

  // Inline path: reinterpret the operands as pointers to an integer of the
  // object's width so native atomic instructions can be used.
  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  // If the ordering is a compile-time constant, emit a single atomic op
  // with the corresponding LLVM ordering.
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.
  // Emit one basic block per possible ordering and switch on the runtime
  // order value; loads never need release blocks, stores never need
  // acquire blocks.

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    // Both memory_order_consume and memory_order_acquire map to Acquire.
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
3334 
3335 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
3336   assert(Val->getType()->isFPOrFPVectorTy());
3337   if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
3338     return;
3339 
3340   llvm::MDBuilder MDHelper(getLLVMContext());
3341   llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
3342 
3343   cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
3344 }
3345 
namespace {
  // Result holder for emitPseudoObjectExpr below: the helper can produce
  // either an lvalue or an rvalue, and only the member matching the
  // caller's request (forLValue) is meaningful.
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}
3352 
/// Emit a PseudoObjectExpr by walking its semantic-form expressions in
/// order.  Each OpaqueValueExpr is bound to the value of its source
/// expression so later semantic expressions can refer to it; the
/// expression designated as the result is captured as an lvalue or rvalue
/// according to \p forLValue.  All bindings are undone before returning.
///
/// \param slot an aggregate slot the result may be evaluated into when the
///             result is a non-complex aggregate rvalue.
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  // Bindings made along the way; unbound in bulk at the end.
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        // Aggregate result: emit the source straight into the slot, then
        // bind the opaque value to the slot's address.
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}
3419 
3420 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
3421                                                AggValueSlot slot) {
3422   return emitPseudoObjectExpr(*this, E, false, slot).RV;
3423 }
3424 
3425 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
3426   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
3427 }
3428