1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGCall.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Intrinsics.h"
26 #include "llvm/LLVMContext.h"
27 #include "llvm/Support/MDBuilder.h"
28 #include "llvm/Target/TargetData.h"
29 using namespace clang;
30 using namespace CodeGen;
31 
32 //===--------------------------------------------------------------------===//
33 //                        Miscellaneous Helper Methods
34 //===--------------------------------------------------------------------===//
35 
36 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
37   unsigned addressSpace =
38     cast<llvm::PointerType>(value->getType())->getAddressSpace();
39 
40   llvm::PointerType *destType = Int8PtrTy;
41   if (addressSpace)
42     destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
43 
44   if (value->getType() == destType) return value;
45   return Builder.CreateBitCast(value, destType);
46 }
47 
48 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
49 /// block.
50 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
51                                                     const Twine &Name) {
52   if (!Builder.isNamePreserving())
53     return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
54   return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
55 }
56 
57 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
58                                      llvm::Value *Init) {
59   llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
60   llvm::BasicBlock *Block = AllocaInsertPt->getParent();
61   Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
62 }
63 
64 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
65                                                 const Twine &Name) {
66   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
67   // FIXME: Should we prefer the preferred type alignment here?
68   CharUnits Align = getContext().getTypeAlignInChars(Ty);
69   Alloc->setAlignment(Align.getQuantity());
70   return Alloc;
71 }
72 
73 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
74                                                  const Twine &Name) {
75   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
76   // FIXME: Should we prefer the preferred type alignment here?
77   CharUnits Align = getContext().getTypeAlignInChars(Ty);
78   Alloc->setAlignment(Align.getQuantity());
79   return Alloc;
80 }
81 
82 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
83 /// expression and compare the result against zero, returning an Int1Ty value.
84 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
85   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
86     llvm::Value *MemPtr = EmitScalarExpr(E);
87     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
88   }
89 
90   QualType BoolTy = getContext().BoolTy;
91   if (!E->getType()->isAnyComplexType())
92     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
93 
94   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
95 }
96 
97 /// EmitIgnoredExpr - Emit code to compute the specified expression,
98 /// ignoring the result.
99 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
100   if (E->isRValue())
101     return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
102 
103   // Just emit it as an l-value and drop the result.
104   EmitLValue(E);
105 }
106 
107 /// EmitAnyExpr - Emit code to compute the specified expression which
108 /// can have any type.  The result is returned as an RValue struct.
109 /// If this is an aggregate expression, AggSlot indicates where the
110 /// result should be returned.
111 RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
112                                     bool IgnoreResult) {
113   if (!hasAggregateLLVMType(E->getType()))
114     return RValue::get(EmitScalarExpr(E, IgnoreResult));
115   else if (E->getType()->isAnyComplexType())
116     return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
117 
118   EmitAggExpr(E, AggSlot, IgnoreResult);
119   return AggSlot.asRValue();
120 }
121 
122 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
123 /// always be accessible even if no aggregate location is provided.
124 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
125   AggValueSlot AggSlot = AggValueSlot::ignored();
126 
127   if (hasAggregateLLVMType(E->getType()) &&
128       !E->getType()->isAnyComplexType())
129     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
130   return EmitAnyExpr(E, AggSlot);
131 }
132 
133 /// EmitAnyExprToMem - Evaluate an expression into a given memory
134 /// location.
135 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
136                                        llvm::Value *Location,
137                                        Qualifiers Quals,
138                                        bool IsInit) {
139   // FIXME: This function should take an LValue as an argument.
140   if (E->getType()->isAnyComplexType()) {
141     EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
142   } else if (hasAggregateLLVMType(E->getType())) {
143     CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
144     EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
145                                          AggValueSlot::IsDestructed_t(IsInit),
146                                          AggValueSlot::DoesNotNeedGCBarriers,
147                                          AggValueSlot::IsAliased_t(!IsInit)));
148   } else {
149     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
150     LValue LV = MakeAddrLValue(Location, E->getType());
151     EmitStoreThroughLValue(RV, LV);
152   }
153 }
154 
155 namespace {
156 /// \brief An adjustment to be made to the temporary created when emitting a
157 /// reference binding, which accesses a particular subobject of that temporary.
158   struct SubobjectAdjustment {
159     enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
160 
161     union {
162       struct {
163         const CastExpr *BasePath;
164         const CXXRecordDecl *DerivedClass;
165       } DerivedToBase;
166 
167       FieldDecl *Field;
168     };
169 
170     SubobjectAdjustment(const CastExpr *BasePath,
171                         const CXXRecordDecl *DerivedClass)
172       : Kind(DerivedToBaseAdjustment) {
173       DerivedToBase.BasePath = BasePath;
174       DerivedToBase.DerivedClass = DerivedClass;
175     }
176 
177     SubobjectAdjustment(FieldDecl *Field)
178       : Kind(FieldAdjustment) {
179       this->Field = Field;
180     }
181   };
182 }
183 
184 static llvm::Value *
185 CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
186                          const NamedDecl *InitializedDecl) {
187   if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
188     if (VD->hasGlobalStorage()) {
189       SmallString<256> Name;
190       llvm::raw_svector_ostream Out(Name);
191       CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
192       Out.flush();
193 
194       llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
195 
196       // Create the reference temporary.
197       llvm::GlobalValue *RefTemp =
198         new llvm::GlobalVariable(CGF.CGM.getModule(),
199                                  RefTempTy, /*isConstant=*/false,
200                                  llvm::GlobalValue::InternalLinkage,
201                                  llvm::Constant::getNullValue(RefTempTy),
202                                  Name.str());
203       return RefTemp;
204     }
205   }
206 
207   return CGF.CreateMemTemp(Type, "ref.tmp");
208 }
209 
210 static llvm::Value *
211 EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
212                             llvm::Value *&ReferenceTemporary,
213                             const CXXDestructorDecl *&ReferenceTemporaryDtor,
214                             QualType &ObjCARCReferenceLifetimeType,
215                             const NamedDecl *InitializedDecl) {
216   // Look through single-element init lists that claim to be lvalues. They're
217   // just syntactic wrappers in this case.
218   if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
219     if (ILE->getNumInits() == 1 && ILE->isGLValue())
220       E = ILE->getInit(0);
221   }
222 
223   // Look through expressions for materialized temporaries (for now).
224   if (const MaterializeTemporaryExpr *M
225                                       = dyn_cast<MaterializeTemporaryExpr>(E)) {
226     // Objective-C++ ARC:
227     //   If we are binding a reference to a temporary that has ownership, we
228     //   need to perform retain/release operations on the temporary.
229     if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
230         E->getType()->isObjCLifetimeType() &&
231         (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
232          E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
233          E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
234       ObjCARCReferenceLifetimeType = E->getType();
235 
236     E = M->GetTemporaryExpr();
237   }
238 
239   if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
240     E = DAE->getExpr();
241 
242   if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
243     CGF.enterFullExpression(EWC);
244     CodeGenFunction::RunCleanupsScope Scope(CGF);
245 
246     return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
247                                        ReferenceTemporary,
248                                        ReferenceTemporaryDtor,
249                                        ObjCARCReferenceLifetimeType,
250                                        InitializedDecl);
251   }
252 
253   RValue RV;
254   if (E->isGLValue()) {
255     // Emit the expression as an lvalue.
256     LValue LV = CGF.EmitLValue(E);
257 
258     if (LV.isSimple())
259       return LV.getAddress();
260 
261     // We have to load the lvalue.
262     RV = CGF.EmitLoadOfLValue(LV);
263   } else {
264     if (!ObjCARCReferenceLifetimeType.isNull()) {
265       ReferenceTemporary = CreateReferenceTemporary(CGF,
266                                                   ObjCARCReferenceLifetimeType,
267                                                     InitializedDecl);
268 
269 
270       LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
271                                              ObjCARCReferenceLifetimeType);
272 
273       CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
274                          RefTempDst, false);
275 
276       bool ExtendsLifeOfTemporary = false;
277       if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
278         if (Var->extendsLifetimeOfTemporary())
279           ExtendsLifeOfTemporary = true;
280       } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
281         ExtendsLifeOfTemporary = true;
282       }
283 
284       if (!ExtendsLifeOfTemporary) {
285         // Since the lifetime of this temporary isn't going to be extended,
286         // we need to clean it up ourselves at the end of the full expression.
287         switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
288         case Qualifiers::OCL_None:
289         case Qualifiers::OCL_ExplicitNone:
290         case Qualifiers::OCL_Autoreleasing:
291           break;
292 
293         case Qualifiers::OCL_Strong: {
294           assert(!ObjCARCReferenceLifetimeType->isArrayType());
295           CleanupKind cleanupKind = CGF.getARCCleanupKind();
296           CGF.pushDestroy(cleanupKind,
297                           ReferenceTemporary,
298                           ObjCARCReferenceLifetimeType,
299                           CodeGenFunction::destroyARCStrongImprecise,
300                           cleanupKind & EHCleanup);
301           break;
302         }
303 
304         case Qualifiers::OCL_Weak:
305           assert(!ObjCARCReferenceLifetimeType->isArrayType());
306           CGF.pushDestroy(NormalAndEHCleanup,
307                           ReferenceTemporary,
308                           ObjCARCReferenceLifetimeType,
309                           CodeGenFunction::destroyARCWeak,
310                           /*useEHCleanupForArray*/ true);
311           break;
312         }
313 
314         ObjCARCReferenceLifetimeType = QualType();
315       }
316 
317       return ReferenceTemporary;
318     }
319 
320     SmallVector<SubobjectAdjustment, 2> Adjustments;
321     while (true) {
322       E = E->IgnoreParens();
323 
324       if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
325         if ((CE->getCastKind() == CK_DerivedToBase ||
326              CE->getCastKind() == CK_UncheckedDerivedToBase) &&
327             E->getType()->isRecordType()) {
328           E = CE->getSubExpr();
329           CXXRecordDecl *Derived
330             = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
331           Adjustments.push_back(SubobjectAdjustment(CE, Derived));
332           continue;
333         }
334 
335         if (CE->getCastKind() == CK_NoOp) {
336           E = CE->getSubExpr();
337           continue;
338         }
339       } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
340         if (!ME->isArrow() && ME->getBase()->isRValue()) {
341           assert(ME->getBase()->getType()->isRecordType());
342           if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
343             E = ME->getBase();
344             Adjustments.push_back(SubobjectAdjustment(Field));
345             continue;
346           }
347         }
348       }
349 
350       if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
351         if (opaque->getType()->isRecordType())
352           return CGF.EmitOpaqueValueLValue(opaque).getAddress();
353 
354       // Nothing changed.
355       break;
356     }
357 
358     // Create a reference temporary if necessary.
359     AggValueSlot AggSlot = AggValueSlot::ignored();
360     if (CGF.hasAggregateLLVMType(E->getType()) &&
361         !E->getType()->isAnyComplexType()) {
362       ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
363                                                     InitializedDecl);
364       CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
365       AggValueSlot::IsDestructed_t isDestructed
366         = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
367       AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
368                                       Qualifiers(), isDestructed,
369                                       AggValueSlot::DoesNotNeedGCBarriers,
370                                       AggValueSlot::IsNotAliased);
371     }
372 
373     if (InitializedDecl) {
374       // Get the destructor for the reference temporary.
375       if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
376         CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
377         if (!ClassDecl->hasTrivialDestructor())
378           ReferenceTemporaryDtor = ClassDecl->getDestructor();
379       }
380     }
381 
382     RV = CGF.EmitAnyExpr(E, AggSlot);
383 
384     // Check if need to perform derived-to-base casts and/or field accesses, to
385     // get from the temporary object we created (and, potentially, for which we
386     // extended the lifetime) to the subobject we're binding the reference to.
387     if (!Adjustments.empty()) {
388       llvm::Value *Object = RV.getAggregateAddr();
389       for (unsigned I = Adjustments.size(); I != 0; --I) {
390         SubobjectAdjustment &Adjustment = Adjustments[I-1];
391         switch (Adjustment.Kind) {
392         case SubobjectAdjustment::DerivedToBaseAdjustment:
393           Object =
394               CGF.GetAddressOfBaseClass(Object,
395                                         Adjustment.DerivedToBase.DerivedClass,
396                               Adjustment.DerivedToBase.BasePath->path_begin(),
397                               Adjustment.DerivedToBase.BasePath->path_end(),
398                                         /*NullCheckValue=*/false);
399           break;
400 
401         case SubobjectAdjustment::FieldAdjustment: {
402           LValue LV = CGF.MakeAddrLValue(Object, E->getType());
403           LV = CGF.EmitLValueForField(LV, Adjustment.Field);
404           if (LV.isSimple()) {
405             Object = LV.getAddress();
406             break;
407           }
408 
409           // For non-simple lvalues, we actually have to create a copy of
410           // the object we're binding to.
411           QualType T = Adjustment.Field->getType().getNonReferenceType()
412                                                   .getUnqualifiedType();
413           Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
414           LValue TempLV = CGF.MakeAddrLValue(Object,
415                                              Adjustment.Field->getType());
416           CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
417           break;
418         }
419 
420         }
421       }
422 
423       return Object;
424     }
425   }
426 
427   if (RV.isAggregate())
428     return RV.getAggregateAddr();
429 
430   // Create a temporary variable that we can bind the reference to.
431   ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
432                                                 InitializedDecl);
433 
434 
435   unsigned Alignment =
436     CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
437   if (RV.isScalar())
438     CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
439                           /*Volatile=*/false, Alignment, E->getType());
440   else
441     CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
442                            /*Volatile=*/false);
443   return ReferenceTemporary;
444 }
445 
446 RValue
447 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
448                                             const NamedDecl *InitializedDecl) {
449   llvm::Value *ReferenceTemporary = 0;
450   const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
451   QualType ObjCARCReferenceLifetimeType;
452   llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
453                                                    ReferenceTemporaryDtor,
454                                                    ObjCARCReferenceLifetimeType,
455                                                    InitializedDecl);
456   if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
457     return RValue::get(Value);
458 
459   // Make sure to call the destructor for the reference temporary.
460   const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
461   if (VD && VD->hasGlobalStorage()) {
462     if (ReferenceTemporaryDtor) {
463       llvm::Constant *DtorFn =
464         CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
465       CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
466                                     cast<llvm::Constant>(ReferenceTemporary));
467     } else {
468       assert(!ObjCARCReferenceLifetimeType.isNull());
469       // Note: We intentionally do not register a global "destructor" to
470       // release the object.
471     }
472 
473     return RValue::get(Value);
474   }
475 
476   if (ReferenceTemporaryDtor)
477     PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
478   else {
479     switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
480     case Qualifiers::OCL_None:
481       llvm_unreachable(
482                       "Not a reference temporary that needs to be deallocated");
483     case Qualifiers::OCL_ExplicitNone:
484     case Qualifiers::OCL_Autoreleasing:
485       // Nothing to do.
486       break;
487 
488     case Qualifiers::OCL_Strong: {
489       bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
490       CleanupKind cleanupKind = getARCCleanupKind();
491       pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
492                   precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
493                   cleanupKind & EHCleanup);
494       break;
495     }
496 
497     case Qualifiers::OCL_Weak: {
498       // __weak objects always get EH cleanups; otherwise, exceptions
499       // could cause really nasty crashes instead of mere leaks.
500       pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
501                   ObjCARCReferenceLifetimeType, destroyARCWeak, true);
502       break;
503     }
504     }
505   }
506 
507   return RValue::get(Value);
508 }
509 
510 
511 /// getAccessedFieldNo - Given an encoded value and a result number, return the
512 /// input field number being accessed.
513 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
514                                              const llvm::Constant *Elts) {
515   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
516       ->getZExtValue();
517 }
518 
519 void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
520   if (!CatchUndefined)
521     return;
522 
523   // This needs to be to the standard address space.
524   Address = Builder.CreateBitCast(Address, Int8PtrTy);
525 
526   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
527 
528   // In time, people may want to control this and use a 1 here.
529   llvm::Value *Arg = Builder.getFalse();
530   llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
531   llvm::BasicBlock *Cont = createBasicBlock();
532   llvm::BasicBlock *Check = createBasicBlock();
533   llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
534   Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
535 
536   EmitBlock(Check);
537   Builder.CreateCondBr(Builder.CreateICmpUGE(C,
538                                         llvm::ConstantInt::get(IntPtrTy, Size)),
539                        Cont, getTrapBB());
540   EmitBlock(Cont);
541 }
542 
543 
544 CodeGenFunction::ComplexPairTy CodeGenFunction::
545 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
546                          bool isInc, bool isPre) {
547   ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
548                                             LV.isVolatileQualified());
549 
550   llvm::Value *NextVal;
551   if (isa<llvm::IntegerType>(InVal.first->getType())) {
552     uint64_t AmountVal = isInc ? 1 : -1;
553     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
554 
555     // Add the inc/dec to the real part.
556     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
557   } else {
558     QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
559     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
560     if (!isInc)
561       FVal.changeSign();
562     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
563 
564     // Add the inc/dec to the real part.
565     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
566   }
567 
568   ComplexPairTy IncVal(NextVal, InVal.second);
569 
570   // Store the updated result through the lvalue.
571   StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
572 
573   // If this is a postinc, return the value read from memory, otherwise use the
574   // updated value.
575   return isPre ? IncVal : InVal;
576 }
577 
578 
579 //===----------------------------------------------------------------------===//
580 //                         LValue Expression Emission
581 //===----------------------------------------------------------------------===//
582 
583 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
584   if (Ty->isVoidType())
585     return RValue::get(0);
586 
587   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
588     llvm::Type *EltTy = ConvertType(CTy->getElementType());
589     llvm::Value *U = llvm::UndefValue::get(EltTy);
590     return RValue::getComplex(std::make_pair(U, U));
591   }
592 
593   // If this is a use of an undefined aggregate type, the aggregate must have an
594   // identifiable address.  Just because the contents of the value are undefined
595   // doesn't mean that the address can't be taken and compared.
596   if (hasAggregateLLVMType(Ty)) {
597     llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
598     return RValue::getAggregate(DestPtr);
599   }
600 
601   return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
602 }
603 
604 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
605                                               const char *Name) {
606   ErrorUnsupported(E, Name);
607   return GetUndefRValue(E->getType());
608 }
609 
610 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
611                                               const char *Name) {
612   ErrorUnsupported(E, Name);
613   llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
614   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
615 }
616 
617 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
618   LValue LV = EmitLValue(E);
619   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
620     EmitCheck(LV.getAddress(),
621               getContext().getTypeSizeInChars(E->getType()).getQuantity());
622   return LV;
623 }
624 
625 /// EmitLValue - Emit code to compute a designator that specifies the location
626 /// of the expression.
627 ///
628 /// This can return one of two things: a simple address or a bitfield reference.
629 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
630 /// an LLVM pointer type.
631 ///
632 /// If this returns a bitfield reference, nothing about the pointee type of the
633 /// LLVM value is known: For example, it may not be a pointer to an integer.
634 ///
635 /// If this returns a normal address, and if the lvalue's C type is fixed size,
636 /// this method guarantees that the returned pointer type will point to an LLVM
637 /// type of the same size of the lvalue's type.  If the lvalue has a variable
638 /// length type, this is not possible.
639 ///
640 LValue CodeGenFunction::EmitLValue(const Expr *E) {
641   switch (E->getStmtClass()) {
642   default: return EmitUnsupportedLValue(E, "l-value expression");
643 
644   case Expr::ObjCPropertyRefExprClass:
645     llvm_unreachable("cannot emit a property reference directly");
646 
647   case Expr::ObjCSelectorExprClass:
648   return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
649   case Expr::ObjCIsaExprClass:
650     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
651   case Expr::BinaryOperatorClass:
652     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
653   case Expr::CompoundAssignOperatorClass:
654     if (!E->getType()->isAnyComplexType())
655       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
656     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
657   case Expr::CallExprClass:
658   case Expr::CXXMemberCallExprClass:
659   case Expr::CXXOperatorCallExprClass:
660   case Expr::UserDefinedLiteralClass:
661     return EmitCallExprLValue(cast<CallExpr>(E));
662   case Expr::VAArgExprClass:
663     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
664   case Expr::DeclRefExprClass:
665     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
666   case Expr::ParenExprClass:
667     return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
668   case Expr::GenericSelectionExprClass:
669     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
670   case Expr::PredefinedExprClass:
671     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
672   case Expr::StringLiteralClass:
673     return EmitStringLiteralLValue(cast<StringLiteral>(E));
674   case Expr::ObjCEncodeExprClass:
675     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
676   case Expr::PseudoObjectExprClass:
677     return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
678   case Expr::InitListExprClass:
679     assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
680            "Only single-element init list can be lvalue.");
681     return EmitLValue(cast<InitListExpr>(E)->getInit(0));
682 
683   case Expr::CXXTemporaryObjectExprClass:
684   case Expr::CXXConstructExprClass:
685     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
686   case Expr::CXXBindTemporaryExprClass:
687     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
688   case Expr::LambdaExprClass:
689     return EmitLambdaLValue(cast<LambdaExpr>(E));
690 
691   case Expr::ExprWithCleanupsClass: {
692     const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
693     enterFullExpression(cleanups);
694     RunCleanupsScope Scope(*this);
695     return EmitLValue(cleanups->getSubExpr());
696   }
697 
698   case Expr::CXXScalarValueInitExprClass:
699     return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
700   case Expr::CXXDefaultArgExprClass:
701     return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
702   case Expr::CXXTypeidExprClass:
703     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
704 
705   case Expr::ObjCMessageExprClass:
706     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
707   case Expr::ObjCIvarRefExprClass:
708     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
709   case Expr::StmtExprClass:
710     return EmitStmtExprLValue(cast<StmtExpr>(E));
711   case Expr::UnaryOperatorClass:
712     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
713   case Expr::ArraySubscriptExprClass:
714     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
715   case Expr::ExtVectorElementExprClass:
716     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
717   case Expr::MemberExprClass:
718     return EmitMemberExpr(cast<MemberExpr>(E));
719   case Expr::CompoundLiteralExprClass:
720     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
721   case Expr::ConditionalOperatorClass:
722     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
723   case Expr::BinaryConditionalOperatorClass:
724     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
725   case Expr::ChooseExprClass:
726     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
727   case Expr::OpaqueValueExprClass:
728     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
729   case Expr::SubstNonTypeTemplateParmExprClass:
730     return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
731   case Expr::ImplicitCastExprClass:
732   case Expr::CStyleCastExprClass:
733   case Expr::CXXFunctionalCastExprClass:
734   case Expr::CXXStaticCastExprClass:
735   case Expr::CXXDynamicCastExprClass:
736   case Expr::CXXReinterpretCastExprClass:
737   case Expr::CXXConstCastExprClass:
738   case Expr::ObjCBridgedCastExprClass:
739     return EmitCastLValue(cast<CastExpr>(E));
740 
741   case Expr::MaterializeTemporaryExprClass:
742     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
743   }
744 }
745 
746 /// Given an object of the given canonical type, can we safely copy a
747 /// value out of it based on its initializer?
748 static bool isConstantEmittableObjectType(QualType type) {
749   assert(type.isCanonical());
750   assert(!type->isReferenceType());
751 
752   // Must be const-qualified but non-volatile.
753   Qualifiers qs = type.getLocalQualifiers();
754   if (!qs.hasConst() || qs.hasVolatile()) return false;
755 
756   // Otherwise, all object types satisfy this except C++ classes with
757   // mutable subobjects or non-trivial copy/destroy behavior.
758   if (const RecordType *RT = dyn_cast<RecordType>(type))
759     if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
760       if (RD->hasMutableFields() || !RD->isTrivial())
761         return false;
762 
763   return true;
764 }
765 
766 /// Can we constant-emit a load of a reference to a variable of the
767 /// given type?  This is different from predicates like
768 /// Decl::isUsableInConstantExpressions because we do want it to apply
769 /// in situations that don't necessarily satisfy the language's rules
770 /// for this (e.g. C++'s ODR-use rules).  For example, we want to able
771 /// to do this with const float variables even if those variables
772 /// aren't marked 'constexpr'.
773 enum ConstantEmissionKind {
774   CEK_None,
775   CEK_AsReferenceOnly,
776   CEK_AsValueOrReference,
777   CEK_AsValueOnly
778 };
779 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
780   type = type.getCanonicalType();
781   if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
782     if (isConstantEmittableObjectType(ref->getPointeeType()))
783       return CEK_AsValueOrReference;
784     return CEK_AsReferenceOnly;
785   }
786   if (isConstantEmittableObjectType(type))
787     return CEK_AsValueOnly;
788   return CEK_None;
789 }
790 
791 /// Try to emit a reference to the given value without producing it as
792 /// an l-value.  This is actually more than an optimization: we can't
793 /// produce an l-value for variables that we never actually captured
794 /// in a block or lambda, which means const int variables or constexpr
795 /// literals or similar.
796 CodeGenFunction::ConstantEmission
797 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
798   ValueDecl *value = refExpr->getDecl();
799 
800   // The value needs to be an enum constant or a constant variable.
801   ConstantEmissionKind CEK;
802   if (isa<ParmVarDecl>(value)) {
803     CEK = CEK_None;
804   } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
805     CEK = checkVarTypeForConstantEmission(var->getType());
806   } else if (isa<EnumConstantDecl>(value)) {
807     CEK = CEK_AsValueOnly;
808   } else {
809     CEK = CEK_None;
810   }
811   if (CEK == CEK_None) return ConstantEmission();
812 
813   Expr::EvalResult result;
814   bool resultIsReference;
815   QualType resultType;
816 
817   // It's best to evaluate all the way as an r-value if that's permitted.
818   if (CEK != CEK_AsReferenceOnly &&
819       refExpr->EvaluateAsRValue(result, getContext())) {
820     resultIsReference = false;
821     resultType = refExpr->getType();
822 
823   // Otherwise, try to evaluate as an l-value.
824   } else if (CEK != CEK_AsValueOnly &&
825              refExpr->EvaluateAsLValue(result, getContext())) {
826     resultIsReference = true;
827     resultType = value->getType();
828 
829   // Failure.
830   } else {
831     return ConstantEmission();
832   }
833 
834   // In any case, if the initializer has side-effects, abandon ship.
835   if (result.HasSideEffects)
836     return ConstantEmission();
837 
838   // Emit as a constant.
839   llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
840 
841   // Make sure we emit a debug reference to the global variable.
842   // This should probably fire even for
843   if (isa<VarDecl>(value)) {
844     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
845       EmitDeclRefExprDbgValue(refExpr, C);
846   } else {
847     assert(isa<EnumConstantDecl>(value));
848     EmitDeclRefExprDbgValue(refExpr, C);
849   }
850 
851   // If we emitted a reference constant, we need to dereference that.
852   if (resultIsReference)
853     return ConstantEmission::forReference(C);
854 
855   return ConstantEmission::forValue(C);
856 }
857 
858 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
859   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
860                           lvalue.getAlignment().getQuantity(),
861                           lvalue.getType(), lvalue.getTBAAInfo());
862 }
863 
864 static bool hasBooleanRepresentation(QualType Ty) {
865   if (Ty->isBooleanType())
866     return true;
867 
868   if (const EnumType *ET = Ty->getAs<EnumType>())
869     return ET->getDecl()->getIntegerType()->isBooleanType();
870 
871   if (const AtomicType *AT = Ty->getAs<AtomicType>())
872     return hasBooleanRepresentation(AT->getValueType());
873 
874   return false;
875 }
876 
877 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
878   const EnumType *ET = Ty->getAs<EnumType>();
879   bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
880                                  CGM.getCodeGenOpts().StrictEnums &&
881                                  !ET->getDecl()->isFixed());
882   bool IsBool = hasBooleanRepresentation(Ty);
883   if (!IsBool && !IsRegularCPlusPlusEnum)
884     return NULL;
885 
886   llvm::APInt Min;
887   llvm::APInt End;
888   if (IsBool) {
889     Min = llvm::APInt(8, 0);
890     End = llvm::APInt(8, 2);
891   } else {
892     const EnumDecl *ED = ET->getDecl();
893     llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
894     unsigned Bitwidth = LTy->getScalarSizeInBits();
895     unsigned NumNegativeBits = ED->getNumNegativeBits();
896     unsigned NumPositiveBits = ED->getNumPositiveBits();
897 
898     if (NumNegativeBits) {
899       unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
900       assert(NumBits <= Bitwidth);
901       End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
902       Min = -End;
903     } else {
904       assert(NumPositiveBits <= Bitwidth);
905       End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
906       Min = llvm::APInt(Bitwidth, 0);
907     }
908   }
909 
910   llvm::MDBuilder MDHelper(getLLVMContext());
911   return MDHelper.createRange(Min, End);
912 }
913 
914 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
915                                               unsigned Alignment, QualType Ty,
916                                               llvm::MDNode *TBAAInfo) {
917   llvm::LoadInst *Load = Builder.CreateLoad(Addr);
918   if (Volatile)
919     Load->setVolatile(true);
920   if (Alignment)
921     Load->setAlignment(Alignment);
922   if (TBAAInfo)
923     CGM.DecorateInstruction(Load, TBAAInfo);
924   // If this is an atomic type, all normal reads must be atomic
925   if (Ty->isAtomicType())
926     Load->setAtomic(llvm::SequentiallyConsistent);
927 
928   if (CGM.getCodeGenOpts().OptimizationLevel > 0)
929     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
930       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
931 
932   return EmitFromMemory(Load, Ty);
933 }
934 
935 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
936   // Bool has a different representation in memory than in registers.
937   if (hasBooleanRepresentation(Ty)) {
938     // This should really always be an i1, but sometimes it's already
939     // an i8, and it's awkward to track those cases down.
940     if (Value->getType()->isIntegerTy(1))
941       return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
942     assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
943   }
944 
945   return Value;
946 }
947 
948 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
949   // Bool has a different representation in memory than in registers.
950   if (hasBooleanRepresentation(Ty)) {
951     assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
952     return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
953   }
954 
955   return Value;
956 }
957 
958 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
959                                         bool Volatile, unsigned Alignment,
960                                         QualType Ty,
961                                         llvm::MDNode *TBAAInfo,
962                                         bool isInit) {
963   Value = EmitToMemory(Value, Ty);
964 
965   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
966   if (Alignment)
967     Store->setAlignment(Alignment);
968   if (TBAAInfo)
969     CGM.DecorateInstruction(Store, TBAAInfo);
970   if (!isInit && Ty->isAtomicType())
971     Store->setAtomic(llvm::SequentiallyConsistent);
972 }
973 
974 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
975     bool isInit) {
976   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
977                     lvalue.getAlignment().getQuantity(), lvalue.getType(),
978                     lvalue.getTBAAInfo(), isInit);
979 }
980 
981 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
982 /// method emits the address of the lvalue, then loads the result as an rvalue,
983 /// returning the rvalue.
984 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
985   if (LV.isObjCWeak()) {
986     // load of a __weak object.
987     llvm::Value *AddrWeakObj = LV.getAddress();
988     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
989                                                              AddrWeakObj));
990   }
991   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
992     return RValue::get(EmitARCLoadWeak(LV.getAddress()));
993 
994   if (LV.isSimple()) {
995     assert(!LV.getType()->isFunctionType());
996 
997     // Everything needs a load.
998     return RValue::get(EmitLoadOfScalar(LV));
999   }
1000 
1001   if (LV.isVectorElt()) {
1002     llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
1003                                               LV.isVolatileQualified());
1004     Load->setAlignment(LV.getAlignment().getQuantity());
1005     return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1006                                                     "vecext"));
1007   }
1008 
1009   // If this is a reference to a subset of the elements of a vector, either
1010   // shuffle the input or extract/insert them as appropriate.
1011   if (LV.isExtVectorElt())
1012     return EmitLoadOfExtVectorElementLValue(LV);
1013 
1014   assert(LV.isBitField() && "Unknown LValue type!");
1015   return EmitLoadOfBitfieldLValue(LV);
1016 }
1017 
1018 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
1019   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1020 
1021   // Get the output type.
1022   llvm::Type *ResLTy = ConvertType(LV.getType());
1023   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
1024 
1025   // Compute the result as an OR of all of the individual component accesses.
1026   llvm::Value *Res = 0;
1027   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1028     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1029 
1030     // Get the field pointer.
1031     llvm::Value *Ptr = LV.getBitFieldBaseAddr();
1032 
1033     // Only offset by the field index if used, so that incoming values are not
1034     // required to be structures.
1035     if (AI.FieldIndex)
1036       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1037 
1038     // Offset by the byte offset, if used.
1039     if (!AI.FieldByteOffset.isZero()) {
1040       Ptr = EmitCastToVoidPtr(Ptr);
1041       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1042                                        "bf.field.offs");
1043     }
1044 
1045     // Cast to the access type.
1046     llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
1047                        CGM.getContext().getTargetAddressSpace(LV.getType()));
1048     Ptr = Builder.CreateBitCast(Ptr, PTy);
1049 
1050     // Perform the load.
1051     llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
1052     if (!AI.AccessAlignment.isZero())
1053       Load->setAlignment(AI.AccessAlignment.getQuantity());
1054 
1055     // Shift out unused low bits and mask out unused high bits.
1056     llvm::Value *Val = Load;
1057     if (AI.FieldBitStart)
1058       Val = Builder.CreateLShr(Load, AI.FieldBitStart);
1059     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
1060                                                             AI.TargetBitWidth),
1061                             "bf.clear");
1062 
1063     // Extend or truncate to the target size.
1064     if (AI.AccessWidth < ResSizeInBits)
1065       Val = Builder.CreateZExt(Val, ResLTy);
1066     else if (AI.AccessWidth > ResSizeInBits)
1067       Val = Builder.CreateTrunc(Val, ResLTy);
1068 
1069     // Shift into place, and OR into the result.
1070     if (AI.TargetBitOffset)
1071       Val = Builder.CreateShl(Val, AI.TargetBitOffset);
1072     Res = Res ? Builder.CreateOr(Res, Val) : Val;
1073   }
1074 
1075   // If the bit-field is signed, perform the sign-extension.
1076   //
1077   // FIXME: This can easily be folded into the load of the high bits, which
1078   // could also eliminate the mask of high bits in some situations.
1079   if (Info.isSigned()) {
1080     unsigned ExtraBits = ResSizeInBits - Info.getSize();
1081     if (ExtraBits)
1082       Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
1083                                ExtraBits, "bf.val.sext");
1084   }
1085 
1086   return RValue::get(Res);
1087 }
1088 
1089 // If this is a reference to a subset of the elements of a vector, create an
1090 // appropriate shufflevector.
1091 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1092   llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
1093                                             LV.isVolatileQualified());
1094   Load->setAlignment(LV.getAlignment().getQuantity());
1095   llvm::Value *Vec = Load;
1096 
1097   const llvm::Constant *Elts = LV.getExtVectorElts();
1098 
1099   // If the result of the expression is a non-vector type, we must be extracting
1100   // a single element.  Just codegen as an extractelement.
1101   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1102   if (!ExprVT) {
1103     unsigned InIdx = getAccessedFieldNo(0, Elts);
1104     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1105     return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1106   }
1107 
1108   // Always use shuffle vector to try to retain the original program structure
1109   unsigned NumResultElts = ExprVT->getNumElements();
1110 
1111   SmallVector<llvm::Constant*, 4> Mask;
1112   for (unsigned i = 0; i != NumResultElts; ++i)
1113     Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
1114 
1115   llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1116   Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1117                                     MaskV);
1118   return RValue::get(Vec);
1119 }
1120 
1121 
1122 
1123 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
1124 /// lvalue, where both are guaranteed to the have the same type, and that type
1125 /// is 'Ty'.
1126 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
1127   if (!Dst.isSimple()) {
1128     if (Dst.isVectorElt()) {
1129       // Read/modify/write the vector, inserting the new element.
1130       llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
1131                                                 Dst.isVolatileQualified());
1132       Load->setAlignment(Dst.getAlignment().getQuantity());
1133       llvm::Value *Vec = Load;
1134       Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
1135                                         Dst.getVectorIdx(), "vecins");
1136       llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
1137                                                    Dst.isVolatileQualified());
1138       Store->setAlignment(Dst.getAlignment().getQuantity());
1139       return;
1140     }
1141 
1142     // If this is an update of extended vector elements, insert them as
1143     // appropriate.
1144     if (Dst.isExtVectorElt())
1145       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
1146 
1147     assert(Dst.isBitField() && "Unknown LValue type");
1148     return EmitStoreThroughBitfieldLValue(Src, Dst);
1149   }
1150 
1151   // There's special magic for assigning into an ARC-qualified l-value.
1152   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
1153     switch (Lifetime) {
1154     case Qualifiers::OCL_None:
1155       llvm_unreachable("present but none");
1156 
1157     case Qualifiers::OCL_ExplicitNone:
1158       // nothing special
1159       break;
1160 
1161     case Qualifiers::OCL_Strong:
1162       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
1163       return;
1164 
1165     case Qualifiers::OCL_Weak:
1166       EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
1167       return;
1168 
1169     case Qualifiers::OCL_Autoreleasing:
1170       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
1171                                                      Src.getScalarVal()));
1172       // fall into the normal path
1173       break;
1174     }
1175   }
1176 
1177   if (Dst.isObjCWeak() && !Dst.isNonGC()) {
1178     // load of a __weak object.
1179     llvm::Value *LvalueDst = Dst.getAddress();
1180     llvm::Value *src = Src.getScalarVal();
1181      CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
1182     return;
1183   }
1184 
1185   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
1186     // load of a __strong object.
1187     llvm::Value *LvalueDst = Dst.getAddress();
1188     llvm::Value *src = Src.getScalarVal();
1189     if (Dst.isObjCIvar()) {
1190       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
1191       llvm::Type *ResultType = ConvertType(getContext().LongTy);
1192       llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
1193       llvm::Value *dst = RHS;
1194       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1195       llvm::Value *LHS =
1196         Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
1197       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1198       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1199                                               BytesBetween);
1200     } else if (Dst.isGlobalObjCRef()) {
1201       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1202                                                 Dst.isThreadLocalRef());
1203     }
1204     else
1205       CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
1206     return;
1207   }
1208 
1209   assert(Src.isScalar() && "Can't emit an agg store with this method");
1210   EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
1211 }
1212 
1213 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1214                                                      llvm::Value **Result) {
1215   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1216 
1217   // Get the output type.
1218   llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1219   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
1220 
1221   // Get the source value, truncated to the width of the bit-field.
1222   llvm::Value *SrcVal = Src.getScalarVal();
1223 
1224   if (hasBooleanRepresentation(Dst.getType()))
1225     SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
1226 
1227   SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
1228                                                                 Info.getSize()),
1229                              "bf.value");
1230 
1231   // Return the new value of the bit-field, if requested.
1232   if (Result) {
1233     // Cast back to the proper type for result.
1234     llvm::Type *SrcTy = Src.getScalarVal()->getType();
1235     llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
1236                                                    "bf.reload.val");
1237 
1238     // Sign extend if necessary.
1239     if (Info.isSigned()) {
1240       unsigned ExtraBits = ResSizeInBits - Info.getSize();
1241       if (ExtraBits)
1242         ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
1243                                        ExtraBits, "bf.reload.sext");
1244     }
1245 
1246     *Result = ReloadVal;
1247   }
1248 
1249   // Iterate over the components, writing each piece to memory.
1250   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1251     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1252 
1253     // Get the field pointer.
1254     llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
1255     unsigned addressSpace =
1256       cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
1257 
1258     // Only offset by the field index if used, so that incoming values are not
1259     // required to be structures.
1260     if (AI.FieldIndex)
1261       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1262 
1263     // Offset by the byte offset, if used.
1264     if (!AI.FieldByteOffset.isZero()) {
1265       Ptr = EmitCastToVoidPtr(Ptr);
1266       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1267                                        "bf.field.offs");
1268     }
1269 
1270     // Cast to the access type.
1271     llvm::Type *AccessLTy =
1272       llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
1273 
1274     llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
1275     Ptr = Builder.CreateBitCast(Ptr, PTy);
1276 
1277     // Extract the piece of the bit-field value to write in this access, limited
1278     // to the values that are part of this access.
1279     llvm::Value *Val = SrcVal;
1280     if (AI.TargetBitOffset)
1281       Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
1282     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
1283                                                             AI.TargetBitWidth));
1284 
1285     // Extend or truncate to the access size.
1286     if (ResSizeInBits < AI.AccessWidth)
1287       Val = Builder.CreateZExt(Val, AccessLTy);
1288     else if (ResSizeInBits > AI.AccessWidth)
1289       Val = Builder.CreateTrunc(Val, AccessLTy);
1290 
1291     // Shift into the position in memory.
1292     if (AI.FieldBitStart)
1293       Val = Builder.CreateShl(Val, AI.FieldBitStart);
1294 
1295     // If necessary, load and OR in bits that are outside of the bit-field.
1296     if (AI.TargetBitWidth != AI.AccessWidth) {
1297       llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
1298       if (!AI.AccessAlignment.isZero())
1299         Load->setAlignment(AI.AccessAlignment.getQuantity());
1300 
1301       // Compute the mask for zeroing the bits that are part of the bit-field.
1302       llvm::APInt InvMask =
1303         ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
1304                                  AI.FieldBitStart + AI.TargetBitWidth);
1305 
1306       // Apply the mask and OR in to the value to write.
1307       Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
1308     }
1309 
1310     // Write the value.
1311     llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
1312                                                  Dst.isVolatileQualified());
1313     if (!AI.AccessAlignment.isZero())
1314       Store->setAlignment(AI.AccessAlignment.getQuantity());
1315   }
1316 }
1317 
1318 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1319                                                                LValue Dst) {
1320   // This access turns into a read/modify/write of the vector.  Load the input
1321   // value now.
1322   llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
1323                                             Dst.isVolatileQualified());
1324   Load->setAlignment(Dst.getAlignment().getQuantity());
1325   llvm::Value *Vec = Load;
1326   const llvm::Constant *Elts = Dst.getExtVectorElts();
1327 
1328   llvm::Value *SrcVal = Src.getScalarVal();
1329 
1330   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1331     unsigned NumSrcElts = VTy->getNumElements();
1332     unsigned NumDstElts =
1333        cast<llvm::VectorType>(Vec->getType())->getNumElements();
1334     if (NumDstElts == NumSrcElts) {
1335       // Use shuffle vector is the src and destination are the same number of
1336       // elements and restore the vector mask since it is on the side it will be
1337       // stored.
1338       SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1339       for (unsigned i = 0; i != NumSrcElts; ++i)
1340         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1341 
1342       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1343       Vec = Builder.CreateShuffleVector(SrcVal,
1344                                         llvm::UndefValue::get(Vec->getType()),
1345                                         MaskV);
1346     } else if (NumDstElts > NumSrcElts) {
1347       // Extended the source vector to the same length and then shuffle it
1348       // into the destination.
1349       // FIXME: since we're shuffling with undef, can we just use the indices
1350       //        into that?  This could be simpler.
1351       SmallVector<llvm::Constant*, 4> ExtMask;
1352       for (unsigned i = 0; i != NumSrcElts; ++i)
1353         ExtMask.push_back(Builder.getInt32(i));
1354       ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1355       llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1356       llvm::Value *ExtSrcVal =
1357         Builder.CreateShuffleVector(SrcVal,
1358                                     llvm::UndefValue::get(SrcVal->getType()),
1359                                     ExtMaskV);
1360       // build identity
1361       SmallVector<llvm::Constant*, 4> Mask;
1362       for (unsigned i = 0; i != NumDstElts; ++i)
1363         Mask.push_back(Builder.getInt32(i));
1364 
1365       // modify when what gets shuffled in
1366       for (unsigned i = 0; i != NumSrcElts; ++i)
1367         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1368       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1369       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1370     } else {
1371       // We should never shorten the vector
1372       llvm_unreachable("unexpected shorten vector length");
1373     }
1374   } else {
1375     // If the Src is a scalar (not a vector) it must be updating one element.
1376     unsigned InIdx = getAccessedFieldNo(0, Elts);
1377     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1378     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1379   }
1380 
1381   llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
1382                                                Dst.isVolatileQualified());
1383   Store->setAlignment(Dst.getAlignment().getQuantity());
1384 }
1385 
1386 // setObjCGCLValueClass - sets class of he lvalue for the purpose of
1387 // generating write-barries API. It is currently a global, ivar,
1388 // or neither.
1389 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1390                                  LValue &LV,
1391                                  bool IsMemberAccess=false) {
1392   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1393     return;
1394 
1395   if (isa<ObjCIvarRefExpr>(E)) {
1396     QualType ExpTy = E->getType();
1397     if (IsMemberAccess && ExpTy->isPointerType()) {
1398       // If ivar is a structure pointer, assigning to field of
1399       // this struct follows gcc's behavior and makes it a non-ivar
1400       // writer-barrier conservatively.
1401       ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1402       if (ExpTy->isRecordType()) {
1403         LV.setObjCIvar(false);
1404         return;
1405       }
1406     }
1407     LV.setObjCIvar(true);
1408     ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1409     LV.setBaseIvarExp(Exp->getBase());
1410     LV.setObjCArray(E->getType()->isArrayType());
1411     return;
1412   }
1413 
1414   if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1415     if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1416       if (VD->hasGlobalStorage()) {
1417         LV.setGlobalObjCRef(true);
1418         LV.setThreadLocalRef(VD->isThreadSpecified());
1419       }
1420     }
1421     LV.setObjCArray(E->getType()->isArrayType());
1422     return;
1423   }
1424 
1425   if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1426     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1427     return;
1428   }
1429 
1430   if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1431     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1432     if (LV.isObjCIvar()) {
1433       // If cast is to a structure pointer, follow gcc's behavior and make it
1434       // a non-ivar write-barrier.
1435       QualType ExpTy = E->getType();
1436       if (ExpTy->isPointerType())
1437         ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1438       if (ExpTy->isRecordType())
1439         LV.setObjCIvar(false);
1440     }
1441     return;
1442   }
1443 
1444   if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1445     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1446     return;
1447   }
1448 
1449   if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1450     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1451     return;
1452   }
1453 
1454   if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1455     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1456     return;
1457   }
1458 
1459   if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1460     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1461     return;
1462   }
1463 
1464   if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1465     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1466     if (LV.isObjCIvar() && !LV.isObjCArray())
1467       // Using array syntax to assigning to what an ivar points to is not
1468       // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1469       LV.setObjCIvar(false);
1470     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
1471       // Using array syntax to assigning to what global points to is not
1472       // same as assigning to the global itself. {id *G;} G[i] = 0;
1473       LV.setGlobalObjCRef(false);
1474     return;
1475   }
1476 
1477   if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1478     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
1479     // We don't know if member is an 'ivar', but this flag is looked at
1480     // only in the context of LV.isObjCIvar().
1481     LV.setObjCArray(E->getType()->isArrayType());
1482     return;
1483   }
1484 }
1485 
1486 static llvm::Value *
1487 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1488                                 llvm::Value *V, llvm::Type *IRType,
1489                                 StringRef Name = StringRef()) {
1490   unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1491   return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1492 }
1493 
1494 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1495                                       const Expr *E, const VarDecl *VD) {
1496   assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1497          "Var decl must have external storage or be a file var decl!");
1498 
1499   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1500   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1501   V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1502   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1503   QualType T = E->getType();
1504   LValue LV;
1505   if (VD->getType()->isReferenceType()) {
1506     llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1507     LI->setAlignment(Alignment.getQuantity());
1508     V = LI;
1509     LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1510   } else {
1511     LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1512   }
1513   setObjCGCLValueClass(CGF.getContext(), E, LV);
1514   return LV;
1515 }
1516 
1517 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1518                                      const Expr *E, const FunctionDecl *FD) {
1519   llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1520   if (!FD->hasPrototype()) {
1521     if (const FunctionProtoType *Proto =
1522             FD->getType()->getAs<FunctionProtoType>()) {
1523       // Ugly case: for a K&R-style definition, the type of the definition
1524       // isn't the same as the type of a use.  Correct for this with a
1525       // bitcast.
1526       QualType NoProtoType =
1527           CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1528       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1529       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
1530     }
1531   }
1532   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
1533   return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1534 }
1535 
1536 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1537   const NamedDecl *ND = E->getDecl();
1538   CharUnits Alignment = getContext().getDeclAlign(ND);
1539   QualType T = E->getType();
1540 
1541   // FIXME: We should be able to assert this for FunctionDecls as well!
1542   // FIXME: We should be able to assert this for all DeclRefExprs, not just
1543   // those with a valid source location.
1544   assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
1545           !E->getLocation().isValid()) &&
1546          "Should not use decl without marking it used!");
1547 
1548   if (ND->hasAttr<WeakRefAttr>()) {
1549     const ValueDecl *VD = cast<ValueDecl>(ND);
1550     llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1551     return MakeAddrLValue(Aliasee, E->getType(), Alignment);
1552   }
1553 
1554   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1555     // Check if this is a global variable.
1556     if (VD->hasExternalStorage() || VD->isFileVarDecl())
1557       return EmitGlobalVarDeclLValue(*this, E, VD);
1558 
1559     bool isBlockVariable = VD->hasAttr<BlocksAttr>();
1560 
1561     bool NonGCable = VD->hasLocalStorage() &&
1562                      !VD->getType()->isReferenceType() &&
1563                      !isBlockVariable;
1564 
1565     llvm::Value *V = LocalDeclMap[VD];
1566     if (!V && VD->isStaticLocal())
1567       V = CGM.getStaticLocalDeclAddress(VD);
1568 
1569     // Use special handling for lambdas.
1570     if (!V) {
1571       if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
1572         QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
1573         LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
1574                                                      LambdaTagType);
1575         return EmitLValueForField(LambdaLV, FD);
1576       }
1577 
1578       assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
1579       CharUnits alignment = getContext().getDeclAlign(VD);
1580       return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
1581                             E->getType(), alignment);
1582     }
1583 
1584     assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1585 
1586     if (isBlockVariable)
1587       V = BuildBlockByrefAddress(V, VD);
1588 
1589     LValue LV;
1590     if (VD->getType()->isReferenceType()) {
1591       llvm::LoadInst *LI = Builder.CreateLoad(V);
1592       LI->setAlignment(Alignment.getQuantity());
1593       V = LI;
1594       LV = MakeNaturalAlignAddrLValue(V, T);
1595     } else {
1596       LV = MakeAddrLValue(V, T, Alignment);
1597     }
1598 
1599     if (NonGCable) {
1600       LV.getQuals().removeObjCGCAttr();
1601       LV.setNonGC(true);
1602     }
1603     setObjCGCLValueClass(getContext(), E, LV);
1604     return LV;
1605   }
1606 
1607   if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1608     return EmitFunctionDeclLValue(*this, E, fn);
1609 
1610   llvm_unreachable("Unhandled DeclRefExpr");
1611 }
1612 
1613 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1614   // __extension__ doesn't affect lvalue-ness.
1615   if (E->getOpcode() == UO_Extension)
1616     return EmitLValue(E->getSubExpr());
1617 
1618   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1619   switch (E->getOpcode()) {
1620   default: llvm_unreachable("Unknown unary operator lvalue!");
1621   case UO_Deref: {
1622     QualType T = E->getSubExpr()->getType()->getPointeeType();
1623     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1624 
1625     LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1626     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1627 
1628     // We should not generate __weak write barrier on indirect reference
1629     // of a pointer to object; as in void foo (__weak id *param); *param = 0;
1630     // But, we continue to generate __strong write barrier on indirect write
1631     // into a pointer to object.
1632     if (getContext().getLangOpts().ObjC1 &&
1633         getContext().getLangOpts().getGC() != LangOptions::NonGC &&
1634         LV.isObjCWeak())
1635       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1636     return LV;
1637   }
1638   case UO_Real:
1639   case UO_Imag: {
1640     LValue LV = EmitLValue(E->getSubExpr());
1641     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1642     llvm::Value *Addr = LV.getAddress();
1643 
1644     // __real is valid on scalars.  This is a faster way of testing that.
1645     // __imag can only produce an rvalue on scalars.
1646     if (E->getOpcode() == UO_Real &&
1647         !cast<llvm::PointerType>(Addr->getType())
1648            ->getElementType()->isStructTy()) {
1649       assert(E->getSubExpr()->getType()->isArithmeticType());
1650       return LV;
1651     }
1652 
1653     assert(E->getSubExpr()->getType()->isAnyComplexType());
1654 
1655     unsigned Idx = E->getOpcode() == UO_Imag;
1656     return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1657                                                   Idx, "idx"),
1658                           ExprTy);
1659   }
1660   case UO_PreInc:
1661   case UO_PreDec: {
1662     LValue LV = EmitLValue(E->getSubExpr());
1663     bool isInc = E->getOpcode() == UO_PreInc;
1664 
1665     if (E->getType()->isAnyComplexType())
1666       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1667     else
1668       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1669     return LV;
1670   }
1671   }
1672 }
1673 
1674 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1675   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1676                         E->getType());
1677 }
1678 
1679 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1680   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1681                         E->getType());
1682 }
1683 
1684 
1685 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1686   switch (E->getIdentType()) {
1687   default:
1688     return EmitUnsupportedLValue(E, "predefined expression");
1689 
1690   case PredefinedExpr::Func:
1691   case PredefinedExpr::Function:
1692   case PredefinedExpr::PrettyFunction: {
1693     unsigned Type = E->getIdentType();
1694     std::string GlobalVarName;
1695 
1696     switch (Type) {
1697     default: llvm_unreachable("Invalid type");
1698     case PredefinedExpr::Func:
1699       GlobalVarName = "__func__.";
1700       break;
1701     case PredefinedExpr::Function:
1702       GlobalVarName = "__FUNCTION__.";
1703       break;
1704     case PredefinedExpr::PrettyFunction:
1705       GlobalVarName = "__PRETTY_FUNCTION__.";
1706       break;
1707     }
1708 
1709     StringRef FnName = CurFn->getName();
1710     if (FnName.startswith("\01"))
1711       FnName = FnName.substr(1);
1712     GlobalVarName += FnName;
1713 
1714     const Decl *CurDecl = CurCodeDecl;
1715     if (CurDecl == 0)
1716       CurDecl = getContext().getTranslationUnitDecl();
1717 
1718     std::string FunctionName =
1719         (isa<BlockDecl>(CurDecl)
1720          ? FnName.str()
1721          : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
1722 
1723     llvm::Constant *C =
1724       CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
1725     return MakeAddrLValue(C, E->getType());
1726   }
1727   }
1728 }
1729 
1730 llvm::BasicBlock *CodeGenFunction::getTrapBB() {
1731   const CodeGenOptions &GCO = CGM.getCodeGenOpts();
1732 
1733   // If we are not optimzing, don't collapse all calls to trap in the function
1734   // to the same call, that way, in the debugger they can see which operation
1735   // did in fact fail.  If we are optimizing, we collapse all calls to trap down
1736   // to just one per function to save on codesize.
1737   if (GCO.OptimizationLevel && TrapBB)
1738     return TrapBB;
1739 
1740   llvm::BasicBlock *Cont = 0;
1741   if (HaveInsertPoint()) {
1742     Cont = createBasicBlock("cont");
1743     EmitBranch(Cont);
1744   }
1745   TrapBB = createBasicBlock("trap");
1746   EmitBlock(TrapBB);
1747 
1748   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
1749   llvm::CallInst *TrapCall = Builder.CreateCall(F);
1750   TrapCall->setDoesNotReturn();
1751   TrapCall->setDoesNotThrow();
1752   Builder.CreateUnreachable();
1753 
1754   if (Cont)
1755     EmitBlock(Cont);
1756   return TrapBB;
1757 }
1758 
1759 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
1760 /// array to pointer, return the array subexpression.
1761 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
1762   // If this isn't just an array->pointer decay, bail out.
1763   const CastExpr *CE = dyn_cast<CastExpr>(E);
1764   if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
1765     return 0;
1766 
1767   // If this is a decay from variable width array, bail out.
1768   const Expr *SubExpr = CE->getSubExpr();
1769   if (SubExpr->getType()->isVariableArrayType())
1770     return 0;
1771 
1772   return SubExpr;
1773 }
1774 
1775 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1776   // The index must always be an integer, which is not an aggregate.  Emit it.
1777   llvm::Value *Idx = EmitScalarExpr(E->getIdx());
1778   QualType IdxTy  = E->getIdx()->getType();
1779   bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
1780 
1781   // If the base is a vector type, then we are forming a vector element lvalue
1782   // with this subscript.
1783   if (E->getBase()->getType()->isVectorType()) {
1784     // Emit the vector as an lvalue to get its address.
1785     LValue LHS = EmitLValue(E->getBase());
1786     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
1787     Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
1788     return LValue::MakeVectorElt(LHS.getAddress(), Idx,
1789                                  E->getBase()->getType(), LHS.getAlignment());
1790   }
1791 
1792   // Extend or truncate the index type to 32 or 64-bits.
1793   if (Idx->getType() != IntPtrTy)
1794     Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
1795 
1796   // FIXME: As llvm implements the object size checking, this can come out.
1797   if (CatchUndefined) {
1798     if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
1799       if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
1800         if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1801           if (const ConstantArrayType *CAT
1802               = getContext().getAsConstantArrayType(DRE->getType())) {
1803             llvm::APInt Size = CAT->getSize();
1804             llvm::BasicBlock *Cont = createBasicBlock("cont");
1805             Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
1806                                   llvm::ConstantInt::get(Idx->getType(), Size)),
1807                                  Cont, getTrapBB());
1808             EmitBlock(Cont);
1809           }
1810         }
1811       }
1812     }
1813   }
1814 
1815   // We know that the pointer points to a type of the correct size, unless the
1816   // size is a VLA or Objective-C interface.
1817   llvm::Value *Address = 0;
1818   CharUnits ArrayAlignment;
1819   if (const VariableArrayType *vla =
1820         getContext().getAsVariableArrayType(E->getType())) {
1821     // The base must be a pointer, which is not an aggregate.  Emit
1822     // it.  It needs to be emitted first in case it's what captures
1823     // the VLA bounds.
1824     Address = EmitScalarExpr(E->getBase());
1825 
1826     // The element count here is the total number of non-VLA elements.
1827     llvm::Value *numElements = getVLASize(vla).first;
1828 
1829     // Effectively, the multiply by the VLA size is part of the GEP.
1830     // GEP indexes are signed, and scaling an index isn't permitted to
1831     // signed-overflow, so we use the same semantics for our explicit
1832     // multiply.  We suppress this if overflow is not undefined behavior.
1833     if (getLangOpts().isSignedOverflowDefined()) {
1834       Idx = Builder.CreateMul(Idx, numElements);
1835       Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1836     } else {
1837       Idx = Builder.CreateNSWMul(Idx, numElements);
1838       Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
1839     }
1840   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
1841     // Indexing over an interface, as in "NSString *P; P[4];"
1842     llvm::Value *InterfaceSize =
1843       llvm::ConstantInt::get(Idx->getType(),
1844           getContext().getTypeSizeInChars(OIT).getQuantity());
1845 
1846     Idx = Builder.CreateMul(Idx, InterfaceSize);
1847 
1848     // The base must be a pointer, which is not an aggregate.  Emit it.
1849     llvm::Value *Base = EmitScalarExpr(E->getBase());
1850     Address = EmitCastToVoidPtr(Base);
1851     Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1852     Address = Builder.CreateBitCast(Address, Base->getType());
1853   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
1854     // If this is A[i] where A is an array, the frontend will have decayed the
1855     // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
1856     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
1857     // "gep x, i" here.  Emit one "gep A, 0, i".
1858     assert(Array->getType()->isArrayType() &&
1859            "Array to pointer decay must have array source type!");
1860     LValue ArrayLV = EmitLValue(Array);
1861     llvm::Value *ArrayPtr = ArrayLV.getAddress();
1862     llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
1863     llvm::Value *Args[] = { Zero, Idx };
1864 
1865     // Propagate the alignment from the array itself to the result.
1866     ArrayAlignment = ArrayLV.getAlignment();
1867 
1868     if (getContext().getLangOpts().isSignedOverflowDefined())
1869       Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
1870     else
1871       Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
1872   } else {
1873     // The base must be a pointer, which is not an aggregate.  Emit it.
1874     llvm::Value *Base = EmitScalarExpr(E->getBase());
1875     if (getContext().getLangOpts().isSignedOverflowDefined())
1876       Address = Builder.CreateGEP(Base, Idx, "arrayidx");
1877     else
1878       Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
1879   }
1880 
1881   QualType T = E->getBase()->getType()->getPointeeType();
1882   assert(!T.isNull() &&
1883          "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
1884 
1885 
1886   // Limit the alignment to that of the result type.
1887   LValue LV;
1888   if (!ArrayAlignment.isZero()) {
1889     CharUnits Align = getContext().getTypeAlignInChars(T);
1890     ArrayAlignment = std::min(Align, ArrayAlignment);
1891     LV = MakeAddrLValue(Address, T, ArrayAlignment);
1892   } else {
1893     LV = MakeNaturalAlignAddrLValue(Address, T);
1894   }
1895 
1896   LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
1897 
1898   if (getContext().getLangOpts().ObjC1 &&
1899       getContext().getLangOpts().getGC() != LangOptions::NonGC) {
1900     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1901     setObjCGCLValueClass(getContext(), E, LV);
1902   }
1903   return LV;
1904 }
1905 
1906 static
1907 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
1908                                        SmallVector<unsigned, 4> &Elts) {
1909   SmallVector<llvm::Constant*, 4> CElts;
1910   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
1911     CElts.push_back(Builder.getInt32(Elts[i]));
1912 
1913   return llvm::ConstantVector::get(CElts);
1914 }
1915 
1916 LValue CodeGenFunction::
1917 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
1918   // Emit the base vector as an l-value.
1919   LValue Base;
1920 
1921   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1922   if (E->isArrow()) {
1923     // If it is a pointer to a vector, emit the address and form an lvalue with
1924     // it.
1925     llvm::Value *Ptr = EmitScalarExpr(E->getBase());
1926     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
1927     Base = MakeAddrLValue(Ptr, PT->getPointeeType());
1928     Base.getQuals().removeObjCGCAttr();
1929   } else if (E->getBase()->isGLValue()) {
1930     // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
1931     // emit the base as an lvalue.
1932     assert(E->getBase()->getType()->isVectorType());
1933     Base = EmitLValue(E->getBase());
1934   } else {
1935     // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
1936     assert(E->getBase()->getType()->isVectorType() &&
1937            "Result must be a vector");
1938     llvm::Value *Vec = EmitScalarExpr(E->getBase());
1939 
1940     // Store the vector to memory (because LValue wants an address).
1941     llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
1942     Builder.CreateStore(Vec, VecMem);
1943     Base = MakeAddrLValue(VecMem, E->getBase()->getType());
1944   }
1945 
1946   QualType type =
1947     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
1948 
1949   // Encode the element access list into a vector of unsigned indices.
1950   SmallVector<unsigned, 4> Indices;
1951   E->getEncodedElementAccess(Indices);
1952 
1953   if (Base.isSimple()) {
1954     llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
1955     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
1956                                     Base.getAlignment());
1957   }
1958   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
1959 
1960   llvm::Constant *BaseElts = Base.getExtVectorElts();
1961   SmallVector<llvm::Constant *, 4> CElts;
1962 
1963   for (unsigned i = 0, e = Indices.size(); i != e; ++i)
1964     CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
1965   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
1966   return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
1967                                   Base.getAlignment());
1968 }
1969 
1970 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
1971   Expr *BaseExpr = E->getBase();
1972 
1973   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
1974   LValue BaseLV;
1975   if (E->isArrow())
1976     BaseLV = MakeNaturalAlignAddrLValue(EmitScalarExpr(BaseExpr),
1977                                         BaseExpr->getType()->getPointeeType());
1978   else
1979     BaseLV = EmitLValue(BaseExpr);
1980 
1981   NamedDecl *ND = E->getMemberDecl();
1982   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
1983     LValue LV = EmitLValueForField(BaseLV, Field);
1984     setObjCGCLValueClass(getContext(), E, LV);
1985     return LV;
1986   }
1987 
1988   if (VarDecl *VD = dyn_cast<VarDecl>(ND))
1989     return EmitGlobalVarDeclLValue(*this, E, VD);
1990 
1991   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
1992     return EmitFunctionDeclLValue(*this, E, FD);
1993 
1994   llvm_unreachable("Unhandled member declaration!");
1995 }
1996 
1997 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
1998                                               const FieldDecl *Field,
1999                                               unsigned CVRQualifiers) {
2000   const CGRecordLayout &RL =
2001     CGM.getTypes().getCGRecordLayout(Field->getParent());
2002   const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
2003   return LValue::MakeBitfield(BaseValue, Info,
2004                           Field->getType().withCVRQualifiers(CVRQualifiers));
2005 }
2006 
2007 /// EmitLValueForAnonRecordField - Given that the field is a member of
2008 /// an anonymous struct or union buried inside a record, and given
2009 /// that the base value is a pointer to the enclosing record, derive
2010 /// an lvalue for the ultimate field.
2011 LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
2012                                              const IndirectFieldDecl *Field,
2013                                                      unsigned CVRQualifiers) {
2014   IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
2015     IEnd = Field->chain_end();
2016   while (true) {
2017     QualType RecordTy =
2018         getContext().getTypeDeclType(cast<FieldDecl>(*I)->getParent());
2019     LValue LV = EmitLValueForField(MakeAddrLValue(BaseValue, RecordTy),
2020                                    cast<FieldDecl>(*I));
2021     if (++I == IEnd) return LV;
2022 
2023     assert(LV.isSimple());
2024     BaseValue = LV.getAddress();
2025     CVRQualifiers |= LV.getVRQualifiers();
2026   }
2027 }
2028 
2029 LValue CodeGenFunction::EmitLValueForField(LValue base,
2030                                            const FieldDecl *field) {
2031   if (field->isBitField())
2032     return EmitLValueForBitfield(base.getAddress(), field,
2033                                  base.getVRQualifiers());
2034 
2035   const RecordDecl *rec = field->getParent();
2036   QualType type = field->getType();
2037   CharUnits alignment = getContext().getDeclAlign(field);
2038 
2039   // FIXME: It should be impossible to have an LValue without alignment for a
2040   // complete type.
2041   if (!base.getAlignment().isZero())
2042     alignment = std::min(alignment, base.getAlignment());
2043 
2044   bool mayAlias = rec->hasAttr<MayAliasAttr>();
2045 
2046   llvm::Value *addr = base.getAddress();
2047   unsigned cvr = base.getVRQualifiers();
2048   if (rec->isUnion()) {
2049     // For unions, there is no pointer adjustment.
2050     assert(!type->isReferenceType() && "union has reference member");
2051   } else {
2052     // For structs, we GEP to the field that the record layout suggests.
2053     unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
2054     addr = Builder.CreateStructGEP(addr, idx, field->getName());
2055 
2056     // If this is a reference field, load the reference right now.
2057     if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
2058       llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
2059       if (cvr & Qualifiers::Volatile) load->setVolatile(true);
2060       load->setAlignment(alignment.getQuantity());
2061 
2062       if (CGM.shouldUseTBAA()) {
2063         llvm::MDNode *tbaa;
2064         if (mayAlias)
2065           tbaa = CGM.getTBAAInfo(getContext().CharTy);
2066         else
2067           tbaa = CGM.getTBAAInfo(type);
2068         CGM.DecorateInstruction(load, tbaa);
2069       }
2070 
2071       addr = load;
2072       mayAlias = false;
2073       type = refType->getPointeeType();
2074       if (type->isIncompleteType())
2075         alignment = CharUnits();
2076       else
2077         alignment = getContext().getTypeAlignInChars(type);
2078       cvr = 0; // qualifiers don't recursively apply to referencee
2079     }
2080   }
2081 
2082   // Make sure that the address is pointing to the right type.  This is critical
2083   // for both unions and structs.  A union needs a bitcast, a struct element
2084   // will need a bitcast if the LLVM type laid out doesn't match the desired
2085   // type.
2086   addr = EmitBitCastOfLValueToProperType(*this, addr,
2087                                          CGM.getTypes().ConvertTypeForMem(type),
2088                                          field->getName());
2089 
2090   if (field->hasAttr<AnnotateAttr>())
2091     addr = EmitFieldAnnotations(field, addr);
2092 
2093   LValue LV = MakeAddrLValue(addr, type, alignment);
2094   LV.getQuals().addCVRQualifiers(cvr);
2095 
2096   // __weak attribute on a field is ignored.
2097   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
2098     LV.getQuals().removeObjCGCAttr();
2099 
2100   // Fields of may_alias structs act like 'char' for TBAA purposes.
2101   // FIXME: this should get propagated down through anonymous structs
2102   // and unions.
2103   if (mayAlias && LV.getTBAAInfo())
2104     LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
2105 
2106   return LV;
2107 }
2108 
2109 LValue
2110 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
2111                                                   const FieldDecl *Field) {
2112   QualType FieldType = Field->getType();
2113 
2114   if (!FieldType->isReferenceType())
2115     return EmitLValueForField(Base, Field);
2116 
2117   const CGRecordLayout &RL =
2118     CGM.getTypes().getCGRecordLayout(Field->getParent());
2119   unsigned idx = RL.getLLVMFieldNo(Field);
2120   llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
2121   assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
2122 
2123   // Make sure that the address is pointing to the right type.  This is critical
2124   // for both unions and structs.  A union needs a bitcast, a struct element
2125   // will need a bitcast if the LLVM type laid out doesn't match the desired
2126   // type.
2127   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
2128   V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
2129 
2130   CharUnits Alignment = getContext().getDeclAlign(Field);
2131 
2132   // FIXME: It should be impossible to have an LValue without alignment for a
2133   // complete type.
2134   if (!Base.getAlignment().isZero())
2135     Alignment = std::min(Alignment, Base.getAlignment());
2136 
2137   return MakeAddrLValue(V, FieldType, Alignment);
2138 }
2139 
2140 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
2141   if (E->isFileScope()) {
2142     llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
2143     return MakeAddrLValue(GlobalPtr, E->getType());
2144   }
2145 
2146   llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
2147   const Expr *InitExpr = E->getInitializer();
2148   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
2149 
2150   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
2151                    /*Init*/ true);
2152 
2153   return Result;
2154 }
2155 
2156 LValue CodeGenFunction::
2157 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
2158   if (!expr->isGLValue()) {
2159     // ?: here should be an aggregate.
2160     assert((hasAggregateLLVMType(expr->getType()) &&
2161             !expr->getType()->isAnyComplexType()) &&
2162            "Unexpected conditional operator!");
2163     return EmitAggExprToLValue(expr);
2164   }
2165 
2166   OpaqueValueMapping binding(*this, expr);
2167 
2168   const Expr *condExpr = expr->getCond();
2169   bool CondExprBool;
2170   if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
2171     const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
2172     if (!CondExprBool) std::swap(live, dead);
2173 
2174     if (!ContainsLabel(dead))
2175       return EmitLValue(live);
2176   }
2177 
2178   llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
2179   llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
2180   llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
2181 
2182   ConditionalEvaluation eval(*this);
2183   EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
2184 
2185   // Any temporaries created here are conditional.
2186   EmitBlock(lhsBlock);
2187   eval.begin(*this);
2188   LValue lhs = EmitLValue(expr->getTrueExpr());
2189   eval.end(*this);
2190 
2191   if (!lhs.isSimple())
2192     return EmitUnsupportedLValue(expr, "conditional operator");
2193 
2194   lhsBlock = Builder.GetInsertBlock();
2195   Builder.CreateBr(contBlock);
2196 
2197   // Any temporaries created here are conditional.
2198   EmitBlock(rhsBlock);
2199   eval.begin(*this);
2200   LValue rhs = EmitLValue(expr->getFalseExpr());
2201   eval.end(*this);
2202   if (!rhs.isSimple())
2203     return EmitUnsupportedLValue(expr, "conditional operator");
2204   rhsBlock = Builder.GetInsertBlock();
2205 
2206   EmitBlock(contBlock);
2207 
2208   llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
2209                                          "cond-lvalue");
2210   phi->addIncoming(lhs.getAddress(), lhsBlock);
2211   phi->addIncoming(rhs.getAddress(), rhsBlock);
2212   return MakeAddrLValue(phi, expr->getType());
2213 }
2214 
2215 /// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
2216 /// If the cast is a dynamic_cast, we can have the usual lvalue result,
2217 /// otherwise if a cast is needed by the code generator in an lvalue context,
2218 /// then it must mean that we need the address of an aggregate in order to
2219 /// access one of its fields.  This can happen for all the reasons that casts
2220 /// are permitted with aggregate result, including noop aggregate casts, and
2221 /// cast from scalar to union.
2222 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
2223   switch (E->getCastKind()) {
2224   case CK_ToVoid:
2225     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
2226 
2227   case CK_Dependent:
2228     llvm_unreachable("dependent cast kind in IR gen!");
2229 
2230   // These two casts are currently treated as no-ops, although they could
2231   // potentially be real operations depending on the target's ABI.
2232   case CK_NonAtomicToAtomic:
2233   case CK_AtomicToNonAtomic:
2234 
2235   case CK_NoOp:
2236   case CK_LValueToRValue:
2237     if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2238         || E->getType()->isRecordType())
2239       return EmitLValue(E->getSubExpr());
2240     // Fall through to synthesize a temporary.
2241 
2242   case CK_BitCast:
2243   case CK_ArrayToPointerDecay:
2244   case CK_FunctionToPointerDecay:
2245   case CK_NullToMemberPointer:
2246   case CK_NullToPointer:
2247   case CK_IntegralToPointer:
2248   case CK_PointerToIntegral:
2249   case CK_PointerToBoolean:
2250   case CK_VectorSplat:
2251   case CK_IntegralCast:
2252   case CK_IntegralToBoolean:
2253   case CK_IntegralToFloating:
2254   case CK_FloatingToIntegral:
2255   case CK_FloatingToBoolean:
2256   case CK_FloatingCast:
2257   case CK_FloatingRealToComplex:
2258   case CK_FloatingComplexToReal:
2259   case CK_FloatingComplexToBoolean:
2260   case CK_FloatingComplexCast:
2261   case CK_FloatingComplexToIntegralComplex:
2262   case CK_IntegralRealToComplex:
2263   case CK_IntegralComplexToReal:
2264   case CK_IntegralComplexToBoolean:
2265   case CK_IntegralComplexCast:
2266   case CK_IntegralComplexToFloatingComplex:
2267   case CK_DerivedToBaseMemberPointer:
2268   case CK_BaseToDerivedMemberPointer:
2269   case CK_MemberPointerToBoolean:
2270   case CK_ReinterpretMemberPointer:
2271   case CK_AnyPointerToBlockPointerCast:
2272   case CK_ARCProduceObject:
2273   case CK_ARCConsumeObject:
2274   case CK_ARCReclaimReturnedObject:
2275   case CK_ARCExtendBlockObject:
2276   case CK_CopyAndAutoreleaseBlockObject: {
2277     // These casts only produce lvalues when we're binding a reference to a
2278     // temporary realized from a (converted) pure rvalue. Emit the expression
2279     // as a value, copy it into a temporary, and return an lvalue referring to
2280     // that temporary.
2281     llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2282     EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2283     return MakeAddrLValue(V, E->getType());
2284   }
2285 
2286   case CK_Dynamic: {
2287     LValue LV = EmitLValue(E->getSubExpr());
2288     llvm::Value *V = LV.getAddress();
2289     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2290     return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2291   }
2292 
2293   case CK_ConstructorConversion:
2294   case CK_UserDefinedConversion:
2295   case CK_CPointerToObjCPointerCast:
2296   case CK_BlockPointerToObjCPointerCast:
2297     return EmitLValue(E->getSubExpr());
2298 
2299   case CK_UncheckedDerivedToBase:
2300   case CK_DerivedToBase: {
2301     const RecordType *DerivedClassTy =
2302       E->getSubExpr()->getType()->getAs<RecordType>();
2303     CXXRecordDecl *DerivedClassDecl =
2304       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2305 
2306     LValue LV = EmitLValue(E->getSubExpr());
2307     llvm::Value *This = LV.getAddress();
2308 
2309     // Perform the derived-to-base conversion
2310     llvm::Value *Base =
2311       GetAddressOfBaseClass(This, DerivedClassDecl,
2312                             E->path_begin(), E->path_end(),
2313                             /*NullCheckValue=*/false);
2314 
2315     return MakeAddrLValue(Base, E->getType());
2316   }
2317   case CK_ToUnion:
2318     return EmitAggExprToLValue(E);
2319   case CK_BaseToDerived: {
2320     const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2321     CXXRecordDecl *DerivedClassDecl =
2322       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2323 
2324     LValue LV = EmitLValue(E->getSubExpr());
2325 
2326     // Perform the base-to-derived conversion
2327     llvm::Value *Derived =
2328       GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2329                                E->path_begin(), E->path_end(),
2330                                /*NullCheckValue=*/false);
2331 
2332     return MakeAddrLValue(Derived, E->getType());
2333   }
2334   case CK_LValueBitCast: {
2335     // This must be a reinterpret_cast (or c-style equivalent).
2336     const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2337 
2338     LValue LV = EmitLValue(E->getSubExpr());
2339     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2340                                            ConvertType(CE->getTypeAsWritten()));
2341     return MakeAddrLValue(V, E->getType());
2342   }
2343   case CK_ObjCObjectLValueCast: {
2344     LValue LV = EmitLValue(E->getSubExpr());
2345     QualType ToType = getContext().getLValueReferenceType(E->getType());
2346     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2347                                            ConvertType(ToType));
2348     return MakeAddrLValue(V, E->getType());
2349   }
2350   }
2351 
2352   llvm_unreachable("Unhandled lvalue cast kind?");
2353 }
2354 
2355 LValue CodeGenFunction::EmitNullInitializationLValue(
2356                                               const CXXScalarValueInitExpr *E) {
2357   QualType Ty = E->getType();
2358   LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2359   EmitNullInitialization(LV.getAddress(), Ty);
2360   return LV;
2361 }
2362 
2363 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2364   assert(OpaqueValueMappingData::shouldBindAsLValue(e));
2365   return getOpaqueLValueMapping(e);
2366 }
2367 
2368 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2369                                            const MaterializeTemporaryExpr *E) {
2370   RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
2371   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2372 }
2373 
2374 RValue CodeGenFunction::EmitRValueForField(LValue LV,
2375                                            const FieldDecl *FD) {
2376   QualType FT = FD->getType();
2377   LValue FieldLV = EmitLValueForField(LV, FD);
2378   if (FT->isAnyComplexType())
2379     return RValue::getComplex(
2380         LoadComplexFromAddr(FieldLV.getAddress(),
2381                             FieldLV.isVolatileQualified()));
2382   else if (CodeGenFunction::hasAggregateLLVMType(FT))
2383     return FieldLV.asAggregateRValue();
2384 
2385   return EmitLoadOfLValue(FieldLV);
2386 }
2387 
2388 //===--------------------------------------------------------------------===//
2389 //                             Expression Emission
2390 //===--------------------------------------------------------------------===//
2391 
2392 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
2393                                      ReturnValueSlot ReturnValue) {
2394   if (CGDebugInfo *DI = getDebugInfo())
2395     DI->EmitLocation(Builder, E->getLocStart());
2396 
2397   // Builtins never have block type.
2398   if (E->getCallee()->getType()->isBlockPointerType())
2399     return EmitBlockCallExpr(E, ReturnValue);
2400 
2401   if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
2402     return EmitCXXMemberCallExpr(CE, ReturnValue);
2403 
2404   if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
2405     return EmitCUDAKernelCallExpr(CE, ReturnValue);
2406 
2407   const Decl *TargetDecl = E->getCalleeDecl();
2408   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2409     if (unsigned builtinID = FD->getBuiltinID())
2410       return EmitBuiltinExpr(FD, builtinID, E);
2411   }
2412 
2413   if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
2414     if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
2415       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
2416 
2417   if (const CXXPseudoDestructorExpr *PseudoDtor
2418           = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
2419     QualType DestroyedType = PseudoDtor->getDestroyedType();
2420     if (getContext().getLangOpts().ObjCAutoRefCount &&
2421         DestroyedType->isObjCLifetimeType() &&
2422         (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
2423          DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
2424       // Automatic Reference Counting:
2425       //   If the pseudo-expression names a retainable object with weak or
2426       //   strong lifetime, the object shall be released.
2427       Expr *BaseExpr = PseudoDtor->getBase();
2428       llvm::Value *BaseValue = NULL;
2429       Qualifiers BaseQuals;
2430 
2431       // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2432       if (PseudoDtor->isArrow()) {
2433         BaseValue = EmitScalarExpr(BaseExpr);
2434         const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
2435         BaseQuals = PTy->getPointeeType().getQualifiers();
2436       } else {
2437         LValue BaseLV = EmitLValue(BaseExpr);
2438         BaseValue = BaseLV.getAddress();
2439         QualType BaseTy = BaseExpr->getType();
2440         BaseQuals = BaseTy.getQualifiers();
2441       }
2442 
2443       switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
2444       case Qualifiers::OCL_None:
2445       case Qualifiers::OCL_ExplicitNone:
2446       case Qualifiers::OCL_Autoreleasing:
2447         break;
2448 
2449       case Qualifiers::OCL_Strong:
2450         EmitARCRelease(Builder.CreateLoad(BaseValue,
2451                           PseudoDtor->getDestroyedType().isVolatileQualified()),
2452                        /*precise*/ true);
2453         break;
2454 
2455       case Qualifiers::OCL_Weak:
2456         EmitARCDestroyWeak(BaseValue);
2457         break;
2458       }
2459     } else {
2460       // C++ [expr.pseudo]p1:
2461       //   The result shall only be used as the operand for the function call
2462       //   operator (), and the result of such a call has type void. The only
2463       //   effect is the evaluation of the postfix-expression before the dot or
2464       //   arrow.
2465       EmitScalarExpr(E->getCallee());
2466     }
2467 
2468     return RValue::get(0);
2469   }
2470 
2471   llvm::Value *Callee = EmitScalarExpr(E->getCallee());
2472   return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
2473                   E->arg_begin(), E->arg_end(), TargetDecl);
2474 }
2475 
2476 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
2477   // Comma expressions just emit their LHS then their RHS as an l-value.
2478   if (E->getOpcode() == BO_Comma) {
2479     EmitIgnoredExpr(E->getLHS());
2480     EnsureInsertPoint();
2481     return EmitLValue(E->getRHS());
2482   }
2483 
2484   if (E->getOpcode() == BO_PtrMemD ||
2485       E->getOpcode() == BO_PtrMemI)
2486     return EmitPointerToDataMemberBinaryExpr(E);
2487 
2488   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
2489 
2490   // Note that in all of these cases, __block variables need the RHS
2491   // evaluated first just in case the variable gets moved by the RHS.
2492 
2493   if (!hasAggregateLLVMType(E->getType())) {
2494     switch (E->getLHS()->getType().getObjCLifetime()) {
2495     case Qualifiers::OCL_Strong:
2496       return EmitARCStoreStrong(E, /*ignored*/ false).first;
2497 
2498     case Qualifiers::OCL_Autoreleasing:
2499       return EmitARCStoreAutoreleasing(E).first;
2500 
2501     // No reason to do any of these differently.
2502     case Qualifiers::OCL_None:
2503     case Qualifiers::OCL_ExplicitNone:
2504     case Qualifiers::OCL_Weak:
2505       break;
2506     }
2507 
2508     RValue RV = EmitAnyExpr(E->getRHS());
2509     LValue LV = EmitLValue(E->getLHS());
2510     EmitStoreThroughLValue(RV, LV);
2511     return LV;
2512   }
2513 
2514   if (E->getType()->isAnyComplexType())
2515     return EmitComplexAssignmentLValue(E);
2516 
2517   return EmitAggExprToLValue(E);
2518 }
2519 
2520 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
2521   RValue RV = EmitCallExpr(E);
2522 
2523   if (!RV.isScalar())
2524     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2525 
2526   assert(E->getCallReturnType()->isReferenceType() &&
2527          "Can't have a scalar return unless the return type is a "
2528          "reference type!");
2529 
2530   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2531 }
2532 
2533 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
2534   // FIXME: This shouldn't require another copy.
2535   return EmitAggExprToLValue(E);
2536 }
2537 
2538 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
2539   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
2540          && "binding l-value to type which needs a temporary");
2541   AggValueSlot Slot = CreateAggTemp(E->getType());
2542   EmitCXXConstructExpr(E, Slot);
2543   return MakeAddrLValue(Slot.getAddr(), E->getType());
2544 }
2545 
2546 LValue
2547 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
2548   return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
2549 }
2550 
2551 LValue
2552 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
2553   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2554   Slot.setExternallyDestructed();
2555   EmitAggExpr(E->getSubExpr(), Slot);
2556   EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
2557   return MakeAddrLValue(Slot.getAddr(), E->getType());
2558 }
2559 
2560 LValue
2561 CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
2562   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2563   EmitLambdaExpr(E, Slot);
2564   return MakeAddrLValue(Slot.getAddr(), E->getType());
2565 }
2566 
2567 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
2568   RValue RV = EmitObjCMessageExpr(E);
2569 
2570   if (!RV.isScalar())
2571     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2572 
2573   assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
2574          "Can't have a scalar return unless the return type is a "
2575          "reference type!");
2576 
2577   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2578 }
2579 
2580 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
2581   llvm::Value *V =
2582     CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
2583   return MakeAddrLValue(V, E->getType());
2584 }
2585 
2586 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2587                                              const ObjCIvarDecl *Ivar) {
2588   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
2589 }
2590 
2591 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
2592                                           llvm::Value *BaseValue,
2593                                           const ObjCIvarDecl *Ivar,
2594                                           unsigned CVRQualifiers) {
2595   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
2596                                                    Ivar, CVRQualifiers);
2597 }
2598 
2599 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
2600   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
2601   llvm::Value *BaseValue = 0;
2602   const Expr *BaseExpr = E->getBase();
2603   Qualifiers BaseQuals;
2604   QualType ObjectTy;
2605   if (E->isArrow()) {
2606     BaseValue = EmitScalarExpr(BaseExpr);
2607     ObjectTy = BaseExpr->getType()->getPointeeType();
2608     BaseQuals = ObjectTy.getQualifiers();
2609   } else {
2610     LValue BaseLV = EmitLValue(BaseExpr);
2611     // FIXME: this isn't right for bitfields.
2612     BaseValue = BaseLV.getAddress();
2613     ObjectTy = BaseExpr->getType();
2614     BaseQuals = ObjectTy.getQualifiers();
2615   }
2616 
2617   LValue LV =
2618     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
2619                       BaseQuals.getCVRQualifiers());
2620   setObjCGCLValueClass(getContext(), E, LV);
2621   return LV;
2622 }
2623 
2624 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
2625   // Can only get l-value for message expression returning aggregate type
2626   RValue RV = EmitAnyExprToTemp(E);
2627   return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2628 }
2629 
2630 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
2631                                  ReturnValueSlot ReturnValue,
2632                                  CallExpr::const_arg_iterator ArgBeg,
2633                                  CallExpr::const_arg_iterator ArgEnd,
2634                                  const Decl *TargetDecl) {
2635   // Get the actual function type. The callee type will always be a pointer to
2636   // function type or a block pointer type.
2637   assert(CalleeType->isFunctionPointerType() &&
2638          "Call must have function pointer type!");
2639 
2640   CalleeType = getContext().getCanonicalType(CalleeType);
2641 
2642   const FunctionType *FnType
2643     = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
2644 
2645   CallArgList Args;
2646   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
2647 
2648   const CGFunctionInfo &FnInfo =
2649     CGM.getTypes().arrangeFunctionCall(Args, FnType);
2650 
2651   // C99 6.5.2.2p6:
2652   //   If the expression that denotes the called function has a type
2653   //   that does not include a prototype, [the default argument
2654   //   promotions are performed]. If the number of arguments does not
2655   //   equal the number of parameters, the behavior is undefined. If
2656   //   the function is defined with a type that includes a prototype,
2657   //   and either the prototype ends with an ellipsis (, ...) or the
2658   //   types of the arguments after promotion are not compatible with
2659   //   the types of the parameters, the behavior is undefined. If the
2660   //   function is defined with a type that does not include a
2661   //   prototype, and the types of the arguments after promotion are
2662   //   not compatible with those of the parameters after promotion,
2663   //   the behavior is undefined [except in some trivial cases].
2664   // That is, in the general case, we should assume that a call
2665   // through an unprototyped function type works like a *non-variadic*
2666   // call.  The way we make this work is to cast to the exact type
2667   // of the promoted arguments.
2668   if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
2669     llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
2670     CalleeTy = CalleeTy->getPointerTo();
2671     Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
2672   }
2673 
2674   return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
2675 }
2676 
2677 LValue CodeGenFunction::
2678 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
2679   llvm::Value *BaseV;
2680   if (E->getOpcode() == BO_PtrMemI)
2681     BaseV = EmitScalarExpr(E->getLHS());
2682   else
2683     BaseV = EmitLValue(E->getLHS()).getAddress();
2684 
2685   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
2686 
2687   const MemberPointerType *MPT
2688     = E->getRHS()->getType()->getAs<MemberPointerType>();
2689 
2690   llvm::Value *AddV =
2691     CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
2692 
2693   return MakeAddrLValue(AddV, MPT->getPointeeType());
2694 }
2695 
2696 static void
2697 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
2698              llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
2699              uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
2700   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
2701   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
2702 
2703   switch (E->getOp()) {
2704   case AtomicExpr::AO__c11_atomic_init:
2705     llvm_unreachable("Already handled!");
2706 
2707   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
2708   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
2709   case AtomicExpr::AO__atomic_compare_exchange:
2710   case AtomicExpr::AO__atomic_compare_exchange_n: {
2711     // Note that cmpxchg only supports specifying one ordering and
2712     // doesn't support weak cmpxchg, at least at the moment.
2713     llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
2714     LoadVal1->setAlignment(Align);
2715     llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
2716     LoadVal2->setAlignment(Align);
2717     llvm::AtomicCmpXchgInst *CXI =
2718         CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
2719     CXI->setVolatile(E->isVolatile());
2720     llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
2721     StoreVal1->setAlignment(Align);
2722     llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
2723     CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
2724     return;
2725   }
2726 
2727   case AtomicExpr::AO__c11_atomic_load:
2728   case AtomicExpr::AO__atomic_load_n:
2729   case AtomicExpr::AO__atomic_load: {
2730     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
2731     Load->setAtomic(Order);
2732     Load->setAlignment(Size);
2733     Load->setVolatile(E->isVolatile());
2734     llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
2735     StoreDest->setAlignment(Align);
2736     return;
2737   }
2738 
2739   case AtomicExpr::AO__c11_atomic_store:
2740   case AtomicExpr::AO__atomic_store:
2741   case AtomicExpr::AO__atomic_store_n: {
2742     assert(!Dest && "Store does not return a value");
2743     llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
2744     LoadVal1->setAlignment(Align);
2745     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
2746     Store->setAtomic(Order);
2747     Store->setAlignment(Size);
2748     Store->setVolatile(E->isVolatile());
2749     return;
2750   }
2751 
2752   case AtomicExpr::AO__c11_atomic_exchange:
2753   case AtomicExpr::AO__atomic_exchange_n:
2754   case AtomicExpr::AO__atomic_exchange:
2755     Op = llvm::AtomicRMWInst::Xchg;
2756     break;
2757 
2758   case AtomicExpr::AO__atomic_add_fetch:
2759     PostOp = llvm::Instruction::Add;
2760     // Fall through.
2761   case AtomicExpr::AO__c11_atomic_fetch_add:
2762   case AtomicExpr::AO__atomic_fetch_add:
2763     Op = llvm::AtomicRMWInst::Add;
2764     break;
2765 
2766   case AtomicExpr::AO__atomic_sub_fetch:
2767     PostOp = llvm::Instruction::Sub;
2768     // Fall through.
2769   case AtomicExpr::AO__c11_atomic_fetch_sub:
2770   case AtomicExpr::AO__atomic_fetch_sub:
2771     Op = llvm::AtomicRMWInst::Sub;
2772     break;
2773 
2774   case AtomicExpr::AO__atomic_and_fetch:
2775     PostOp = llvm::Instruction::And;
2776     // Fall through.
2777   case AtomicExpr::AO__c11_atomic_fetch_and:
2778   case AtomicExpr::AO__atomic_fetch_and:
2779     Op = llvm::AtomicRMWInst::And;
2780     break;
2781 
2782   case AtomicExpr::AO__atomic_or_fetch:
2783     PostOp = llvm::Instruction::Or;
2784     // Fall through.
2785   case AtomicExpr::AO__c11_atomic_fetch_or:
2786   case AtomicExpr::AO__atomic_fetch_or:
2787     Op = llvm::AtomicRMWInst::Or;
2788     break;
2789 
2790   case AtomicExpr::AO__atomic_xor_fetch:
2791     PostOp = llvm::Instruction::Xor;
2792     // Fall through.
2793   case AtomicExpr::AO__c11_atomic_fetch_xor:
2794   case AtomicExpr::AO__atomic_fetch_xor:
2795     Op = llvm::AtomicRMWInst::Xor;
2796     break;
2797 
2798   case AtomicExpr::AO__atomic_nand_fetch:
2799     PostOp = llvm::Instruction::And;
2800     // Fall through.
2801   case AtomicExpr::AO__atomic_fetch_nand:
2802     Op = llvm::AtomicRMWInst::Nand;
2803     break;
2804   }
2805 
2806   llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
2807   LoadVal1->setAlignment(Align);
2808   llvm::AtomicRMWInst *RMWI =
2809       CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
2810   RMWI->setVolatile(E->isVolatile());
2811 
2812   // For __atomic_*_fetch operations, perform the operation again to
2813   // determine the value which was written.
2814   llvm::Value *Result = RMWI;
2815   if (PostOp)
2816     Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
2817   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
2818     Result = CGF.Builder.CreateNot(Result);
2819   llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
2820   StoreDest->setAlignment(Align);
2821 }
2822 
2823 // This function emits any expression (scalar, complex, or aggregate)
2824 // into a temporary alloca.
2825 static llvm::Value *
2826 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
2827   llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
2828   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
2829                        /*Init*/ true);
2830   return DeclPtr;
2831 }
2832 
2833 static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
2834                                   llvm::Value *Dest) {
2835   if (Ty->isAnyComplexType())
2836     return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
2837   if (CGF.hasAggregateLLVMType(Ty))
2838     return RValue::getAggregate(Dest);
2839   return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
2840 }
2841 
2842 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
2843   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
2844   QualType MemTy = AtomicTy;
2845   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
2846     MemTy = AT->getValueType();
2847   CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
2848   uint64_t Size = sizeChars.getQuantity();
2849   CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
2850   unsigned Align = alignChars.getQuantity();
2851   unsigned MaxInlineWidth =
2852       getContext().getTargetInfo().getMaxAtomicInlineWidth();
2853   bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
2854 
2855 
2856 
2857   llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
2858   Ptr = EmitScalarExpr(E->getPtr());
2859 
2860   if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
2861     assert(!Dest && "Init does not return a value");
2862     if (!hasAggregateLLVMType(E->getVal1()->getType())) {
2863       QualType PointeeType
2864         = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
2865       EmitScalarInit(EmitScalarExpr(E->getVal1()),
2866                      LValue::MakeAddr(Ptr, PointeeType, alignChars,
2867                                       getContext()));
2868     } else if (E->getType()->isAnyComplexType()) {
2869       EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
2870     } else {
2871       AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
2872                                         AtomicTy.getQualifiers(),
2873                                         AggValueSlot::IsNotDestructed,
2874                                         AggValueSlot::DoesNotNeedGCBarriers,
2875                                         AggValueSlot::IsNotAliased);
2876       EmitAggExpr(E->getVal1(), Slot);
2877     }
2878     return RValue::get(0);
2879   }
2880 
2881   Order = EmitScalarExpr(E->getOrder());
2882 
2883   switch (E->getOp()) {
2884   case AtomicExpr::AO__c11_atomic_init:
2885     llvm_unreachable("Already handled!");
2886 
2887   case AtomicExpr::AO__c11_atomic_load:
2888   case AtomicExpr::AO__atomic_load_n:
2889     break;
2890 
2891   case AtomicExpr::AO__atomic_load:
2892     Dest = EmitScalarExpr(E->getVal1());
2893     break;
2894 
2895   case AtomicExpr::AO__atomic_store:
2896     Val1 = EmitScalarExpr(E->getVal1());
2897     break;
2898 
2899   case AtomicExpr::AO__atomic_exchange:
2900     Val1 = EmitScalarExpr(E->getVal1());
2901     Dest = EmitScalarExpr(E->getVal2());
2902     break;
2903 
2904   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
2905   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
2906   case AtomicExpr::AO__atomic_compare_exchange_n:
2907   case AtomicExpr::AO__atomic_compare_exchange:
2908     Val1 = EmitScalarExpr(E->getVal1());
2909     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
2910       Val2 = EmitScalarExpr(E->getVal2());
2911     else
2912       Val2 = EmitValToTemp(*this, E->getVal2());
2913     OrderFail = EmitScalarExpr(E->getOrderFail());
2914     // Evaluate and discard the 'weak' argument.
2915     if (E->getNumSubExprs() == 6)
2916       EmitScalarExpr(E->getWeak());
2917     break;
2918 
2919   case AtomicExpr::AO__c11_atomic_fetch_add:
2920   case AtomicExpr::AO__c11_atomic_fetch_sub:
2921     if (MemTy->isPointerType()) {
2922       // For pointer arithmetic, we're required to do a bit of math:
2923       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
2924       // ... but only for the C11 builtins. The GNU builtins expect the
2925       // user to multiply by sizeof(T).
2926       QualType Val1Ty = E->getVal1()->getType();
2927       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
2928       CharUnits PointeeIncAmt =
2929           getContext().getTypeSizeInChars(MemTy->getPointeeType());
2930       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
2931       Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
2932       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
2933       break;
2934     }
2935     // Fall through.
2936   case AtomicExpr::AO__atomic_fetch_add:
2937   case AtomicExpr::AO__atomic_fetch_sub:
2938   case AtomicExpr::AO__atomic_add_fetch:
2939   case AtomicExpr::AO__atomic_sub_fetch:
2940   case AtomicExpr::AO__c11_atomic_store:
2941   case AtomicExpr::AO__c11_atomic_exchange:
2942   case AtomicExpr::AO__atomic_store_n:
2943   case AtomicExpr::AO__atomic_exchange_n:
2944   case AtomicExpr::AO__c11_atomic_fetch_and:
2945   case AtomicExpr::AO__c11_atomic_fetch_or:
2946   case AtomicExpr::AO__c11_atomic_fetch_xor:
2947   case AtomicExpr::AO__atomic_fetch_and:
2948   case AtomicExpr::AO__atomic_fetch_or:
2949   case AtomicExpr::AO__atomic_fetch_xor:
2950   case AtomicExpr::AO__atomic_fetch_nand:
2951   case AtomicExpr::AO__atomic_and_fetch:
2952   case AtomicExpr::AO__atomic_or_fetch:
2953   case AtomicExpr::AO__atomic_xor_fetch:
2954   case AtomicExpr::AO__atomic_nand_fetch:
2955     Val1 = EmitValToTemp(*this, E->getVal1());
2956     break;
2957   }
2958 
2959   if (!E->getType()->isVoidType() && !Dest)
2960     Dest = CreateMemTemp(E->getType(), ".atomicdst");
2961 
2962   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
2963   if (UseLibcall) {
2964 
2965     llvm::SmallVector<QualType, 5> Params;
2966     CallArgList Args;
2967     // Size is always the first parameter
2968     Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
2969              getContext().getSizeType());
2970     // Atomic address is always the second parameter
2971     Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
2972              getContext().VoidPtrTy);
2973 
2974     const char* LibCallName;
2975     QualType RetTy = getContext().VoidTy;
2976     switch (E->getOp()) {
2977     // There is only one libcall for compare an exchange, because there is no
2978     // optimisation benefit possible from a libcall version of a weak compare
2979     // and exchange.
2980     // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
2981     //                                void *desired, int success, int failure)
2982     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
2983     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
2984     case AtomicExpr::AO__atomic_compare_exchange:
2985     case AtomicExpr::AO__atomic_compare_exchange_n:
2986       LibCallName = "__atomic_compare_exchange";
2987       RetTy = getContext().BoolTy;
2988       Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
2989                getContext().VoidPtrTy);
2990       Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
2991                getContext().VoidPtrTy);
2992       Args.add(RValue::get(Order),
2993                getContext().IntTy);
2994       Order = OrderFail;
2995       break;
2996     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
2997     //                        int order)
2998     case AtomicExpr::AO__c11_atomic_exchange:
2999     case AtomicExpr::AO__atomic_exchange_n:
3000     case AtomicExpr::AO__atomic_exchange:
3001       LibCallName = "__atomic_exchange";
3002       Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3003                getContext().VoidPtrTy);
3004       Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
3005                getContext().VoidPtrTy);
3006       break;
3007     // void __atomic_store(size_t size, void *mem, void *val, int order)
3008     case AtomicExpr::AO__c11_atomic_store:
3009     case AtomicExpr::AO__atomic_store:
3010     case AtomicExpr::AO__atomic_store_n:
3011       LibCallName = "__atomic_store";
3012       Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3013                getContext().VoidPtrTy);
3014       break;
3015     // void __atomic_load(size_t size, void *mem, void *return, int order)
3016     case AtomicExpr::AO__c11_atomic_load:
3017     case AtomicExpr::AO__atomic_load:
3018     case AtomicExpr::AO__atomic_load_n:
3019       LibCallName = "__atomic_load";
3020       Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
3021                getContext().VoidPtrTy);
3022       break;
3023 #if 0
3024     // These are only defined for 1-16 byte integers.  It is not clear what
3025     // their semantics would be on anything else...
3026     case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
3027     case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
3028     case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
3029     case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
3030     case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
3031 #endif
3032     default: return EmitUnsupportedRValue(E, "atomic library call");
3033     }
3034     // order is always the last parameter
3035     Args.add(RValue::get(Order),
3036              getContext().IntTy);
3037 
3038     const CGFunctionInfo &FuncInfo =
3039         CGM.getTypes().arrangeFunctionCall(RetTy, Args,
3040             FunctionType::ExtInfo(), RequiredArgs::All);
3041     llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3042     llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3043     RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
3044     if (E->isCmpXChg())
3045       return Res;
3046     if (E->getType()->isVoidType())
3047       return RValue::get(0);
3048     return ConvertTempToRValue(*this, E->getType(), Dest);
3049   }
3050 
3051   llvm::Type *IPtrTy =
3052       llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
3053   llvm::Value *OrigDest = Dest;
3054   Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
3055   if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
3056   if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
3057   if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
3058 
3059   if (isa<llvm::ConstantInt>(Order)) {
3060     int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3061     switch (ord) {
3062     case 0:  // memory_order_relaxed
3063       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3064                    llvm::Monotonic);
3065       break;
3066     case 1:  // memory_order_consume
3067     case 2:  // memory_order_acquire
3068       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3069                    llvm::Acquire);
3070       break;
3071     case 3:  // memory_order_release
3072       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3073                    llvm::Release);
3074       break;
3075     case 4:  // memory_order_acq_rel
3076       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3077                    llvm::AcquireRelease);
3078       break;
3079     case 5:  // memory_order_seq_cst
3080       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3081                    llvm::SequentiallyConsistent);
3082       break;
3083     default: // invalid order
3084       // We should not ever get here normally, but it's hard to
3085       // enforce that in general.
3086       break;
3087     }
3088     if (E->getType()->isVoidType())
3089       return RValue::get(0);
3090     return ConvertTempToRValue(*this, E->getType(), OrigDest);
3091   }
3092 
3093   // Long case, when Order isn't obviously constant.
3094 
3095   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
3096                  E->getOp() == AtomicExpr::AO__atomic_store ||
3097                  E->getOp() == AtomicExpr::AO__atomic_store_n;
3098   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
3099                 E->getOp() == AtomicExpr::AO__atomic_load ||
3100                 E->getOp() == AtomicExpr::AO__atomic_load_n;
3101 
3102   // Create all the relevant BB's
3103   llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
3104                    *AcqRelBB = 0, *SeqCstBB = 0;
3105   MonotonicBB = createBasicBlock("monotonic", CurFn);
3106   if (!IsStore)
3107     AcquireBB = createBasicBlock("acquire", CurFn);
3108   if (!IsLoad)
3109     ReleaseBB = createBasicBlock("release", CurFn);
3110   if (!IsLoad && !IsStore)
3111     AcqRelBB = createBasicBlock("acqrel", CurFn);
3112   SeqCstBB = createBasicBlock("seqcst", CurFn);
3113   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3114 
3115   // Create the switch for the split
3116   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
3117   // doesn't matter unless someone is crazy enough to use something that
3118   // doesn't fold to a constant for the ordering.
3119   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3120   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
3121 
3122   // Emit all the different atomics
3123   Builder.SetInsertPoint(MonotonicBB);
3124   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3125                llvm::Monotonic);
3126   Builder.CreateBr(ContBB);
3127   if (!IsStore) {
3128     Builder.SetInsertPoint(AcquireBB);
3129     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3130                  llvm::Acquire);
3131     Builder.CreateBr(ContBB);
3132     SI->addCase(Builder.getInt32(1), AcquireBB);
3133     SI->addCase(Builder.getInt32(2), AcquireBB);
3134   }
3135   if (!IsLoad) {
3136     Builder.SetInsertPoint(ReleaseBB);
3137     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3138                  llvm::Release);
3139     Builder.CreateBr(ContBB);
3140     SI->addCase(Builder.getInt32(3), ReleaseBB);
3141   }
3142   if (!IsLoad && !IsStore) {
3143     Builder.SetInsertPoint(AcqRelBB);
3144     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3145                  llvm::AcquireRelease);
3146     Builder.CreateBr(ContBB);
3147     SI->addCase(Builder.getInt32(4), AcqRelBB);
3148   }
3149   Builder.SetInsertPoint(SeqCstBB);
3150   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3151                llvm::SequentiallyConsistent);
3152   Builder.CreateBr(ContBB);
3153   SI->addCase(Builder.getInt32(5), SeqCstBB);
3154 
3155   // Cleanup and return
3156   Builder.SetInsertPoint(ContBB);
3157   if (E->getType()->isVoidType())
3158     return RValue::get(0);
3159   return ConvertTempToRValue(*this, E->getType(), OrigDest);
3160 }
3161 
3162 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
3163   assert(Val->getType()->isFPOrFPVectorTy());
3164   if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
3165     return;
3166 
3167   llvm::MDBuilder MDHelper(getLLVMContext());
3168   llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
3169 
3170   cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
3171 }
3172 
3173 namespace {
3174   struct LValueOrRValue {
3175     LValue LV;
3176     RValue RV;
3177   };
3178 }
3179 
3180 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
3181                                            const PseudoObjectExpr *E,
3182                                            bool forLValue,
3183                                            AggValueSlot slot) {
3184   llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
3185 
3186   // Find the result expression, if any.
3187   const Expr *resultExpr = E->getResultExpr();
3188   LValueOrRValue result;
3189 
3190   for (PseudoObjectExpr::const_semantics_iterator
3191          i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
3192     const Expr *semantic = *i;
3193 
3194     // If this semantic expression is an opaque value, bind it
3195     // to the result of its source expression.
3196     if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
3197 
3198       // If this is the result expression, we may need to evaluate
3199       // directly into the slot.
3200       typedef CodeGenFunction::OpaqueValueMappingData OVMA;
3201       OVMA opaqueData;
3202       if (ov == resultExpr && ov->isRValue() && !forLValue &&
3203           CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
3204           !ov->getType()->isAnyComplexType()) {
3205         CGF.EmitAggExpr(ov->getSourceExpr(), slot);
3206 
3207         LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
3208         opaqueData = OVMA::bind(CGF, ov, LV);
3209         result.RV = slot.asRValue();
3210 
3211       // Otherwise, emit as normal.
3212       } else {
3213         opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
3214 
3215         // If this is the result, also evaluate the result now.
3216         if (ov == resultExpr) {
3217           if (forLValue)
3218             result.LV = CGF.EmitLValue(ov);
3219           else
3220             result.RV = CGF.EmitAnyExpr(ov, slot);
3221         }
3222       }
3223 
3224       opaques.push_back(opaqueData);
3225 
3226     // Otherwise, if the expression is the result, evaluate it
3227     // and remember the result.
3228     } else if (semantic == resultExpr) {
3229       if (forLValue)
3230         result.LV = CGF.EmitLValue(semantic);
3231       else
3232         result.RV = CGF.EmitAnyExpr(semantic, slot);
3233 
3234     // Otherwise, evaluate the expression in an ignored context.
3235     } else {
3236       CGF.EmitIgnoredExpr(semantic);
3237     }
3238   }
3239 
3240   // Unbind all the opaques now.
3241   for (unsigned i = 0, e = opaques.size(); i != e; ++i)
3242     opaques[i].unbind(CGF);
3243 
3244   return result;
3245 }
3246 
3247 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
3248                                                AggValueSlot slot) {
3249   return emitPseudoObjectExpr(*this, E, false, slot).RV;
3250 }
3251 
3252 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
3253   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
3254 }
3255