1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGCall.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/ConvertUTF.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/LLVMContext.h"
28 #include "llvm/MDBuilder.h"
29 #include "llvm/Target/TargetData.h"
30 using namespace clang;
31 using namespace CodeGen;
32 
33 //===--------------------------------------------------------------------===//
34 //                        Miscellaneous Helper Methods
35 //===--------------------------------------------------------------------===//
36 
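/// EmitCastToVoidPtr - Bitcast the given pointer to i8* in its original
/// address space, returning it unchanged if it already has that type.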
37 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
38   unsigned addressSpace =
39     cast<llvm::PointerType>(value->getType())->getAddressSpace();
40 
41   llvm::PointerType *destType = Int8PtrTy;
42   if (addressSpace)
43     destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
44 
45   if (value->getType() == destType) return value;
46   return Builder.CreateBitCast(value, destType);
47 }
48 
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
51 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
52                                                     const Twine &Name) {
53   if (!Builder.isNamePreserving())
54     return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
55   return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
56 }
57 
58 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
59                                      llvm::Value *Init) {
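  // Insert the store right after the alloca insertion point so the temporary
  // is initialized at the top of the entry block, alongside the allocas.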
60   llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
61   llvm::BasicBlock *Block = AllocaInsertPt->getParent();
62   Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
63 }
64 
65 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
66                                                 const Twine &Name) {
67   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
68   // FIXME: Should we prefer the preferred type alignment here?
69   CharUnits Align = getContext().getTypeAlignInChars(Ty);
70   Alloc->setAlignment(Align.getQuantity());
71   return Alloc;
72 }
73 
74 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
75                                                  const Twine &Name) {
76   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
77   // FIXME: Should we prefer the preferred type alignment here?
78   CharUnits Align = getContext().getTypeAlignInChars(Ty);
79   Alloc->setAlignment(Align.getQuantity());
80   return Alloc;
81 }
82 
83 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
84 /// expression and compare the result against zero, returning an Int1Ty value.
85 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
86   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
87     llvm::Value *MemPtr = EmitScalarExpr(E);
88     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
89   }
90 
91   QualType BoolTy = getContext().BoolTy;
92   if (!E->getType()->isAnyComplexType())
93     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
94 
  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),
                                       BoolTy);
96 }
97 
98 /// EmitIgnoredExpr - Emit code to compute the specified expression,
99 /// ignoring the result.
100 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
101   if (E->isRValue())
102     return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
103 
104   // Just emit it as an l-value and drop the result.
105   EmitLValue(E);
106 }
107 
108 /// EmitAnyExpr - Emit code to compute the specified expression which
109 /// can have any type.  The result is returned as an RValue struct.
110 /// If this is an aggregate expression, AggSlot indicates where the
111 /// result should be returned.
112 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
113                                     AggValueSlot aggSlot,
114                                     bool ignoreResult) {
115   if (!hasAggregateLLVMType(E->getType()))
116     return RValue::get(EmitScalarExpr(E, ignoreResult));
117   else if (E->getType()->isAnyComplexType())
118     return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
119 
120   if (!ignoreResult && aggSlot.isIgnored())
121     aggSlot = CreateAggTemp(E->getType(), "agg-temp");
122   EmitAggExpr(E, aggSlot);
123   return aggSlot.asRValue();
124 }
125 
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will always
/// be accessible even if no aggregate location is provided.
128 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
129   AggValueSlot AggSlot = AggValueSlot::ignored();
130 
131   if (hasAggregateLLVMType(E->getType()) &&
132       !E->getType()->isAnyComplexType())
133     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
134   return EmitAnyExpr(E, AggSlot);
135 }
136 
137 /// EmitAnyExprToMem - Evaluate an expression into a given memory
138 /// location.
139 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
140                                        llvm::Value *Location,
141                                        Qualifiers Quals,
142                                        bool IsInit) {
143   // FIXME: This function should take an LValue as an argument.
144   if (E->getType()->isAnyComplexType()) {
145     EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
146   } else if (hasAggregateLLVMType(E->getType())) {
147     CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
148     EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
149                                          AggValueSlot::IsDestructed_t(IsInit),
150                                          AggValueSlot::DoesNotNeedGCBarriers,
151                                          AggValueSlot::IsAliased_t(!IsInit)));
152   } else {
153     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
154     LValue LV = MakeAddrLValue(Location, E->getType());
155     EmitStoreThroughLValue(RV, LV);
156   }
157 }
158 
159 namespace {
160 /// \brief An adjustment to be made to the temporary created when emitting a
161 /// reference binding, which accesses a particular subobject of that temporary.
162   struct SubobjectAdjustment {
163     enum {
164       DerivedToBaseAdjustment,
165       FieldAdjustment,
166       MemberPointerAdjustment
167     } Kind;
168 
169     union {
170       struct {
171         const CastExpr *BasePath;
172         const CXXRecordDecl *DerivedClass;
173       } DerivedToBase;
174 
175       FieldDecl *Field;
176 
177       struct {
178         const MemberPointerType *MPT;
179         llvm::Value *Ptr;
180       } Ptr;
181     };
182 
183     SubobjectAdjustment(const CastExpr *BasePath,
184                         const CXXRecordDecl *DerivedClass)
185       : Kind(DerivedToBaseAdjustment) {
186       DerivedToBase.BasePath = BasePath;
187       DerivedToBase.DerivedClass = DerivedClass;
188     }
189 
190     SubobjectAdjustment(FieldDecl *Field)
191       : Kind(FieldAdjustment) {
192       this->Field = Field;
193     }
194 
195     SubobjectAdjustment(const MemberPointerType *MPT, llvm::Value *Ptr)
196       : Kind(MemberPointerAdjustment) {
197       this->Ptr.MPT = MPT;
198       this->Ptr.Ptr = Ptr;
199     }
200   };
201 }
202 
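/// Create the temporary a reference will bind to: a mangled internal-linkage
/// global when the reference itself has global storage, an alloca otherwise.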
203 static llvm::Value *
204 CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
205                          const NamedDecl *InitializedDecl) {
206   if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
207     if (VD->hasGlobalStorage()) {
208       SmallString<256> Name;
209       llvm::raw_svector_ostream Out(Name);
210       CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
211       Out.flush();
212 
213       llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
214 
215       // Create the reference temporary.
216       llvm::GlobalValue *RefTemp =
217         new llvm::GlobalVariable(CGF.CGM.getModule(),
218                                  RefTempTy, /*isConstant=*/false,
219                                  llvm::GlobalValue::InternalLinkage,
220                                  llvm::Constant::getNullValue(RefTempTy),
221                                  Name.str());
222       return RefTemp;
223     }
224   }
225 
226   return CGF.CreateMemTemp(Type, "ref.tmp");
227 }
228 
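/// Emit the expression a reference is being bound to, materializing and
/// adjusting temporaries as needed. The out-parameters report any temporary
/// that still needs destruction or ARC lifetime management.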
229 static llvm::Value *
230 EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
231                             llvm::Value *&ReferenceTemporary,
232                             const CXXDestructorDecl *&ReferenceTemporaryDtor,
233                             QualType &ObjCARCReferenceLifetimeType,
234                             const NamedDecl *InitializedDecl) {
235   // Look through single-element init lists that claim to be lvalues. They're
236   // just syntactic wrappers in this case.
237   if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
238     if (ILE->getNumInits() == 1 && ILE->isGLValue())
239       E = ILE->getInit(0);
240   }
241 
242   // Look through expressions for materialized temporaries (for now).
243   if (const MaterializeTemporaryExpr *M
244                                       = dyn_cast<MaterializeTemporaryExpr>(E)) {
245     // Objective-C++ ARC:
246     //   If we are binding a reference to a temporary that has ownership, we
247     //   need to perform retain/release operations on the temporary.
248     if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
249         E->getType()->isObjCLifetimeType() &&
250         (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
251          E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
252          E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
253       ObjCARCReferenceLifetimeType = E->getType();
254 
255     E = M->GetTemporaryExpr();
256   }
257 
258   if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
259     E = DAE->getExpr();
260 
261   if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
262     CGF.enterFullExpression(EWC);
263     CodeGenFunction::RunCleanupsScope Scope(CGF);
264 
265     return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
266                                        ReferenceTemporary,
267                                        ReferenceTemporaryDtor,
268                                        ObjCARCReferenceLifetimeType,
269                                        InitializedDecl);
270   }
271 
272   RValue RV;
273   if (E->isGLValue()) {
274     // Emit the expression as an lvalue.
275     LValue LV = CGF.EmitLValue(E);
276 
277     if (LV.isSimple())
278       return LV.getAddress();
279 
280     // We have to load the lvalue.
281     RV = CGF.EmitLoadOfLValue(LV);
282   } else {
283     if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary =
        CreateReferenceTemporary(CGF, ObjCARCReferenceLifetimeType,
                                 InitializedDecl);

289       LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
290                                              ObjCARCReferenceLifetimeType);
291 
292       CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
293                          RefTempDst, false);
294 
295       bool ExtendsLifeOfTemporary = false;
296       if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
297         if (Var->extendsLifetimeOfTemporary())
298           ExtendsLifeOfTemporary = true;
299       } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
300         ExtendsLifeOfTemporary = true;
301       }
302 
303       if (!ExtendsLifeOfTemporary) {
304         // Since the lifetime of this temporary isn't going to be extended,
305         // we need to clean it up ourselves at the end of the full expression.
306         switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
307         case Qualifiers::OCL_None:
308         case Qualifiers::OCL_ExplicitNone:
309         case Qualifiers::OCL_Autoreleasing:
310           break;
311 
312         case Qualifiers::OCL_Strong: {
313           assert(!ObjCARCReferenceLifetimeType->isArrayType());
314           CleanupKind cleanupKind = CGF.getARCCleanupKind();
315           CGF.pushDestroy(cleanupKind,
316                           ReferenceTemporary,
317                           ObjCARCReferenceLifetimeType,
318                           CodeGenFunction::destroyARCStrongImprecise,
319                           cleanupKind & EHCleanup);
320           break;
321         }
322 
323         case Qualifiers::OCL_Weak:
324           assert(!ObjCARCReferenceLifetimeType->isArrayType());
325           CGF.pushDestroy(NormalAndEHCleanup,
326                           ReferenceTemporary,
327                           ObjCARCReferenceLifetimeType,
328                           CodeGenFunction::destroyARCWeak,
329                           /*useEHCleanupForArray*/ true);
330           break;
331         }
332 
333         ObjCARCReferenceLifetimeType = QualType();
334       }
335 
336       return ReferenceTemporary;
337     }
338 
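    // Peel off subobject accesses (derived-to-base casts, field accesses,
    // pointer-to-member dereferences) so that the complete temporary, not
    // just the referenced subobject, is what gets materialized below.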
339     SmallVector<SubobjectAdjustment, 2> Adjustments;
340     while (true) {
341       E = E->IgnoreParens();
342 
343       if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
344         if ((CE->getCastKind() == CK_DerivedToBase ||
345              CE->getCastKind() == CK_UncheckedDerivedToBase) &&
346             E->getType()->isRecordType()) {
347           E = CE->getSubExpr();
348           CXXRecordDecl *Derived
349             = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
350           Adjustments.push_back(SubobjectAdjustment(CE, Derived));
351           continue;
352         }
353 
354         if (CE->getCastKind() == CK_NoOp) {
355           E = CE->getSubExpr();
356           continue;
357         }
358       } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
359         if (!ME->isArrow() && ME->getBase()->isRValue()) {
360           assert(ME->getBase()->getType()->isRecordType());
361           if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
362             E = ME->getBase();
363             Adjustments.push_back(SubobjectAdjustment(Field));
364             continue;
365           }
366         }
367       } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
368         if (BO->isPtrMemOp()) {
369           assert(BO->getLHS()->isRValue());
370           E = BO->getLHS();
371           const MemberPointerType *MPT =
372               BO->getRHS()->getType()->getAs<MemberPointerType>();
373           llvm::Value *Ptr = CGF.EmitScalarExpr(BO->getRHS());
374           Adjustments.push_back(SubobjectAdjustment(MPT, Ptr));
375         }
376       }
377 
378       if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
379         if (opaque->getType()->isRecordType())
380           return CGF.EmitOpaqueValueLValue(opaque).getAddress();
381 
382       // Nothing changed.
383       break;
384     }
385 
386     // Create a reference temporary if necessary.
387     AggValueSlot AggSlot = AggValueSlot::ignored();
388     if (CGF.hasAggregateLLVMType(E->getType()) &&
389         !E->getType()->isAnyComplexType()) {
390       ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
391                                                     InitializedDecl);
392       CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
393       AggValueSlot::IsDestructed_t isDestructed
394         = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
395       AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
396                                       Qualifiers(), isDestructed,
397                                       AggValueSlot::DoesNotNeedGCBarriers,
398                                       AggValueSlot::IsNotAliased);
399     }
400 
401     if (InitializedDecl) {
402       // Get the destructor for the reference temporary.
403       if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
404         CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
405         if (!ClassDecl->hasTrivialDestructor())
406           ReferenceTemporaryDtor = ClassDecl->getDestructor();
407       }
408     }
409 
410     RV = CGF.EmitAnyExpr(E, AggSlot);
411 
    // Check if we need to perform derived-to-base casts and/or field accesses, to
413     // get from the temporary object we created (and, potentially, for which we
414     // extended the lifetime) to the subobject we're binding the reference to.
415     if (!Adjustments.empty()) {
416       llvm::Value *Object = RV.getAggregateAddr();
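      // Apply the adjustments in reverse of the order they were gathered,
      // walking from the complete object down to the subobject being bound.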
417       for (unsigned I = Adjustments.size(); I != 0; --I) {
418         SubobjectAdjustment &Adjustment = Adjustments[I-1];
419         switch (Adjustment.Kind) {
420         case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object = CGF.GetAddressOfBaseClass(
              Object, Adjustment.DerivedToBase.DerivedClass,
              Adjustment.DerivedToBase.BasePath->path_begin(),
              Adjustment.DerivedToBase.BasePath->path_end(),
              /*NullCheckValue=*/false);
427           break;
428 
429         case SubobjectAdjustment::FieldAdjustment: {
430           LValue LV = CGF.MakeAddrLValue(Object, E->getType());
431           LV = CGF.EmitLValueForField(LV, Adjustment.Field);
432           if (LV.isSimple()) {
433             Object = LV.getAddress();
434             break;
435           }
436 
437           // For non-simple lvalues, we actually have to create a copy of
438           // the object we're binding to.
439           QualType T = Adjustment.Field->getType().getNonReferenceType()
440                                                   .getUnqualifiedType();
441           Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
442           LValue TempLV = CGF.MakeAddrLValue(Object,
443                                              Adjustment.Field->getType());
444           CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
445           break;
446         }
447 
448         case SubobjectAdjustment::MemberPointerAdjustment: {
449           Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
450                         CGF, Object, Adjustment.Ptr.Ptr, Adjustment.Ptr.MPT);
451           break;
452         }
453         }
454       }
455 
456       return Object;
457     }
458   }
459 
460   if (RV.isAggregate())
461     return RV.getAggregateAddr();
462 
463   // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

468   unsigned Alignment =
469     CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
470   if (RV.isScalar())
471     CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
472                           /*Volatile=*/false, Alignment, E->getType());
473   else
474     CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
475                            /*Volatile=*/false);
476   return ReferenceTemporary;
477 }
478 
479 RValue
480 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
481                                             const NamedDecl *InitializedDecl) {
482   llvm::Value *ReferenceTemporary = 0;
483   const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
484   QualType ObjCARCReferenceLifetimeType;
485   llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
486                                                    ReferenceTemporaryDtor,
487                                                    ObjCARCReferenceLifetimeType,
488                                                    InitializedDecl);
489   if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
490     return RValue::get(Value);
491 
492   // Make sure to call the destructor for the reference temporary.
493   const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
494   if (VD && VD->hasGlobalStorage()) {
495     if (ReferenceTemporaryDtor) {
496       llvm::Constant *DtorFn =
497         CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
498       CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
499                                     cast<llvm::Constant>(ReferenceTemporary));
500     } else {
501       assert(!ObjCARCReferenceLifetimeType.isNull());
502       // Note: We intentionally do not register a global "destructor" to
503       // release the object.
504     }
505 
506     return RValue::get(Value);
507   }
508 
509   if (ReferenceTemporaryDtor)
510     PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
511   else {
512     switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
513     case Qualifiers::OCL_None:
      llvm_unreachable(
          "Not a reference temporary that needs to be deallocated");
516     case Qualifiers::OCL_ExplicitNone:
517     case Qualifiers::OCL_Autoreleasing:
518       // Nothing to do.
519       break;
520 
521     case Qualifiers::OCL_Strong: {
522       bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
523       CleanupKind cleanupKind = getARCCleanupKind();
524       pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
525                   precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
526                   cleanupKind & EHCleanup);
527       break;
528     }
529 
530     case Qualifiers::OCL_Weak: {
531       // __weak objects always get EH cleanups; otherwise, exceptions
532       // could cause really nasty crashes instead of mere leaks.
533       pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
534                   ObjCARCReferenceLifetimeType, destroyARCWeak, true);
535       break;
536     }
537     }
538   }
539 
540   return RValue::get(Value);
541 }
542 
543 
544 /// getAccessedFieldNo - Given an encoded value and a result number, return the
545 /// input field number being accessed.
546 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
547                                              const llvm::Constant *Elts) {
548   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
549       ->getZExtValue();
550 }
551 
552 void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
553   if (!CatchUndefined)
554     return;
555 
  // This needs to be in the standard address space.
557   Address = Builder.CreateBitCast(Address, Int8PtrTy);
558 
559   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
560 
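  // Min=false asks llvm.objectsize for the maximum remaining object size;
  // when the size cannot be determined it folds to -1, so the comparison
  // below trivially passes rather than reporting a false positive.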
561   llvm::Value *Min = Builder.getFalse();
562   llvm::Value *C = Builder.CreateCall2(F, Address, Min);
563   llvm::BasicBlock *Cont = createBasicBlock();
564   Builder.CreateCondBr(Builder.CreateICmpUGE(C,
565                                         llvm::ConstantInt::get(IntPtrTy, Size)),
566                        Cont, getTrapBB());
567   EmitBlock(Cont);
568 }
569 
570 
571 CodeGenFunction::ComplexPairTy CodeGenFunction::
572 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
573                          bool isInc, bool isPre) {
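  // Pre/post ++ and -- on a _Complex l-value adjust only the real component;
  // the imaginary component is carried through unchanged.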
574   ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
575                                             LV.isVolatileQualified());
576 
577   llvm::Value *NextVal;
578   if (isa<llvm::IntegerType>(InVal.first->getType())) {
579     uint64_t AmountVal = isInc ? 1 : -1;
580     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
581 
582     // Add the inc/dec to the real part.
583     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
584   } else {
585     QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
586     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
587     if (!isInc)
588       FVal.changeSign();
589     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
590 
591     // Add the inc/dec to the real part.
592     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
593   }
594 
595   ComplexPairTy IncVal(NextVal, InVal.second);
596 
597   // Store the updated result through the lvalue.
598   StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
599 
600   // If this is a postinc, return the value read from memory, otherwise use the
601   // updated value.
602   return isPre ? IncVal : InVal;
603 }
604 
605 
606 //===----------------------------------------------------------------------===//
607 //                         LValue Expression Emission
608 //===----------------------------------------------------------------------===//
609 
610 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
611   if (Ty->isVoidType())
612     return RValue::get(0);
613 
614   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
615     llvm::Type *EltTy = ConvertType(CTy->getElementType());
616     llvm::Value *U = llvm::UndefValue::get(EltTy);
617     return RValue::getComplex(std::make_pair(U, U));
618   }
619 
620   // If this is a use of an undefined aggregate type, the aggregate must have an
621   // identifiable address.  Just because the contents of the value are undefined
622   // doesn't mean that the address can't be taken and compared.
623   if (hasAggregateLLVMType(Ty)) {
624     llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
625     return RValue::getAggregate(DestPtr);
626   }
627 
628   return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
629 }
630 
631 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
632                                               const char *Name) {
633   ErrorUnsupported(E, Name);
634   return GetUndefRValue(E->getType());
635 }
636 
637 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
638                                               const char *Name) {
639   ErrorUnsupported(E, Name);
640   llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
641   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
642 }
643 
644 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
645   LValue LV = EmitLValue(E);
646   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
647     EmitCheck(LV.getAddress(),
648               getContext().getTypeSizeInChars(E->getType()).getQuantity());
649   return LV;
650 }
651 
652 /// EmitLValue - Emit code to compute a designator that specifies the location
653 /// of the expression.
654 ///
655 /// This can return one of two things: a simple address or a bitfield reference.
656 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
657 /// an LLVM pointer type.
658 ///
659 /// If this returns a bitfield reference, nothing about the pointee type of the
660 /// LLVM value is known: For example, it may not be a pointer to an integer.
661 ///
662 /// If this returns a normal address, and if the lvalue's C type is fixed size,
663 /// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type.  If the lvalue has a variable
665 /// length type, this is not possible.
666 ///
667 LValue CodeGenFunction::EmitLValue(const Expr *E) {
668   switch (E->getStmtClass()) {
669   default: return EmitUnsupportedLValue(E, "l-value expression");
670 
671   case Expr::ObjCPropertyRefExprClass:
672     llvm_unreachable("cannot emit a property reference directly");
673 
674   case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
676   case Expr::ObjCIsaExprClass:
677     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
678   case Expr::BinaryOperatorClass:
679     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
680   case Expr::CompoundAssignOperatorClass:
681     if (!E->getType()->isAnyComplexType())
682       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
683     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
684   case Expr::CallExprClass:
685   case Expr::CXXMemberCallExprClass:
686   case Expr::CXXOperatorCallExprClass:
687   case Expr::UserDefinedLiteralClass:
688     return EmitCallExprLValue(cast<CallExpr>(E));
689   case Expr::VAArgExprClass:
690     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
691   case Expr::DeclRefExprClass:
692     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
693   case Expr::ParenExprClass:
694     return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
695   case Expr::GenericSelectionExprClass:
696     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
697   case Expr::PredefinedExprClass:
698     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
699   case Expr::StringLiteralClass:
700     return EmitStringLiteralLValue(cast<StringLiteral>(E));
701   case Expr::ObjCEncodeExprClass:
702     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
703   case Expr::PseudoObjectExprClass:
704     return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
705   case Expr::InitListExprClass:
706     return EmitInitListLValue(cast<InitListExpr>(E));
707   case Expr::CXXTemporaryObjectExprClass:
708   case Expr::CXXConstructExprClass:
709     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
710   case Expr::CXXBindTemporaryExprClass:
711     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
712   case Expr::LambdaExprClass:
713     return EmitLambdaLValue(cast<LambdaExpr>(E));
714 
715   case Expr::ExprWithCleanupsClass: {
716     const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
717     enterFullExpression(cleanups);
718     RunCleanupsScope Scope(*this);
719     return EmitLValue(cleanups->getSubExpr());
720   }
721 
722   case Expr::CXXScalarValueInitExprClass:
723     return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
724   case Expr::CXXDefaultArgExprClass:
725     return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
726   case Expr::CXXTypeidExprClass:
727     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
728 
729   case Expr::ObjCMessageExprClass:
730     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
731   case Expr::ObjCIvarRefExprClass:
732     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
733   case Expr::StmtExprClass:
734     return EmitStmtExprLValue(cast<StmtExpr>(E));
735   case Expr::UnaryOperatorClass:
736     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
737   case Expr::ArraySubscriptExprClass:
738     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
739   case Expr::ExtVectorElementExprClass:
740     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
741   case Expr::MemberExprClass:
742     return EmitMemberExpr(cast<MemberExpr>(E));
743   case Expr::CompoundLiteralExprClass:
744     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
745   case Expr::ConditionalOperatorClass:
746     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
747   case Expr::BinaryConditionalOperatorClass:
748     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
749   case Expr::ChooseExprClass:
750     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
751   case Expr::OpaqueValueExprClass:
752     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
753   case Expr::SubstNonTypeTemplateParmExprClass:
754     return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
755   case Expr::ImplicitCastExprClass:
756   case Expr::CStyleCastExprClass:
757   case Expr::CXXFunctionalCastExprClass:
758   case Expr::CXXStaticCastExprClass:
759   case Expr::CXXDynamicCastExprClass:
760   case Expr::CXXReinterpretCastExprClass:
761   case Expr::CXXConstCastExprClass:
762   case Expr::ObjCBridgedCastExprClass:
763     return EmitCastLValue(cast<CastExpr>(E));
764 
765   case Expr::MaterializeTemporaryExprClass:
766     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
767   }
768 }
769 
770 /// Given an object of the given canonical type, can we safely copy a
771 /// value out of it based on its initializer?
772 static bool isConstantEmittableObjectType(QualType type) {
773   assert(type.isCanonical());
774   assert(!type->isReferenceType());
775 
776   // Must be const-qualified but non-volatile.
777   Qualifiers qs = type.getLocalQualifiers();
778   if (!qs.hasConst() || qs.hasVolatile()) return false;
779 
780   // Otherwise, all object types satisfy this except C++ classes with
781   // mutable subobjects or non-trivial copy/destroy behavior.
782   if (const RecordType *RT = dyn_cast<RecordType>(type))
783     if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
784       if (RD->hasMutableFields() || !RD->isTrivial())
785         return false;
786 
787   return true;
788 }
789 
790 /// Can we constant-emit a load of a reference to a variable of the
791 /// given type?  This is different from predicates like
792 /// Decl::isUsableInConstantExpressions because we do want it to apply
793 /// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
795 /// to do this with const float variables even if those variables
796 /// aren't marked 'constexpr'.
797 enum ConstantEmissionKind {
798   CEK_None,
799   CEK_AsReferenceOnly,
800   CEK_AsValueOrReference,
801   CEK_AsValueOnly
802 };
803 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
804   type = type.getCanonicalType();
805   if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
806     if (isConstantEmittableObjectType(ref->getPointeeType()))
807       return CEK_AsValueOrReference;
808     return CEK_AsReferenceOnly;
809   }
810   if (isConstantEmittableObjectType(type))
811     return CEK_AsValueOnly;
812   return CEK_None;
813 }
814 
815 /// Try to emit a reference to the given value without producing it as
816 /// an l-value.  This is actually more than an optimization: we can't
817 /// produce an l-value for variables that we never actually captured
/// in a block or lambda, such as const int variables or constexpr
/// literals.
820 CodeGenFunction::ConstantEmission
821 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
822   ValueDecl *value = refExpr->getDecl();
823 
824   // The value needs to be an enum constant or a constant variable.
825   ConstantEmissionKind CEK;
826   if (isa<ParmVarDecl>(value)) {
827     CEK = CEK_None;
828   } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
829     CEK = checkVarTypeForConstantEmission(var->getType());
830   } else if (isa<EnumConstantDecl>(value)) {
831     CEK = CEK_AsValueOnly;
832   } else {
833     CEK = CEK_None;
834   }
835   if (CEK == CEK_None) return ConstantEmission();
836 
837   Expr::EvalResult result;
838   bool resultIsReference;
839   QualType resultType;
840 
841   // It's best to evaluate all the way as an r-value if that's permitted.
842   if (CEK != CEK_AsReferenceOnly &&
843       refExpr->EvaluateAsRValue(result, getContext())) {
844     resultIsReference = false;
845     resultType = refExpr->getType();
846 
847   // Otherwise, try to evaluate as an l-value.
848   } else if (CEK != CEK_AsValueOnly &&
849              refExpr->EvaluateAsLValue(result, getContext())) {
850     resultIsReference = true;
851     resultType = value->getType();
852 
853   // Failure.
854   } else {
855     return ConstantEmission();
856   }
857 
858   // In any case, if the initializer has side-effects, abandon ship.
859   if (result.HasSideEffects)
860     return ConstantEmission();
861 
862   // Emit as a constant.
863   llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
864 
865   // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for variables that must be emitted.
867   if (isa<VarDecl>(value)) {
868     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
869       EmitDeclRefExprDbgValue(refExpr, C);
870   } else {
871     assert(isa<EnumConstantDecl>(value));
872     EmitDeclRefExprDbgValue(refExpr, C);
873   }
874 
875   // If we emitted a reference constant, we need to dereference that.
876   if (resultIsReference)
877     return ConstantEmission::forReference(C);
878 
879   return ConstantEmission::forValue(C);
880 }
881 
882 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
883   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
884                           lvalue.getAlignment().getQuantity(),
885                           lvalue.getType(), lvalue.getTBAAInfo());
886 }
887 
888 static bool hasBooleanRepresentation(QualType Ty) {
889   if (Ty->isBooleanType())
890     return true;
891 
892   if (const EnumType *ET = Ty->getAs<EnumType>())
893     return ET->getDecl()->getIntegerType()->isBooleanType();
894 
895   if (const AtomicType *AT = Ty->getAs<AtomicType>())
896     return hasBooleanRepresentation(AT->getValueType());
897 
898   return false;
899 }
900 
901 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
902   const EnumType *ET = Ty->getAs<EnumType>();
903   bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
904                                  CGM.getCodeGenOpts().StrictEnums &&
905                                  !ET->getDecl()->isFixed());
906   bool IsBool = hasBooleanRepresentation(Ty);
907   if (!IsBool && !IsRegularCPlusPlusEnum)
908     return NULL;
909 
910   llvm::APInt Min;
911   llvm::APInt End;
912   if (IsBool) {
913     Min = llvm::APInt(8, 0);
914     End = llvm::APInt(8, 2);
915   } else {
916     const EnumDecl *ED = ET->getDecl();
917     llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
918     unsigned Bitwidth = LTy->getScalarSizeInBits();
919     unsigned NumNegativeBits = ED->getNumNegativeBits();
920     unsigned NumPositiveBits = ED->getNumPositiveBits();
921 
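    // For example, an enumeration whose enumerators span -2 to 5 has
    // NumNegativeBits == 2 and NumPositiveBits == 3, so NumBits == 4 and the
    // attached metadata describes the half-open range [-8, 8).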
922     if (NumNegativeBits) {
923       unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
924       assert(NumBits <= Bitwidth);
925       End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
926       Min = -End;
927     } else {
928       assert(NumPositiveBits <= Bitwidth);
929       End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
930       Min = llvm::APInt(Bitwidth, 0);
931     }
932   }
933 
934   llvm::MDBuilder MDHelper(getLLVMContext());
935   return MDHelper.createRange(Min, End);
936 }
937 
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
942   // For better performance, handle vector loads differently.
943   if (Ty->isVectorType()) {
944     llvm::Value *V;
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Addr->getType())->getElementType();
947 
948     const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);
949 
    // Handle vectors of size 3 like size 4, for better performance.
951     if (VTy->getNumElements() == 3) {
953       // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty =
        llvm::VectorType::get(VTy->getElementType(), 4);
      llvm::PointerType *ptVec4Ty = llvm::PointerType::get(vec4Ty,
        cast<llvm::PointerType>(Addr->getType())->getAddressSpace());
960       llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
961                                                 "castToVec4");
962       // Now load value.
963       llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");
964 
965       // Shuffle vector to get vec3.
      llvm::SmallVector<llvm::Constant*, 3> Mask;
      Mask.push_back(Builder.getInt32(0));
      Mask.push_back(Builder.getInt32(1));
      Mask.push_back(Builder.getInt32(2));
976 
977       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
978       V = Builder.CreateShuffleVector(LoadVal,
979                                       llvm::UndefValue::get(vec4Ty),
980                                       MaskV, "extractVec");
981       return EmitFromMemory(V, Ty);
982     }
983   }
984 
985   llvm::LoadInst *Load = Builder.CreateLoad(Addr);
986   if (Volatile)
987     Load->setVolatile(true);
988   if (Alignment)
989     Load->setAlignment(Alignment);
990   if (TBAAInfo)
991     CGM.DecorateInstruction(Load, TBAAInfo);
992   // If this is an atomic type, all normal reads must be atomic
993   if (Ty->isAtomicType())
994     Load->setAtomic(llvm::SequentiallyConsistent);
995 
996   if (CGM.getCodeGenOpts().OptimizationLevel > 0)
997     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
998       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
999 
1000   return EmitFromMemory(Load, Ty);
1001 }
1002 
1003 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1004   // Bool has a different representation in memory than in registers.
1005   if (hasBooleanRepresentation(Ty)) {
1006     // This should really always be an i1, but sometimes it's already
1007     // an i8, and it's awkward to track those cases down.
1008     if (Value->getType()->isIntegerTy(1))
1009       return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
1010     assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
1011   }
1012 
1013   return Value;
1014 }
1015 
1016 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1017   // Bool has a different representation in memory than in registers.
1018   if (hasBooleanRepresentation(Ty)) {
1019     assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
1020     return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1021   }
1022 
1023   return Value;
1024 }
1025 
1026 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
1027                                         bool Volatile, unsigned Alignment,
1028                                         QualType Ty,
1029                                         llvm::MDNode *TBAAInfo,
1030                                         bool isInit) {
1032   // Handle vectors differently to get better performance.
1033   if (Ty->isVectorType()) {
1034     llvm::Type *SrcTy = Value->getType();
1035     llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 specially.
1037     if (VecTy->getNumElements() == 3) {
      // Our source is a vec3, do a shuffle vector to make it a vec4.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      Mask.push_back(Builder.getInt32(0));
      Mask.push_back(Builder.getInt32(1));
      Mask.push_back(Builder.getInt32(2));
      Mask.push_back(llvm::UndefValue::get(Builder.getInt32Ty()));
1052 
1053       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1054       Value = Builder.CreateShuffleVector(Value,
1055                                           llvm::UndefValue::get(VecTy),
1056                                           MaskV, "extractVec");
1057       SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
1058     }
1059     llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
1060     if (DstPtr->getElementType() != SrcTy) {
      llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
1063       Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
1064     }
1065   }
1066 
1067   Value = EmitToMemory(Value, Ty);
1068 
1069   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1070   if (Alignment)
1071     Store->setAlignment(Alignment);
1072   if (TBAAInfo)
1073     CGM.DecorateInstruction(Store, TBAAInfo);
1074   if (!isInit && Ty->isAtomicType())
1075     Store->setAtomic(llvm::SequentiallyConsistent);
1076 }
1077 
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
1080   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
1081                     lvalue.getAlignment().getQuantity(), lvalue.getType(),
1082                     lvalue.getTBAAInfo(), isInit);
1083 }
1084 
1085 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1086 /// method emits the address of the lvalue, then loads the result as an rvalue,
1087 /// returning the rvalue.
1088 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
1089   if (LV.isObjCWeak()) {
1090     // load of a __weak object.
1091     llvm::Value *AddrWeakObj = LV.getAddress();
1092     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1093                                                              AddrWeakObj));
1094   }
1095   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
1096     return RValue::get(EmitARCLoadWeak(LV.getAddress()));
1097 
1098   if (LV.isSimple()) {
1099     assert(!LV.getType()->isFunctionType());
1100 
1101     // Everything needs a load.
1102     return RValue::get(EmitLoadOfScalar(LV));
1103   }
1104 
1105   if (LV.isVectorElt()) {
1106     llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
1107                                               LV.isVolatileQualified());
1108     Load->setAlignment(LV.getAlignment().getQuantity());
1109     return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1110                                                     "vecext"));
1111   }
1112 
1113   // If this is a reference to a subset of the elements of a vector, either
1114   // shuffle the input or extract/insert them as appropriate.
1115   if (LV.isExtVectorElt())
1116     return EmitLoadOfExtVectorElementLValue(LV);
1117 
1118   assert(LV.isBitField() && "Unknown LValue type!");
1119   return EmitLoadOfBitfieldLValue(LV);
1120 }
1121 
1122 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
1123   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1124 
1125   // Get the output type.
1126   llvm::Type *ResLTy = ConvertType(LV.getType());
1127   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
1128 
1129   // Compute the result as an OR of all of the individual component accesses.
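  // Each access is loaded, shifted down past the bits below the field, masked
  // to the bits it contributes, widened or truncated to the result width,
  // shifted into its target position, and OR'd into the running result.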
1130   llvm::Value *Res = 0;
1131   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1132     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1133     CharUnits AccessAlignment = AI.AccessAlignment;
1134     if (!LV.getAlignment().isZero())
1135       AccessAlignment = std::min(AccessAlignment, LV.getAlignment());
1136 
1137     // Get the field pointer.
1138     llvm::Value *Ptr = LV.getBitFieldBaseAddr();
1139 
1140     // Only offset by the field index if used, so that incoming values are not
1141     // required to be structures.
1142     if (AI.FieldIndex)
1143       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1144 
1145     // Offset by the byte offset, if used.
1146     if (!AI.FieldByteOffset.isZero()) {
1147       Ptr = EmitCastToVoidPtr(Ptr);
1148       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1149                                        "bf.field.offs");
1150     }
1151 
1152     // Cast to the access type.
1153     llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
1154                        CGM.getContext().getTargetAddressSpace(LV.getType()));
1155     Ptr = Builder.CreateBitCast(Ptr, PTy);
1156 
1157     // Perform the load.
1158     llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
1159     Load->setAlignment(AccessAlignment.getQuantity());
1160 
1161     // Shift out unused low bits and mask out unused high bits.
1162     llvm::Value *Val = Load;
1163     if (AI.FieldBitStart)
1164       Val = Builder.CreateLShr(Load, AI.FieldBitStart);
1165     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
1166                                                             AI.TargetBitWidth),
1167                             "bf.clear");
1168 
1169     // Extend or truncate to the target size.
1170     if (AI.AccessWidth < ResSizeInBits)
1171       Val = Builder.CreateZExt(Val, ResLTy);
1172     else if (AI.AccessWidth > ResSizeInBits)
1173       Val = Builder.CreateTrunc(Val, ResLTy);
1174 
1175     // Shift into place, and OR into the result.
1176     if (AI.TargetBitOffset)
1177       Val = Builder.CreateShl(Val, AI.TargetBitOffset);
1178     Res = Res ? Builder.CreateOr(Res, Val) : Val;
1179   }
1180 
1181   // If the bit-field is signed, perform the sign-extension.
1182   //
1183   // FIXME: This can easily be folded into the load of the high bits, which
1184   // could also eliminate the mask of high bits in some situations.
1185   if (Info.isSigned()) {
1186     unsigned ExtraBits = ResSizeInBits - Info.getSize();
1187     if (ExtraBits)
1188       Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
1189                                ExtraBits, "bf.val.sext");
1190   }
1191 
1192   return RValue::get(Res);
1193 }
1194 
1195 // If this is a reference to a subset of the elements of a vector, create an
1196 // appropriate shufflevector.
1197 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1198   llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
1199                                             LV.isVolatileQualified());
1200   Load->setAlignment(LV.getAlignment().getQuantity());
1201   llvm::Value *Vec = Load;
1202 
1203   const llvm::Constant *Elts = LV.getExtVectorElts();
1204 
1205   // If the result of the expression is a non-vector type, we must be extracting
1206   // a single element.  Just codegen as an extractelement.
1207   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1208   if (!ExprVT) {
1209     unsigned InIdx = getAccessedFieldNo(0, Elts);
1210     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1211     return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1212   }
1213 
1214   // Always use shuffle vector to try to retain the original program structure
1215   unsigned NumResultElts = ExprVT->getNumElements();
1216 
1217   SmallVector<llvm::Constant*, 4> Mask;
1218   for (unsigned i = 0; i != NumResultElts; ++i)
1219     Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
1220 
1221   llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1222   Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1223                                     MaskV);
1224   return RValue::get(Vec);
}

1229 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
1231 /// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
1233   if (!Dst.isSimple()) {
1234     if (Dst.isVectorElt()) {
1235       // Read/modify/write the vector, inserting the new element.
1236       llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
1237                                                 Dst.isVolatileQualified());
1238       Load->setAlignment(Dst.getAlignment().getQuantity());
1239       llvm::Value *Vec = Load;
1240       Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
1241                                         Dst.getVectorIdx(), "vecins");
1242       llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
1243                                                    Dst.isVolatileQualified());
1244       Store->setAlignment(Dst.getAlignment().getQuantity());
1245       return;
1246     }
1247 
1248     // If this is an update of extended vector elements, insert them as
1249     // appropriate.
1250     if (Dst.isExtVectorElt())
1251       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
1252 
1253     assert(Dst.isBitField() && "Unknown LValue type");
1254     return EmitStoreThroughBitfieldLValue(Src, Dst);
1255   }
1256 
1257   // There's special magic for assigning into an ARC-qualified l-value.
1258   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
1259     switch (Lifetime) {
1260     case Qualifiers::OCL_None:
1261       llvm_unreachable("present but none");
1262 
1263     case Qualifiers::OCL_ExplicitNone:
1264       // nothing special
1265       break;
1266 
1267     case Qualifiers::OCL_Strong:
1268       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
1269       return;
1270 
1271     case Qualifiers::OCL_Weak:
1272       EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
1273       return;
1274 
1275     case Qualifiers::OCL_Autoreleasing:
1276       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
1277                                                      Src.getScalarVal()));
1278       // fall into the normal path
1279       break;
1280     }
1281   }
1282 
1283   if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
1288     return;
1289   }
1290 
1291   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object.
1293     llvm::Value *LvalueDst = Dst.getAddress();
1294     llvm::Value *src = Src.getScalarVal();
1295     if (Dst.isObjCIvar()) {
1296       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
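      // The runtime's ivar-assign entry point needs the ivar's byte offset,
      // computed here as the distance from the base object to the l-value.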
1297       llvm::Type *ResultType = ConvertType(getContext().LongTy);
1298       llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
1299       llvm::Value *dst = RHS;
1300       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1301       llvm::Value *LHS =
1302         Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
1303       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1304       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1305                                               BytesBetween);
1306     } else if (Dst.isGlobalObjCRef()) {
1307       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1308                                                 Dst.isThreadLocalRef());
    } else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
1312     return;
1313   }
1314 
1315   assert(Src.isScalar() && "Can't emit an agg store with this method");
1316   EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
1317 }
1318 
1319 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1320                                                      llvm::Value **Result) {
1321   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1322 
1323   // Get the output type.
1324   llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1325   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
1326 
1327   // Get the source value, truncated to the width of the bit-field.
1328   llvm::Value *SrcVal = Src.getScalarVal();
1329 
1330   if (hasBooleanRepresentation(Dst.getType()))
1331     SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
1332 
1333   SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
1334                                                                 Info.getSize()),
1335                              "bf.value");
1336 
1337   // Return the new value of the bit-field, if requested.
1338   if (Result) {
1339     // Cast back to the proper type for result.
1340     llvm::Type *SrcTy = Src.getScalarVal()->getType();
1341     llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
1342                                                    "bf.reload.val");
1343 
1344     // Sign extend if necessary.
1345     if (Info.isSigned()) {
1346       unsigned ExtraBits = ResSizeInBits - Info.getSize();
1347       if (ExtraBits)
1348         ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
1349                                        ExtraBits, "bf.reload.sext");
1350     }
1351 
1352     *Result = ReloadVal;
1353   }
1354 
1355   // Iterate over the components, writing each piece to memory.
1356   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1357     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1358     CharUnits AccessAlignment = AI.AccessAlignment;
1359     if (!Dst.getAlignment().isZero())
1360       AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());
1361 
1362     // Get the field pointer.
1363     llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
1364     unsigned addressSpace =
1365       cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
1366 
1367     // Only offset by the field index if used, so that incoming values are not
1368     // required to be structures.
1369     if (AI.FieldIndex)
1370       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1371 
1372     // Offset by the byte offset, if used.
1373     if (!AI.FieldByteOffset.isZero()) {
1374       Ptr = EmitCastToVoidPtr(Ptr);
1375       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1376                                        "bf.field.offs");
1377     }
1378 
1379     // Cast to the access type.
1380     llvm::Type *AccessLTy =
1381       llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
1382 
1383     llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
1384     Ptr = Builder.CreateBitCast(Ptr, PTy);
1385 
    // Extract the piece of the bit-field value to write in this access,
    // limited to the bits that are part of this access.
1388     llvm::Value *Val = SrcVal;
1389     if (AI.TargetBitOffset)
1390       Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
1391     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
1392                                                             AI.TargetBitWidth));
1393 
1394     // Extend or truncate to the access size.
1395     if (ResSizeInBits < AI.AccessWidth)
1396       Val = Builder.CreateZExt(Val, AccessLTy);
1397     else if (ResSizeInBits > AI.AccessWidth)
1398       Val = Builder.CreateTrunc(Val, AccessLTy);
1399 
1400     // Shift into the position in memory.
1401     if (AI.FieldBitStart)
1402       Val = Builder.CreateShl(Val, AI.FieldBitStart);
1403 
1404     // If necessary, load and OR in bits that are outside of the bit-field.
1405     if (AI.TargetBitWidth != AI.AccessWidth) {
1406       llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
1407       Load->setAlignment(AccessAlignment.getQuantity());
1408 
1409       // Compute the mask for zeroing the bits that are part of the bit-field.
1410       llvm::APInt InvMask =
1411         ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
1412                                  AI.FieldBitStart + AI.TargetBitWidth);
1413 
      // Apply the mask and OR the loaded bits into the value to write.
1415       Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
1416     }
1417 
1418     // Write the value.
1419     llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
1420                                                  Dst.isVolatileQualified());
1421     Store->setAlignment(AccessAlignment.getQuantity());
1422   }
1423 }
1424 
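/// EmitStoreThroughExtVectorComponentLValue - Store through an ext-vector
/// component l-value as a read/modify/write of the underlying vector.
///
/// For example, with an OpenCL-style 'float4 v' and 'float2 w', the
/// assignment 'v.yx = w;' loads v, shuffles w's elements into lanes 1 and 0,
/// and stores the whole vector back.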
1425 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1426                                                                LValue Dst) {
1427   // This access turns into a read/modify/write of the vector.  Load the input
1428   // value now.
1429   llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
1430                                             Dst.isVolatileQualified());
1431   Load->setAlignment(Dst.getAlignment().getQuantity());
1432   llvm::Value *Vec = Load;
1433   const llvm::Constant *Elts = Dst.getExtVectorElts();
1434 
1435   llvm::Value *SrcVal = Src.getScalarVal();
1436 
1437   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1438     unsigned NumSrcElts = VTy->getNumElements();
1439     unsigned NumDstElts =
1440        cast<llvm::VectorType>(Vec->getType())->getNumElements();
1441     if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector when the src and destination have the same
      // number of elements, remapping the source elements through the
      // accessor mask so each lands in the slot where it will be stored.
1445       SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1446       for (unsigned i = 0; i != NumSrcElts; ++i)
1447         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1448 
1449       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1450       Vec = Builder.CreateShuffleVector(SrcVal,
1451                                         llvm::UndefValue::get(Vec->getType()),
1452                                         MaskV);
1453     } else if (NumDstElts > NumSrcElts) {
1454       // Extended the source vector to the same length and then shuffle it
1455       // into the destination.
1456       // FIXME: since we're shuffling with undef, can we just use the indices
1457       //        into that?  This could be simpler.
1458       SmallVector<llvm::Constant*, 4> ExtMask;
1459       for (unsigned i = 0; i != NumSrcElts; ++i)
1460         ExtMask.push_back(Builder.getInt32(i));
1461       ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1462       llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1463       llvm::Value *ExtSrcVal =
1464         Builder.CreateShuffleVector(SrcVal,
1465                                     llvm::UndefValue::get(SrcVal->getType()),
1466                                     ExtMaskV);
      // Build the identity shuffle mask.
1468       SmallVector<llvm::Constant*, 4> Mask;
1469       for (unsigned i = 0; i != NumDstElts; ++i)
1470         Mask.push_back(Builder.getInt32(i));
1471 
      // Overwrite the mask entries for the elements that get shuffled in.
1473       for (unsigned i = 0; i != NumSrcElts; ++i)
1474         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1475       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1476       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1477     } else {
1478       // We should never shorten the vector
1479       llvm_unreachable("unexpected shorten vector length");
1480     }
1481   } else {
1482     // If the Src is a scalar (not a vector) it must be updating one element.
1483     unsigned InIdx = getAccessedFieldNo(0, Elts);
1484     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1485     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1486   }
1487 
1488   llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
1489                                                Dst.isVolatileQualified());
1490   Store->setAlignment(Dst.getAlignment().getQuantity());
1491 }
1492 
// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
// selecting the write-barrier API. The class is currently a global, an
// ivar, or neither.
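//
// For example, under -fobjc-gc, assigning through a global ('static id G;
// ... G = obj;') should take the global write-barrier path, while assigning
// to an instance variable ('self->ivar = obj;') should take the ivar path.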
1496 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1497                                  LValue &LV,
1498                                  bool IsMemberAccess=false) {
1499   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1500     return;
1501 
1502   if (isa<ObjCIvarRefExpr>(E)) {
1503     QualType ExpTy = E->getType();
1504     if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of that
      // struct follows gcc's behavior and conservatively uses a non-ivar
      // write-barrier.
1508       ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1509       if (ExpTy->isRecordType()) {
1510         LV.setObjCIvar(false);
1511         return;
1512       }
1513     }
1514     LV.setObjCIvar(true);
1515     ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1516     LV.setBaseIvarExp(Exp->getBase());
1517     LV.setObjCArray(E->getType()->isArrayType());
1518     return;
1519   }
1520 
1521   if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1522     if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1523       if (VD->hasGlobalStorage()) {
1524         LV.setGlobalObjCRef(true);
1525         LV.setThreadLocalRef(VD->isThreadSpecified());
1526       }
1527     }
1528     LV.setObjCArray(E->getType()->isArrayType());
1529     return;
1530   }
1531 
1532   if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1533     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1534     return;
1535   }
1536 
1537   if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1538     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1539     if (LV.isObjCIvar()) {
      // If the expression is of structure pointer type, follow gcc's
      // behavior and make it a non-ivar write-barrier.
1542       QualType ExpTy = E->getType();
1543       if (ExpTy->isPointerType())
1544         ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1545       if (ExpTy->isRecordType())
1546         LV.setObjCIvar(false);
1547     }
1548     return;
1549   }
1550 
1551   if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1552     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1553     return;
1554   }
1555 
1556   if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1557     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1558     return;
1559   }
1560 
1561   if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1562     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1563     return;
1564   }
1565 
1566   if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1567     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1568     return;
1569   }
1570 
1571   if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1572     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1573     if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1576       LV.setObjCIvar(false);
1577     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
1580       LV.setGlobalObjCRef(false);
1581     return;
1582   }
1583 
1584   if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1585     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know whether the member is an ivar, but this flag is
    // consulted only in the context of LV.isObjCIvar().
1588     LV.setObjCArray(E->getType()->isArrayType());
1589     return;
1590   }
1591 }
1592 
1593 static llvm::Value *
1594 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1595                                 llvm::Value *V, llvm::Type *IRType,
1596                                 StringRef Name = StringRef()) {
1597   unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1598   return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1599 }
1600 
1601 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1602                                       const Expr *E, const VarDecl *VD) {
1603   assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1604          "Var decl must have external storage or be a file var decl!");
1605 
1606   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1607   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1608   V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1609   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1610   QualType T = E->getType();
1611   LValue LV;
1612   if (VD->getType()->isReferenceType()) {
1613     llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1614     LI->setAlignment(Alignment.getQuantity());
1615     V = LI;
1616     LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1617   } else {
1618     LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1619   }
1620   setObjCGCLValueClass(CGF.getContext(), E, LV);
1621   return LV;
1622 }
1623 
1624 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1625                                      const Expr *E, const FunctionDecl *FD) {
1626   llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1627   if (!FD->hasPrototype()) {
1628     if (const FunctionProtoType *Proto =
1629             FD->getType()->getAs<FunctionProtoType>()) {
1630       // Ugly case: for a K&R-style definition, the type of the definition
1631       // isn't the same as the type of a use.  Correct for this with a
1632       // bitcast.
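      //
      // Schematically:
      //   int f();                // a use is typed with no prototype: int ()
      //   int f(x) int x; { ... } // the definition's type is int (int)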
1633       QualType NoProtoType =
1634           CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1635       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1636       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
1637     }
1638   }
1639   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
1640   return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1641 }
1642 
1643 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1644   const NamedDecl *ND = E->getDecl();
1645   CharUnits Alignment = getContext().getDeclAlign(ND);
1646   QualType T = E->getType();
1647 
1648   // FIXME: We should be able to assert this for FunctionDecls as well!
1649   // FIXME: We should be able to assert this for all DeclRefExprs, not just
1650   // those with a valid source location.
1651   assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
1652           !E->getLocation().isValid()) &&
1653          "Should not use decl without marking it used!");
1654 
1655   if (ND->hasAttr<WeakRefAttr>()) {
1656     const ValueDecl *VD = cast<ValueDecl>(ND);
1657     llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1658     return MakeAddrLValue(Aliasee, E->getType(), Alignment);
1659   }
1660 
1661   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1662     // Check if this is a global variable.
1663     if (VD->hasExternalStorage() || VD->isFileVarDecl())
1664       return EmitGlobalVarDeclLValue(*this, E, VD);
1665 
1666     bool isBlockVariable = VD->hasAttr<BlocksAttr>();
1667 
1668     bool NonGCable = VD->hasLocalStorage() &&
1669                      !VD->getType()->isReferenceType() &&
1670                      !isBlockVariable;
1671 
1672     llvm::Value *V = LocalDeclMap[VD];
1673     if (!V && VD->isStaticLocal())
1674       V = CGM.getStaticLocalDeclAddress(VD);
1675 
1676     // Use special handling for lambdas.
1677     if (!V) {
1678       if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
1679         QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
1680         LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
1681                                                      LambdaTagType);
1682         return EmitLValueForField(LambdaLV, FD);
1683       }
1684 
1685       assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
1686       CharUnits alignment = getContext().getDeclAlign(VD);
1687       return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
1688                             E->getType(), alignment);
1689     }
1690 
1691     assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1692 
1693     if (isBlockVariable)
1694       V = BuildBlockByrefAddress(V, VD);
1695 
1696     LValue LV;
1697     if (VD->getType()->isReferenceType()) {
1698       llvm::LoadInst *LI = Builder.CreateLoad(V);
1699       LI->setAlignment(Alignment.getQuantity());
1700       V = LI;
1701       LV = MakeNaturalAlignAddrLValue(V, T);
1702     } else {
1703       LV = MakeAddrLValue(V, T, Alignment);
1704     }
1705 
1706     if (NonGCable) {
1707       LV.getQuals().removeObjCGCAttr();
1708       LV.setNonGC(true);
1709     }
1710     setObjCGCLValueClass(getContext(), E, LV);
1711     return LV;
1712   }
1713 
1714   if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1715     return EmitFunctionDeclLValue(*this, E, fn);
1716 
1717   llvm_unreachable("Unhandled DeclRefExpr");
1718 }
1719 
1720 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1721   // __extension__ doesn't affect lvalue-ness.
1722   if (E->getOpcode() == UO_Extension)
1723     return EmitLValue(E->getSubExpr());
1724 
1725   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1726   switch (E->getOpcode()) {
1727   default: llvm_unreachable("Unknown unary operator lvalue!");
1728   case UO_Deref: {
1729     QualType T = E->getSubExpr()->getType()->getPointeeType();
1730     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1731 
1732     LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1733     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1734 
1735     // We should not generate __weak write barrier on indirect reference
1736     // of a pointer to object; as in void foo (__weak id *param); *param = 0;
1737     // But, we continue to generate __strong write barrier on indirect write
1738     // into a pointer to object.
1739     if (getContext().getLangOpts().ObjC1 &&
1740         getContext().getLangOpts().getGC() != LangOptions::NonGC &&
1741         LV.isObjCWeak())
1742       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1743     return LV;
1744   }
1745   case UO_Real:
1746   case UO_Imag: {
1747     LValue LV = EmitLValue(E->getSubExpr());
1748     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1749     llvm::Value *Addr = LV.getAddress();
1750 
    // __real is valid on scalars; checking that the pointee type is not a
    // struct is a faster way of testing that.  __imag can only produce an
    // rvalue on scalars.
1753     if (E->getOpcode() == UO_Real &&
1754         !cast<llvm::PointerType>(Addr->getType())
1755            ->getElementType()->isStructTy()) {
1756       assert(E->getSubExpr()->getType()->isArithmeticType());
1757       return LV;
1758     }
1759 
1760     assert(E->getSubExpr()->getType()->isAnyComplexType());
1761 
1762     unsigned Idx = E->getOpcode() == UO_Imag;
1763     return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1764                                                   Idx, "idx"),
1765                           ExprTy);
1766   }
1767   case UO_PreInc:
1768   case UO_PreDec: {
1769     LValue LV = EmitLValue(E->getSubExpr());
1770     bool isInc = E->getOpcode() == UO_PreInc;
1771 
1772     if (E->getType()->isAnyComplexType())
1773       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1774     else
1775       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1776     return LV;
1777   }
1778   }
1779 }
1780 
1781 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1782   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1783                         E->getType());
1784 }
1785 
1786 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1787   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1788                         E->getType());
1789 }
1790 
1791 static llvm::Constant*
1792 GetAddrOfConstantWideString(StringRef Str,
1793                             const char *GlobalName,
1794                             ASTContext &Context,
1795                             QualType Ty, SourceLocation Loc,
1796                             CodeGenModule &CGM) {
1797 
1798   StringLiteral *SL = StringLiteral::Create(Context,
1799                                             Str,
1800                                             StringLiteral::Wide,
1801                                             /*Pascal = */false,
1802                                             Ty, Loc);
1803   llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
1804   llvm::GlobalVariable *GV =
1805     new llvm::GlobalVariable(CGM.getModule(), C->getType(),
1806                              !CGM.getLangOpts().WritableStrings,
1807                              llvm::GlobalValue::PrivateLinkage,
1808                              C, GlobalName);
1809   const unsigned WideAlignment =
1810     Context.getTypeAlignInChars(Ty).getQuantity();
1811   GV->setAlignment(WideAlignment);
1812   return GV;
1813 }
1814 
1815 static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
1816                                     SmallString<32>& Target) {
1817   Target.resize(CharByteWidth * (Source.size() + 1));
1818   char* ResultPtr = &Target[0];
1819   bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr);
1820   (void)success;
1821   assert(success);
1822   Target.resize(ResultPtr - &Target[0]);
1823 }
1824 
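/// EmitPredefinedLValue - Emit an l-value for a predefined identifier.
/// For example, inside 'void f(void)', '__func__' lowers to a constant
/// string global named "__func__.f" holding "f"; the wide L__FUNCTION__
/// form is first converted via ConvertUTF8ToWideString above.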
1825 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1826   switch (E->getIdentType()) {
1827   default:
1828     return EmitUnsupportedLValue(E, "predefined expression");
1829 
1830   case PredefinedExpr::Func:
1831   case PredefinedExpr::Function:
1832   case PredefinedExpr::LFunction:
1833   case PredefinedExpr::PrettyFunction: {
1834     unsigned IdentType = E->getIdentType();
1835     std::string GlobalVarName;
1836 
1837     switch (IdentType) {
1838     default: llvm_unreachable("Invalid type");
1839     case PredefinedExpr::Func:
1840       GlobalVarName = "__func__.";
1841       break;
1842     case PredefinedExpr::Function:
1843       GlobalVarName = "__FUNCTION__.";
1844       break;
1845     case PredefinedExpr::LFunction:
1846       GlobalVarName = "L__FUNCTION__.";
1847       break;
1848     case PredefinedExpr::PrettyFunction:
1849       GlobalVarName = "__PRETTY_FUNCTION__.";
1850       break;
1851     }
1852 
1853     StringRef FnName = CurFn->getName();
1854     if (FnName.startswith("\01"))
1855       FnName = FnName.substr(1);
1856     GlobalVarName += FnName;
1857 
1858     const Decl *CurDecl = CurCodeDecl;
1859     if (CurDecl == 0)
1860       CurDecl = getContext().getTranslationUnitDecl();
1861 
1862     std::string FunctionName =
1863         (isa<BlockDecl>(CurDecl)
1864          ? FnName.str()
1865          : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
1866                                        CurDecl));
1867 
1868     const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
1869     llvm::Constant *C;
1870     if (ElemType->isWideCharType()) {
1871       SmallString<32> RawChars;
1872       ConvertUTF8ToWideString(
1873           getContext().getTypeSizeInChars(ElemType).getQuantity(),
1874           FunctionName, RawChars);
1875       C = GetAddrOfConstantWideString(RawChars,
1876                                       GlobalVarName.c_str(),
1877                                       getContext(),
1878                                       E->getType(),
1879                                       E->getLocation(),
1880                                       CGM);
1881     } else {
1882       C = CGM.GetAddrOfConstantCString(FunctionName,
1883                                        GlobalVarName.c_str(),
1884                                        1);
1885     }
1886     return MakeAddrLValue(C, E->getType());
1887   }
1888   }
1889 }
1890 
1891 llvm::BasicBlock *CodeGenFunction::getTrapBB() {
1892   const CodeGenOptions &GCO = CGM.getCodeGenOpts();
1893 
  // If we are not optimizing, don't collapse all calls to trap in the
  // function into the same call; that way, in the debugger, users can see
  // which operation actually failed. If we are optimizing, collapse all
  // calls to trap down to just one per function to save on code size.
1898   if (GCO.OptimizationLevel && TrapBB)
1899     return TrapBB;
1900 
1901   llvm::BasicBlock *Cont = 0;
1902   if (HaveInsertPoint()) {
1903     Cont = createBasicBlock("cont");
1904     EmitBranch(Cont);
1905   }
1906   TrapBB = createBasicBlock("trap");
1907   EmitBlock(TrapBB);
1908 
1909   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
1910   llvm::CallInst *TrapCall = Builder.CreateCall(F);
1911   TrapCall->setDoesNotReturn();
1912   TrapCall->setDoesNotThrow();
1913   Builder.CreateUnreachable();
1914 
1915   if (Cont)
1916     EmitBlock(Cont);
1917   return TrapBB;
1918 }
1919 
1920 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
1921 /// array to pointer, return the array subexpression.
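///
/// For example, in 'int A[10]; ... A[i] = 0;', the base of the subscript is
/// an ArrayToPointerDecay of the constant-size array 'A', so 'A' is returned.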
1922 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
1923   // If this isn't just an array->pointer decay, bail out.
1924   const CastExpr *CE = dyn_cast<CastExpr>(E);
1925   if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
1926     return 0;
1927 
1928   // If this is a decay from variable width array, bail out.
1929   const Expr *SubExpr = CE->getSubExpr();
1930   if (SubExpr->getType()->isVariableArrayType())
1931     return 0;
1932 
1933   return SubExpr;
1934 }
1935 
1936 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1937   // The index must always be an integer, which is not an aggregate.  Emit it.
1938   llvm::Value *Idx = EmitScalarExpr(E->getIdx());
1939   QualType IdxTy  = E->getIdx()->getType();
1940   bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
1941 
1942   // If the base is a vector type, then we are forming a vector element lvalue
1943   // with this subscript.
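  // For example, with an ext_vector_type(4) float vector 'v', the expression
  // 'v[2]' forms a vector-element l-value rather than a plain memory address.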
1944   if (E->getBase()->getType()->isVectorType()) {
1945     // Emit the vector as an lvalue to get its address.
1946     LValue LHS = EmitLValue(E->getBase());
1947     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
1948     Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
1949     return LValue::MakeVectorElt(LHS.getAddress(), Idx,
1950                                  E->getBase()->getType(), LHS.getAlignment());
1951   }
1952 
  // Extend or truncate the index to the pointer width (32 or 64 bits).
1954   if (Idx->getType() != IntPtrTy)
1955     Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
1956 
1957   // We know that the pointer points to a type of the correct size, unless the
1958   // size is a VLA or Objective-C interface.
1959   llvm::Value *Address = 0;
1960   CharUnits ArrayAlignment;
1961   if (const VariableArrayType *vla =
1962         getContext().getAsVariableArrayType(E->getType())) {
1963     // The base must be a pointer, which is not an aggregate.  Emit
1964     // it.  It needs to be emitted first in case it's what captures
1965     // the VLA bounds.
1966     Address = EmitScalarExpr(E->getBase());
1967 
1968     // The element count here is the total number of non-VLA elements.
1969     llvm::Value *numElements = getVLASize(vla).first;
1970 
1971     // Effectively, the multiply by the VLA size is part of the GEP.
1972     // GEP indexes are signed, and scaling an index isn't permitted to
1973     // signed-overflow, so we use the same semantics for our explicit
1974     // multiply.  We suppress this if overflow is not undefined behavior.
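    // For example, in 'int a[m][n]; ... a[i][j] = 0;', the subexpression
    // 'a[i]' has the VLA type 'int [n]', so 'i' is scaled by 'n' before a
    // single GEP on the decayed pointer.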
1975     if (getLangOpts().isSignedOverflowDefined()) {
1976       Idx = Builder.CreateMul(Idx, numElements);
1977       Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1978     } else {
1979       Idx = Builder.CreateNSWMul(Idx, numElements);
1980       Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
1981     }
  } else if (const ObjCObjectType *OIT =
               E->getType()->getAs<ObjCObjectType>()) {
1983     // Indexing over an interface, as in "NSString *P; P[4];"
1984     llvm::Value *InterfaceSize =
1985       llvm::ConstantInt::get(Idx->getType(),
1986           getContext().getTypeSizeInChars(OIT).getQuantity());
1987 
1988     Idx = Builder.CreateMul(Idx, InterfaceSize);
1989 
1990     // The base must be a pointer, which is not an aggregate.  Emit it.
1991     llvm::Value *Base = EmitScalarExpr(E->getBase());
1992     Address = EmitCastToVoidPtr(Base);
1993     Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1994     Address = Builder.CreateBitCast(Address, Base->getType());
1995   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
1996     // If this is A[i] where A is an array, the frontend will have decayed the
1997     // base to be a ArrayToPointerDecay implicit cast.  While correct, it is
1998     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
1999     // "gep x, i" here.  Emit one "gep A, 0, i".
2000     assert(Array->getType()->isArrayType() &&
2001            "Array to pointer decay must have array source type!");
2002     LValue ArrayLV = EmitLValue(Array);
2003     llvm::Value *ArrayPtr = ArrayLV.getAddress();
2004     llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2005     llvm::Value *Args[] = { Zero, Idx };
2006 
2007     // Propagate the alignment from the array itself to the result.
2008     ArrayAlignment = ArrayLV.getAlignment();
2009 
2010     if (getContext().getLangOpts().isSignedOverflowDefined())
2011       Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
2012     else
2013       Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
2014   } else {
2015     // The base must be a pointer, which is not an aggregate.  Emit it.
2016     llvm::Value *Base = EmitScalarExpr(E->getBase());
2017     if (getContext().getLangOpts().isSignedOverflowDefined())
2018       Address = Builder.CreateGEP(Base, Idx, "arrayidx");
2019     else
2020       Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
2021   }
2022 
2023   QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

2028   // Limit the alignment to that of the result type.
2029   LValue LV;
2030   if (!ArrayAlignment.isZero()) {
2031     CharUnits Align = getContext().getTypeAlignInChars(T);
2032     ArrayAlignment = std::min(Align, ArrayAlignment);
2033     LV = MakeAddrLValue(Address, T, ArrayAlignment);
2034   } else {
2035     LV = MakeNaturalAlignAddrLValue(Address, T);
2036   }
2037 
2038   LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
2039 
2040   if (getContext().getLangOpts().ObjC1 &&
2041       getContext().getLangOpts().getGC() != LangOptions::NonGC) {
2042     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2043     setObjCGCLValueClass(getContext(), E, LV);
2044   }
2045   return LV;
2046 }
2047 
2048 static
2049 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
2050                                        SmallVector<unsigned, 4> &Elts) {
2051   SmallVector<llvm::Constant*, 4> CElts;
2052   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
2053     CElts.push_back(Builder.getInt32(Elts[i]));
2054 
2055   return llvm::ConstantVector::get(CElts);
2056 }
2057 
2058 LValue CodeGenFunction::
2059 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
2060   // Emit the base vector as an l-value.
2061   LValue Base;
2062 
2063   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
2064   if (E->isArrow()) {
2065     // If it is a pointer to a vector, emit the address and form an lvalue with
2066     // it.
2067     llvm::Value *Ptr = EmitScalarExpr(E->getBase());
2068     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
2069     Base = MakeAddrLValue(Ptr, PT->getPointeeType());
2070     Base.getQuals().removeObjCGCAttr();
2071   } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
2074     assert(E->getBase()->getType()->isVectorType());
2075     Base = EmitLValue(E->getBase());
2076   } else {
2077     // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
2078     assert(E->getBase()->getType()->isVectorType() &&
2079            "Result must be a vector");
2080     llvm::Value *Vec = EmitScalarExpr(E->getBase());
2081 
2082     // Store the vector to memory (because LValue wants an address).
2083     llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
2084     Builder.CreateStore(Vec, VecMem);
2085     Base = MakeAddrLValue(VecMem, E->getBase()->getType());
2086   }
2087 
2088   QualType type =
2089     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2090 
2091   // Encode the element access list into a vector of unsigned indices.
2092   SmallVector<unsigned, 4> Indices;
2093   E->getEncodedElementAccess(Indices);
2094 
2095   if (Base.isSimple()) {
2096     llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
2097     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
2098                                     Base.getAlignment());
2099   }
2100   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
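  // Compose the two accessor lists. For example, for 'v.zyxw.yx' the inner
  // accessor list is {2,1,0,3} and the outer indices are {1,0}, which
  // compose to the element list {1,2} of the original vector 'v'.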
2101 
2102   llvm::Constant *BaseElts = Base.getExtVectorElts();
2103   SmallVector<llvm::Constant *, 4> CElts;
2104 
2105   for (unsigned i = 0, e = Indices.size(); i != e; ++i)
2106     CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
2107   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2108   return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
2109                                   Base.getAlignment());
2110 }
2111 
2112 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
2113   Expr *BaseExpr = E->getBase();
2114 
2115   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
2116   LValue BaseLV;
2117   if (E->isArrow())
2118     BaseLV = MakeNaturalAlignAddrLValue(EmitScalarExpr(BaseExpr),
2119                                         BaseExpr->getType()->getPointeeType());
2120   else
2121     BaseLV = EmitLValue(BaseExpr);
2122 
2123   NamedDecl *ND = E->getMemberDecl();
2124   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
2125     LValue LV = EmitLValueForField(BaseLV, Field);
2126     setObjCGCLValueClass(getContext(), E, LV);
2127     return LV;
2128   }
2129 
2130   if (VarDecl *VD = dyn_cast<VarDecl>(ND))
2131     return EmitGlobalVarDeclLValue(*this, E, VD);
2132 
2133   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
2134     return EmitFunctionDeclLValue(*this, E, FD);
2135 
2136   llvm_unreachable("Unhandled member declaration!");
2137 }
2138 
2139 LValue CodeGenFunction::EmitLValueForField(LValue base,
2140                                            const FieldDecl *field) {
2141   if (field->isBitField()) {
2142     const CGRecordLayout &RL =
2143       CGM.getTypes().getCGRecordLayout(field->getParent());
2144     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
2145     QualType fieldType =
2146       field->getType().withCVRQualifiers(base.getVRQualifiers());
2147     return LValue::MakeBitfield(base.getAddress(), Info, fieldType,
2148                                 base.getAlignment());
2149   }
2150 
2151   const RecordDecl *rec = field->getParent();
2152   QualType type = field->getType();
2153   CharUnits alignment = getContext().getDeclAlign(field);
2154 
2155   // FIXME: It should be impossible to have an LValue without alignment for a
2156   // complete type.
2157   if (!base.getAlignment().isZero())
2158     alignment = std::min(alignment, base.getAlignment());
2159 
2160   bool mayAlias = rec->hasAttr<MayAliasAttr>();
2161 
2162   llvm::Value *addr = base.getAddress();
2163   unsigned cvr = base.getVRQualifiers();
2164   if (rec->isUnion()) {
2165     // For unions, there is no pointer adjustment.
2166     assert(!type->isReferenceType() && "union has reference member");
2167   } else {
2168     // For structs, we GEP to the field that the record layout suggests.
2169     unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
2170     addr = Builder.CreateStructGEP(addr, idx, field->getName());
2171 
2172     // If this is a reference field, load the reference right now.
2173     if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
2174       llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
2175       if (cvr & Qualifiers::Volatile) load->setVolatile(true);
2176       load->setAlignment(alignment.getQuantity());
2177 
2178       if (CGM.shouldUseTBAA()) {
2179         llvm::MDNode *tbaa;
2180         if (mayAlias)
2181           tbaa = CGM.getTBAAInfo(getContext().CharTy);
2182         else
2183           tbaa = CGM.getTBAAInfo(type);
2184         CGM.DecorateInstruction(load, tbaa);
2185       }
2186 
2187       addr = load;
2188       mayAlias = false;
2189       type = refType->getPointeeType();
2190       if (type->isIncompleteType())
2191         alignment = CharUnits();
2192       else
2193         alignment = getContext().getTypeAlignInChars(type);
2194       cvr = 0; // qualifiers don't recursively apply to referencee
2195     }
2196   }
2197 
  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs: a union needs a bitcast, and a
  // struct element needs a bitcast if the laid-out LLVM type doesn't match
  // the desired type.
2202   addr = EmitBitCastOfLValueToProperType(*this, addr,
2203                                          CGM.getTypes().ConvertTypeForMem(type),
2204                                          field->getName());
2205 
2206   if (field->hasAttr<AnnotateAttr>())
2207     addr = EmitFieldAnnotations(field, addr);
2208 
2209   LValue LV = MakeAddrLValue(addr, type, alignment);
2210   LV.getQuals().addCVRQualifiers(cvr);
2211 
2212   // __weak attribute on a field is ignored.
2213   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
2214     LV.getQuals().removeObjCGCAttr();
2215 
2216   // Fields of may_alias structs act like 'char' for TBAA purposes.
2217   // FIXME: this should get propagated down through anonymous structs
2218   // and unions.
2219   if (mayAlias && LV.getTBAAInfo())
2220     LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
2221 
2222   return LV;
2223 }
2224 
2225 LValue
2226 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
2227                                                   const FieldDecl *Field) {
2228   QualType FieldType = Field->getType();
2229 
2230   if (!FieldType->isReferenceType())
2231     return EmitLValueForField(Base, Field);
2232 
2233   const CGRecordLayout &RL =
2234     CGM.getTypes().getCGRecordLayout(Field->getParent());
2235   unsigned idx = RL.getLLVMFieldNo(Field);
2236   llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
2237   assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
2238 
  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs: a union needs a bitcast, and a
  // struct element needs a bitcast if the laid-out LLVM type doesn't match
  // the desired type.
2243   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
2244   V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
2245 
2246   CharUnits Alignment = getContext().getDeclAlign(Field);
2247 
2248   // FIXME: It should be impossible to have an LValue without alignment for a
2249   // complete type.
2250   if (!Base.getAlignment().isZero())
2251     Alignment = std::min(Alignment, Base.getAlignment());
2252 
2253   return MakeAddrLValue(V, FieldType, Alignment);
2254 }
2255 
2256 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
2257   if (E->isFileScope()) {
2258     llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
2259     return MakeAddrLValue(GlobalPtr, E->getType());
2260   }
2261   if (E->getType()->isVariablyModifiedType())
2262     // make sure to emit the VLA size.
2263     EmitVariablyModifiedType(E->getType());
2264 
2265   llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
2266   const Expr *InitExpr = E->getInitializer();
2267   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
2268 
2269   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
2270                    /*Init*/ true);
2271 
2272   return Result;
2273 }
2274 
2275 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
2276   if (!E->isGLValue())
2277     // Initializing an aggregate temporary in C++11: T{...}.
2278     return EmitAggExprToLValue(E);
2279 
2280   // An lvalue initializer list must be initializing a reference.
2281   assert(E->getNumInits() == 1 && "reference init with multiple values");
2282   return EmitLValue(E->getInit(0));
2283 }
2284 
2285 LValue CodeGenFunction::
2286 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
2287   if (!expr->isGLValue()) {
2288     // ?: here should be an aggregate.
2289     assert((hasAggregateLLVMType(expr->getType()) &&
2290             !expr->getType()->isAnyComplexType()) &&
2291            "Unexpected conditional operator!");
2292     return EmitAggExprToLValue(expr);
2293   }
2294 
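  // A glvalue conditional yields the address of whichever arm is chosen: for
  // example, '(b ? x : y) = 0;' stores through the phi of the two branch
  // addresses built at the bottom of this function.
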
2295   OpaqueValueMapping binding(*this, expr);
2296 
2297   const Expr *condExpr = expr->getCond();
2298   bool CondExprBool;
2299   if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
2300     const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
2301     if (!CondExprBool) std::swap(live, dead);
2302 
2303     if (!ContainsLabel(dead))
2304       return EmitLValue(live);
2305   }
2306 
2307   llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
2308   llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
2309   llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
2310 
2311   ConditionalEvaluation eval(*this);
2312   EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
2313 
2314   // Any temporaries created here are conditional.
2315   EmitBlock(lhsBlock);
2316   eval.begin(*this);
2317   LValue lhs = EmitLValue(expr->getTrueExpr());
2318   eval.end(*this);
2319 
2320   if (!lhs.isSimple())
2321     return EmitUnsupportedLValue(expr, "conditional operator");
2322 
2323   lhsBlock = Builder.GetInsertBlock();
2324   Builder.CreateBr(contBlock);
2325 
2326   // Any temporaries created here are conditional.
2327   EmitBlock(rhsBlock);
2328   eval.begin(*this);
2329   LValue rhs = EmitLValue(expr->getFalseExpr());
2330   eval.end(*this);
2331   if (!rhs.isSimple())
2332     return EmitUnsupportedLValue(expr, "conditional operator");
2333   rhsBlock = Builder.GetInsertBlock();
2334 
2335   EmitBlock(contBlock);
2336 
2337   llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
2338                                          "cond-lvalue");
2339   phi->addIncoming(lhs.getAddress(), lhsBlock);
2340   phi->addIncoming(rhs.getAddress(), rhsBlock);
2341   return MakeAddrLValue(phi, expr->getType());
2342 }
2343 
2344 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
2345 /// type. If the cast is to a reference, we can have the usual lvalue result,
2346 /// otherwise if a cast is needed by the code generator in an lvalue context,
2347 /// then it must mean that we need the address of an aggregate in order to
2348 /// access one of its members.  This can happen for all the reasons that casts
2349 /// are permitted with aggregate result, including noop aggregate casts, and
2350 /// cast from scalar to union.
2351 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
2352   switch (E->getCastKind()) {
2353   case CK_ToVoid:
2354     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
2355 
2356   case CK_Dependent:
2357     llvm_unreachable("dependent cast kind in IR gen!");
2358 
2359   // These two casts are currently treated as no-ops, although they could
2360   // potentially be real operations depending on the target's ABI.
2361   case CK_NonAtomicToAtomic:
2362   case CK_AtomicToNonAtomic:
2363 
2364   case CK_NoOp:
2365   case CK_LValueToRValue:
2366     if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2367         || E->getType()->isRecordType())
2368       return EmitLValue(E->getSubExpr());
2369     // Fall through to synthesize a temporary.
2370 
2371   case CK_BitCast:
2372   case CK_ArrayToPointerDecay:
2373   case CK_FunctionToPointerDecay:
2374   case CK_NullToMemberPointer:
2375   case CK_NullToPointer:
2376   case CK_IntegralToPointer:
2377   case CK_PointerToIntegral:
2378   case CK_PointerToBoolean:
2379   case CK_VectorSplat:
2380   case CK_IntegralCast:
2381   case CK_IntegralToBoolean:
2382   case CK_IntegralToFloating:
2383   case CK_FloatingToIntegral:
2384   case CK_FloatingToBoolean:
2385   case CK_FloatingCast:
2386   case CK_FloatingRealToComplex:
2387   case CK_FloatingComplexToReal:
2388   case CK_FloatingComplexToBoolean:
2389   case CK_FloatingComplexCast:
2390   case CK_FloatingComplexToIntegralComplex:
2391   case CK_IntegralRealToComplex:
2392   case CK_IntegralComplexToReal:
2393   case CK_IntegralComplexToBoolean:
2394   case CK_IntegralComplexCast:
2395   case CK_IntegralComplexToFloatingComplex:
2396   case CK_DerivedToBaseMemberPointer:
2397   case CK_BaseToDerivedMemberPointer:
2398   case CK_MemberPointerToBoolean:
2399   case CK_ReinterpretMemberPointer:
2400   case CK_AnyPointerToBlockPointerCast:
2401   case CK_ARCProduceObject:
2402   case CK_ARCConsumeObject:
2403   case CK_ARCReclaimReturnedObject:
2404   case CK_ARCExtendBlockObject:
2405   case CK_CopyAndAutoreleaseBlockObject: {
2406     // These casts only produce lvalues when we're binding a reference to a
2407     // temporary realized from a (converted) pure rvalue. Emit the expression
2408     // as a value, copy it into a temporary, and return an lvalue referring to
2409     // that temporary.
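    // Schematically, binding a reference to a converted value, as in
    // 'const double &d = someInt;', needs the converted result in
    // addressable memory of the right type.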
2410     llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2411     EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2412     return MakeAddrLValue(V, E->getType());
2413   }
2414 
2415   case CK_Dynamic: {
2416     LValue LV = EmitLValue(E->getSubExpr());
2417     llvm::Value *V = LV.getAddress();
2418     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2419     return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2420   }
2421 
2422   case CK_ConstructorConversion:
2423   case CK_UserDefinedConversion:
2424   case CK_CPointerToObjCPointerCast:
2425   case CK_BlockPointerToObjCPointerCast:
2426     return EmitLValue(E->getSubExpr());
2427 
2428   case CK_UncheckedDerivedToBase:
2429   case CK_DerivedToBase: {
2430     const RecordType *DerivedClassTy =
2431       E->getSubExpr()->getType()->getAs<RecordType>();
2432     CXXRecordDecl *DerivedClassDecl =
2433       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2434 
2435     LValue LV = EmitLValue(E->getSubExpr());
2436     llvm::Value *This = LV.getAddress();
2437 
2438     // Perform the derived-to-base conversion
2439     llvm::Value *Base =
2440       GetAddressOfBaseClass(This, DerivedClassDecl,
2441                             E->path_begin(), E->path_end(),
2442                             /*NullCheckValue=*/false);
2443 
2444     return MakeAddrLValue(Base, E->getType());
2445   }
2446   case CK_ToUnion:
2447     return EmitAggExprToLValue(E);
2448   case CK_BaseToDerived: {
2449     const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2450     CXXRecordDecl *DerivedClassDecl =
2451       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2452 
2453     LValue LV = EmitLValue(E->getSubExpr());
2454 
2455     // Perform the base-to-derived conversion
2456     llvm::Value *Derived =
2457       GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2458                                E->path_begin(), E->path_end(),
2459                                /*NullCheckValue=*/false);
2460 
2461     return MakeAddrLValue(Derived, E->getType());
2462   }
2463   case CK_LValueBitCast: {
2464     // This must be a reinterpret_cast (or c-style equivalent).
2465     const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2466 
2467     LValue LV = EmitLValue(E->getSubExpr());
2468     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2469                                            ConvertType(CE->getTypeAsWritten()));
2470     return MakeAddrLValue(V, E->getType());
2471   }
2472   case CK_ObjCObjectLValueCast: {
2473     LValue LV = EmitLValue(E->getSubExpr());
2474     QualType ToType = getContext().getLValueReferenceType(E->getType());
2475     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2476                                            ConvertType(ToType));
2477     return MakeAddrLValue(V, E->getType());
2478   }
2479   }
2480 
2481   llvm_unreachable("Unhandled lvalue cast kind?");
2482 }
2483 
2484 LValue CodeGenFunction::EmitNullInitializationLValue(
2485                                               const CXXScalarValueInitExpr *E) {
2486   QualType Ty = E->getType();
2487   LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2488   EmitNullInitialization(LV.getAddress(), Ty);
2489   return LV;
2490 }
2491 
2492 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2493   assert(OpaqueValueMappingData::shouldBindAsLValue(e));
2494   return getOpaqueLValueMapping(e);
2495 }
2496 
2497 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2498                                            const MaterializeTemporaryExpr *E) {
2499   RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
2500   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2501 }
2502 
2503 RValue CodeGenFunction::EmitRValueForField(LValue LV,
2504                                            const FieldDecl *FD) {
2505   QualType FT = FD->getType();
2506   LValue FieldLV = EmitLValueForField(LV, FD);
2507   if (FT->isAnyComplexType())
2508     return RValue::getComplex(
2509         LoadComplexFromAddr(FieldLV.getAddress(),
2510                             FieldLV.isVolatileQualified()));
2511   else if (CodeGenFunction::hasAggregateLLVMType(FT))
2512     return FieldLV.asAggregateRValue();
2513 
2514   return EmitLoadOfLValue(FieldLV);
2515 }
2516 
2517 //===--------------------------------------------------------------------===//
2518 //                             Expression Emission
2519 //===--------------------------------------------------------------------===//
2520 
2521 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
2522                                      ReturnValueSlot ReturnValue) {
2523   if (CGDebugInfo *DI = getDebugInfo())
2524     DI->EmitLocation(Builder, E->getLocStart());
2525 
2526   // Builtins never have block type.
2527   if (E->getCallee()->getType()->isBlockPointerType())
2528     return EmitBlockCallExpr(E, ReturnValue);
2529 
2530   if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
2531     return EmitCXXMemberCallExpr(CE, ReturnValue);
2532 
2533   if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
2534     return EmitCUDAKernelCallExpr(CE, ReturnValue);
2535 
2536   const Decl *TargetDecl = E->getCalleeDecl();
2537   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2538     if (unsigned builtinID = FD->getBuiltinID())
2539       return EmitBuiltinExpr(FD, builtinID, E);
2540   }
2541 
2542   if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
2543     if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
2544       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
2545 
2546   if (const CXXPseudoDestructorExpr *PseudoDtor
2547           = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
2548     QualType DestroyedType = PseudoDtor->getDestroyedType();
2549     if (getContext().getLangOpts().ObjCAutoRefCount &&
2550         DestroyedType->isObjCLifetimeType() &&
2551         (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
2552          DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
2553       // Automatic Reference Counting:
2554       //   If the pseudo-expression names a retainable object with weak or
2555       //   strong lifetime, the object shall be released.
2556       Expr *BaseExpr = PseudoDtor->getBase();
2557       llvm::Value *BaseValue = NULL;
2558       Qualifiers BaseQuals;
2559 
2560       // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2561       if (PseudoDtor->isArrow()) {
2562         BaseValue = EmitScalarExpr(BaseExpr);
2563         const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
2564         BaseQuals = PTy->getPointeeType().getQualifiers();
2565       } else {
2566         LValue BaseLV = EmitLValue(BaseExpr);
2567         BaseValue = BaseLV.getAddress();
2568         QualType BaseTy = BaseExpr->getType();
2569         BaseQuals = BaseTy.getQualifiers();
2570       }
2571 
2572       switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
2573       case Qualifiers::OCL_None:
2574       case Qualifiers::OCL_ExplicitNone:
2575       case Qualifiers::OCL_Autoreleasing:
2576         break;
2577 
2578       case Qualifiers::OCL_Strong:
2579         EmitARCRelease(Builder.CreateLoad(BaseValue,
2580                           PseudoDtor->getDestroyedType().isVolatileQualified()),
2581                        /*precise*/ true);
2582         break;
2583 
2584       case Qualifiers::OCL_Weak:
2585         EmitARCDestroyWeak(BaseValue);
2586         break;
2587       }
2588     } else {
2589       // C++ [expr.pseudo]p1:
2590       //   The result shall only be used as the operand for the function call
2591       //   operator (), and the result of such a call has type void. The only
2592       //   effect is the evaluation of the postfix-expression before the dot or
2593       //   arrow.
2594       EmitScalarExpr(E->getCallee());
2595     }
2596 
2597     return RValue::get(0);
2598   }
2599 
2600   llvm::Value *Callee = EmitScalarExpr(E->getCallee());
2601   return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
2602                   E->arg_begin(), E->arg_end(), TargetDecl);
2603 }
2604 
2605 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
2606   // Comma expressions just emit their LHS then their RHS as an l-value.
2607   if (E->getOpcode() == BO_Comma) {
2608     EmitIgnoredExpr(E->getLHS());
2609     EnsureInsertPoint();
2610     return EmitLValue(E->getRHS());
2611   }
2612 
2613   if (E->getOpcode() == BO_PtrMemD ||
2614       E->getOpcode() == BO_PtrMemI)
2615     return EmitPointerToDataMemberBinaryExpr(E);
2616 
2617   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
2618 
2619   // Note that in all of these cases, __block variables need the RHS
2620   // evaluated first just in case the variable gets moved by the RHS.
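  // For example, in '__block int x; ... x = g(^{ return x; });', if g()
  // copies the block, the __block variable moves to the heap; an address
  // computed before the call could refer to the stale stack copy.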

  if (!hasAggregateLLVMType(E->getType())) {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitLValue(E->getLHS());
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  if (E->getType()->isAnyComplexType())
    return EmitComplexAssignmentLValue(E);

  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  EmitLambdaExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  llvm::Value *V =
    CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
  return MakeAddrLValue(V, E->getType());
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // We can only get an l-value for a statement expression that returns an
  // aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call.  The way we make this work is to cast to the exact type
  // of the promoted arguments.
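  // For example (illustrative), given "void f(); f(1, 2.0);", the promoted
  // argument types are (int, double), so the callee is bitcast to
  // "void (i32, double)*" before the call is emitted.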
  if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV =
    CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
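    // (In particular, the failure ordering requested by the source is not
    // passed down to this inline path; the single ordering is used for both
    // success and failure.)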
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidth =
      getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
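  // The inline (lock-free) path is usable only when the atomic's size equals
  // its alignment and fits within the target's maximum inline atomic width;
  // everything else goes through the __atomic_* libcalls. Note that this
  // also guarantees Size == Align on the inline paths below.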
  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                        AtomicTy.getQualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
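      // For example (illustrative), with an int* as the atomic value:
      //   __c11_atomic_fetch_add(&p, 1, order)  advances p by sizeof(int),
      //   __atomic_fetch_add(&q, 1, order)      advances q by a single byte.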
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers.  It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }

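  // Inline atomics are performed on an integer of the same width as the
  // atomic itself: bitcast the involved pointers to iN* (N = Size in bits)
  // before emitting the operation.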
  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should never get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

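  // C11 does not permit release or acq_rel orderings on loads, nor consume,
  // acquire, or acq_rel orderings on stores, so the corresponding ordering
  // blocks are created below only where they can apply.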
  // Create all the relevant basic blocks.
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Clean up and return.
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}

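/// SetFPAccuracy - Set the required accuracy of the given floating-point
/// operation by attaching !fpmath metadata; e.g. an accuracy of 2.5 attaches
/// metadata holding "float 2.5", permitting the instruction (say, an fdiv)
/// to be computed to within 2.5 ULPs.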
void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

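/// emitPseudoObjectExpr - Emit a pseudo-object expression by walking its
/// semantic subexpressions: opaque values are bound to the results of their
/// source expressions, and the designated result expression (if any) is
/// emitted as an l-value or r-value as requested. A typical client is an
/// Objective-C property access, whose getter/setter message sends appear
/// here as semantic expressions.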
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}