1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGCXXABI.h"
16 #include "CGCall.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "CGRecordLayout.h"
20 #include "CodeGenModule.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/ADT/Hashing.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/Intrinsics.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/IR/MDBuilder.h"
30 #include "llvm/Support/ConvertUTF.h"
31 
32 using namespace clang;
33 using namespace CodeGen;
34 
35 //===--------------------------------------------------------------------===//
36 //                        Miscellaneous Helper Methods
37 //===--------------------------------------------------------------------===//
38 
39 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
40   unsigned addressSpace =
41     cast<llvm::PointerType>(value->getType())->getAddressSpace();
42 
43   llvm::PointerType *destType = Int8PtrTy;
44   if (addressSpace)
45     destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
46 
47   if (value->getType() == destType) return value;
48   return Builder.CreateBitCast(value, destType);
49 }
50 
51 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
52 /// block.
53 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
54                                                     const Twine &Name) {
55   if (!Builder.isNamePreserving())
56     return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
57   return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
58 }
59 
60 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
61                                      llvm::Value *Init) {
62   llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
63   llvm::BasicBlock *Block = AllocaInsertPt->getParent();
64   Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
65 }
66 
67 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
68                                                 const Twine &Name) {
69   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
70   // FIXME: Should we prefer the preferred type alignment here?
71   CharUnits Align = getContext().getTypeAlignInChars(Ty);
72   Alloc->setAlignment(Align.getQuantity());
73   return Alloc;
74 }
75 
76 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
77                                                  const Twine &Name) {
78   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
79   // FIXME: Should we prefer the preferred type alignment here?
80   CharUnits Align = getContext().getTypeAlignInChars(Ty);
81   Alloc->setAlignment(Align.getQuantity());
82   return Alloc;
83 }
84 
85 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
86 /// expression and compare the result against zero, returning an Int1Ty value.
87 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
88   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
89     llvm::Value *MemPtr = EmitScalarExpr(E);
90     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
91   }
92 
93   QualType BoolTy = getContext().BoolTy;
94   if (!E->getType()->isAnyComplexType())
95     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
96 
97   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
98 }
99 
100 /// EmitIgnoredExpr - Emit code to compute the specified expression,
101 /// ignoring the result.
102 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
103   if (E->isRValue())
104     return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
105 
106   // Just emit it as an l-value and drop the result.
107   EmitLValue(E);
108 }
109 
110 /// EmitAnyExpr - Emit code to compute the specified expression which
111 /// can have any type.  The result is returned as an RValue struct.
112 /// If this is an aggregate expression, AggSlot indicates where the
113 /// result should be returned.
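/// For scalars the result is a single SSA value and for complex values a pair
/// of SSA values; aggregates are evaluated directly into the given slot (or
/// into a fresh temporary when the slot is ignored but the result is wanted).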
114 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
115                                     AggValueSlot aggSlot,
116                                     bool ignoreResult) {
117   switch (getEvaluationKind(E->getType())) {
118   case TEK_Scalar:
119     return RValue::get(EmitScalarExpr(E, ignoreResult));
120   case TEK_Complex:
121     return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
122   case TEK_Aggregate:
123     if (!ignoreResult && aggSlot.isIgnored())
124       aggSlot = CreateAggTemp(E->getType(), "agg-temp");
125     EmitAggExpr(E, aggSlot);
126     return aggSlot.asRValue();
127   }
128   llvm_unreachable("bad evaluation kind");
129 }
130 
131 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result will
132 /// always be accessible even if no aggregate location is provided.
133 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
134   AggValueSlot AggSlot = AggValueSlot::ignored();
135 
136   if (hasAggregateEvaluationKind(E->getType()))
137     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
138   return EmitAnyExpr(E, AggSlot);
139 }
140 
141 /// EmitAnyExprToMem - Evaluate an expression into a given memory
142 /// location.
143 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
144                                        llvm::Value *Location,
145                                        Qualifiers Quals,
146                                        bool IsInit) {
147   // FIXME: This function should take an LValue as an argument.
148   switch (getEvaluationKind(E->getType())) {
149   case TEK_Complex:
150     EmitComplexExprIntoLValue(E,
151                          MakeNaturalAlignAddrLValue(Location, E->getType()),
152                               /*isInit*/ false);
153     return;
154 
155   case TEK_Aggregate: {
156     CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
157     EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
158                                          AggValueSlot::IsDestructed_t(IsInit),
159                                          AggValueSlot::DoesNotNeedGCBarriers,
160                                          AggValueSlot::IsAliased_t(!IsInit)));
161     return;
162   }
163 
164   case TEK_Scalar: {
165     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
166     LValue LV = MakeAddrLValue(Location, E->getType());
167     EmitStoreThroughLValue(RV, LV);
168     return;
169   }
170   }
171   llvm_unreachable("bad evaluation kind");
172 }
173 
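/// Push the cleanup needed to destroy a materialized temporary, based on its
/// storage duration: a normal/EH cleanup for full-expression temporaries, a
/// lifetime-extended cleanup for automatic ones, and a registered global
/// destructor for static or thread-local ones. ARC-qualified temporaries get
/// the matching retain/release cleanups instead.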
174 static void
175 pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
176                      const Expr *E, llvm::Value *ReferenceTemporary) {
177   // Objective-C++ ARC:
178   //   If we are binding a reference to a temporary that has ownership, we
179   //   need to perform retain/release operations on the temporary.
180   //
181   // FIXME: This should be looking at E, not M.
182   if (CGF.getLangOpts().ObjCAutoRefCount &&
183       M->getType()->isObjCLifetimeType()) {
184     QualType ObjCARCReferenceLifetimeType = M->getType();
185     switch (Qualifiers::ObjCLifetime Lifetime =
186                 ObjCARCReferenceLifetimeType.getObjCLifetime()) {
187     case Qualifiers::OCL_None:
188     case Qualifiers::OCL_ExplicitNone:
189       // Carry on to normal cleanup handling.
190       break;
191 
192     case Qualifiers::OCL_Autoreleasing:
193       // Nothing to do; cleaned up by an autorelease pool.
194       return;
195 
196     case Qualifiers::OCL_Strong:
197     case Qualifiers::OCL_Weak:
198       switch (StorageDuration Duration = M->getStorageDuration()) {
199       case SD_Static:
200         // Note: we intentionally do not register a cleanup to release
201         // the object on program termination.
202         return;
203 
204       case SD_Thread:
205         // FIXME: We should probably register a cleanup in this case.
206         return;
207 
208       case SD_Automatic:
209       case SD_FullExpression:
210         assert(!ObjCARCReferenceLifetimeType->isArrayType());
211         CodeGenFunction::Destroyer *Destroy;
212         CleanupKind CleanupKind;
213         if (Lifetime == Qualifiers::OCL_Strong) {
214           const ValueDecl *VD = M->getExtendingDecl();
215           bool Precise =
216               VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
217           CleanupKind = CGF.getARCCleanupKind();
218           Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
219                             : &CodeGenFunction::destroyARCStrongImprecise;
220         } else {
221           // __weak objects always get EH cleanups; otherwise, exceptions
222           // could cause really nasty crashes instead of mere leaks.
223           CleanupKind = NormalAndEHCleanup;
224           Destroy = &CodeGenFunction::destroyARCWeak;
225         }
226         if (Duration == SD_FullExpression)
227           CGF.pushDestroy(CleanupKind, ReferenceTemporary,
228                           ObjCARCReferenceLifetimeType, *Destroy,
229                           CleanupKind & EHCleanup);
230         else
231           CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
232                                           ObjCARCReferenceLifetimeType,
233                                           *Destroy, CleanupKind & EHCleanup);
234         return;
235 
236       case SD_Dynamic:
237         llvm_unreachable("temporary cannot have dynamic storage duration");
238       }
239       llvm_unreachable("unknown storage duration");
240     }
241   }
242 
243   CXXDestructorDecl *ReferenceTemporaryDtor = 0;
244   if (const RecordType *RT =
245           E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
246     // Get the destructor for the reference temporary.
247     CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
248     if (!ClassDecl->hasTrivialDestructor())
249       ReferenceTemporaryDtor = ClassDecl->getDestructor();
250   }
251 
252   if (!ReferenceTemporaryDtor)
253     return;
254 
255   // Call the destructor for the temporary.
256   switch (M->getStorageDuration()) {
257   case SD_Static:
258   case SD_Thread: {
259     llvm::Constant *CleanupFn;
260     llvm::Constant *CleanupArg;
261     if (E->getType()->isArrayType()) {
262       CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
263           cast<llvm::Constant>(ReferenceTemporary), E->getType(),
264           CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions);
265       CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
266     } else {
267       CleanupFn =
268         CGF.CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
269       CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
270     }
271     CGF.CGM.getCXXABI().registerGlobalDtor(
272         CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
273     break;
274   }
275 
276   case SD_FullExpression:
277     CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
278                     CodeGenFunction::destroyCXXObject,
279                     CGF.getLangOpts().Exceptions);
280     break;
281 
282   case SD_Automatic:
283     CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
284                                     ReferenceTemporary, E->getType(),
285                                     CodeGenFunction::destroyCXXObject,
286                                     CGF.getLangOpts().Exceptions);
287     break;
288 
289   case SD_Dynamic:
290     llvm_unreachable("temporary cannot have dynamic storage duration");
291   }
292 }
293 
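/// Create the storage for a materialized temporary: an alloca for temporaries
/// with full-expression or automatic storage duration, and a global for those
/// with static or thread storage duration.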
294 static llvm::Value *
295 createReferenceTemporary(CodeGenFunction &CGF,
296                          const MaterializeTemporaryExpr *M, const Expr *Inner) {
297   switch (M->getStorageDuration()) {
298   case SD_FullExpression:
299   case SD_Automatic:
300     return CGF.CreateMemTemp(Inner->getType(), "ref.tmp");
301 
302   case SD_Thread:
303   case SD_Static:
304     return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
305 
306   case SD_Dynamic:
307     llvm_unreachable("temporary can't have dynamic storage duration");
308   }
309   llvm_unreachable("unknown storage duration");
310 }
311 
312 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
313                                            const MaterializeTemporaryExpr *M) {
314   const Expr *E = M->GetTemporaryExpr();
315 
316   if (getLangOpts().ObjCAutoRefCount &&
317       M->getType()->isObjCLifetimeType() &&
318       M->getType().getObjCLifetime() != Qualifiers::OCL_None &&
319       M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
320     // FIXME: Fold this into the general case below.
321     llvm::Value *Object = createReferenceTemporary(*this, M, E);
322     LValue RefTempDst = MakeAddrLValue(Object, M->getType());
323 
324     if (llvm::GlobalVariable *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
325       // We should not have emitted the initializer for this temporary as a
326       // constant.
327       assert(!Var->hasInitializer());
328       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
329     }
330 
331     EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
332 
333     pushTemporaryCleanup(*this, M, E, Object);
334     return RefTempDst;
335   }
336 
337   SmallVector<const Expr *, 2> CommaLHSs;
338   SmallVector<SubobjectAdjustment, 2> Adjustments;
339   E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
340 
341   for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
342     EmitIgnoredExpr(CommaLHSs[I]);
343 
344   if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E)) {
345     if (opaque->getType()->isRecordType()) {
346       assert(Adjustments.empty());
347       return EmitOpaqueValueLValue(opaque);
348     }
349   }
350 
351   // Create and initialize the reference temporary.
352   llvm::Value *Object = createReferenceTemporary(*this, M, E);
353   if (llvm::GlobalVariable *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
354     // If the temporary is a global and has a constant initializer, we may
355     // have already initialized it.
356     if (!Var->hasInitializer()) {
357       Var->setInitializer(CGM.EmitNullConstant(E->getType()));
358       EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
359     }
360   } else {
361     EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
362   }
363   pushTemporaryCleanup(*this, M, E, Object);
364 
365   // Perform derived-to-base casts and/or field accesses, to get from the
366   // temporary object we created (and, potentially, for which we extended
367   // the lifetime) to the subobject we're binding the reference to.
368   for (unsigned I = Adjustments.size(); I != 0; --I) {
369     SubobjectAdjustment &Adjustment = Adjustments[I-1];
370     switch (Adjustment.Kind) {
371     case SubobjectAdjustment::DerivedToBaseAdjustment:
372       Object =
373           GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
374                                 Adjustment.DerivedToBase.BasePath->path_begin(),
375                                 Adjustment.DerivedToBase.BasePath->path_end(),
376                                 /*NullCheckValue=*/ false);
377       break;
378 
379     case SubobjectAdjustment::FieldAdjustment: {
380       LValue LV = MakeAddrLValue(Object, E->getType());
381       LV = EmitLValueForField(LV, Adjustment.Field);
382       assert(LV.isSimple() &&
383              "materialized temporary field is not a simple lvalue");
384       Object = LV.getAddress();
385       break;
386     }
387 
388     case SubobjectAdjustment::MemberPointerAdjustment: {
389       llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
390       Object = CGM.getCXXABI().EmitMemberDataPointerAddress(
391                     *this, Object, Ptr, Adjustment.Ptr.MPT);
392       break;
393     }
394     }
395   }
396 
397   return MakeAddrLValue(Object, M->getType());
398 }
399 
400 RValue
401 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
402   // Emit the expression as an lvalue.
403   LValue LV = EmitLValue(E);
404   assert(LV.isSimple());
405   llvm::Value *Value = LV.getAddress();
406 
407   if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
408     // C++11 [dcl.ref]p5 (as amended by core issue 453):
409     //   If a glvalue to which a reference is directly bound designates neither
410     //   an existing object or function of an appropriate type nor a region of
411     //   storage of suitable size and alignment to contain an object of the
412     //   reference's type, the behavior is undefined.
413     QualType Ty = E->getType();
414     EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
415   }
416 
417   return RValue::get(Value);
418 }
419 
420 
421 /// getAccessedFieldNo - Given an encoded value and a result number, return the
422 /// input field number being accessed.
423 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
424                                              const llvm::Constant *Elts) {
425   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
426       ->getZExtValue();
427 }
428 
429 /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
430 static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
431                                     llvm::Value *High) {
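  // The multiply/xor/shift-by-47 steps below mirror hash_16_bytes (see
  // llvm/ADT/Hashing.h); KMul is the fixed 64-bit multiplier that routine uses.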
432   llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
433   llvm::Value *K47 = Builder.getInt64(47);
434   llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
435   llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
436   llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
437   llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
438   return Builder.CreateMul(B1, KMul);
439 }
440 
441 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
442                                     llvm::Value *Address,
443                                     QualType Ty, CharUnits Alignment) {
444   if (!SanitizePerformTypeCheck)
445     return;
446 
447   // Don't check pointers outside the default address space. The null check
448   // isn't correct, the object-size check isn't supported by LLVM, and we can't
449   // communicate the addresses to the runtime handler for the vptr check.
450   if (Address->getType()->getPointerAddressSpace())
451     return;
452 
453   llvm::Value *Cond = 0;
454   llvm::BasicBlock *Done = 0;
455 
456   if (SanOpts->Null) {
457     // The glvalue must not be an empty glvalue.
458     Cond = Builder.CreateICmpNE(
459         Address, llvm::Constant::getNullValue(Address->getType()));
460 
461     if (TCK == TCK_DowncastPointer) {
462       // When performing a pointer downcast, it's OK if the value is null.
463       // Skip the remaining checks in that case.
464       Done = createBasicBlock("null");
465       llvm::BasicBlock *Rest = createBasicBlock("not.null");
466       Builder.CreateCondBr(Cond, Rest, Done);
467       EmitBlock(Rest);
468       Cond = 0;
469     }
470   }
471 
472   if (SanOpts->ObjectSize && !Ty->isIncompleteType()) {
473     uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
474 
475     // The glvalue must refer to a large enough storage region.
476     // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
477     //        to check this.
478     llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
479     llvm::Value *Min = Builder.getFalse();
480     llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
481     llvm::Value *LargeEnough =
482         Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
483                               llvm::ConstantInt::get(IntPtrTy, Size));
484     Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
485   }
486 
487   uint64_t AlignVal = 0;
488 
489   if (SanOpts->Alignment) {
490     AlignVal = Alignment.getQuantity();
491     if (!Ty->isIncompleteType() && !AlignVal)
492       AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
493 
494     // The glvalue must be suitably aligned.
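    // We test (address & (AlignVal - 1)) == 0; e.g. an 8-byte alignment
    // requirement checks that the low three bits of the address are zero.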
495     if (AlignVal) {
496       llvm::Value *Align =
497           Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
498                             llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
499       llvm::Value *Aligned =
500         Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
501       Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
502     }
503   }
504 
505   if (Cond) {
506     llvm::Constant *StaticData[] = {
507       EmitCheckSourceLocation(Loc),
508       EmitCheckTypeDescriptor(Ty),
509       llvm::ConstantInt::get(SizeTy, AlignVal),
510       llvm::ConstantInt::get(Int8Ty, TCK)
511     };
512     EmitCheck(Cond, "type_mismatch", StaticData, Address, CRK_Recoverable);
513   }
514 
515   // If possible, check that the vptr indicates that there is a subobject of
516   // type Ty at offset zero within this object.
517   //
518   // C++11 [basic.life]p5,6:
519   //   [For storage which does not refer to an object within its lifetime]
520   //   The program has undefined behavior if:
521   //    -- the [pointer or glvalue] is used to access a non-static data member
522   //       or call a non-static member function
523   CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
524   if (SanOpts->Vptr &&
525       (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
526        TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference) &&
527       RD && RD->hasDefinition() && RD->isDynamicClass()) {
528     // Compute a hash of the mangled name of the type.
529     //
530     // FIXME: This is not guaranteed to be deterministic! Move to a
531     //        fingerprinting mechanism once LLVM provides one. For the time
532     //        being the implementation happens to be deterministic.
533     SmallString<64> MangledName;
534     llvm::raw_svector_ostream Out(MangledName);
535     CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
536                                                      Out);
537     llvm::hash_code TypeHash = hash_value(Out.str());
538 
539     // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
540     llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
541     llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
542     llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
543     llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
544     llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
545 
546     llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
547     Hash = Builder.CreateTrunc(Hash, IntPtrTy);
548 
549     // Look the hash up in our cache.
550     const int CacheSize = 128;
551     llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
552     llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
553                                                    "__ubsan_vptr_type_cache");
554     llvm::Value *Slot = Builder.CreateAnd(Hash,
555                                           llvm::ConstantInt::get(IntPtrTy,
556                                                                  CacheSize-1));
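    // The cache is a direct-mapped table of CacheSize previously validated
    // hashes; if the slot already holds this hash, the (vptr, type) pair has
    // passed the full check before and we can skip the runtime call.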
557     llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
558     llvm::Value *CacheVal =
559       Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));
560 
561     // If the hash isn't in the cache, call a runtime handler to perform the
562     // hard work of checking whether the vptr is for an object of the right
563     // type. This will either fill in the cache and return, or produce a
564     // diagnostic.
565     llvm::Constant *StaticData[] = {
566       EmitCheckSourceLocation(Loc),
567       EmitCheckTypeDescriptor(Ty),
568       CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
569       llvm::ConstantInt::get(Int8Ty, TCK)
570     };
571     llvm::Value *DynamicData[] = { Address, Hash };
572     EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
573               "dynamic_type_cache_miss", StaticData, DynamicData,
574               CRK_AlwaysRecoverable);
575   }
576 
577   if (Done) {
578     Builder.CreateBr(Done);
579     EmitBlock(Done);
580   }
581 }
582 
583 /// Determine whether this expression refers to a flexible array member in a
584 /// struct. We disable array bounds checks for such members.
585 static bool isFlexibleArrayMemberExpr(const Expr *E) {
586   // For compatibility with existing code, we treat arrays of length 0 or
587   // 1 as flexible array members.
588   const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
589   if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
590     if (CAT->getSize().ugt(1))
591       return false;
592   } else if (!isa<IncompleteArrayType>(AT))
593     return false;
594 
595   E = E->IgnoreParens();
596 
597   // A flexible array member must be the last member in the class.
598   if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
599     // FIXME: If the base type of the member expr is not FD->getParent(),
600     // this should not be treated as a flexible array member access.
601     if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
602       RecordDecl::field_iterator FI(
603           DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
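      // FI is positioned at FD itself; if advancing it once reaches
      // field_end(), FD is the last field of its parent record.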
604       return ++FI == FD->getParent()->field_end();
605     }
606   }
607 
608   return false;
609 }
610 
611 /// If Base is known to point to the start of an array, return the length of
612 /// that array. Return 0 if the length cannot be determined.
613 static llvm::Value *getArrayIndexingBound(
614     CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
615   // For the vector indexing extension, the bound is the number of elements.
616   if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
617     IndexedType = Base->getType();
618     return CGF.Builder.getInt32(VT->getNumElements());
619   }
620 
621   Base = Base->IgnoreParens();
622 
623   if (const CastExpr *CE = dyn_cast<CastExpr>(Base)) {
624     if (CE->getCastKind() == CK_ArrayToPointerDecay &&
625         !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
626       IndexedType = CE->getSubExpr()->getType();
627       const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
628       if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
629         return CGF.Builder.getInt(CAT->getSize());
630       else if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT))
631         return CGF.getVLASize(VAT).first;
632     }
633   }
634 
635   return 0;
636 }
637 
638 void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
639                                       llvm::Value *Index, QualType IndexType,
640                                       bool Accessed) {
641   assert(SanOpts->Bounds && "should not be called unless adding bounds checks");
642 
643   QualType IndexedType;
644   llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
645   if (!Bound)
646     return;
647 
648   bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
649   llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
650   llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
651 
652   llvm::Constant *StaticData[] = {
653     EmitCheckSourceLocation(E->getExprLoc()),
654     EmitCheckTypeDescriptor(IndexedType),
655     EmitCheckTypeDescriptor(IndexType)
656   };
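  // An actual access requires Index < Bound; when merely forming the address
  // (Accessed is false), the one-past-the-end value Index == Bound is allowed.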
657   llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
658                                 : Builder.CreateICmpULE(IndexVal, BoundVal);
659   EmitCheck(Check, "out_of_bounds", StaticData, Index, CRK_Recoverable);
660 }
661 
662 
663 CodeGenFunction::ComplexPairTy CodeGenFunction::
664 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
665                          bool isInc, bool isPre) {
666   ComplexPairTy InVal = EmitLoadOfComplex(LV);
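  // Increment/decrement of a complex value adjusts only the real component;
  // the imaginary part is carried through unchanged below.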
667 
668   llvm::Value *NextVal;
669   if (isa<llvm::IntegerType>(InVal.first->getType())) {
670     uint64_t AmountVal = isInc ? 1 : -1;
671     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
672 
673     // Add the inc/dec to the real part.
674     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
675   } else {
676     QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
677     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
678     if (!isInc)
679       FVal.changeSign();
680     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
681 
682     // Add the inc/dec to the real part.
683     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
684   }
685 
686   ComplexPairTy IncVal(NextVal, InVal.second);
687 
688   // Store the updated result through the lvalue.
689   EmitStoreOfComplex(IncVal, LV, /*init*/ false);
690 
691   // If this is a postinc, return the value read from memory, otherwise use the
692   // updated value.
693   return isPre ? IncVal : InVal;
694 }
695 
696 
697 //===----------------------------------------------------------------------===//
698 //                         LValue Expression Emission
699 //===----------------------------------------------------------------------===//
700 
701 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
702   if (Ty->isVoidType())
703     return RValue::get(0);
704 
705   switch (getEvaluationKind(Ty)) {
706   case TEK_Complex: {
707     llvm::Type *EltTy =
708       ConvertType(Ty->castAs<ComplexType>()->getElementType());
709     llvm::Value *U = llvm::UndefValue::get(EltTy);
710     return RValue::getComplex(std::make_pair(U, U));
711   }
712 
713   // If this is a use of an undefined aggregate type, the aggregate must have an
714   // identifiable address.  Just because the contents of the value are undefined
715   // doesn't mean that the address can't be taken and compared.
716   case TEK_Aggregate: {
717     llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
718     return RValue::getAggregate(DestPtr);
719   }
720 
721   case TEK_Scalar:
722     return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
723   }
724   llvm_unreachable("bad evaluation kind");
725 }
726 
727 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
728                                               const char *Name) {
729   ErrorUnsupported(E, Name);
730   return GetUndefRValue(E->getType());
731 }
732 
733 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
734                                               const char *Name) {
735   ErrorUnsupported(E, Name);
736   llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
737   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
738 }
739 
740 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
741   LValue LV;
742   if (SanOpts->Bounds && isa<ArraySubscriptExpr>(E))
743     LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
744   else
745     LV = EmitLValue(E);
746   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
747     EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
748                   E->getType(), LV.getAlignment());
749   return LV;
750 }
751 
752 /// EmitLValue - Emit code to compute a designator that specifies the location
753 /// of the expression.
754 ///
755 /// This can return one of two things: a simple address or a bitfield reference.
756 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
757 /// an LLVM pointer type.
758 ///
759 /// If this returns a bitfield reference, nothing about the pointee type of the
760 /// LLVM value is known: For example, it may not be a pointer to an integer.
761 ///
762 /// If this returns a normal address, and if the lvalue's C type is fixed size,
763 /// this method guarantees that the returned pointer type will point to an LLVM
764 /// type of the same size as the lvalue's type.  If the lvalue has a variable
765 /// length type, this is not possible.
766 ///
767 LValue CodeGenFunction::EmitLValue(const Expr *E) {
768   switch (E->getStmtClass()) {
769   default: return EmitUnsupportedLValue(E, "l-value expression");
770 
771   case Expr::ObjCPropertyRefExprClass:
772     llvm_unreachable("cannot emit a property reference directly");
773 
774   case Expr::ObjCSelectorExprClass:
775     return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
776   case Expr::ObjCIsaExprClass:
777     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
778   case Expr::BinaryOperatorClass:
779     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
780   case Expr::CompoundAssignOperatorClass:
781     if (!E->getType()->isAnyComplexType())
782       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
783     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
784   case Expr::CallExprClass:
785   case Expr::CXXMemberCallExprClass:
786   case Expr::CXXOperatorCallExprClass:
787   case Expr::UserDefinedLiteralClass:
788     return EmitCallExprLValue(cast<CallExpr>(E));
789   case Expr::VAArgExprClass:
790     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
791   case Expr::DeclRefExprClass:
792     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
793   case Expr::ParenExprClass:
794     return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
795   case Expr::GenericSelectionExprClass:
796     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
797   case Expr::PredefinedExprClass:
798     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
799   case Expr::StringLiteralClass:
800     return EmitStringLiteralLValue(cast<StringLiteral>(E));
801   case Expr::ObjCEncodeExprClass:
802     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
803   case Expr::PseudoObjectExprClass:
804     return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
805   case Expr::InitListExprClass:
806     return EmitInitListLValue(cast<InitListExpr>(E));
807   case Expr::CXXTemporaryObjectExprClass:
808   case Expr::CXXConstructExprClass:
809     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
810   case Expr::CXXBindTemporaryExprClass:
811     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
812   case Expr::CXXUuidofExprClass:
813     return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
814   case Expr::LambdaExprClass:
815     return EmitLambdaLValue(cast<LambdaExpr>(E));
816 
817   case Expr::ExprWithCleanupsClass: {
818     const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
819     enterFullExpression(cleanups);
820     RunCleanupsScope Scope(*this);
821     return EmitLValue(cleanups->getSubExpr());
822   }
823 
824   case Expr::CXXScalarValueInitExprClass:
825     return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
826   case Expr::CXXDefaultArgExprClass:
827     return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
828   case Expr::CXXDefaultInitExprClass: {
829     CXXDefaultInitExprScope Scope(*this);
830     return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr());
831   }
832   case Expr::CXXTypeidExprClass:
833     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
834 
835   case Expr::ObjCMessageExprClass:
836     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
837   case Expr::ObjCIvarRefExprClass:
838     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
839   case Expr::StmtExprClass:
840     return EmitStmtExprLValue(cast<StmtExpr>(E));
841   case Expr::UnaryOperatorClass:
842     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
843   case Expr::ArraySubscriptExprClass:
844     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
845   case Expr::ExtVectorElementExprClass:
846     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
847   case Expr::MemberExprClass:
848     return EmitMemberExpr(cast<MemberExpr>(E));
849   case Expr::CompoundLiteralExprClass:
850     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
851   case Expr::ConditionalOperatorClass:
852     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
853   case Expr::BinaryConditionalOperatorClass:
854     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
855   case Expr::ChooseExprClass:
856     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
857   case Expr::OpaqueValueExprClass:
858     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
859   case Expr::SubstNonTypeTemplateParmExprClass:
860     return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
861   case Expr::ImplicitCastExprClass:
862   case Expr::CStyleCastExprClass:
863   case Expr::CXXFunctionalCastExprClass:
864   case Expr::CXXStaticCastExprClass:
865   case Expr::CXXDynamicCastExprClass:
866   case Expr::CXXReinterpretCastExprClass:
867   case Expr::CXXConstCastExprClass:
868   case Expr::ObjCBridgedCastExprClass:
869     return EmitCastLValue(cast<CastExpr>(E));
870 
871   case Expr::MaterializeTemporaryExprClass:
872     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
873   }
874 }
875 
876 /// Given an object of the given canonical type, can we safely copy a
877 /// value out of it based on its initializer?
878 static bool isConstantEmittableObjectType(QualType type) {
879   assert(type.isCanonical());
880   assert(!type->isReferenceType());
881 
882   // Must be const-qualified but non-volatile.
883   Qualifiers qs = type.getLocalQualifiers();
884   if (!qs.hasConst() || qs.hasVolatile()) return false;
885 
886   // Otherwise, all object types satisfy this except C++ classes with
887   // mutable subobjects or non-trivial copy/destroy behavior.
888   if (const RecordType *RT = dyn_cast<RecordType>(type))
889     if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
890       if (RD->hasMutableFields() || !RD->isTrivial())
891         return false;
892 
893   return true;
894 }
895 
896 /// Can we constant-emit a load of a reference to a variable of the
897 /// given type?  This is different from predicates like
898 /// Decl::isUsableInConstantExpressions because we do want it to apply
899 /// in situations that don't necessarily satisfy the language's rules
900 /// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
901 /// to do this with const float variables even if those variables
902 /// aren't marked 'constexpr'.
903 enum ConstantEmissionKind {
904   CEK_None,
905   CEK_AsReferenceOnly,
906   CEK_AsValueOrReference,
907   CEK_AsValueOnly
908 };
909 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
910   type = type.getCanonicalType();
911   if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
912     if (isConstantEmittableObjectType(ref->getPointeeType()))
913       return CEK_AsValueOrReference;
914     return CEK_AsReferenceOnly;
915   }
916   if (isConstantEmittableObjectType(type))
917     return CEK_AsValueOnly;
918   return CEK_None;
919 }
920 
921 /// Try to emit a reference to the given value without producing it as
922 /// an l-value.  This is actually more than an optimization: we can't
923 /// produce an l-value for variables that we never actually captured
924 /// in a block or lambda, which means const int variables or constexpr
925 /// literals or similar.
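/// A typical case is a lambda or block body that reads an enclosing const int
/// or constexpr variable it never captured; we fold such references to their
/// constant values here.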
926 CodeGenFunction::ConstantEmission
927 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
928   ValueDecl *value = refExpr->getDecl();
929 
930   // The value needs to be an enum constant or a constant variable.
931   ConstantEmissionKind CEK;
932   if (isa<ParmVarDecl>(value)) {
933     CEK = CEK_None;
934   } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
935     CEK = checkVarTypeForConstantEmission(var->getType());
936   } else if (isa<EnumConstantDecl>(value)) {
937     CEK = CEK_AsValueOnly;
938   } else {
939     CEK = CEK_None;
940   }
941   if (CEK == CEK_None) return ConstantEmission();
942 
943   Expr::EvalResult result;
944   bool resultIsReference;
945   QualType resultType;
946 
947   // It's best to evaluate all the way as an r-value if that's permitted.
948   if (CEK != CEK_AsReferenceOnly &&
949       refExpr->EvaluateAsRValue(result, getContext())) {
950     resultIsReference = false;
951     resultType = refExpr->getType();
952 
953   // Otherwise, try to evaluate as an l-value.
954   } else if (CEK != CEK_AsValueOnly &&
955              refExpr->EvaluateAsLValue(result, getContext())) {
956     resultIsReference = true;
957     resultType = value->getType();
958 
959   // Failure.
960   } else {
961     return ConstantEmission();
962   }
963 
964   // In any case, if the initializer has side-effects, abandon ship.
965   if (result.HasSideEffects)
966     return ConstantEmission();
967 
968   // Emit as a constant.
969   llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
970 
971   // Make sure we emit a debug reference to the global variable.
972   // This should probably fire even for
973   if (isa<VarDecl>(value)) {
974     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
975       EmitDeclRefExprDbgValue(refExpr, C);
976   } else {
977     assert(isa<EnumConstantDecl>(value));
978     EmitDeclRefExprDbgValue(refExpr, C);
979   }
980 
981   // If we emitted a reference constant, we need to dereference that.
982   if (resultIsReference)
983     return ConstantEmission::forReference(C);
984 
985   return ConstantEmission::forValue(C);
986 }
987 
988 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
989   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
990                           lvalue.getAlignment().getQuantity(),
991                           lvalue.getType(), lvalue.getTBAAInfo(),
992                           lvalue.getTBAABaseType(), lvalue.getTBAAOffset());
993 }
994 
995 static bool hasBooleanRepresentation(QualType Ty) {
996   if (Ty->isBooleanType())
997     return true;
998 
999   if (const EnumType *ET = Ty->getAs<EnumType>())
1000     return ET->getDecl()->getIntegerType()->isBooleanType();
1001 
1002   if (const AtomicType *AT = Ty->getAs<AtomicType>())
1003     return hasBooleanRepresentation(AT->getValueType());
1004 
1005   return false;
1006 }
1007 
1008 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1009                             llvm::APInt &Min, llvm::APInt &End,
1010                             bool StrictEnums) {
1011   const EnumType *ET = Ty->getAs<EnumType>();
1012   bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1013                                 ET && !ET->getDecl()->isFixed();
1014   bool IsBool = hasBooleanRepresentation(Ty);
1015   if (!IsBool && !IsRegularCPlusPlusEnum)
1016     return false;
1017 
1018   if (IsBool) {
1019     Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1020     End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1021   } else {
1022     const EnumDecl *ED = ET->getDecl();
1023     llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
1024     unsigned Bitwidth = LTy->getScalarSizeInBits();
1025     unsigned NumNegativeBits = ED->getNumNegativeBits();
1026     unsigned NumPositiveBits = ED->getNumPositiveBits();
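    // For example, an enumeration whose enumerators are 0 through 3 has
    // NumPositiveBits == 2 and gets the range [0, 4); adding an enumerator of
    // -1 makes the range symmetric, [-4, 4).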
1027 
1028     if (NumNegativeBits) {
1029       unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
1030       assert(NumBits <= Bitwidth);
1031       End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
1032       Min = -End;
1033     } else {
1034       assert(NumPositiveBits <= Bitwidth);
1035       End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
1036       Min = llvm::APInt(Bitwidth, 0);
1037     }
1038   }
1039   return true;
1040 }
1041 
1042 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1043   llvm::APInt Min, End;
1044   if (!getRangeForType(*this, Ty, Min, End,
1045                        CGM.getCodeGenOpts().StrictEnums))
1046     return 0;
1047 
1048   llvm::MDBuilder MDHelper(getLLVMContext());
1049   return MDHelper.createRange(Min, End);
1050 }
1051 
1052 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
1053                                               unsigned Alignment, QualType Ty,
1054                                               llvm::MDNode *TBAAInfo,
1055                                               QualType TBAABaseType,
1056                                               uint64_t TBAAOffset) {
1057   // For better performance, handle vector loads differently.
1058   if (Ty->isVectorType()) {
1059     llvm::Value *V;
1060     const llvm::Type *EltTy =
1061     cast<llvm::PointerType>(Addr->getType())->getElementType();
1062 
1063     const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);
1064 
1065     // Handle vectors of size 3 as if they were size 4, for better performance.
1066     if (VTy->getNumElements() == 3) {
1067 
1068       // Bitcast to vec4 type.
1069       llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
1070                                                          4);
1071       llvm::PointerType *ptVec4Ty =
1072       llvm::PointerType::get(vec4Ty,
1073                              (cast<llvm::PointerType>(
1074                                       Addr->getType()))->getAddressSpace());
1075       llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
1076                                                 "castToVec4");
1077       // Now load value.
1078       llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1079 
1080       // Shuffle vector to get vec3.
1081       llvm::Constant *Mask[] = {
1082         llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0),
1083         llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 1),
1084         llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 2)
1085       };
1086 
1087       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1088       V = Builder.CreateShuffleVector(LoadVal,
1089                                       llvm::UndefValue::get(vec4Ty),
1090                                       MaskV, "extractVec");
1091       return EmitFromMemory(V, Ty);
1092     }
1093   }
1094 
1095   // Atomic operations have to be done on integral types.
1096   if (Ty->isAtomicType()) {
1097     LValue lvalue = LValue::MakeAddr(Addr, Ty,
1098                                      CharUnits::fromQuantity(Alignment),
1099                                      getContext(), TBAAInfo);
1100     return EmitAtomicLoad(lvalue).getScalarVal();
1101   }
1102 
1103   llvm::LoadInst *Load = Builder.CreateLoad(Addr);
1104   if (Volatile)
1105     Load->setVolatile(true);
1106   if (Alignment)
1107     Load->setAlignment(Alignment);
1108   if (TBAAInfo) {
1109     llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
1110                                                       TBAAOffset);
1111     CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
1112   }
1113 
1114   if ((SanOpts->Bool && hasBooleanRepresentation(Ty)) ||
1115       (SanOpts->Enum && Ty->getAs<EnumType>())) {
1116     llvm::APInt Min, End;
1117     if (getRangeForType(*this, Ty, Min, End, true)) {
1118       --End;
1119       llvm::Value *Check;
1120       if (!Min)
1121         Check = Builder.CreateICmpULE(
1122           Load, llvm::ConstantInt::get(getLLVMContext(), End));
1123       else {
1124         llvm::Value *Upper = Builder.CreateICmpSLE(
1125           Load, llvm::ConstantInt::get(getLLVMContext(), End));
1126         llvm::Value *Lower = Builder.CreateICmpSGE(
1127           Load, llvm::ConstantInt::get(getLLVMContext(), Min));
1128         Check = Builder.CreateAnd(Upper, Lower);
1129       }
1130       // FIXME: Provide a SourceLocation.
1131       EmitCheck(Check, "load_invalid_value", EmitCheckTypeDescriptor(Ty),
1132                 EmitCheckValue(Load), CRK_Recoverable);
1133     }
1134   } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1135     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1136       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1137 
1138   return EmitFromMemory(Load, Ty);
1139 }
1140 
1141 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1142   // Bool has a different representation in memory than in registers.
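  // In registers a bool is an i1, while ConvertTypeForMem gives its in-memory
  // type (typically i8), so we zero-extend on the way to memory and truncate
  // back to i1 in EmitFromMemory.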
1143   if (hasBooleanRepresentation(Ty)) {
1144     // This should really always be an i1, but sometimes it's already
1145     // an i8, and it's awkward to track those cases down.
1146     if (Value->getType()->isIntegerTy(1))
1147       return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
1148     assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1149            "wrong value rep of bool");
1150   }
1151 
1152   return Value;
1153 }
1154 
1155 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1156   // Bool has a different representation in memory than in registers.
1157   if (hasBooleanRepresentation(Ty)) {
1158     assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1159            "wrong value rep of bool");
1160     return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1161   }
1162 
1163   return Value;
1164 }
1165 
1166 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
1167                                         bool Volatile, unsigned Alignment,
1168                                         QualType Ty,
1169                                         llvm::MDNode *TBAAInfo,
1170                                         bool isInit, QualType TBAABaseType,
1171                                         uint64_t TBAAOffset) {
1172 
1173   // Handle vectors differently to get better performance.
1174   if (Ty->isVectorType()) {
1175     llvm::Type *SrcTy = Value->getType();
1176     llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
1177     // Handle vec3 specially.
1178     if (VecTy->getNumElements() == 3) {
1179       llvm::LLVMContext &VMContext = getLLVMContext();
1180 
1181       // Our source is a vec3; do a shufflevector to make it a vec4.
1182       SmallVector<llvm::Constant*, 4> Mask;
1183       Mask.push_back(llvm::ConstantInt::get(
1184                                             llvm::Type::getInt32Ty(VMContext),
1185                                             0));
1186       Mask.push_back(llvm::ConstantInt::get(
1187                                             llvm::Type::getInt32Ty(VMContext),
1188                                             1));
1189       Mask.push_back(llvm::ConstantInt::get(
1190                                             llvm::Type::getInt32Ty(VMContext),
1191                                             2));
1192       Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
1193 
1194       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1195       Value = Builder.CreateShuffleVector(Value,
1196                                           llvm::UndefValue::get(VecTy),
1197                                           MaskV, "extractVec");
1198       SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
1199     }
1200     llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
1201     if (DstPtr->getElementType() != SrcTy) {
1202       llvm::Type *MemTy =
1203       llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
1204       Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
1205     }
1206   }
1207 
1208   Value = EmitToMemory(Value, Ty);
1209 
1210   if (Ty->isAtomicType()) {
1211     EmitAtomicStore(RValue::get(Value),
1212                     LValue::MakeAddr(Addr, Ty,
1213                                      CharUnits::fromQuantity(Alignment),
1214                                      getContext(), TBAAInfo),
1215                     isInit);
1216     return;
1217   }
1218 
1219   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1220   if (Alignment)
1221     Store->setAlignment(Alignment);
1222   if (TBAAInfo) {
1223     llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
1224                                                       TBAAOffset);
1225     CGM.DecorateInstruction(Store, TBAAPath, false/*ConvertTypeToTag*/);
1226   }
1227 }
1228 
1229 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1230                                         bool isInit) {
1231   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
1232                     lvalue.getAlignment().getQuantity(), lvalue.getType(),
1233                     lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
1234                     lvalue.getTBAAOffset());
1235 }
1236 
1237 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1238 /// method emits the address of the lvalue, then loads the result as an rvalue,
1239 /// returning the rvalue.
1240 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
1241   if (LV.isObjCWeak()) {
1242     // load of a __weak object.
1243     llvm::Value *AddrWeakObj = LV.getAddress();
1244     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1245                                                              AddrWeakObj));
1246   }
1247   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
1248     llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
1249     Object = EmitObjCConsumeObject(LV.getType(), Object);
1250     return RValue::get(Object);
1251   }
1252 
1253   if (LV.isSimple()) {
1254     assert(!LV.getType()->isFunctionType());
1255 
1256     // Everything needs a load.
1257     return RValue::get(EmitLoadOfScalar(LV));
1258   }
1259 
1260   if (LV.isVectorElt()) {
1261     llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
1262                                               LV.isVolatileQualified());
1263     Load->setAlignment(LV.getAlignment().getQuantity());
1264     return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1265                                                     "vecext"));
1266   }
1267 
1268   // If this is a reference to a subset of the elements of a vector, either
1269   // shuffle the input or extract/insert them as appropriate.
1270   if (LV.isExtVectorElt())
1271     return EmitLoadOfExtVectorElementLValue(LV);
1272 
1273   assert(LV.isBitField() && "Unknown LValue type!");
1274   return EmitLoadOfBitfieldLValue(LV);
1275 }
1276 
1277 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
1278   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1279 
1280   // Get the output type.
1281   llvm::Type *ResLTy = ConvertType(LV.getType());
1282 
1283   llvm::Value *Ptr = LV.getBitFieldAddr();
1284   llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(),
1285                                         "bf.load");
1286   cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);
1287 
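  // Extract the field from its storage unit. For a signed field, shifting left
  // by the number of unused high bits and then arithmetic-shifting right by
  // (Offset + HighBits) both isolates the field and sign-extends it; unsigned
  // fields instead use a logical shift right plus a mask of the low Size bits.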
1288   if (Info.IsSigned) {
1289     assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
1290     unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
1291     if (HighBits)
1292       Val = Builder.CreateShl(Val, HighBits, "bf.shl");
1293     if (Info.Offset + HighBits)
1294       Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
1295   } else {
1296     if (Info.Offset)
1297       Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
1298     if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
1299       Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
1300                                                               Info.Size),
1301                               "bf.clear");
1302   }
1303   Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
1304 
1305   return RValue::get(Val);
1306 }
1307 
1308 // If this is a reference to a subset of the elements of a vector, create an
1309 // appropriate shufflevector.
1310 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1311   llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
1312                                             LV.isVolatileQualified());
1313   Load->setAlignment(LV.getAlignment().getQuantity());
1314   llvm::Value *Vec = Load;
1315 
1316   const llvm::Constant *Elts = LV.getExtVectorElts();
1317 
1318   // If the result of the expression is a non-vector type, we must be extracting
1319   // a single element.  Just codegen as an extractelement.
1320   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1321   if (!ExprVT) {
1322     unsigned InIdx = getAccessedFieldNo(0, Elts);
1323     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1324     return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1325   }
1326 
  // Always use a shufflevector to try to retain the original program structure.
1328   unsigned NumResultElts = ExprVT->getNumElements();
1329 
1330   SmallVector<llvm::Constant*, 4> Mask;
1331   for (unsigned i = 0; i != NumResultElts; ++i)
1332     Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
1333 
1334   llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1335   Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1336                                     MaskV);
1337   return RValue::get(Vec);
1338 }
1339 
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
1346   if (!Dst.isSimple()) {
1347     if (Dst.isVectorElt()) {
1348       // Read/modify/write the vector, inserting the new element.
1349       llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
1350                                                 Dst.isVolatileQualified());
1351       Load->setAlignment(Dst.getAlignment().getQuantity());
1352       llvm::Value *Vec = Load;
1353       Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
1354                                         Dst.getVectorIdx(), "vecins");
1355       llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
1356                                                    Dst.isVolatileQualified());
1357       Store->setAlignment(Dst.getAlignment().getQuantity());
1358       return;
1359     }
1360 
1361     // If this is an update of extended vector elements, insert them as
1362     // appropriate.
1363     if (Dst.isExtVectorElt())
1364       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
1365 
1366     assert(Dst.isBitField() && "Unknown LValue type");
1367     return EmitStoreThroughBitfieldLValue(Src, Dst);
1368   }
1369 
1370   // There's special magic for assigning into an ARC-qualified l-value.
1371   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
1372     switch (Lifetime) {
1373     case Qualifiers::OCL_None:
1374       llvm_unreachable("present but none");
1375 
1376     case Qualifiers::OCL_ExplicitNone:
1377       // nothing special
1378       break;
1379 
1380     case Qualifiers::OCL_Strong:
1381       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
1382       return;
1383 
1384     case Qualifiers::OCL_Weak:
1385       EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
1386       return;
1387 
1388     case Qualifiers::OCL_Autoreleasing:
1389       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
1390                                                      Src.getScalarVal()));
1391       // fall into the normal path
1392       break;
1393     }
1394   }
1395 
1396   if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment into a __weak object; use the runtime's write-barrier.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
1401     return;
1402   }
1403 
1404   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment into a __strong object.
1406     llvm::Value *LvalueDst = Dst.getAddress();
1407     llvm::Value *src = Src.getScalarVal();
1408     if (Dst.isObjCIvar()) {
1409       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
1410       llvm::Type *ResultType = ConvertType(getContext().LongTy);
1411       llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
1412       llvm::Value *dst = RHS;
1413       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1414       llvm::Value *LHS =
1415         Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
1416       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1417       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1418                                               BytesBetween);
1419     } else if (Dst.isGlobalObjCRef()) {
1420       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1421                                                 Dst.isThreadLocalRef());
    } else {
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    }
1425     return;
1426   }
1427 
1428   assert(Src.isScalar() && "Can't emit an agg store with this method");
1429   EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
1430 }
1431 
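/// EmitStoreThroughBitfieldLValue - Store into a bit-field lvalue.  When the
/// field does not fill its entire storage unit this is a read-modify-write:
/// the source is masked to the field's width and shifted into place, the
/// field's old bits are cleared out of the loaded storage unit, and the two
/// are OR'd together before being written back.  With the illustrative
/// values StorageSize=32, Offset=8, Size=5, the old value is AND'ed with
/// ~0x1F00 and the masked source is shifted left by 8.  If Result is
/// non-null, it receives the bit-field's new value as a subsequent load
/// would see it.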
1432 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1433                                                      llvm::Value **Result) {
1434   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1435   llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1436   llvm::Value *Ptr = Dst.getBitFieldAddr();
1437 
1438   // Get the source value, truncated to the width of the bit-field.
1439   llvm::Value *SrcVal = Src.getScalarVal();
1440 
1441   // Cast the source to the storage type and shift it into place.
1442   SrcVal = Builder.CreateIntCast(SrcVal,
1443                                  Ptr->getType()->getPointerElementType(),
1444                                  /*IsSigned=*/false);
1445   llvm::Value *MaskedVal = SrcVal;
1446 
1447   // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with the source before storing.
1449   if (Info.StorageSize != Info.Size) {
1450     assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
1451     llvm::Value *Val = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
1452                                           "bf.load");
1453     cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);
1454 
1455     // Mask the source value as needed.
1456     if (!hasBooleanRepresentation(Dst.getType()))
1457       SrcVal = Builder.CreateAnd(SrcVal,
1458                                  llvm::APInt::getLowBitsSet(Info.StorageSize,
1459                                                             Info.Size),
1460                                  "bf.value");
1461     MaskedVal = SrcVal;
1462     if (Info.Offset)
1463       SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
1464 
1465     // Mask out the original value.
1466     Val = Builder.CreateAnd(Val,
1467                             ~llvm::APInt::getBitsSet(Info.StorageSize,
1468                                                      Info.Offset,
1469                                                      Info.Offset + Info.Size),
1470                             "bf.clear");
1471 
1472     // Or together the unchanged values and the source value.
1473     SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
1474   } else {
1475     assert(Info.Offset == 0);
1476   }
1477 
1478   // Write the new value back out.
1479   llvm::StoreInst *Store = Builder.CreateStore(SrcVal, Ptr,
1480                                                Dst.isVolatileQualified());
1481   Store->setAlignment(Info.StorageAlignment);
1482 
1483   // Return the new value of the bit-field, if requested.
1484   if (Result) {
1485     llvm::Value *ResultVal = MaskedVal;
1486 
1487     // Sign extend the value if needed.
1488     if (Info.IsSigned) {
1489       assert(Info.Size <= Info.StorageSize);
1490       unsigned HighBits = Info.StorageSize - Info.Size;
1491       if (HighBits) {
1492         ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
1493         ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
1494       }
1495     }
1496 
1497     ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
1498                                       "bf.result.cast");
1499     *Result = EmitFromMemory(ResultVal, Dst.getType());
1500   }
1501 }
1502 
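/// EmitStoreThroughExtVectorComponentLValue - Store into an ext-vector
/// component lvalue such as "V.xy = ...".  This is a read-modify-write of the
/// whole vector: the vector is loaded, the accessed components are replaced
/// (with an insertelement for a scalar source, or a shufflevector for a
/// vector source), and the result is stored back.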
1503 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1504                                                                LValue Dst) {
1505   // This access turns into a read/modify/write of the vector.  Load the input
1506   // value now.
1507   llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
1508                                             Dst.isVolatileQualified());
1509   Load->setAlignment(Dst.getAlignment().getQuantity());
1510   llvm::Value *Vec = Load;
1511   const llvm::Constant *Elts = Dst.getExtVectorElts();
1512 
1513   llvm::Value *SrcVal = Src.getScalarVal();
1514 
1515   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1516     unsigned NumSrcElts = VTy->getNumElements();
1517     unsigned NumDstElts =
1518        cast<llvm::VectorType>(Vec->getType())->getNumElements();
1519     if (NumDstElts == NumSrcElts) {
      // Use a shufflevector when the source and destination have the same
      // number of elements; build the mask so each source element lands in
      // the position named by the ext-vector access list.
1523       SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1524       for (unsigned i = 0; i != NumSrcElts; ++i)
1525         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1526 
1527       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1528       Vec = Builder.CreateShuffleVector(SrcVal,
1529                                         llvm::UndefValue::get(Vec->getType()),
1530                                         MaskV);
1531     } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the destination's length and then shuffle
      // it into the destination.
1534       // FIXME: since we're shuffling with undef, can we just use the indices
1535       //        into that?  This could be simpler.
1536       SmallVector<llvm::Constant*, 4> ExtMask;
1537       for (unsigned i = 0; i != NumSrcElts; ++i)
1538         ExtMask.push_back(Builder.getInt32(i));
1539       ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1540       llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1541       llvm::Value *ExtSrcVal =
1542         Builder.CreateShuffleVector(SrcVal,
1543                                     llvm::UndefValue::get(SrcVal->getType()),
1544                                     ExtMaskV);
      // Build an identity mask over the destination.
1546       SmallVector<llvm::Constant*, 4> Mask;
1547       for (unsigned i = 0; i != NumDstElts; ++i)
1548         Mask.push_back(Builder.getInt32(i));
1549 
      // Redirect the accessed positions to pull from the extended source.
1551       for (unsigned i = 0; i != NumSrcElts; ++i)
1552         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1553       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1554       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1555     } else {
1556       // We should never shorten the vector
1557       llvm_unreachable("unexpected shorten vector length");
1558     }
1559   } else {
1560     // If the Src is a scalar (not a vector) it must be updating one element.
1561     unsigned InIdx = getAccessedFieldNo(0, Elts);
1562     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1563     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1564   }
1565 
1566   llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
1567                                                Dst.isVolatileQualified());
1568   Store->setAlignment(Dst.getAlignment().getQuantity());
1569 }
1570 
// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
// generating the write-barrier API.  The lvalue is classified as a global,
// an ivar, or neither.
1574 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1575                                  LValue &LV,
1576                                  bool IsMemberAccess=false) {
1577   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1578     return;
1579 
1580   if (isa<ObjCIvarRefExpr>(E)) {
1581     QualType ExpTy = E->getType();
1582     if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of that
      // struct follows gcc's behavior and conservatively uses a non-ivar
      // write-barrier.
1586       ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1587       if (ExpTy->isRecordType()) {
1588         LV.setObjCIvar(false);
1589         return;
1590       }
1591     }
1592     LV.setObjCIvar(true);
1593     ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1594     LV.setBaseIvarExp(Exp->getBase());
1595     LV.setObjCArray(E->getType()->isArrayType());
1596     return;
1597   }
1598 
1599   if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1600     if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1601       if (VD->hasGlobalStorage()) {
1602         LV.setGlobalObjCRef(true);
1603         LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
1604       }
1605     }
1606     LV.setObjCArray(E->getType()->isArrayType());
1607     return;
1608   }
1609 
1610   if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1611     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1612     return;
1613   }
1614 
1615   if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1616     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1617     if (LV.isObjCIvar()) {
1618       // If cast is to a structure pointer, follow gcc's behavior and make it
1619       // a non-ivar write-barrier.
1620       QualType ExpTy = E->getType();
1621       if (ExpTy->isPointerType())
1622         ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1623       if (ExpTy->isRecordType())
1624         LV.setObjCIvar(false);
1625     }
1626     return;
1627   }
1628 
1629   if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1630     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1631     return;
1632   }
1633 
1634   if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1635     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1636     return;
1637   }
1638 
1639   if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1640     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1641     return;
1642   }
1643 
1644   if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1645     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1646     return;
1647   }
1648 
1649   if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1650     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1651     if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1654       LV.setObjCIvar(false);
1655     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
1658       LV.setGlobalObjCRef(false);
1659     return;
1660   }
1661 
1662   if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1663     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
1666     LV.setObjCArray(E->getType()->isArrayType());
1667     return;
1668   }
1669 }
1670 
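/// Cast the address of an lvalue to a pointer to the given IR type,
/// preserving the address space of the original pointer.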
1671 static llvm::Value *
1672 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1673                                 llvm::Value *V, llvm::Type *IRType,
1674                                 StringRef Name = StringRef()) {
1675   unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1676   return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1677 }
1678 
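/// Emit an lvalue referring to a global variable.  If the variable itself has
/// reference type, the reference is loaded first, and the result is
/// classified for Objective-C GC write-barrier purposes.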
1679 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1680                                       const Expr *E, const VarDecl *VD) {
1681   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1682   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1683   V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1684   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1685   QualType T = E->getType();
1686   LValue LV;
1687   if (VD->getType()->isReferenceType()) {
1688     llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1689     LI->setAlignment(Alignment.getQuantity());
1690     V = LI;
1691     LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1692   } else {
1693     LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1694   }
1695   setObjCGCLValueClass(CGF.getContext(), E, LV);
1696   return LV;
1697 }
1698 
1699 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1700                                      const Expr *E, const FunctionDecl *FD) {
1701   llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1702   if (!FD->hasPrototype()) {
1703     if (const FunctionProtoType *Proto =
1704             FD->getType()->getAs<FunctionProtoType>()) {
1705       // Ugly case: for a K&R-style definition, the type of the definition
1706       // isn't the same as the type of a use.  Correct for this with a
1707       // bitcast.
1708       QualType NoProtoType =
1709           CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1710       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1711       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
1712     }
1713   }
1714   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
1715   return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1716 }
1717 
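/// Emit an lvalue for a field of the record that holds the captures of a
/// captured statement or lambda, given the 'this' value for that record.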
1718 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
1719                                       llvm::Value *ThisValue) {
1720   QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
1721   LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
1722   return CGF.EmitLValueForField(LV, FD);
1723 }
1724 
1725 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1726   const NamedDecl *ND = E->getDecl();
1727   CharUnits Alignment = getContext().getDeclAlign(ND);
1728   QualType T = E->getType();
1729 
1730   // A DeclRefExpr for a reference initialized by a constant expression can
1731   // appear without being odr-used. Directly emit the constant initializer.
1732   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1733     const Expr *Init = VD->getAnyInitializer(VD);
1734     if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
1735         VD->isUsableInConstantExpressions(getContext()) &&
1736         VD->checkInitIsICE()) {
1737       llvm::Constant *Val =
1738         CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
1739       assert(Val && "failed to emit reference constant expression");
1740       // FIXME: Eventually we will want to emit vector element references.
1741       return MakeAddrLValue(Val, T, Alignment);
1742     }
1743   }
1744 
1745   // FIXME: We should be able to assert this for FunctionDecls as well!
1746   // FIXME: We should be able to assert this for all DeclRefExprs, not just
1747   // those with a valid source location.
1748   assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
1749           !E->getLocation().isValid()) &&
1750          "Should not use decl without marking it used!");
1751 
1752   if (ND->hasAttr<WeakRefAttr>()) {
1753     const ValueDecl *VD = cast<ValueDecl>(ND);
1754     llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1755     return MakeAddrLValue(Aliasee, T, Alignment);
1756   }
1757 
1758   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1759     // Check if this is a global variable.
1760     if (VD->hasLinkage() || VD->isStaticDataMember()) {
1761       // If it's thread_local, emit a call to its wrapper function instead.
1762       if (VD->getTLSKind() == VarDecl::TLS_Dynamic)
1763         return CGM.getCXXABI().EmitThreadLocalDeclRefExpr(*this, E);
1764       return EmitGlobalVarDeclLValue(*this, E, VD);
1765     }
1766 
1767     bool isBlockVariable = VD->hasAttr<BlocksAttr>();
1768 
1769     llvm::Value *V = LocalDeclMap.lookup(VD);
1770     if (!V && VD->isStaticLocal())
1771       V = CGM.getStaticLocalDeclAddress(VD);
1772 
1773     // Use special handling for lambdas.
1774     if (!V) {
1775       if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
1776         return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
1777       } else if (CapturedStmtInfo) {
1778         if (const FieldDecl *FD = CapturedStmtInfo->lookup(VD))
1779           return EmitCapturedFieldLValue(*this, FD,
1780                                          CapturedStmtInfo->getContextValue());
1781       }
1782 
1783       assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
1784       return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
1785                             T, Alignment);
1786     }
1787 
1788     assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1789 
1790     if (isBlockVariable)
1791       V = BuildBlockByrefAddress(V, VD);
1792 
1793     LValue LV;
1794     if (VD->getType()->isReferenceType()) {
1795       llvm::LoadInst *LI = Builder.CreateLoad(V);
1796       LI->setAlignment(Alignment.getQuantity());
1797       V = LI;
1798       LV = MakeNaturalAlignAddrLValue(V, T);
1799     } else {
1800       LV = MakeAddrLValue(V, T, Alignment);
1801     }
1802 
1803     bool isLocalStorage = VD->hasLocalStorage();
1804 
1805     bool NonGCable = isLocalStorage &&
1806                      !VD->getType()->isReferenceType() &&
1807                      !isBlockVariable;
1808     if (NonGCable) {
1809       LV.getQuals().removeObjCGCAttr();
1810       LV.setNonGC(true);
1811     }
1812 
1813     bool isImpreciseLifetime =
1814       (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
1815     if (isImpreciseLifetime)
1816       LV.setARCPreciseLifetime(ARCImpreciseLifetime);
1817     setObjCGCLValueClass(getContext(), E, LV);
1818     return LV;
1819   }
1820 
1821   if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1822     return EmitFunctionDeclLValue(*this, E, fn);
1823 
1824   llvm_unreachable("Unhandled DeclRefExpr");
1825 }
1826 
1827 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1828   // __extension__ doesn't affect lvalue-ness.
1829   if (E->getOpcode() == UO_Extension)
1830     return EmitLValue(E->getSubExpr());
1831 
1832   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1833   switch (E->getOpcode()) {
1834   default: llvm_unreachable("Unknown unary operator lvalue!");
1835   case UO_Deref: {
1836     QualType T = E->getSubExpr()->getType()->getPointeeType();
1837     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1838 
1839     LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1840     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1841 
1842     // We should not generate __weak write barrier on indirect reference
1843     // of a pointer to object; as in void foo (__weak id *param); *param = 0;
1844     // But, we continue to generate __strong write barrier on indirect write
1845     // into a pointer to object.
1846     if (getLangOpts().ObjC1 &&
1847         getLangOpts().getGC() != LangOptions::NonGC &&
1848         LV.isObjCWeak())
1849       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1850     return LV;
1851   }
1852   case UO_Real:
1853   case UO_Imag: {
1854     LValue LV = EmitLValue(E->getSubExpr());
1855     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1856     llvm::Value *Addr = LV.getAddress();
1857 
1858     // __real is valid on scalars.  This is a faster way of testing that.
1859     // __imag can only produce an rvalue on scalars.
1860     if (E->getOpcode() == UO_Real &&
1861         !cast<llvm::PointerType>(Addr->getType())
1862            ->getElementType()->isStructTy()) {
1863       assert(E->getSubExpr()->getType()->isArithmeticType());
1864       return LV;
1865     }
1866 
1867     assert(E->getSubExpr()->getType()->isAnyComplexType());
1868 
1869     unsigned Idx = E->getOpcode() == UO_Imag;
1870     return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1871                                                   Idx, "idx"),
1872                           ExprTy);
1873   }
1874   case UO_PreInc:
1875   case UO_PreDec: {
1876     LValue LV = EmitLValue(E->getSubExpr());
1877     bool isInc = E->getOpcode() == UO_PreInc;
1878 
1879     if (E->getType()->isAnyComplexType())
1880       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1881     else
1882       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1883     return LV;
1884   }
1885   }
1886 }
1887 
1888 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1889   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1890                         E->getType());
1891 }
1892 
1893 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1894   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1895                         E->getType());
1896 }
1897 
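/// Create a private global holding the given wide string literal data and
/// return its address, aligned for the wide string type.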
1898 static llvm::Constant*
1899 GetAddrOfConstantWideString(StringRef Str,
1900                             const char *GlobalName,
1901                             ASTContext &Context,
1902                             QualType Ty, SourceLocation Loc,
1903                             CodeGenModule &CGM) {
1904 
1905   StringLiteral *SL = StringLiteral::Create(Context,
1906                                             Str,
1907                                             StringLiteral::Wide,
1908                                             /*Pascal = */false,
1909                                             Ty, Loc);
1910   llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
1911   llvm::GlobalVariable *GV =
1912     new llvm::GlobalVariable(CGM.getModule(), C->getType(),
1913                              !CGM.getLangOpts().WritableStrings,
1914                              llvm::GlobalValue::PrivateLinkage,
1915                              C, GlobalName);
1916   const unsigned WideAlignment =
1917     Context.getTypeAlignInChars(Ty).getQuantity();
1918   GV->setAlignment(WideAlignment);
1919   return GV;
1920 }
1921 
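/// Re-encode a UTF-8 string using CharByteWidth bytes per code unit, as
/// needed for wide-character predefined identifiers such as L__FUNCTION__.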
1922 static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
1923                                     SmallString<32>& Target) {
1924   Target.resize(CharByteWidth * (Source.size() + 1));
1925   char *ResultPtr = &Target[0];
1926   const UTF8 *ErrorPtr;
1927   bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
1928   (void)success;
1929   assert(success);
1930   Target.resize(ResultPtr - &Target[0]);
1931 }
1932 
1933 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1934   switch (E->getIdentType()) {
1935   default:
1936     return EmitUnsupportedLValue(E, "predefined expression");
1937 
1938   case PredefinedExpr::Func:
1939   case PredefinedExpr::Function:
1940   case PredefinedExpr::LFunction:
1941   case PredefinedExpr::PrettyFunction: {
1942     unsigned IdentType = E->getIdentType();
1943     std::string GlobalVarName;
1944 
1945     switch (IdentType) {
1946     default: llvm_unreachable("Invalid type");
1947     case PredefinedExpr::Func:
1948       GlobalVarName = "__func__.";
1949       break;
1950     case PredefinedExpr::Function:
1951       GlobalVarName = "__FUNCTION__.";
1952       break;
1953     case PredefinedExpr::LFunction:
1954       GlobalVarName = "L__FUNCTION__.";
1955       break;
1956     case PredefinedExpr::PrettyFunction:
1957       GlobalVarName = "__PRETTY_FUNCTION__.";
1958       break;
1959     }
1960 
1961     StringRef FnName = CurFn->getName();
1962     if (FnName.startswith("\01"))
1963       FnName = FnName.substr(1);
1964     GlobalVarName += FnName;
1965 
1966     const Decl *CurDecl = CurCodeDecl;
1967     if (CurDecl == 0)
1968       CurDecl = getContext().getTranslationUnitDecl();
1969 
1970     std::string FunctionName =
1971         (isa<BlockDecl>(CurDecl)
1972          ? FnName.str()
1973          : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
1974                                        CurDecl));
1975 
1976     const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
1977     llvm::Constant *C;
1978     if (ElemType->isWideCharType()) {
1979       SmallString<32> RawChars;
1980       ConvertUTF8ToWideString(
1981           getContext().getTypeSizeInChars(ElemType).getQuantity(),
1982           FunctionName, RawChars);
1983       C = GetAddrOfConstantWideString(RawChars,
1984                                       GlobalVarName.c_str(),
1985                                       getContext(),
1986                                       E->getType(),
1987                                       E->getLocation(),
1988                                       CGM);
1989     } else {
1990       C = CGM.GetAddrOfConstantCString(FunctionName,
1991                                        GlobalVarName.c_str(),
1992                                        1);
1993     }
1994     return MakeAddrLValue(C, E->getType());
1995   }
1996   }
1997 }
1998 
/// Emit a type description suitable for use by a runtime sanitizer library.
/// The format of a type descriptor is
2001 ///
2002 /// \code
2003 ///   { i16 TypeKind, i16 TypeInfo }
2004 /// \endcode
2005 ///
2006 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
2007 /// integer, 1 for a floating point value, and -1 for anything else.
2008 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
2009   // FIXME: Only emit each type's descriptor once.
2010   uint16_t TypeKind = -1;
2011   uint16_t TypeInfo = 0;
2012 
2013   if (T->isIntegerType()) {
2014     TypeKind = 0;
2015     TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
2016                (T->isSignedIntegerType() ? 1 : 0);
2017   } else if (T->isFloatingType()) {
2018     TypeKind = 1;
2019     TypeInfo = getContext().getTypeSize(T);
2020   }
2021 
2022   // Format the type name as if for a diagnostic, including quotes and
2023   // optionally an 'aka'.
2024   SmallString<32> Buffer;
2025   CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
2026                                     (intptr_t)T.getAsOpaquePtr(),
2027                                     0, 0, 0, 0, 0, 0, Buffer,
2028                                     ArrayRef<intptr_t>());
2029 
2030   llvm::Constant *Components[] = {
2031     Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
2032     llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
2033   };
2034   llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
2035 
2036   llvm::GlobalVariable *GV =
2037     new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(),
2038                              /*isConstant=*/true,
2039                              llvm::GlobalVariable::PrivateLinkage,
2040                              Descriptor);
2041   GV->setUnnamedAddr(true);
2042   return GV;
2043 }
2044 
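/// EmitCheckValue - Convert a value into a form that can be passed to a
/// sanitizer handler as a single intptr_t-sized argument: small integers are
/// zero-extended, floats that fit are bitcast to integers, pointers are
/// passed through, and anything else is spilled to a temporary alloca whose
/// address is passed instead.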
2045 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
2046   llvm::Type *TargetTy = IntPtrTy;
2047 
2048   // Floating-point types which fit into intptr_t are bitcast to integers
2049   // and then passed directly (after zero-extension, if necessary).
2050   if (V->getType()->isFloatingPointTy()) {
2051     unsigned Bits = V->getType()->getPrimitiveSizeInBits();
2052     if (Bits <= TargetTy->getIntegerBitWidth())
2053       V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
2054                                                          Bits));
2055   }
2056 
2057   // Integers which fit in intptr_t are zero-extended and passed directly.
2058   if (V->getType()->isIntegerTy() &&
2059       V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
2060     return Builder.CreateZExt(V, TargetTy);
2061 
2062   // Pointers are passed directly, everything else is passed by address.
2063   if (!V->getType()->isPointerTy()) {
2064     llvm::Value *Ptr = CreateTempAlloca(V->getType());
2065     Builder.CreateStore(V, Ptr);
2066     V = Ptr;
2067   }
2068   return Builder.CreatePtrToInt(V, TargetTy);
2069 }
2070 
2071 /// \brief Emit a representation of a SourceLocation for passing to a handler
2072 /// in a sanitizer runtime library. The format for this data is:
2073 /// \code
2074 ///   struct SourceLocation {
2075 ///     const char *Filename;
2076 ///     int32_t Line, Column;
2077 ///   };
2078 /// \endcode
2079 /// For an invalid SourceLocation, the Filename pointer is null.
2080 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
2081   PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
2082 
2083   llvm::Constant *Data[] = {
2084     // FIXME: Only emit each file name once.
2085     PLoc.isValid() ? cast<llvm::Constant>(
2086                        Builder.CreateGlobalStringPtr(PLoc.getFilename()))
2087                    : llvm::Constant::getNullValue(Int8PtrTy),
2088     Builder.getInt32(PLoc.getLine()),
2089     Builder.getInt32(PLoc.getColumn())
2090   };
2091 
2092   return llvm::ConstantStruct::getAnon(Data);
2093 }
2094 
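/// EmitCheck - Emit a branch on \p Checked that, when the check fails, calls
/// the __ubsan_handle_<CheckName> runtime handler (or simply traps if
/// SanitizeUndefinedTrapOnError is set).  StaticArgs are baked into a private
/// global describing the check; DynamicArgs are passed to the handler as
/// intptr_t-sized values.  RecoverKind together with the SanitizeRecover
/// option determines whether the handler call is marked noreturn.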
2095 void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
2096                                 ArrayRef<llvm::Constant *> StaticArgs,
2097                                 ArrayRef<llvm::Value *> DynamicArgs,
2098                                 CheckRecoverableKind RecoverKind) {
2099   assert(SanOpts != &SanitizerOptions::Disabled);
2100 
2101   if (CGM.getCodeGenOpts().SanitizeUndefinedTrapOnError) {
    assert(RecoverKind != CRK_AlwaysRecoverable &&
           "Runtime call required for AlwaysRecoverable kind!");
2104     return EmitTrapCheck(Checked);
2105   }
2106 
2107   llvm::BasicBlock *Cont = createBasicBlock("cont");
2108 
2109   llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
2110 
2111   llvm::Instruction *Branch = Builder.CreateCondBr(Checked, Cont, Handler);
2112 
  // Give a hint that we very much don't expect to execute the handler.
2114   // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
2115   llvm::MDBuilder MDHelper(getLLVMContext());
2116   llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
2117   Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
2118 
2119   EmitBlock(Handler);
2120 
2121   llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
2122   llvm::GlobalValue *InfoPtr =
2123       new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
2124                                llvm::GlobalVariable::PrivateLinkage, Info);
2125   InfoPtr->setUnnamedAddr(true);
2126 
2127   SmallVector<llvm::Value *, 4> Args;
2128   SmallVector<llvm::Type *, 4> ArgTypes;
2129   Args.reserve(DynamicArgs.size() + 1);
2130   ArgTypes.reserve(DynamicArgs.size() + 1);
2131 
2132   // Handler functions take an i8* pointing to the (handler-specific) static
2133   // information block, followed by a sequence of intptr_t arguments
2134   // representing operand values.
2135   Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
2136   ArgTypes.push_back(Int8PtrTy);
2137   for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
2138     Args.push_back(EmitCheckValue(DynamicArgs[i]));
2139     ArgTypes.push_back(IntPtrTy);
2140   }
2141 
2142   bool Recover = (RecoverKind == CRK_AlwaysRecoverable) ||
2143                  ((RecoverKind == CRK_Recoverable) &&
2144                    CGM.getCodeGenOpts().SanitizeRecover);
2145 
2146   llvm::FunctionType *FnType =
2147     llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
2148   llvm::AttrBuilder B;
2149   if (!Recover) {
2150     B.addAttribute(llvm::Attribute::NoReturn)
2151      .addAttribute(llvm::Attribute::NoUnwind);
2152   }
2153   B.addAttribute(llvm::Attribute::UWTable);
2154 
  // Checks that have two variants use a suffix to differentiate them.
2156   bool NeedsAbortSuffix = (RecoverKind != CRK_Unrecoverable) &&
2157                            !CGM.getCodeGenOpts().SanitizeRecover;
2158   std::string FunctionName = ("__ubsan_handle_" + CheckName +
2159                               (NeedsAbortSuffix? "_abort" : "")).str();
2160   llvm::Value *Fn =
2161     CGM.CreateRuntimeFunction(FnType, FunctionName,
2162                               llvm::AttributeSet::get(getLLVMContext(),
2163                                               llvm::AttributeSet::FunctionIndex,
2164                                                       B));
2165   llvm::CallInst *HandlerCall = EmitNounwindRuntimeCall(Fn, Args);
2166   if (Recover) {
2167     Builder.CreateBr(Cont);
2168   } else {
2169     HandlerCall->setDoesNotReturn();
2170     Builder.CreateUnreachable();
2171   }
2172 
2173   EmitBlock(Cont);
2174 }
2175 
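/// EmitTrapCheck - Emit a branch on \p Checked that calls llvm.trap when the
/// check fails.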
2176 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
2177   llvm::BasicBlock *Cont = createBasicBlock("cont");
2178 
2179   // If we're optimizing, collapse all calls to trap down to just one per
2180   // function to save on code size.
2181   if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
2182     TrapBB = createBasicBlock("trap");
2183     Builder.CreateCondBr(Checked, Cont, TrapBB);
2184     EmitBlock(TrapBB);
2185     llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
2186     llvm::CallInst *TrapCall = Builder.CreateCall(F);
2187     TrapCall->setDoesNotReturn();
2188     TrapCall->setDoesNotThrow();
2189     Builder.CreateUnreachable();
2190   } else {
2191     Builder.CreateCondBr(Checked, Cont, TrapBB);
2192   }
2193 
2194   EmitBlock(Cont);
2195 }
2196 
2197 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
2198 /// array to pointer, return the array subexpression.
2199 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
2200   // If this isn't just an array->pointer decay, bail out.
2201   const CastExpr *CE = dyn_cast<CastExpr>(E);
2202   if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
2203     return 0;
2204 
2205   // If this is a decay from variable width array, bail out.
2206   const Expr *SubExpr = CE->getSubExpr();
2207   if (SubExpr->getType()->isVariableArrayType())
2208     return 0;
2209 
2210   return SubExpr;
2211 }
2212 
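/// EmitArraySubscriptExpr - Emit an lvalue for an array subscript, handling
/// vector element access, variable-length arrays, indexing over an
/// Objective-C interface, and the common case of indexing an array directly
/// with one GEP.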
2213 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
2214                                                bool Accessed) {
2215   // The index must always be an integer, which is not an aggregate.  Emit it.
2216   llvm::Value *Idx = EmitScalarExpr(E->getIdx());
2217   QualType IdxTy  = E->getIdx()->getType();
2218   bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
2219 
2220   if (SanOpts->Bounds)
2221     EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
2222 
2223   // If the base is a vector type, then we are forming a vector element lvalue
2224   // with this subscript.
2225   if (E->getBase()->getType()->isVectorType()) {
2226     // Emit the vector as an lvalue to get its address.
2227     LValue LHS = EmitLValue(E->getBase());
2228     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
2229     Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
2230     return LValue::MakeVectorElt(LHS.getAddress(), Idx,
2231                                  E->getBase()->getType(), LHS.getAlignment());
2232   }
2233 
  // Extend or truncate the index type to 32 or 64 bits.
2235   if (Idx->getType() != IntPtrTy)
2236     Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
2237 
2238   // We know that the pointer points to a type of the correct size, unless the
2239   // size is a VLA or Objective-C interface.
2240   llvm::Value *Address = 0;
2241   CharUnits ArrayAlignment;
2242   if (const VariableArrayType *vla =
2243         getContext().getAsVariableArrayType(E->getType())) {
2244     // The base must be a pointer, which is not an aggregate.  Emit
2245     // it.  It needs to be emitted first in case it's what captures
2246     // the VLA bounds.
2247     Address = EmitScalarExpr(E->getBase());
2248 
2249     // The element count here is the total number of non-VLA elements.
2250     llvm::Value *numElements = getVLASize(vla).first;
2251 
2252     // Effectively, the multiply by the VLA size is part of the GEP.
2253     // GEP indexes are signed, and scaling an index isn't permitted to
2254     // signed-overflow, so we use the same semantics for our explicit
2255     // multiply.  We suppress this if overflow is not undefined behavior.
2256     if (getLangOpts().isSignedOverflowDefined()) {
2257       Idx = Builder.CreateMul(Idx, numElements);
2258       Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2259     } else {
2260       Idx = Builder.CreateNSWMul(Idx, numElements);
2261       Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
2262     }
2263   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
2264     // Indexing over an interface, as in "NSString *P; P[4];"
2265     llvm::Value *InterfaceSize =
2266       llvm::ConstantInt::get(Idx->getType(),
2267           getContext().getTypeSizeInChars(OIT).getQuantity());
2268 
2269     Idx = Builder.CreateMul(Idx, InterfaceSize);
2270 
2271     // The base must be a pointer, which is not an aggregate.  Emit it.
2272     llvm::Value *Base = EmitScalarExpr(E->getBase());
2273     Address = EmitCastToVoidPtr(Base);
2274     Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2275     Address = Builder.CreateBitCast(Address, Base->getType());
2276   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
2277     // If this is A[i] where A is an array, the frontend will have decayed the
  // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
2279     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
2280     // "gep x, i" here.  Emit one "gep A, 0, i".
2281     assert(Array->getType()->isArrayType() &&
2282            "Array to pointer decay must have array source type!");
2283     LValue ArrayLV;
2284     // For simple multidimensional array indexing, set the 'accessed' flag for
2285     // better bounds-checking of the base expression.
2286     if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Array))
2287       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
2288     else
2289       ArrayLV = EmitLValue(Array);
2290     llvm::Value *ArrayPtr = ArrayLV.getAddress();
2291     llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2292     llvm::Value *Args[] = { Zero, Idx };
2293 
2294     // Propagate the alignment from the array itself to the result.
2295     ArrayAlignment = ArrayLV.getAlignment();
2296 
2297     if (getLangOpts().isSignedOverflowDefined())
2298       Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
2299     else
2300       Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
2301   } else {
2302     // The base must be a pointer, which is not an aggregate.  Emit it.
2303     llvm::Value *Base = EmitScalarExpr(E->getBase());
2304     if (getLangOpts().isSignedOverflowDefined())
2305       Address = Builder.CreateGEP(Base, Idx, "arrayidx");
2306     else
2307       Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
2308   }
2309 
2310   QualType T = E->getBase()->getType()->getPointeeType();
2311   assert(!T.isNull() &&
2312          "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
2313 
2314 
2315   // Limit the alignment to that of the result type.
2316   LValue LV;
2317   if (!ArrayAlignment.isZero()) {
2318     CharUnits Align = getContext().getTypeAlignInChars(T);
2319     ArrayAlignment = std::min(Align, ArrayAlignment);
2320     LV = MakeAddrLValue(Address, T, ArrayAlignment);
2321   } else {
2322     LV = MakeNaturalAlignAddrLValue(Address, T);
2323   }
2324 
2325   LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
2326 
2327   if (getLangOpts().ObjC1 &&
2328       getLangOpts().getGC() != LangOptions::NonGC) {
2329     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2330     setObjCGCLValueClass(getContext(), E, LV);
2331   }
2332   return LV;
2333 }
2334 
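/// Build a constant vector of i32 values from the given element indices; this
/// is used to encode the element access list of an ext-vector element lvalue.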
2335 static
2336 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
2337                                        SmallVector<unsigned, 4> &Elts) {
2338   SmallVector<llvm::Constant*, 4> CElts;
2339   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
2340     CElts.push_back(Builder.getInt32(Elts[i]));
2341 
2342   return llvm::ConstantVector::get(CElts);
2343 }
2344 
2345 LValue CodeGenFunction::
2346 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
2347   // Emit the base vector as an l-value.
2348   LValue Base;
2349 
2350   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
2351   if (E->isArrow()) {
2352     // If it is a pointer to a vector, emit the address and form an lvalue with
2353     // it.
2354     llvm::Value *Ptr = EmitScalarExpr(E->getBase());
2355     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
2356     Base = MakeAddrLValue(Ptr, PT->getPointeeType());
2357     Base.getQuals().removeObjCGCAttr();
2358   } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
2360     // emit the base as an lvalue.
2361     assert(E->getBase()->getType()->isVectorType());
2362     Base = EmitLValue(E->getBase());
2363   } else {
2364     // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
2365     assert(E->getBase()->getType()->isVectorType() &&
2366            "Result must be a vector");
2367     llvm::Value *Vec = EmitScalarExpr(E->getBase());
2368 
2369     // Store the vector to memory (because LValue wants an address).
2370     llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
2371     Builder.CreateStore(Vec, VecMem);
2372     Base = MakeAddrLValue(VecMem, E->getBase()->getType());
2373   }
2374 
2375   QualType type =
2376     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2377 
2378   // Encode the element access list into a vector of unsigned indices.
2379   SmallVector<unsigned, 4> Indices;
2380   E->getEncodedElementAccess(Indices);
2381 
2382   if (Base.isSimple()) {
2383     llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
2384     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
2385                                     Base.getAlignment());
2386   }
2387   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
2388 
2389   llvm::Constant *BaseElts = Base.getExtVectorElts();
2390   SmallVector<llvm::Constant *, 4> CElts;
2391 
2392   for (unsigned i = 0, e = Indices.size(); i != e; ++i)
2393     CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
2394   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2395   return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
2396                                   Base.getAlignment());
2397 }
2398 
2399 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
2400   Expr *BaseExpr = E->getBase();
2401 
2402   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
2403   LValue BaseLV;
2404   if (E->isArrow()) {
2405     llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
2406     QualType PtrTy = BaseExpr->getType()->getPointeeType();
2407     EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
2408     BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
2409   } else
2410     BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
2411 
2412   NamedDecl *ND = E->getMemberDecl();
2413   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
2414     LValue LV = EmitLValueForField(BaseLV, Field);
2415     setObjCGCLValueClass(getContext(), E, LV);
2416     return LV;
2417   }
2418 
2419   if (VarDecl *VD = dyn_cast<VarDecl>(ND))
2420     return EmitGlobalVarDeclLValue(*this, E, VD);
2421 
2422   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
2423     return EmitFunctionDeclLValue(*this, E, FD);
2424 
2425   llvm_unreachable("Unhandled member declaration!");
2426 }
2427 
2428 /// Given that we are currently emitting a lambda, emit an l-value for
2429 /// one of its members.
2430 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
2431   assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
2432   assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
2433   QualType LambdaTagType =
2434     getContext().getTagDeclType(Field->getParent());
2435   LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
2436   return EmitLValueForField(LambdaLV, Field);
2437 }
2438 
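/// EmitLValueForField - Given the lvalue of a struct or union, produce an
/// lvalue for one of its fields.  Bit-fields get a bit-field lvalue over the
/// appropriately-typed storage unit; union members reuse the base address
/// with a bitcast; ordinary struct members are addressed with a struct GEP.
/// Alignment, CVR qualifiers, and TBAA information are derived from the base
/// lvalue and the field.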
2439 LValue CodeGenFunction::EmitLValueForField(LValue base,
2440                                            const FieldDecl *field) {
2441   if (field->isBitField()) {
2442     const CGRecordLayout &RL =
2443       CGM.getTypes().getCGRecordLayout(field->getParent());
2444     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
2445     llvm::Value *Addr = base.getAddress();
2446     unsigned Idx = RL.getLLVMFieldNo(field);
2447     if (Idx != 0)
2448       // For structs, we GEP to the field that the record layout suggests.
2449       Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
2450     // Get the access type.
2451     llvm::Type *PtrTy = llvm::Type::getIntNPtrTy(
2452       getLLVMContext(), Info.StorageSize,
2453       CGM.getContext().getTargetAddressSpace(base.getType()));
2454     if (Addr->getType() != PtrTy)
2455       Addr = Builder.CreateBitCast(Addr, PtrTy);
2456 
2457     QualType fieldType =
2458       field->getType().withCVRQualifiers(base.getVRQualifiers());
2459     return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment());
2460   }
2461 
2462   const RecordDecl *rec = field->getParent();
2463   QualType type = field->getType();
2464   CharUnits alignment = getContext().getDeclAlign(field);
2465 
2466   // FIXME: It should be impossible to have an LValue without alignment for a
2467   // complete type.
2468   if (!base.getAlignment().isZero())
2469     alignment = std::min(alignment, base.getAlignment());
2470 
2471   bool mayAlias = rec->hasAttr<MayAliasAttr>();
2472 
2473   llvm::Value *addr = base.getAddress();
2474   unsigned cvr = base.getVRQualifiers();
2475   bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA;
2476   if (rec->isUnion()) {
2477     // For unions, there is no pointer adjustment.
2478     assert(!type->isReferenceType() && "union has reference member");
2479     // TODO: handle path-aware TBAA for union.
2480     TBAAPath = false;
2481   } else {
2482     // For structs, we GEP to the field that the record layout suggests.
2483     unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
2484     addr = Builder.CreateStructGEP(addr, idx, field->getName());
2485 
2486     // If this is a reference field, load the reference right now.
2487     if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
2488       llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
2489       if (cvr & Qualifiers::Volatile) load->setVolatile(true);
2490       load->setAlignment(alignment.getQuantity());
2491 
2492       // Loading the reference will disable path-aware TBAA.
2493       TBAAPath = false;
2494       if (CGM.shouldUseTBAA()) {
2495         llvm::MDNode *tbaa;
2496         if (mayAlias)
2497           tbaa = CGM.getTBAAInfo(getContext().CharTy);
2498         else
2499           tbaa = CGM.getTBAAInfo(type);
2500         CGM.DecorateInstruction(load, tbaa);
2501       }
2502 
2503       addr = load;
2504       mayAlias = false;
2505       type = refType->getPointeeType();
2506       if (type->isIncompleteType())
2507         alignment = CharUnits();
2508       else
2509         alignment = getContext().getTypeAlignInChars(type);
2510       cvr = 0; // qualifiers don't recursively apply to referencee
2511     }
2512   }
2513 
2514   // Make sure that the address is pointing to the right type.  This is critical
2515   // for both unions and structs.  A union needs a bitcast, a struct element
2516   // will need a bitcast if the LLVM type laid out doesn't match the desired
2517   // type.
2518   addr = EmitBitCastOfLValueToProperType(*this, addr,
2519                                          CGM.getTypes().ConvertTypeForMem(type),
2520                                          field->getName());
2521 
2522   if (field->hasAttr<AnnotateAttr>())
2523     addr = EmitFieldAnnotations(field, addr);
2524 
2525   LValue LV = MakeAddrLValue(addr, type, alignment);
2526   LV.getQuals().addCVRQualifiers(cvr);
2527   if (TBAAPath) {
2528     const ASTRecordLayout &Layout =
2529         getContext().getASTRecordLayout(field->getParent());
2530     // Set the base type to be the base type of the base LValue and
2531     // update offset to be relative to the base type.
2532     LV.setTBAABaseType(mayAlias ? getContext().CharTy : base.getTBAABaseType());
2533     LV.setTBAAOffset(mayAlias ? 0 : base.getTBAAOffset() +
2534                      Layout.getFieldOffset(field->getFieldIndex()) /
2535                                            getContext().getCharWidth());
2536   }
2537 
2538   // __weak attribute on a field is ignored.
2539   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
2540     LV.getQuals().removeObjCGCAttr();
2541 
2542   // Fields of may_alias structs act like 'char' for TBAA purposes.
2543   // FIXME: this should get propagated down through anonymous structs
2544   // and unions.
2545   if (mayAlias && LV.getTBAAInfo())
2546     LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
2547 
2548   return LV;
2549 }
2550 
2551 LValue
2552 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
2553                                                   const FieldDecl *Field) {
2554   QualType FieldType = Field->getType();
2555 
2556   if (!FieldType->isReferenceType())
2557     return EmitLValueForField(Base, Field);
2558 
2559   const CGRecordLayout &RL =
2560     CGM.getTypes().getCGRecordLayout(Field->getParent());
2561   unsigned idx = RL.getLLVMFieldNo(Field);
2562   llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
2563   assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
2564 
2565   // Make sure that the address is pointing to the right type.  This is critical
2566   // for both unions and structs.  A union needs a bitcast, a struct element
2567   // will need a bitcast if the LLVM type laid out doesn't match the desired
2568   // type.
2569   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
2570   V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
2571 
2572   CharUnits Alignment = getContext().getDeclAlign(Field);
2573 
2574   // FIXME: It should be impossible to have an LValue without alignment for a
2575   // complete type.
2576   if (!Base.getAlignment().isZero())
2577     Alignment = std::min(Alignment, Base.getAlignment());
2578 
2579   return MakeAddrLValue(V, FieldType, Alignment);
2580 }
2581 
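/// EmitCompoundLiteralLValue - Emit an lvalue for a compound literal.  A
/// file-scope literal becomes a constant global; otherwise the initializer is
/// emitted into a freshly created stack temporary.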
2582 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
2583   if (E->isFileScope()) {
2584     llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
2585     return MakeAddrLValue(GlobalPtr, E->getType());
2586   }
2587   if (E->getType()->isVariablyModifiedType())
2588     // make sure to emit the VLA size.
2589     EmitVariablyModifiedType(E->getType());
2590 
2591   llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
2592   const Expr *InitExpr = E->getInitializer();
2593   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
2594 
2595   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
2596                    /*Init*/ true);
2597 
2598   return Result;
2599 }
2600 
2601 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
2602   if (!E->isGLValue())
2603     // Initializing an aggregate temporary in C++11: T{...}.
2604     return EmitAggExprToLValue(E);
2605 
2606   // An lvalue initializer list must be initializing a reference.
2607   assert(E->getNumInits() == 1 && "reference init with multiple values");
2608   return EmitLValue(E->getInit(0));
2609 }
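// Illustrative examples of the two cases above (not from the original source):
//
//   struct Point { int x, y; };
//   Point p = Point{1, 2};   // prvalue init list: aggregate temporary
//
//   int n = 0;
//   int &r = {n};            // glvalue init list: single init, reference bind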
2610 
2611 LValue CodeGenFunction::
2612 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
2613   if (!expr->isGLValue()) {
2614     // A ?: that is not a glvalue must have aggregate type here.
2615     assert(hasAggregateEvaluationKind(expr->getType()) &&
2616            "Unexpected conditional operator!");
2617     return EmitAggExprToLValue(expr);
2618   }
2619 
2620   OpaqueValueMapping binding(*this, expr);
2621 
2622   const Expr *condExpr = expr->getCond();
2623   bool CondExprBool;
2624   if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
2625     const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
2626     if (!CondExprBool) std::swap(live, dead);
2627 
2628     if (!ContainsLabel(dead))
2629       return EmitLValue(live);
2630   }
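  // Illustrative example of the folding above (not from the original source):
  //
  //   int a, b;
  //   (sizeof(int) == 4 ? a : b) = 1;  // condition folds; only the live arm
  //                                    // is emitted as an lvalue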
2631 
2632   llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
2633   llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
2634   llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
2635 
2636   ConditionalEvaluation eval(*this);
2637   EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
2638 
2639   // Any temporaries created here are conditional.
2640   EmitBlock(lhsBlock);
2641   eval.begin(*this);
2642   LValue lhs = EmitLValue(expr->getTrueExpr());
2643   eval.end(*this);
2644 
2645   if (!lhs.isSimple())
2646     return EmitUnsupportedLValue(expr, "conditional operator");
2647 
2648   lhsBlock = Builder.GetInsertBlock();
2649   Builder.CreateBr(contBlock);
2650 
2651   // Any temporaries created here are conditional.
2652   EmitBlock(rhsBlock);
2653   eval.begin(*this);
2654   LValue rhs = EmitLValue(expr->getFalseExpr());
2655   eval.end(*this);
2656   if (!rhs.isSimple())
2657     return EmitUnsupportedLValue(expr, "conditional operator");
2658   rhsBlock = Builder.GetInsertBlock();
2659 
2660   EmitBlock(contBlock);
2661 
2662   llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
2663                                          "cond-lvalue");
2664   phi->addIncoming(lhs.getAddress(), lhsBlock);
2665   phi->addIncoming(rhs.getAddress(), rhsBlock);
2666   return MakeAddrLValue(phi, expr->getType());
2667 }
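// Illustrative example for EmitConditionalOperatorLValue above (not from the
// original source):
//
//   extern bool cond();
//   int a = 0, b = 0;
//   (cond() ? a : b) = 7;  // each arm yields an address; the results are
//                          // joined by the 'cond-lvalue' phi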
2668 
2669 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
2670 /// type. If the cast is to a reference, we can have the usual lvalue result;
2671 /// otherwise, if a cast is needed by the code generator in an lvalue context,
2672 /// it must mean that we need the address of an aggregate in order to
2673 /// access one of its members.  This can happen for all the reasons that casts
2674 /// are permitted with an aggregate result, including no-op aggregate casts and
2675 /// casts from scalar to union.
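/// For example (illustrative, not from the original source):
///
///   int i = 0;
///   (const int &)i;   // cast to reference type: an ordinary lvalue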
2676 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
2677   switch (E->getCastKind()) {
2678   case CK_ToVoid:
2679     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
2680 
2681   case CK_Dependent:
2682     llvm_unreachable("dependent cast kind in IR gen!");
2683 
2684   case CK_BuiltinFnToFnPtr:
2685     llvm_unreachable("builtin functions are handled elsewhere");
2686 
2687   // These two casts are currently treated as no-ops, although they could
2688   // potentially be real operations depending on the target's ABI.
2689   case CK_NonAtomicToAtomic:
2690   case CK_AtomicToNonAtomic:
2691 
2692   case CK_NoOp:
2693   case CK_LValueToRValue:
2694     if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2695         || E->getType()->isRecordType())
2696       return EmitLValue(E->getSubExpr());
2697     // Fall through to synthesize a temporary.
2698 
2699   case CK_BitCast:
2700   case CK_ArrayToPointerDecay:
2701   case CK_FunctionToPointerDecay:
2702   case CK_NullToMemberPointer:
2703   case CK_NullToPointer:
2704   case CK_IntegralToPointer:
2705   case CK_PointerToIntegral:
2706   case CK_PointerToBoolean:
2707   case CK_VectorSplat:
2708   case CK_IntegralCast:
2709   case CK_IntegralToBoolean:
2710   case CK_IntegralToFloating:
2711   case CK_FloatingToIntegral:
2712   case CK_FloatingToBoolean:
2713   case CK_FloatingCast:
2714   case CK_FloatingRealToComplex:
2715   case CK_FloatingComplexToReal:
2716   case CK_FloatingComplexToBoolean:
2717   case CK_FloatingComplexCast:
2718   case CK_FloatingComplexToIntegralComplex:
2719   case CK_IntegralRealToComplex:
2720   case CK_IntegralComplexToReal:
2721   case CK_IntegralComplexToBoolean:
2722   case CK_IntegralComplexCast:
2723   case CK_IntegralComplexToFloatingComplex:
2724   case CK_DerivedToBaseMemberPointer:
2725   case CK_BaseToDerivedMemberPointer:
2726   case CK_MemberPointerToBoolean:
2727   case CK_ReinterpretMemberPointer:
2728   case CK_AnyPointerToBlockPointerCast:
2729   case CK_ARCProduceObject:
2730   case CK_ARCConsumeObject:
2731   case CK_ARCReclaimReturnedObject:
2732   case CK_ARCExtendBlockObject:
2733   case CK_CopyAndAutoreleaseBlockObject: {
2734     // These casts only produce lvalues when we're binding a reference to a
2735     // temporary realized from a (converted) pure rvalue. Emit the expression
2736     // as a value, copy it into a temporary, and return an lvalue referring to
2737     // that temporary.
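    // Illustrative example (not from the original source):
    //
    //   int i = 0;
    //   const long &r = i;   // 'i' converts to 'long' into a temporary;
    //                        // 'r' binds to that temporary's address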
2738     llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2739     EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2740     return MakeAddrLValue(V, E->getType());
2741   }
2742 
2743   case CK_Dynamic: {
2744     LValue LV = EmitLValue(E->getSubExpr());
2745     llvm::Value *V = LV.getAddress();
2746     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2747     return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2748   }
2749 
2750   case CK_ConstructorConversion:
2751   case CK_UserDefinedConversion:
2752   case CK_CPointerToObjCPointerCast:
2753   case CK_BlockPointerToObjCPointerCast:
2754     return EmitLValue(E->getSubExpr());
2755 
2756   case CK_UncheckedDerivedToBase:
2757   case CK_DerivedToBase: {
2758     const RecordType *DerivedClassTy =
2759       E->getSubExpr()->getType()->getAs<RecordType>();
2760     CXXRecordDecl *DerivedClassDecl =
2761       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2762 
2763     LValue LV = EmitLValue(E->getSubExpr());
2764     llvm::Value *This = LV.getAddress();
2765 
2766     // Perform the derived-to-base conversion.
2767     llvm::Value *Base =
2768       GetAddressOfBaseClass(This, DerivedClassDecl,
2769                             E->path_begin(), E->path_end(),
2770                             /*NullCheckValue=*/false);
2771 
2772     return MakeAddrLValue(Base, E->getType());
2773   }
2774   case CK_ToUnion:
2775     return EmitAggExprToLValue(E);
2776   case CK_BaseToDerived: {
2777     const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2778     CXXRecordDecl *DerivedClassDecl =
2779       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2780 
2781     LValue LV = EmitLValue(E->getSubExpr());
2782 
2783     // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
2784     // performed and the object is not of the derived type.
2785     if (SanitizePerformTypeCheck)
2786       EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
2787                     LV.getAddress(), E->getType());
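
    // Illustrative example of the checked downcast (not from the original
    // source):
    //
    //   struct Base { int b; };
    //   struct Derived : Base { int d; };
    //   extern Base &base;
    //   static_cast<Derived &>(base);  // undefined if 'base' does not refer
    //                                  // to a Derived object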
2788 
2789     // Perform the base-to-derived conversion.
2790     llvm::Value *Derived =
2791       GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2792                                E->path_begin(), E->path_end(),
2793                                /*NullCheckValue=*/false);
2794 
2795     return MakeAddrLValue(Derived, E->getType());
2796   }
2797   case CK_LValueBitCast: {
2798     // This must be a reinterpret_cast (or C-style equivalent).
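    // Illustrative example (not from the original source):
    //
    //   float f = 0;
    //   int &ri = reinterpret_cast<int &>(f);  // lvalue bitcast: same storage,
    //                                          // reinterpreted element type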
2799     const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2800 
2801     LValue LV = EmitLValue(E->getSubExpr());
2802     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2803                                            ConvertType(CE->getTypeAsWritten()));
2804     return MakeAddrLValue(V, E->getType());
2805   }
2806   case CK_ObjCObjectLValueCast: {
2807     LValue LV = EmitLValue(E->getSubExpr());
2808     QualType ToType = getContext().getLValueReferenceType(E->getType());
2809     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2810                                            ConvertType(ToType));
2811     return MakeAddrLValue(V, E->getType());
2812   }
2813   case CK_ZeroToOCLEvent:
2814     llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
2815   }
2816 
2817   llvm_unreachable("Unhandled lvalue cast kind?");
2818 }
2819 
2820 LValue CodeGenFunction::EmitNullInitializationLValue(
2821                                               const CXXScalarValueInitExpr *E) {
2822   QualType Ty = E->getType();
2823   LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2824   EmitNullInitialization(LV.getAddress(), Ty);
2825   return LV;
2826 }
2827 
2828 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2829   assert(OpaqueValueMappingData::shouldBindAsLValue(e));
2830   return getOpaqueLValueMapping(e);
2831 }
2832 
2833 RValue CodeGenFunction::EmitRValueForField(LValue LV,
2834                                            const FieldDecl *FD) {
2835   QualType FT = FD->getType();
2836   LValue FieldLV = EmitLValueForField(LV, FD);
2837   switch (getEvaluationKind(FT)) {
2838   case TEK_Complex:
2839     return RValue::getComplex(EmitLoadOfComplex(FieldLV));
2840   case TEK_Aggregate:
2841     return FieldLV.asAggregateRValue();
2842   case TEK_Scalar:
2843     return EmitLoadOfLValue(FieldLV);
2844   }
2845   llvm_unreachable("bad evaluation kind");
2846 }
2847 
2848 //===--------------------------------------------------------------------===//
2849 //                             Expression Emission
2850 //===--------------------------------------------------------------------===//
2851 
2852 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
2853                                      ReturnValueSlot ReturnValue) {
2854   if (CGDebugInfo *DI = getDebugInfo()) {
2855     SourceLocation Loc = E->getLocStart();
2856     // Force column info to be generated so we can differentiate
2857     // multiple call sites on the same line in the debug info.
2858     const FunctionDecl* Callee = E->getDirectCallee();
2859     bool ForceColumnInfo = Callee && Callee->isInlineSpecified();
2860     DI->EmitLocation(Builder, Loc, ForceColumnInfo);
2861   }
2862 
2863   // Builtins never have block type, so it is safe to handle block calls first.
2864   if (E->getCallee()->getType()->isBlockPointerType())
2865     return EmitBlockCallExpr(E, ReturnValue);
2866 
2867   if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
2868     return EmitCXXMemberCallExpr(CE, ReturnValue);
2869 
2870   if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
2871     return EmitCUDAKernelCallExpr(CE, ReturnValue);
2872 
2873   const Decl *TargetDecl = E->getCalleeDecl();
2874   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2875     if (unsigned builtinID = FD->getBuiltinID())
2876       return EmitBuiltinExpr(FD, builtinID, E);
2877   }
2878 
2879   if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
2880     if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
2881       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
2882 
2883   if (const CXXPseudoDestructorExpr *PseudoDtor
2884           = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
2885     QualType DestroyedType = PseudoDtor->getDestroyedType();
2886     if (getLangOpts().ObjCAutoRefCount &&
2887         DestroyedType->isObjCLifetimeType() &&
2888         (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
2889          DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
2890       // Automatic Reference Counting:
2891       //   If the pseudo-expression names a retainable object with weak or
2892       //   strong lifetime, the object shall be released.
2893       Expr *BaseExpr = PseudoDtor->getBase();
2894       llvm::Value *BaseValue = NULL;
2895       Qualifiers BaseQuals;
2896 
2897       // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2898       if (PseudoDtor->isArrow()) {
2899         BaseValue = EmitScalarExpr(BaseExpr);
2900         const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
2901         BaseQuals = PTy->getPointeeType().getQualifiers();
2902       } else {
2903         LValue BaseLV = EmitLValue(BaseExpr);
2904         BaseValue = BaseLV.getAddress();
2905         QualType BaseTy = BaseExpr->getType();
2906         BaseQuals = BaseTy.getQualifiers();
2907       }
2908 
2909       switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
2910       case Qualifiers::OCL_None:
2911       case Qualifiers::OCL_ExplicitNone:
2912       case Qualifiers::OCL_Autoreleasing:
2913         break;
2914 
2915       case Qualifiers::OCL_Strong:
2916         EmitARCRelease(Builder.CreateLoad(BaseValue,
2917                           PseudoDtor->getDestroyedType().isVolatileQualified()),
2918                        ARCPreciseLifetime);
2919         break;
2920 
2921       case Qualifiers::OCL_Weak:
2922         EmitARCDestroyWeak(BaseValue);
2923         break;
2924       }
2925     } else {
2926       // C++ [expr.pseudo]p1:
2927       //   The result shall only be used as the operand for the function call
2928       //   operator (), and the result of such a call has type void. The only
2929       //   effect is the evaluation of the postfix-expression before the dot or
2930       //   arrow.
2931       EmitScalarExpr(E->getCallee());
2932     }
2933 
2934     return RValue::get(0);
2935   }
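  // Illustrative examples of the pseudo-destructor handling above (not from
  // the original source):
  //
  //   typedef int I;
  //   void f(I *p) { p->~I(); }  // only 'p' is evaluated; the call is a no-op
  //
  // Under ARC, the same form applied to a __strong (or __weak) Objective-C
  // pointer lvalue additionally releases (or destroys) the pointee.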
2936 
2937   llvm::Value *Callee = EmitScalarExpr(E->getCallee());
2938   return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
2939                   E->arg_begin(), E->arg_end(), TargetDecl);
2940 }
2941 
2942 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
2943   // Comma expressions just emit their LHS then their RHS as an l-value.
2944   if (E->getOpcode() == BO_Comma) {
2945     EmitIgnoredExpr(E->getLHS());
2946     EnsureInsertPoint();
2947     return EmitLValue(E->getRHS());
2948   }
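  // Illustrative example of the comma handling above (not from the original
  // source):
  //
  //   extern void note(void);
  //   int x;
  //   (note(), x) = 1;  // LHS emitted for its effects; 'x' is the result lvalue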
2949 
2950   if (E->getOpcode() == BO_PtrMemD ||
2951       E->getOpcode() == BO_PtrMemI)
2952     return EmitPointerToDataMemberBinaryExpr(E);
2953 
2954   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
2955 
2956   // Note that in all of these cases, __block variables need the RHS
2957   // evaluated first just in case the variable gets moved by the RHS.
2958 
2959   switch (getEvaluationKind(E->getType())) {
2960   case TEK_Scalar: {
2961     switch (E->getLHS()->getType().getObjCLifetime()) {
2962     case Qualifiers::OCL_Strong:
2963       return EmitARCStoreStrong(E, /*ignored*/ false).first;
2964 
2965     case Qualifiers::OCL_Autoreleasing:
2966       return EmitARCStoreAutoreleasing(E).first;
2967 
2968     // No reason to do any of these differently.
2969     case Qualifiers::OCL_None:
2970     case Qualifiers::OCL_ExplicitNone:
2971     case Qualifiers::OCL_Weak:
2972       break;
2973     }
2974 
2975     RValue RV = EmitAnyExpr(E->getRHS());
2976     LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
2977     EmitStoreThroughLValue(RV, LV);
2978     return LV;
2979   }
2980 
2981   case TEK_Complex:
2982     return EmitComplexAssignmentLValue(E);
2983 
2984   case TEK_Aggregate:
2985     return EmitAggExprToLValue(E);
2986   }
2987   llvm_unreachable("bad evaluation kind");
2988 }
2989 
2990 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
2991   RValue RV = EmitCallExpr(E);
2992 
2993   if (!RV.isScalar())
2994     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2995 
2996   assert(E->getCallReturnType()->isReferenceType() &&
2997          "Can't have a scalar return unless the return type is a "
2998          "reference type!");
2999 
3000   return MakeAddrLValue(RV.getScalarVal(), E->getType());
3001 }
3002 
3003 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
3004   // FIXME: This shouldn't require another copy.
3005   return EmitAggExprToLValue(E);
3006 }
3007 
3008 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
3009   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
3010          && "binding l-value to type which needs a temporary");
3011   AggValueSlot Slot = CreateAggTemp(E->getType());
3012   EmitCXXConstructExpr(E, Slot);
3013   return MakeAddrLValue(Slot.getAddr(), E->getType());
3014 }
3015 
3016 LValue
3017 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
3018   return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
3019 }
3020 
3021 llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
3022   return CGM.GetAddrOfUuidDescriptor(E);
3023 }
3024 
3025 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
3026   return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
3027 }
3028 
3029 LValue
3030 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
3031   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
3032   Slot.setExternallyDestructed();
3033   EmitAggExpr(E->getSubExpr(), Slot);
3034   EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
3035   return MakeAddrLValue(Slot.getAddr(), E->getType());
3036 }
3037 
3038 LValue
3039 CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
3040   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
3041   EmitLambdaExpr(E, Slot);
3042   return MakeAddrLValue(Slot.getAddr(), E->getType());
3043 }
3044 
3045 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
3046   RValue RV = EmitObjCMessageExpr(E);
3047 
3048   if (!RV.isScalar())
3049     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
3050 
3051   assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
3052          "Can't have a scalar return unless the return type is a "
3053          "reference type!");
3054 
3055   return MakeAddrLValue(RV.getScalarVal(), E->getType());
3056 }
3057 
3058 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
3059   llvm::Value *V =
3060     CGM.getObjCRuntime().GetSelector(*this, E->getSelector(), true);
3061   return MakeAddrLValue(V, E->getType());
3062 }
3063 
3064 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
3065                                              const ObjCIvarDecl *Ivar) {
3066   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
3067 }
3068 
3069 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
3070                                           llvm::Value *BaseValue,
3071                                           const ObjCIvarDecl *Ivar,
3072                                           unsigned CVRQualifiers) {
3073   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
3074                                                    Ivar, CVRQualifiers);
3075 }
3076 
3077 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
3078   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
3079   llvm::Value *BaseValue = 0;
3080   const Expr *BaseExpr = E->getBase();
3081   Qualifiers BaseQuals;
3082   QualType ObjectTy;
3083   if (E->isArrow()) {
3084     BaseValue = EmitScalarExpr(BaseExpr);
3085     ObjectTy = BaseExpr->getType()->getPointeeType();
3086     BaseQuals = ObjectTy.getQualifiers();
3087   } else {
3088     LValue BaseLV = EmitLValue(BaseExpr);
3089     // FIXME: this isn't right for bitfields.
3090     BaseValue = BaseLV.getAddress();
3091     ObjectTy = BaseExpr->getType();
3092     BaseQuals = ObjectTy.getQualifiers();
3093   }
3094 
3095   LValue LV =
3096     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
3097                       BaseQuals.getCVRQualifiers());
3098   setObjCGCLValueClass(getContext(), E, LV);
3099   return LV;
3100 }
3101 
3102 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
3103   // We can only get an l-value for a statement expression returning an aggregate type.
3104   RValue RV = EmitAnyExprToTemp(E);
3105   return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
3106 }
3107 
3108 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
3109                                  ReturnValueSlot ReturnValue,
3110                                  CallExpr::const_arg_iterator ArgBeg,
3111                                  CallExpr::const_arg_iterator ArgEnd,
3112                                  const Decl *TargetDecl) {
3113   // Get the actual function type. The callee type will always be a pointer to
3114   // function type or a block pointer type.
3115   assert(CalleeType->isFunctionPointerType() &&
3116          "Call must have function pointer type!");
3117 
3118   CalleeType = getContext().getCanonicalType(CalleeType);
3119 
3120   const FunctionType *FnType
3121     = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
3122 
3123   CallArgList Args;
3124   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
3125 
3126   const CGFunctionInfo &FnInfo =
3127     CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
3128 
3129   // C99 6.5.2.2p6:
3130   //   If the expression that denotes the called function has a type
3131   //   that does not include a prototype, [the default argument
3132   //   promotions are performed]. If the number of arguments does not
3133   //   equal the number of parameters, the behavior is undefined. If
3134   //   the function is defined with a type that includes a prototype,
3135   //   and either the prototype ends with an ellipsis (, ...) or the
3136   //   types of the arguments after promotion are not compatible with
3137   //   the types of the parameters, the behavior is undefined. If the
3138   //   function is defined with a type that does not include a
3139   //   prototype, and the types of the arguments after promotion are
3140   //   not compatible with those of the parameters after promotion,
3141   //   the behavior is undefined [except in some trivial cases].
3142   // That is, in the general case, we should assume that a call
3143   // through an unprototyped function type works like a *non-variadic*
3144   // call.  The way we make this work is to cast to the exact type
3145   // of the promoted arguments.
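  // Illustrative example (not from the original source), assuming a K&R-style
  // callee:
  //
  //   void f();               /* declared without a prototype */
  //   void g(void) { f(1, 2.0); }
  //
  // The cast below converts 'f' to a pointer to the exact function type of the
  // promoted arguments, e.g. 'void (i32, double)*'.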
3146   if (isa<FunctionNoProtoType>(FnType)) {
3147     llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
3148     CalleeTy = CalleeTy->getPointerTo();
3149     Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
3150   }
3151 
3152   return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
3153 }
3154 
3155 LValue CodeGenFunction::
3156 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
3157   llvm::Value *BaseV;
3158   if (E->getOpcode() == BO_PtrMemI)
3159     BaseV = EmitScalarExpr(E->getLHS());
3160   else
3161     BaseV = EmitLValue(E->getLHS()).getAddress();
3162 
3163   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
3164 
3165   const MemberPointerType *MPT
3166     = E->getRHS()->getType()->getAs<MemberPointerType>();
3167 
3168   llvm::Value *AddV =
3169     CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
3170 
3171   return MakeAddrLValue(AddV, MPT->getPointeeType());
3172 }
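// Illustrative example for EmitPointerToDataMemberBinaryExpr above (not from
// the original source):
//
//   struct S { int m; };
//   int S::*pm = &S::m;
//   S s;
//   s.*pm = 3;    // BO_PtrMemD: base address plus member-pointer offset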
3173 
3174 /// Given the address of a temporary variable, produce an r-value of
3175 /// its type.
3176 RValue CodeGenFunction::convertTempToRValue(llvm::Value *addr,
3177                                             QualType type) {
3178   LValue lvalue = MakeNaturalAlignAddrLValue(addr, type);
3179   switch (getEvaluationKind(type)) {
3180   case TEK_Complex:
3181     return RValue::getComplex(EmitLoadOfComplex(lvalue));
3182   case TEK_Aggregate:
3183     return lvalue.asAggregateRValue();
3184   case TEK_Scalar:
3185     return RValue::get(EmitLoadOfScalar(lvalue));
3186   }
3187   llvm_unreachable("bad evaluation kind");
3188 }
3189 
3190 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
3191   assert(Val->getType()->isFPOrFPVectorTy());
3192   if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
3193     return;
3194 
3195   llvm::MDBuilder MDHelper(getLLVMContext());
3196   llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
3197 
3198   cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
3199 }
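// Illustrative effect of SetFPAccuracy (not from the original source): calling
// SetFPAccuracy(Val, 2.5f) on a floating-point instruction attaches !fpmath
// metadata, roughly
//
//   %div = fdiv float %a, %b, !fpmath !0
//   !0 = metadata !{float 2.500000e+00}
//
// which permits the backend to use an implementation accurate to 2.5 ULPs.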
3200 
3201 namespace {
3202   struct LValueOrRValue {
3203     LValue LV;
3204     RValue RV;
3205   };
3206 }
3207 
3208 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
3209                                            const PseudoObjectExpr *E,
3210                                            bool forLValue,
3211                                            AggValueSlot slot) {
3212   SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
3213 
3214   // Find the result expression, if any.
3215   const Expr *resultExpr = E->getResultExpr();
3216   LValueOrRValue result;
3217 
3218   for (PseudoObjectExpr::const_semantics_iterator
3219          i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
3220     const Expr *semantic = *i;
3221 
3222     // If this semantic expression is an opaque value, bind it
3223     // to the result of its source expression.
3224     if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
3225 
3226       // If this is the result expression, we may need to evaluate
3227       // directly into the slot.
3228       typedef CodeGenFunction::OpaqueValueMappingData OVMA;
3229       OVMA opaqueData;
3230       if (ov == resultExpr && ov->isRValue() && !forLValue &&
3231           CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
3232         CGF.EmitAggExpr(ov->getSourceExpr(), slot);
3233 
3234         LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
3235         opaqueData = OVMA::bind(CGF, ov, LV);
3236         result.RV = slot.asRValue();
3237 
3238       // Otherwise, emit as normal.
3239       } else {
3240         opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
3241 
3242         // If this is the result, also evaluate the result now.
3243         if (ov == resultExpr) {
3244           if (forLValue)
3245             result.LV = CGF.EmitLValue(ov);
3246           else
3247             result.RV = CGF.EmitAnyExpr(ov, slot);
3248         }
3249       }
3250 
3251       opaques.push_back(opaqueData);
3252 
3253     // Otherwise, if the expression is the result, evaluate it
3254     // and remember the result.
3255     } else if (semantic == resultExpr) {
3256       if (forLValue)
3257         result.LV = CGF.EmitLValue(semantic);
3258       else
3259         result.RV = CGF.EmitAnyExpr(semantic, slot);
3260 
3261     // Otherwise, evaluate the expression in an ignored context.
3262     } else {
3263       CGF.EmitIgnoredExpr(semantic);
3264     }
3265   }
3266 
3267   // Unbind all the opaques now.
3268   for (unsigned i = 0, e = opaques.size(); i != e; ++i)
3269     opaques[i].unbind(CGF);
3270 
3271   return result;
3272 }
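// Illustrative example of a pseudo-object expression (not from the original
// source): an Objective-C property compound assignment such as
//
//   obj.count += 1;
//
// is rewritten into semantic expressions (getter call, addition, setter call)
// with OpaqueValueExprs standing in for the shared subexpressions; the loop
// above binds those opaques and evaluates the result expression.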
3273 
3274 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
3275                                                AggValueSlot slot) {
3276   return emitPseudoObjectExpr(*this, E, false, slot).RV;
3277 }
3278 
3279 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
3280   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
3281 }
3282