1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGCall.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/ASTContext.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/ConvertUTF.h"
25 #include "clang/Frontend/CodeGenOptions.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/LLVMContext.h"
28 #include "llvm/MDBuilder.h"
29 #include "llvm/DataLayout.h"
30 #include "llvm/ADT/Hashing.h"
31 using namespace clang;
32 using namespace CodeGen;
33 
34 //===--------------------------------------------------------------------===//
35 //                        Miscellaneous Helper Methods
36 //===--------------------------------------------------------------------===//
37 
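/// EmitCastToVoidPtr - Bitcast the given pointer to an i8* in the pointer's
/// own address space, e.g. (roughly):
///   %0 = bitcast i32 addrspace(3)* %p to i8 addrspace(3)*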
38 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
39   unsigned addressSpace =
40     cast<llvm::PointerType>(value->getType())->getAddressSpace();
41 
42   llvm::PointerType *destType = Int8PtrTy;
43   if (addressSpace)
44     destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
45 
46   if (value->getType() == destType) return value;
47   return Builder.CreateBitCast(value, destType);
48 }
49 
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
52 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
53                                                     const Twine &Name) {
54   if (!Builder.isNamePreserving())
55     return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
56   return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
57 }
58 
59 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
60                                      llvm::Value *Init) {
61   llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
62   llvm::BasicBlock *Block = AllocaInsertPt->getParent();
63   Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
64 }
65 
66 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
67                                                 const Twine &Name) {
68   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
69   // FIXME: Should we prefer the preferred type alignment here?
70   CharUnits Align = getContext().getTypeAlignInChars(Ty);
71   Alloc->setAlignment(Align.getQuantity());
72   return Alloc;
73 }
74 
75 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
76                                                  const Twine &Name) {
77   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
78   // FIXME: Should we prefer the preferred type alignment here?
79   CharUnits Align = getContext().getTypeAlignInChars(Ty);
80   Alloc->setAlignment(Align.getQuantity());
81   return Alloc;
82 }
83 
84 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
85 /// expression and compare the result against zero, returning an Int1Ty value.
86 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
87   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
88     llvm::Value *MemPtr = EmitScalarExpr(E);
89     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
90   }
91 
92   QualType BoolTy = getContext().BoolTy;
93   if (!E->getType()->isAnyComplexType())
94     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
95 
96   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
97 }
98 
99 /// EmitIgnoredExpr - Emit code to compute the specified expression,
100 /// ignoring the result.
101 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
102   if (E->isRValue())
103     return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
104 
105   // Just emit it as an l-value and drop the result.
106   EmitLValue(E);
107 }
108 
109 /// EmitAnyExpr - Emit code to compute the specified expression which
110 /// can have any type.  The result is returned as an RValue struct.
111 /// If this is an aggregate expression, AggSlot indicates where the
112 /// result should be returned.
113 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
114                                     AggValueSlot aggSlot,
115                                     bool ignoreResult) {
116   if (!hasAggregateLLVMType(E->getType()))
117     return RValue::get(EmitScalarExpr(E, ignoreResult));
118   else if (E->getType()->isAnyComplexType())
119     return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
120 
121   if (!ignoreResult && aggSlot.isIgnored())
122     aggSlot = CreateAggTemp(E->getType(), "agg-temp");
123   EmitAggExpr(E, aggSlot);
124   return aggSlot.asRValue();
125 }
126 
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), but the result will
/// always be accessible even if no aggregate location is provided.
129 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
130   AggValueSlot AggSlot = AggValueSlot::ignored();
131 
132   if (hasAggregateLLVMType(E->getType()) &&
133       !E->getType()->isAnyComplexType())
134     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
135   return EmitAnyExpr(E, AggSlot);
136 }
137 
138 /// EmitAnyExprToMem - Evaluate an expression into a given memory
139 /// location.
140 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
141                                        llvm::Value *Location,
142                                        Qualifiers Quals,
143                                        bool IsInit) {
144   // FIXME: This function should take an LValue as an argument.
145   if (E->getType()->isAnyComplexType()) {
146     EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
147   } else if (hasAggregateLLVMType(E->getType())) {
148     CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
149     EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
150                                          AggValueSlot::IsDestructed_t(IsInit),
151                                          AggValueSlot::DoesNotNeedGCBarriers,
152                                          AggValueSlot::IsAliased_t(!IsInit)));
153   } else {
154     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
155     LValue LV = MakeAddrLValue(Location, E->getType());
156     EmitStoreThroughLValue(RV, LV);
157   }
158 }
159 
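/// Create storage for a temporary to which a reference is being bound: a
/// mangled global when the initialized declaration has static storage
/// duration, otherwise an ordinary function-local temporary.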
160 static llvm::Value *
161 CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
162                          const NamedDecl *InitializedDecl) {
163   if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
164     if (VD->hasGlobalStorage()) {
165       SmallString<256> Name;
166       llvm::raw_svector_ostream Out(Name);
167       CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
168       Out.flush();
169 
170       llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
171 
172       // Create the reference temporary.
173       llvm::GlobalValue *RefTemp =
174         new llvm::GlobalVariable(CGF.CGM.getModule(),
175                                  RefTempTy, /*isConstant=*/false,
176                                  llvm::GlobalValue::InternalLinkage,
177                                  llvm::Constant::getNullValue(RefTempTy),
178                                  Name.str());
179       return RefTemp;
180     }
181   }
182 
183   return CGF.CreateMemTemp(Type, "ref.tmp");
184 }
185 
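/// Emit the expression E for the purpose of binding a reference to it,
/// returning the address the reference should bind to.  Any temporary
/// created along the way (and the destructor it may require) is reported
/// through ReferenceTemporary and ReferenceTemporaryDtor.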
186 static llvm::Value *
187 EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
188                             llvm::Value *&ReferenceTemporary,
189                             const CXXDestructorDecl *&ReferenceTemporaryDtor,
190                             QualType &ObjCARCReferenceLifetimeType,
191                             const NamedDecl *InitializedDecl) {
192   const MaterializeTemporaryExpr *M = NULL;
193   E = E->findMaterializedTemporary(M);
194   // Objective-C++ ARC:
195   //   If we are binding a reference to a temporary that has ownership, we
196   //   need to perform retain/release operations on the temporary.
197   if (M && CGF.getLangOpts().ObjCAutoRefCount &&
198       M->getType()->isObjCLifetimeType() &&
199       (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
200        M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
201        M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
202     ObjCARCReferenceLifetimeType = M->getType();
203 
204   if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
205     CGF.enterFullExpression(EWC);
206     CodeGenFunction::RunCleanupsScope Scope(CGF);
207 
208     return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
209                                        ReferenceTemporary,
210                                        ReferenceTemporaryDtor,
211                                        ObjCARCReferenceLifetimeType,
212                                        InitializedDecl);
213   }
214 
215   RValue RV;
216   if (E->isGLValue()) {
217     // Emit the expression as an lvalue.
218     LValue LV = CGF.EmitLValue(E);
219 
220     if (LV.isSimple())
221       return LV.getAddress();
222 
223     // We have to load the lvalue.
224     RV = CGF.EmitLoadOfLValue(LV);
225   } else {
226     if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary =
          CreateReferenceTemporary(CGF, ObjCARCReferenceLifetimeType,
                                   InitializedDecl);

232       LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
233                                              ObjCARCReferenceLifetimeType);
234 
235       CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
236                          RefTempDst, false);
237 
238       bool ExtendsLifeOfTemporary = false;
239       if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
240         if (Var->extendsLifetimeOfTemporary())
241           ExtendsLifeOfTemporary = true;
242       } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
243         ExtendsLifeOfTemporary = true;
244       }
245 
246       if (!ExtendsLifeOfTemporary) {
247         // Since the lifetime of this temporary isn't going to be extended,
248         // we need to clean it up ourselves at the end of the full expression.
249         switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
250         case Qualifiers::OCL_None:
251         case Qualifiers::OCL_ExplicitNone:
252         case Qualifiers::OCL_Autoreleasing:
253           break;
254 
255         case Qualifiers::OCL_Strong: {
256           assert(!ObjCARCReferenceLifetimeType->isArrayType());
257           CleanupKind cleanupKind = CGF.getARCCleanupKind();
258           CGF.pushDestroy(cleanupKind,
259                           ReferenceTemporary,
260                           ObjCARCReferenceLifetimeType,
261                           CodeGenFunction::destroyARCStrongImprecise,
262                           cleanupKind & EHCleanup);
263           break;
264         }
265 
266         case Qualifiers::OCL_Weak:
267           assert(!ObjCARCReferenceLifetimeType->isArrayType());
268           CGF.pushDestroy(NormalAndEHCleanup,
269                           ReferenceTemporary,
270                           ObjCARCReferenceLifetimeType,
271                           CodeGenFunction::destroyARCWeak,
272                           /*useEHCleanupForArray*/ true);
273           break;
274         }
275 
276         ObjCARCReferenceLifetimeType = QualType();
277       }
278 
279       return ReferenceTemporary;
280     }
281 
282     SmallVector<SubobjectAdjustment, 2> Adjustments;
283     E = E->skipRValueSubobjectAdjustments(Adjustments);
284     if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
285       if (opaque->getType()->isRecordType())
286         return CGF.EmitOpaqueValueLValue(opaque).getAddress();
287 
288     // Create a reference temporary if necessary.
289     AggValueSlot AggSlot = AggValueSlot::ignored();
290     if (CGF.hasAggregateLLVMType(E->getType()) &&
291         !E->getType()->isAnyComplexType()) {
292       ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
293                                                     InitializedDecl);
294       CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
295       AggValueSlot::IsDestructed_t isDestructed
296         = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
297       AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
298                                       Qualifiers(), isDestructed,
299                                       AggValueSlot::DoesNotNeedGCBarriers,
300                                       AggValueSlot::IsNotAliased);
301     }
302 
303     if (InitializedDecl) {
304       // Get the destructor for the reference temporary.
305       if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
306         CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
307         if (!ClassDecl->hasTrivialDestructor())
308           ReferenceTemporaryDtor = ClassDecl->getDestructor();
309       }
310     }
311 
312     RV = CGF.EmitAnyExpr(E, AggSlot);
313 
    // Check whether we need to perform derived-to-base casts and/or field
    // accesses to get from the temporary object we created (and, potentially,
    // for which we extended the lifetime) to the subobject we're binding the
    // reference to.
317     if (!Adjustments.empty()) {
318       llvm::Value *Object = RV.getAggregateAddr();
319       for (unsigned I = Adjustments.size(); I != 0; --I) {
320         SubobjectAdjustment &Adjustment = Adjustments[I-1];
321         switch (Adjustment.Kind) {
322         case SubobjectAdjustment::DerivedToBaseAdjustment:
323           Object =
324               CGF.GetAddressOfBaseClass(Object,
325                                         Adjustment.DerivedToBase.DerivedClass,
326                               Adjustment.DerivedToBase.BasePath->path_begin(),
327                               Adjustment.DerivedToBase.BasePath->path_end(),
328                                         /*NullCheckValue=*/false);
329           break;
330 
331         case SubobjectAdjustment::FieldAdjustment: {
332           LValue LV = CGF.MakeAddrLValue(Object, E->getType());
333           LV = CGF.EmitLValueForField(LV, Adjustment.Field);
334           if (LV.isSimple()) {
335             Object = LV.getAddress();
336             break;
337           }
338 
339           // For non-simple lvalues, we actually have to create a copy of
340           // the object we're binding to.
341           QualType T = Adjustment.Field->getType().getNonReferenceType()
342                                                   .getUnqualifiedType();
343           Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
344           LValue TempLV = CGF.MakeAddrLValue(Object,
345                                              Adjustment.Field->getType());
346           CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
347           break;
348         }
349 
350         case SubobjectAdjustment::MemberPointerAdjustment: {
351           llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
352           Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
353                         CGF, Object, Ptr, Adjustment.Ptr.MPT);
354           break;
355         }
356         }
357       }
358 
359       return Object;
360     }
361   }
362 
363   if (RV.isAggregate())
364     return RV.getAggregateAddr();
365 
366   // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

371   unsigned Alignment =
372     CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
373   if (RV.isScalar())
374     CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
375                           /*Volatile=*/false, Alignment, E->getType());
376   else
377     CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
378                            /*Volatile=*/false);
379   return ReferenceTemporary;
380 }
381 
382 RValue
383 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
384                                             const NamedDecl *InitializedDecl) {
385   llvm::Value *ReferenceTemporary = 0;
386   const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
387   QualType ObjCARCReferenceLifetimeType;
388   llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
389                                                    ReferenceTemporaryDtor,
390                                                    ObjCARCReferenceLifetimeType,
391                                                    InitializedDecl);
392   if (CatchUndefined && !E->getType()->isFunctionType()) {
393     // C++11 [dcl.ref]p5 (as amended by core issue 453):
394     //   If a glvalue to which a reference is directly bound designates neither
395     //   an existing object or function of an appropriate type nor a region of
396     //   storage of suitable size and alignment to contain an object of the
397     //   reference's type, the behavior is undefined.
398     QualType Ty = E->getType();
399     EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
400   }
401   if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
402     return RValue::get(Value);
403 
404   // Make sure to call the destructor for the reference temporary.
405   const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
406   if (VD && VD->hasGlobalStorage()) {
407     if (ReferenceTemporaryDtor) {
408       llvm::Constant *DtorFn =
409         CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
410       CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
411                                     cast<llvm::Constant>(ReferenceTemporary));
412     } else {
413       assert(!ObjCARCReferenceLifetimeType.isNull());
414       // Note: We intentionally do not register a global "destructor" to
415       // release the object.
416     }
417 
418     return RValue::get(Value);
419   }
420 
421   if (ReferenceTemporaryDtor)
422     PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
423   else {
424     switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
425     case Qualifiers::OCL_None:
426       llvm_unreachable(
427                       "Not a reference temporary that needs to be deallocated");
428     case Qualifiers::OCL_ExplicitNone:
429     case Qualifiers::OCL_Autoreleasing:
430       // Nothing to do.
431       break;
432 
433     case Qualifiers::OCL_Strong: {
434       bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
435       CleanupKind cleanupKind = getARCCleanupKind();
436       pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
437                   precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
438                   cleanupKind & EHCleanup);
439       break;
440     }
441 
442     case Qualifiers::OCL_Weak: {
443       // __weak objects always get EH cleanups; otherwise, exceptions
444       // could cause really nasty crashes instead of mere leaks.
445       pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
446                   ObjCARCReferenceLifetimeType, destroyARCWeak, true);
447       break;
448     }
449     }
450   }
451 
452   return RValue::get(Value);
453 }
454 
455 
456 /// getAccessedFieldNo - Given an encoded value and a result number, return the
457 /// input field number being accessed.
458 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
459                                              const llvm::Constant *Elts) {
460   return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
461       ->getZExtValue();
462 }
463 
464 /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
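/// In scalar form this is roughly:
///   a = (low ^ high) * kMul;  a ^= (a >> 47);
///   b = (high ^ a) * kMul;    b ^= (b >> 47);
///   return b * kMul;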
465 static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
466                                     llvm::Value *High) {
467   llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
468   llvm::Value *K47 = Builder.getInt64(47);
469   llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
470   llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
471   llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
472   llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
473   return Builder.CreateMul(B1, KMul);
474 }
475 
476 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
477                                     llvm::Value *Address,
478                                     QualType Ty, CharUnits Alignment) {
479   if (!CatchUndefined)
480     return;
481 
482   // Don't check pointers outside the default address space. The null check
483   // isn't correct, the object-size check isn't supported by LLVM, and we can't
484   // communicate the addresses to the runtime handler for the vptr check.
485   if (Address->getType()->getPointerAddressSpace())
486     return;
487 
488   llvm::Value *Cond = 0;
489 
490   // The glvalue must not be an empty glvalue.
491   Cond = Builder.CreateICmpNE(
492     Address, llvm::Constant::getNullValue(Address->getType()));
493 
494   uint64_t AlignVal = Alignment.getQuantity();
495 
496   if (!Ty->isIncompleteType()) {
497     uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
498     if (!AlignVal)
499       AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
500 
501     // The glvalue must refer to a large enough storage region.
502     // FIXME: If -faddress-sanitizer is enabled, insert dynamic instrumentation
503     //        to check this.
504     llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
505     llvm::Value *Min = Builder.getFalse();
506     llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
507     llvm::Value *LargeEnough =
508         Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
509                               llvm::ConstantInt::get(IntPtrTy, Size));
510     Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
511   }
512 
513   if (AlignVal) {
514     // The glvalue must be suitably aligned.
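    // Since AlignVal is a power of two, the glvalue is aligned iff
    // (Address & (AlignVal - 1)) == 0.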
515     llvm::Value *Align =
516         Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
517                           llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
518     Cond = Builder.CreateAnd(Cond,
519         Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)));
520   }
521 
522   if (Cond) {
523     llvm::Constant *StaticData[] = {
524       EmitCheckSourceLocation(Loc),
525       EmitCheckTypeDescriptor(Ty),
526       llvm::ConstantInt::get(SizeTy, AlignVal),
527       llvm::ConstantInt::get(Int8Ty, TCK)
528     };
529     EmitCheck(Cond, "type_mismatch", StaticData, Address);
530   }
531 
532   CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
533   if (TCK != TCK_ConstructorCall &&
534       RD && RD->hasDefinition() && RD->isDynamicClass()) {
535     // Check that the vptr indicates that there is a subobject of type Ty at
536     // offset zero within this object.
537     // FIXME: Produce a diagnostic if the user tries to combine this check with
538     //        -fno-rtti.
539 
540     // Compute a hash of the mangled name of the type.
541     //
542     // FIXME: This is not guaranteed to be deterministic! Move to a
543     //        fingerprinting mechanism once LLVM provides one. For the time
544     //        being the implementation happens to be deterministic.
545     llvm::SmallString<64> MangledName;
546     llvm::raw_svector_ostream Out(MangledName);
547     CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
548                                                      Out);
549     llvm::hash_code TypeHash = hash_value(Out.str());
550 
551     // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
552     llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
553     llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
554     llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
555     llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
556     llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
557 
558     llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
559     Hash = Builder.CreateTrunc(Hash, IntPtrTy);
560 
561     // Look the hash up in our cache.
562     const int CacheSize = 128;
563     llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
564     llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
565                                                    "__ubsan_vptr_type_cache");
566     llvm::Value *Slot = Builder.CreateAnd(Hash,
567                                           llvm::ConstantInt::get(IntPtrTy,
568                                                                  CacheSize-1));
569     llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
570     llvm::Value *CacheVal =
571       Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));
572 
573     // If the hash isn't in the cache, call a runtime handler to perform the
574     // hard work of checking whether the vptr is for an object of the right
575     // type. This will either fill in the cache and return, or produce a
576     // diagnostic.
577     llvm::Constant *StaticData[] = {
578       EmitCheckSourceLocation(Loc),
579       EmitCheckTypeDescriptor(Ty),
580       CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
581       llvm::ConstantInt::get(Int8Ty, TCK)
582     };
583     llvm::Value *DynamicData[] = { Address, Hash };
584     EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
585               "dynamic_type_cache_miss", StaticData, DynamicData, true);
586   }
587 }
588 
589 
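/// EmitComplexPrePostIncDec - Emit a pre/post increment or decrement of a
/// complex lvalue.  Only the real component is adjusted, so e.g. ++z
/// computes (z.real + 1, z.imag).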
590 CodeGenFunction::ComplexPairTy CodeGenFunction::
591 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
592                          bool isInc, bool isPre) {
593   ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
594                                             LV.isVolatileQualified());
595 
596   llvm::Value *NextVal;
597   if (isa<llvm::IntegerType>(InVal.first->getType())) {
    int64_t AmountVal = isInc ? 1 : -1;
599     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
600 
601     // Add the inc/dec to the real part.
602     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
603   } else {
604     QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
605     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
606     if (!isInc)
607       FVal.changeSign();
608     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
609 
610     // Add the inc/dec to the real part.
611     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
612   }
613 
614   ComplexPairTy IncVal(NextVal, InVal.second);
615 
616   // Store the updated result through the lvalue.
617   StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
618 
619   // If this is a postinc, return the value read from memory, otherwise use the
620   // updated value.
621   return isPre ? IncVal : InVal;
622 }
623 
624 
625 //===----------------------------------------------------------------------===//
626 //                         LValue Expression Emission
627 //===----------------------------------------------------------------------===//
628 
629 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
630   if (Ty->isVoidType())
631     return RValue::get(0);
632 
633   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
634     llvm::Type *EltTy = ConvertType(CTy->getElementType());
635     llvm::Value *U = llvm::UndefValue::get(EltTy);
636     return RValue::getComplex(std::make_pair(U, U));
637   }
638 
639   // If this is a use of an undefined aggregate type, the aggregate must have an
640   // identifiable address.  Just because the contents of the value are undefined
641   // doesn't mean that the address can't be taken and compared.
642   if (hasAggregateLLVMType(Ty)) {
643     llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
644     return RValue::getAggregate(DestPtr);
645   }
646 
647   return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
648 }
649 
650 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
651                                               const char *Name) {
652   ErrorUnsupported(E, Name);
653   return GetUndefRValue(E->getType());
654 }
655 
656 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
657                                               const char *Name) {
658   ErrorUnsupported(E, Name);
659   llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
660   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
661 }
662 
663 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
664   LValue LV = EmitLValue(E);
665   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
666     EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
667                   E->getType(), LV.getAlignment());
668   return LV;
669 }
670 
671 /// EmitLValue - Emit code to compute a designator that specifies the location
672 /// of the expression.
673 ///
674 /// This can return one of two things: a simple address or a bitfield reference.
675 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
676 /// an LLVM pointer type.
677 ///
678 /// If this returns a bitfield reference, nothing about the pointee type of the
679 /// LLVM value is known: For example, it may not be a pointer to an integer.
680 ///
681 /// If this returns a normal address, and if the lvalue's C type is fixed size,
682 /// this method guarantees that the returned pointer type will point to an LLVM
683 /// type of the same size of the lvalue's type.  If the lvalue has a variable
684 /// length type, this is not possible.
685 ///
686 LValue CodeGenFunction::EmitLValue(const Expr *E) {
687   switch (E->getStmtClass()) {
688   default: return EmitUnsupportedLValue(E, "l-value expression");
689 
690   case Expr::ObjCPropertyRefExprClass:
691     llvm_unreachable("cannot emit a property reference directly");
692 
693   case Expr::ObjCSelectorExprClass:
694     return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
695   case Expr::ObjCIsaExprClass:
696     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
697   case Expr::BinaryOperatorClass:
698     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
699   case Expr::CompoundAssignOperatorClass:
700     if (!E->getType()->isAnyComplexType())
701       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
702     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
703   case Expr::CallExprClass:
704   case Expr::CXXMemberCallExprClass:
705   case Expr::CXXOperatorCallExprClass:
706   case Expr::UserDefinedLiteralClass:
707     return EmitCallExprLValue(cast<CallExpr>(E));
708   case Expr::VAArgExprClass:
709     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
710   case Expr::DeclRefExprClass:
711     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
712   case Expr::ParenExprClass:
713     return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
714   case Expr::GenericSelectionExprClass:
715     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
716   case Expr::PredefinedExprClass:
717     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
718   case Expr::StringLiteralClass:
719     return EmitStringLiteralLValue(cast<StringLiteral>(E));
720   case Expr::ObjCEncodeExprClass:
721     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
722   case Expr::PseudoObjectExprClass:
723     return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
724   case Expr::InitListExprClass:
725     return EmitInitListLValue(cast<InitListExpr>(E));
726   case Expr::CXXTemporaryObjectExprClass:
727   case Expr::CXXConstructExprClass:
728     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
729   case Expr::CXXBindTemporaryExprClass:
730     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
731   case Expr::CXXUuidofExprClass:
732     return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
733   case Expr::LambdaExprClass:
734     return EmitLambdaLValue(cast<LambdaExpr>(E));
735 
736   case Expr::ExprWithCleanupsClass: {
737     const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
738     enterFullExpression(cleanups);
739     RunCleanupsScope Scope(*this);
740     return EmitLValue(cleanups->getSubExpr());
741   }
742 
743   case Expr::CXXScalarValueInitExprClass:
744     return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
745   case Expr::CXXDefaultArgExprClass:
746     return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
747   case Expr::CXXTypeidExprClass:
748     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
749 
750   case Expr::ObjCMessageExprClass:
751     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
752   case Expr::ObjCIvarRefExprClass:
753     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
754   case Expr::StmtExprClass:
755     return EmitStmtExprLValue(cast<StmtExpr>(E));
756   case Expr::UnaryOperatorClass:
757     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
758   case Expr::ArraySubscriptExprClass:
759     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
760   case Expr::ExtVectorElementExprClass:
761     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
762   case Expr::MemberExprClass:
763     return EmitMemberExpr(cast<MemberExpr>(E));
764   case Expr::CompoundLiteralExprClass:
765     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
766   case Expr::ConditionalOperatorClass:
767     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
768   case Expr::BinaryConditionalOperatorClass:
769     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
770   case Expr::ChooseExprClass:
771     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
772   case Expr::OpaqueValueExprClass:
773     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
774   case Expr::SubstNonTypeTemplateParmExprClass:
775     return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
776   case Expr::ImplicitCastExprClass:
777   case Expr::CStyleCastExprClass:
778   case Expr::CXXFunctionalCastExprClass:
779   case Expr::CXXStaticCastExprClass:
780   case Expr::CXXDynamicCastExprClass:
781   case Expr::CXXReinterpretCastExprClass:
782   case Expr::CXXConstCastExprClass:
783   case Expr::ObjCBridgedCastExprClass:
784     return EmitCastLValue(cast<CastExpr>(E));
785 
786   case Expr::MaterializeTemporaryExprClass:
787     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
788   }
789 }
790 
791 /// Given an object of the given canonical type, can we safely copy a
792 /// value out of it based on its initializer?
793 static bool isConstantEmittableObjectType(QualType type) {
794   assert(type.isCanonical());
795   assert(!type->isReferenceType());
796 
797   // Must be const-qualified but non-volatile.
798   Qualifiers qs = type.getLocalQualifiers();
799   if (!qs.hasConst() || qs.hasVolatile()) return false;
800 
801   // Otherwise, all object types satisfy this except C++ classes with
802   // mutable subobjects or non-trivial copy/destroy behavior.
803   if (const RecordType *RT = dyn_cast<RecordType>(type))
804     if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
805       if (RD->hasMutableFields() || !RD->isTrivial())
806         return false;
807 
808   return true;
809 }
810 
811 /// Can we constant-emit a load of a reference to a variable of the
812 /// given type?  This is different from predicates like
813 /// Decl::isUsableInConstantExpressions because we do want it to apply
814 /// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
816 /// to do this with const float variables even if those variables
817 /// aren't marked 'constexpr'.
818 enum ConstantEmissionKind {
819   CEK_None,
820   CEK_AsReferenceOnly,
821   CEK_AsValueOrReference,
822   CEK_AsValueOnly
823 };
824 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
825   type = type.getCanonicalType();
826   if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
827     if (isConstantEmittableObjectType(ref->getPointeeType()))
828       return CEK_AsValueOrReference;
829     return CEK_AsReferenceOnly;
830   }
831   if (isConstantEmittableObjectType(type))
832     return CEK_AsValueOnly;
833   return CEK_None;
834 }
835 
836 /// Try to emit a reference to the given value without producing it as
837 /// an l-value.  This is actually more than an optimization: we can't
838 /// produce an l-value for variables that we never actually captured
/// in a block or lambda, such as const int variables or constexpr
/// literals.
841 CodeGenFunction::ConstantEmission
842 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
843   ValueDecl *value = refExpr->getDecl();
844 
845   // The value needs to be an enum constant or a constant variable.
846   ConstantEmissionKind CEK;
847   if (isa<ParmVarDecl>(value)) {
848     CEK = CEK_None;
849   } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
850     CEK = checkVarTypeForConstantEmission(var->getType());
851   } else if (isa<EnumConstantDecl>(value)) {
852     CEK = CEK_AsValueOnly;
853   } else {
854     CEK = CEK_None;
855   }
856   if (CEK == CEK_None) return ConstantEmission();
857 
858   Expr::EvalResult result;
859   bool resultIsReference;
860   QualType resultType;
861 
862   // It's best to evaluate all the way as an r-value if that's permitted.
863   if (CEK != CEK_AsReferenceOnly &&
864       refExpr->EvaluateAsRValue(result, getContext())) {
865     resultIsReference = false;
866     resultType = refExpr->getType();
867 
868   // Otherwise, try to evaluate as an l-value.
869   } else if (CEK != CEK_AsValueOnly &&
870              refExpr->EvaluateAsLValue(result, getContext())) {
871     resultIsReference = true;
872     resultType = value->getType();
873 
874   // Failure.
875   } else {
876     return ConstantEmission();
877   }
878 
879   // In any case, if the initializer has side-effects, abandon ship.
880   if (result.HasSideEffects)
881     return ConstantEmission();
882 
883   // Emit as a constant.
884   llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
885 
886   // Make sure we emit a debug reference to the global variable.
887   // This should probably fire even for
888   if (isa<VarDecl>(value)) {
889     if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
890       EmitDeclRefExprDbgValue(refExpr, C);
891   } else {
892     assert(isa<EnumConstantDecl>(value));
893     EmitDeclRefExprDbgValue(refExpr, C);
894   }
895 
896   // If we emitted a reference constant, we need to dereference that.
897   if (resultIsReference)
898     return ConstantEmission::forReference(C);
899 
900   return ConstantEmission::forValue(C);
901 }
902 
903 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
904   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
905                           lvalue.getAlignment().getQuantity(),
906                           lvalue.getType(), lvalue.getTBAAInfo());
907 }
908 
909 static bool hasBooleanRepresentation(QualType Ty) {
910   if (Ty->isBooleanType())
911     return true;
912 
913   if (const EnumType *ET = Ty->getAs<EnumType>())
914     return ET->getDecl()->getIntegerType()->isBooleanType();
915 
916   if (const AtomicType *AT = Ty->getAs<AtomicType>())
917     return hasBooleanRepresentation(AT->getValueType());
918 
919   return false;
920 }
921 
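/// getRangeForLoadFromType - If loads of the given type can only produce a
/// restricted range of values, return !range metadata describing that range,
/// e.g. (roughly) !range !{i8 0, i8 2} for a load of bool, meaning values
/// in [0, 2).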
922 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
923   const EnumType *ET = Ty->getAs<EnumType>();
924   bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
925                                  CGM.getCodeGenOpts().StrictEnums &&
926                                  !ET->getDecl()->isFixed());
927   bool IsBool = hasBooleanRepresentation(Ty);
928   if (!IsBool && !IsRegularCPlusPlusEnum)
929     return NULL;
930 
931   llvm::APInt Min;
932   llvm::APInt End;
933   if (IsBool) {
934     Min = llvm::APInt(8, 0);
935     End = llvm::APInt(8, 2);
936   } else {
937     const EnumDecl *ED = ET->getDecl();
938     llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
939     unsigned Bitwidth = LTy->getScalarSizeInBits();
940     unsigned NumNegativeBits = ED->getNumNegativeBits();
941     unsigned NumPositiveBits = ED->getNumPositiveBits();
942 
943     if (NumNegativeBits) {
944       unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
945       assert(NumBits <= Bitwidth);
946       End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
947       Min = -End;
948     } else {
949       assert(NumPositiveBits <= Bitwidth);
950       End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
951       Min = llvm::APInt(Bitwidth, 0);
952     }
953   }
954 
955   llvm::MDBuilder MDHelper(getLLVMContext());
956   return MDHelper.createRange(Min, End);
957 }
958 
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
962 
963   // For better performance, handle vector loads differently.
964   if (Ty->isVectorType()) {
    llvm::Value *V;
    const llvm::Type *EltTy =
        cast<llvm::PointerType>(Addr->getType())->getElementType();
    const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);
970 
    // Handle vectors of size 3 like size 4 for better performance.
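    // Roughly, for a <3 x float> load this emits (a sketch):
    //   %cast = bitcast <3 x float>* %addr to <4 x float>*
    //   %v4   = load <4 x float>* %cast
    //   %v3   = shufflevector <4 x float> %v4, <4 x float> undef,
    //                         <3 x i32> <i32 0, i32 1, i32 2>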
972     if (VTy->getNumElements() == 3) {
973 
      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty =
          llvm::VectorType::get(VTy->getElementType(), 4);
      llvm::PointerType *ptVec4Ty = llvm::PointerType::get(
          vec4Ty,
          cast<llvm::PointerType>(Addr->getType())->getAddressSpace());
      llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty, "castToVec4");

      // Now load the value.
      llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      llvm::SmallVector<llvm::Constant*, 3> Mask;
      for (unsigned i = 0; i != 3; ++i)
        Mask.push_back(Builder.getInt32(i));
997 
998       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
999       V = Builder.CreateShuffleVector(LoadVal,
1000                                       llvm::UndefValue::get(vec4Ty),
1001                                       MaskV, "extractVec");
1002       return EmitFromMemory(V, Ty);
1003     }
1004   }
1005 
1006   llvm::LoadInst *Load = Builder.CreateLoad(Addr);
1007   if (Volatile)
1008     Load->setVolatile(true);
1009   if (Alignment)
1010     Load->setAlignment(Alignment);
1011   if (TBAAInfo)
1012     CGM.DecorateInstruction(Load, TBAAInfo);
1013   // If this is an atomic type, all normal reads must be atomic
1014   if (Ty->isAtomicType())
1015     Load->setAtomic(llvm::SequentiallyConsistent);
1016 
1017   if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1018     if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1019       Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1020 
1021   return EmitFromMemory(Load, Ty);
1022 }
1023 
1024 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1025   // Bool has a different representation in memory than in registers.
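  // (For example, an i1 true is widened to an i8 1 for the store.)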
1026   if (hasBooleanRepresentation(Ty)) {
1027     // This should really always be an i1, but sometimes it's already
1028     // an i8, and it's awkward to track those cases down.
1029     if (Value->getType()->isIntegerTy(1))
1030       return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
1031     assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
1032   }
1033 
1034   return Value;
1035 }
1036 
1037 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1038   // Bool has a different representation in memory than in registers.
1039   if (hasBooleanRepresentation(Ty)) {
1040     assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
1041     return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1042   }
1043 
1044   return Value;
1045 }
1046 
1047 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
1048                                         bool Volatile, unsigned Alignment,
1049                                         QualType Ty,
1050                                         llvm::MDNode *TBAAInfo,
1051                                         bool isInit) {
1052 
1053   // Handle vectors differently to get better performance.
1054   if (Ty->isVectorType()) {
1055     llvm::Type *SrcTy = Value->getType();
1056     llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 specially.
1058     if (VecTy->getNumElements() == 3) {
1059       llvm::LLVMContext &VMContext = getLLVMContext();
1060 
      // Our source is a vec3; shuffle it out to a vec4 with an undef
      // fourth element.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != 3; ++i)
        Mask.push_back(Builder.getInt32(i));
      Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
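      // Roughly, for a <3 x float> store this emits (a sketch):
      //   %v4 = shufflevector <3 x float> %v, <3 x float> undef,
      //                       <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
      //   store <4 x float> %v4, <4 x float>* %addr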
1073 
1074       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1075       Value = Builder.CreateShuffleVector(Value,
1076                                           llvm::UndefValue::get(VecTy),
1077                                           MaskV, "extractVec");
1078       SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
1079     }
1080     llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
1081     if (DstPtr->getElementType() != SrcTy) {
1082       llvm::Type *MemTy =
1083       llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
1084       Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
1085     }
1086   }
1087 
1088   Value = EmitToMemory(Value, Ty);
1089 
1090   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1091   if (Alignment)
1092     Store->setAlignment(Alignment);
1093   if (TBAAInfo)
1094     CGM.DecorateInstruction(Store, TBAAInfo);
1095   if (!isInit && Ty->isAtomicType())
1096     Store->setAtomic(llvm::SequentiallyConsistent);
1097 }
1098 
1099 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1100     bool isInit) {
1101   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
1102                     lvalue.getAlignment().getQuantity(), lvalue.getType(),
1103                     lvalue.getTBAAInfo(), isInit);
1104 }
1105 
1106 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1107 /// method emits the address of the lvalue, then loads the result as an rvalue,
1108 /// returning the rvalue.
1109 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
1110   if (LV.isObjCWeak()) {
1111     // load of a __weak object.
1112     llvm::Value *AddrWeakObj = LV.getAddress();
1113     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1114                                                              AddrWeakObj));
1115   }
1116   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
1117     return RValue::get(EmitARCLoadWeak(LV.getAddress()));
1118 
1119   if (LV.isSimple()) {
1120     assert(!LV.getType()->isFunctionType());
1121 
1122     // Everything needs a load.
1123     return RValue::get(EmitLoadOfScalar(LV));
1124   }
1125 
1126   if (LV.isVectorElt()) {
1127     llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
1128                                               LV.isVolatileQualified());
1129     Load->setAlignment(LV.getAlignment().getQuantity());
1130     return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1131                                                     "vecext"));
1132   }
1133 
1134   // If this is a reference to a subset of the elements of a vector, either
1135   // shuffle the input or extract/insert them as appropriate.
1136   if (LV.isExtVectorElt())
1137     return EmitLoadOfExtVectorElementLValue(LV);
1138 
1139   assert(LV.isBitField() && "Unknown LValue type!");
1140   return EmitLoadOfBitfieldLValue(LV);
1141 }
1142 
1143 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
1144   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1145 
1146   // Get the output type.
1147   llvm::Type *ResLTy = ConvertType(LV.getType());
1148   unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
1149 
1150   // Compute the result as an OR of all of the individual component accesses.
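  // Roughly, each component contributes
  //   ((load_i >> FieldBitStart_i) & mask_i) << TargetBitOffset_i
  // and the components are OR'd together (a sketch).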
1151   llvm::Value *Res = 0;
1152   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1153     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1154     CharUnits AccessAlignment = AI.AccessAlignment;
1155     if (!LV.getAlignment().isZero())
1156       AccessAlignment = std::min(AccessAlignment, LV.getAlignment());
1157 
1158     // Get the field pointer.
1159     llvm::Value *Ptr = LV.getBitFieldBaseAddr();
1160 
1161     // Only offset by the field index if used, so that incoming values are not
1162     // required to be structures.
1163     if (AI.FieldIndex)
1164       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1165 
1166     // Offset by the byte offset, if used.
1167     if (!AI.FieldByteOffset.isZero()) {
1168       Ptr = EmitCastToVoidPtr(Ptr);
1169       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1170                                        "bf.field.offs");
1171     }
1172 
1173     // Cast to the access type.
1174     llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
1175                        CGM.getContext().getTargetAddressSpace(LV.getType()));
1176     Ptr = Builder.CreateBitCast(Ptr, PTy);
1177 
1178     // Perform the load.
1179     llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
1180     Load->setAlignment(AccessAlignment.getQuantity());
1181 
1182     // Shift out unused low bits and mask out unused high bits.
1183     llvm::Value *Val = Load;
1184     if (AI.FieldBitStart)
1185       Val = Builder.CreateLShr(Load, AI.FieldBitStart);
1186     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
1187                                                             AI.TargetBitWidth),
1188                             "bf.clear");
1189 
1190     // Extend or truncate to the target size.
1191     if (AI.AccessWidth < ResSizeInBits)
1192       Val = Builder.CreateZExt(Val, ResLTy);
1193     else if (AI.AccessWidth > ResSizeInBits)
1194       Val = Builder.CreateTrunc(Val, ResLTy);
1195 
1196     // Shift into place, and OR into the result.
1197     if (AI.TargetBitOffset)
1198       Val = Builder.CreateShl(Val, AI.TargetBitOffset);
1199     Res = Res ? Builder.CreateOr(Res, Val) : Val;
1200   }
1201 
1202   // If the bit-field is signed, perform the sign-extension.
1203   //
1204   // FIXME: This can easily be folded into the load of the high bits, which
1205   // could also eliminate the mask of high bits in some situations.
1206   if (Info.isSigned()) {
1207     unsigned ExtraBits = ResSizeInBits - Info.getSize();
1208     if (ExtraBits)
1209       Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
1210                                ExtraBits, "bf.val.sext");
1211   }
1212 
1213   return RValue::get(Res);
1214 }
1215 
1216 // If this is a reference to a subset of the elements of a vector, create an
1217 // appropriate shufflevector.
1218 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1219   llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
1220                                             LV.isVolatileQualified());
1221   Load->setAlignment(LV.getAlignment().getQuantity());
1222   llvm::Value *Vec = Load;
1223 
1224   const llvm::Constant *Elts = LV.getExtVectorElts();
1225 
1226   // If the result of the expression is a non-vector type, we must be extracting
1227   // a single element.  Just codegen as an extractelement.
1228   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1229   if (!ExprVT) {
1230     unsigned InIdx = getAccessedFieldNo(0, Elts);
1231     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1232     return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1233   }
1234 
1235   // Always use shuffle vector to try to retain the original program structure
1236   unsigned NumResultElts = ExprVT->getNumElements();
1237 
1238   SmallVector<llvm::Constant*, 4> Mask;
1239   for (unsigned i = 0; i != NumResultElts; ++i)
1240     Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
1241 
1242   llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1243   Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1244                                     MaskV);
1245   return RValue::get(Vec);
1246 }
1247 
1248 
1249 
1250 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
1252 /// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
1254   if (!Dst.isSimple()) {
1255     if (Dst.isVectorElt()) {
1256       // Read/modify/write the vector, inserting the new element.
1257       llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
1258                                                 Dst.isVolatileQualified());
1259       Load->setAlignment(Dst.getAlignment().getQuantity());
1260       llvm::Value *Vec = Load;
1261       Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
1262                                         Dst.getVectorIdx(), "vecins");
1263       llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
1264                                                    Dst.isVolatileQualified());
1265       Store->setAlignment(Dst.getAlignment().getQuantity());
1266       return;
1267     }
1268 
1269     // If this is an update of extended vector elements, insert them as
1270     // appropriate.
1271     if (Dst.isExtVectorElt())
1272       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
1273 
1274     assert(Dst.isBitField() && "Unknown LValue type");
1275     return EmitStoreThroughBitfieldLValue(Src, Dst);
1276   }
1277 
1278   // There's special magic for assigning into an ARC-qualified l-value.
1279   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
1280     switch (Lifetime) {
1281     case Qualifiers::OCL_None:
1282       llvm_unreachable("present but none");
1283 
1284     case Qualifiers::OCL_ExplicitNone:
1285       // nothing special
1286       break;
1287 
1288     case Qualifiers::OCL_Strong:
1289       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
1290       return;
1291 
1292     case Qualifiers::OCL_Weak:
1293       EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
1294       return;
1295 
1296     case Qualifiers::OCL_Autoreleasing:
1297       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
1298                                                      Src.getScalarVal()));
1299       // fall into the normal path
1300       break;
1301     }
1302   }
1303 
  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
1309     return;
1310   }
1311 
1312   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment into a __strong object.
1314     llvm::Value *LvalueDst = Dst.getAddress();
1315     llvm::Value *src = Src.getScalarVal();
1316     if (Dst.isObjCIvar()) {
1317       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
1318       llvm::Type *ResultType = ConvertType(getContext().LongTy);
1319       llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
1320       llvm::Value *dst = RHS;
1321       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1322       llvm::Value *LHS =
1323         Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
1324       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1325       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1326                                               BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else {
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    }
1333     return;
1334   }
1335 
1336   assert(Src.isScalar() && "Can't emit an agg store with this method");
1337   EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
1338 }
1339 
1340 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1341                                                      llvm::Value **Result) {
1342   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1343 
1344   // Get the output type.
1345   llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1346   unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);
1347 
1348   // Get the source value, truncated to the width of the bit-field.
1349   llvm::Value *SrcVal = Src.getScalarVal();
1350 
1351   if (hasBooleanRepresentation(Dst.getType()))
1352     SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
1353 
1354   SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
1355                                                                 Info.getSize()),
1356                              "bf.value");
1357 
1358   // Return the new value of the bit-field, if requested.
1359   if (Result) {
    // Cast back to the proper type for the result.
1361     llvm::Type *SrcTy = Src.getScalarVal()->getType();
1362     llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
1363                                                    "bf.reload.val");
1364 
1365     // Sign extend if necessary.
1366     if (Info.isSigned()) {
1367       unsigned ExtraBits = ResSizeInBits - Info.getSize();
1368       if (ExtraBits)
1369         ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
1370                                        ExtraBits, "bf.reload.sext");
1371     }
1372 
1373     *Result = ReloadVal;
1374   }
1375 
1376   // Iterate over the components, writing each piece to memory.
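  // Worked example (a sketch, assuming a typical little-endian layout): for
  //   struct { unsigned a : 3; unsigned b : 5; } S;
  // a store to S.b with a single 8-bit access has FieldBitStart = 3 and
  // TargetBitWidth = 5, so the emitted IR clears bits [3,8) of the loaded
  // byte and ORs in the shifted value:
  //   %old     = load i8* %ptr
  //   %cleared = and i8 %old, 7            ; InvMask = ~0xF8
  //   %new     = or i8 %cleared, %shifted
  //   store i8 %new, i8* %ptr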
1377   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1378     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1379     CharUnits AccessAlignment = AI.AccessAlignment;
1380     if (!Dst.getAlignment().isZero())
1381       AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());
1382 
1383     // Get the field pointer.
1384     llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
1385     unsigned addressSpace =
1386       cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
1387 
1388     // Only offset by the field index if used, so that incoming values are not
1389     // required to be structures.
1390     if (AI.FieldIndex)
1391       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1392 
1393     // Offset by the byte offset, if used.
1394     if (!AI.FieldByteOffset.isZero()) {
1395       Ptr = EmitCastToVoidPtr(Ptr);
1396       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1397                                        "bf.field.offs");
1398     }
1399 
1400     // Cast to the access type.
1401     llvm::Type *AccessLTy =
1402       llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
1403 
1404     llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
1405     Ptr = Builder.CreateBitCast(Ptr, PTy);
1406 
    // Extract the piece of the bit-field value to write in this access,
    // limited to the bits that are part of this access.
1409     llvm::Value *Val = SrcVal;
1410     if (AI.TargetBitOffset)
1411       Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
1412     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
1413                                                             AI.TargetBitWidth));
1414 
1415     // Extend or truncate to the access size.
1416     if (ResSizeInBits < AI.AccessWidth)
1417       Val = Builder.CreateZExt(Val, AccessLTy);
1418     else if (ResSizeInBits > AI.AccessWidth)
1419       Val = Builder.CreateTrunc(Val, AccessLTy);
1420 
1421     // Shift into the position in memory.
1422     if (AI.FieldBitStart)
1423       Val = Builder.CreateShl(Val, AI.FieldBitStart);
1424 
1425     // If necessary, load and OR in bits that are outside of the bit-field.
1426     if (AI.TargetBitWidth != AI.AccessWidth) {
1427       llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
1428       Load->setAlignment(AccessAlignment.getQuantity());
1429 
1430       // Compute the mask for zeroing the bits that are part of the bit-field.
1431       llvm::APInt InvMask =
1432         ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
1433                                  AI.FieldBitStart + AI.TargetBitWidth);
1434 
1435       // Apply the mask and OR in to the value to write.
1436       Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
1437     }
1438 
1439     // Write the value.
1440     llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
1441                                                  Dst.isVolatileQualified());
1442     Store->setAlignment(AccessAlignment.getQuantity());
1443   }
1444 }
1445 
1446 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1447                                                                LValue Dst) {
1448   // This access turns into a read/modify/write of the vector.  Load the input
1449   // value now.
1450   llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
1451                                             Dst.isVolatileQualified());
1452   Load->setAlignment(Dst.getAlignment().getQuantity());
1453   llvm::Value *Vec = Load;
1454   const llvm::Constant *Elts = Dst.getExtVectorElts();
1455 
1456   llvm::Value *SrcVal = Src.getScalarVal();
1457 
1458   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1459     unsigned NumSrcElts = VTy->getNumElements();
1460     unsigned NumDstElts =
1461        cast<llvm::VectorType>(Vec->getType())->getNumElements();
1462     if (NumDstElts == NumSrcElts) {
      // If the source and destination have the same number of elements, use a
      // shufflevector whose mask permutes the source into the order in which
      // it will be stored.
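      // E.g. for 'V.wzyx = W' with 4-element V and W, the mask comes out as
      //   <3, 2, 1, 0>
      // so the shuffle reverses W into storage order before the store.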
1466       SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1467       for (unsigned i = 0; i != NumSrcElts; ++i)
1468         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1469 
1470       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1471       Vec = Builder.CreateShuffleVector(SrcVal,
1472                                         llvm::UndefValue::get(Vec->getType()),
1473                                         MaskV);
1474     } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length, then shuffle it into
      // the destination.
1477       // FIXME: since we're shuffling with undef, can we just use the indices
1478       //        into that?  This could be simpler.
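      // E.g. for 'V.xy = W' with 4-element V and 2-element W: W is first
      // widened to <W0, W1, undef, undef>, and the final mask comes out as
      //   <4, 5, 2, 3>
      // so elements 0 and 1 are taken from the widened source while the rest
      // are preserved from V.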
1479       SmallVector<llvm::Constant*, 4> ExtMask;
1480       for (unsigned i = 0; i != NumSrcElts; ++i)
1481         ExtMask.push_back(Builder.getInt32(i));
1482       ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1483       llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1484       llvm::Value *ExtSrcVal =
1485         Builder.CreateShuffleVector(SrcVal,
1486                                     llvm::UndefValue::get(SrcVal->getType()),
1487                                     ExtMaskV);
      // Build an identity mask for the destination vector.
1489       SmallVector<llvm::Constant*, 4> Mask;
1490       for (unsigned i = 0; i != NumDstElts; ++i)
1491         Mask.push_back(Builder.getInt32(i));
1492 
      // Overwrite the entries for the elements that get shuffled in from the
      // (extended) source.
1494       for (unsigned i = 0; i != NumSrcElts; ++i)
1495         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1496       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1497       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1498     } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shortening of vector length");
1501     }
1502   } else {
1503     // If the Src is a scalar (not a vector) it must be updating one element.
1504     unsigned InIdx = getAccessedFieldNo(0, Elts);
1505     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1506     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1507   }
1508 
1509   llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
1510                                                Dst.isVolatileQualified());
1511   Store->setAlignment(Dst.getAlignment().getQuantity());
1512 }
1513 
// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, an ivar,
// or neither.
1517 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1518                                  LValue &LV,
1519                                  bool IsMemberAccess=false) {
1520   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1521     return;
1522 
1523   if (isa<ObjCIvarRefExpr>(E)) {
1524     QualType ExpTy = E->getType();
1525     if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of this
      // struct follows gcc's behavior and conservatively makes it a
      // non-ivar write-barrier.
1529       ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1530       if (ExpTy->isRecordType()) {
1531         LV.setObjCIvar(false);
1532         return;
1533       }
1534     }
1535     LV.setObjCIvar(true);
1536     ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1537     LV.setBaseIvarExp(Exp->getBase());
1538     LV.setObjCArray(E->getType()->isArrayType());
1539     return;
1540   }
1541 
1542   if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1543     if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1544       if (VD->hasGlobalStorage()) {
1545         LV.setGlobalObjCRef(true);
1546         LV.setThreadLocalRef(VD->isThreadSpecified());
1547       }
1548     }
1549     LV.setObjCArray(E->getType()->isArrayType());
1550     return;
1551   }
1552 
1553   if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1554     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1555     return;
1556   }
1557 
1558   if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1559     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1560     if (LV.isObjCIvar()) {
      // If the cast is to a structure pointer, follow gcc's behavior and
      // make it a non-ivar write-barrier.
1563       QualType ExpTy = E->getType();
1564       if (ExpTy->isPointerType())
1565         ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1566       if (ExpTy->isRecordType())
1567         LV.setObjCIvar(false);
1568     }
1569     return;
1570   }
1571 
1572   if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1573     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1574     return;
1575   }
1576 
1577   if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1578     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1579     return;
1580   }
1581 
1582   if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1583     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1584     return;
1585   }
1586 
1587   if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1588     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1589     return;
1590   }
1591 
1592   if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1593     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1594     if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1597       LV.setObjCIvar(false);
1598     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
1601       LV.setGlobalObjCRef(false);
1602     return;
1603   }
1604 
1605   if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1606     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if the member is an ivar, but this flag is looked at
    // only in the context of LV.isObjCIvar().
1609     LV.setObjCArray(E->getType()->isArrayType());
1610     return;
1611   }
1612 }
1613 
1614 static llvm::Value *
1615 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1616                                 llvm::Value *V, llvm::Type *IRType,
1617                                 StringRef Name = StringRef()) {
1618   unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1619   return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1620 }
1621 
1622 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1623                                       const Expr *E, const VarDecl *VD) {
1624   assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1625          "Var decl must have external storage or be a file var decl!");
1626 
1627   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1628   llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1629   V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1630   CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1631   QualType T = E->getType();
1632   LValue LV;
1633   if (VD->getType()->isReferenceType()) {
1634     llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1635     LI->setAlignment(Alignment.getQuantity());
1636     V = LI;
1637     LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1638   } else {
1639     LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1640   }
1641   setObjCGCLValueClass(CGF.getContext(), E, LV);
1642   return LV;
1643 }
1644 
1645 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1646                                      const Expr *E, const FunctionDecl *FD) {
1647   llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1648   if (!FD->hasPrototype()) {
1649     if (const FunctionProtoType *Proto =
1650             FD->getType()->getAs<FunctionProtoType>()) {
1651       // Ugly case: for a K&R-style definition, the type of the definition
1652       // isn't the same as the type of a use.  Correct for this with a
1653       // bitcast.
1654       QualType NoProtoType =
1655           CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1656       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1657       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
1658     }
1659   }
1660   CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
1661   return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1662 }
1663 
1664 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1665   const NamedDecl *ND = E->getDecl();
1666   CharUnits Alignment = getContext().getDeclAlign(ND);
1667   QualType T = E->getType();
1668 
1669   // A DeclRefExpr for a reference initialized by a constant expression can
1670   // appear without being odr-used. Directly emit the constant initializer.
1671   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1672     const Expr *Init = VD->getAnyInitializer(VD);
1673     if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
1674         VD->isUsableInConstantExpressions(getContext()) &&
1675         VD->checkInitIsICE()) {
1676       llvm::Constant *Val =
1677         CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
1678       assert(Val && "failed to emit reference constant expression");
1679       // FIXME: Eventually we will want to emit vector element references.
1680       return MakeAddrLValue(Val, T, Alignment);
1681     }
1682   }
1683 
1684   // FIXME: We should be able to assert this for FunctionDecls as well!
1685   // FIXME: We should be able to assert this for all DeclRefExprs, not just
1686   // those with a valid source location.
1687   assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
1688           !E->getLocation().isValid()) &&
1689          "Should not use decl without marking it used!");
1690 
1691   if (ND->hasAttr<WeakRefAttr>()) {
1692     const ValueDecl *VD = cast<ValueDecl>(ND);
1693     llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1694     return MakeAddrLValue(Aliasee, T, Alignment);
1695   }
1696 
1697   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1698     // Check if this is a global variable.
1699     if (VD->hasExternalStorage() || VD->isFileVarDecl())
1700       return EmitGlobalVarDeclLValue(*this, E, VD);
1701 
1702     bool isBlockVariable = VD->hasAttr<BlocksAttr>();
1703 
1704     bool NonGCable = VD->hasLocalStorage() &&
1705                      !VD->getType()->isReferenceType() &&
1706                      !isBlockVariable;
1707 
1708     llvm::Value *V = LocalDeclMap[VD];
1709     if (!V && VD->isStaticLocal())
1710       V = CGM.getStaticLocalDeclAddress(VD);
1711 
1712     // Use special handling for lambdas.
1713     if (!V) {
1714       if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
1715         QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
1716         LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
1717                                                      LambdaTagType);
1718         return EmitLValueForField(LambdaLV, FD);
1719       }
1720 
1721       assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
1722       return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
1723                             T, Alignment);
1724     }
1725 
1726     assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1727 
1728     if (isBlockVariable)
1729       V = BuildBlockByrefAddress(V, VD);
1730 
1731     LValue LV;
1732     if (VD->getType()->isReferenceType()) {
1733       llvm::LoadInst *LI = Builder.CreateLoad(V);
1734       LI->setAlignment(Alignment.getQuantity());
1735       V = LI;
1736       LV = MakeNaturalAlignAddrLValue(V, T);
1737     } else {
1738       LV = MakeAddrLValue(V, T, Alignment);
1739     }
1740 
1741     if (NonGCable) {
1742       LV.getQuals().removeObjCGCAttr();
1743       LV.setNonGC(true);
1744     }
1745     setObjCGCLValueClass(getContext(), E, LV);
1746     return LV;
1747   }
1748 
1749   if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1750     return EmitFunctionDeclLValue(*this, E, fn);
1751 
1752   llvm_unreachable("Unhandled DeclRefExpr");
1753 }
1754 
1755 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1756   // __extension__ doesn't affect lvalue-ness.
1757   if (E->getOpcode() == UO_Extension)
1758     return EmitLValue(E->getSubExpr());
1759 
1760   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1761   switch (E->getOpcode()) {
1762   default: llvm_unreachable("Unknown unary operator lvalue!");
1763   case UO_Deref: {
1764     QualType T = E->getSubExpr()->getType()->getPointeeType();
1765     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1766 
1767     LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1768     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1769 
    // We should not generate a __weak write barrier on an indirect reference
    // of a pointer to an object, as in:
    //   void foo (__weak id *param); *param = 0;
    // But we continue to generate a __strong write barrier on an indirect
    // write into a pointer to an object.
1774     if (getLangOpts().ObjC1 &&
1775         getLangOpts().getGC() != LangOptions::NonGC &&
1776         LV.isObjCWeak())
1777       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1778     return LV;
1779   }
1780   case UO_Real:
1781   case UO_Imag: {
1782     LValue LV = EmitLValue(E->getSubExpr());
1783     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1784     llvm::Value *Addr = LV.getAddress();
1785 
1786     // __real is valid on scalars.  This is a faster way of testing that.
1787     // __imag can only produce an rvalue on scalars.
1788     if (E->getOpcode() == UO_Real &&
1789         !cast<llvm::PointerType>(Addr->getType())
1790            ->getElementType()->isStructTy()) {
1791       assert(E->getSubExpr()->getType()->isArithmeticType());
1792       return LV;
1793     }
1794 
1795     assert(E->getSubExpr()->getType()->isAnyComplexType());
1796 
1797     unsigned Idx = E->getOpcode() == UO_Imag;
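    // CreateStructGEP selects the real (index 0) or imaginary (index 1)
    // component, e.g. '__imag z' for a 'double _Complex z' emits roughly:
    //   getelementptr inbounds { double, double }* %z.addr, i32 0, i32 1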
1798     return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1799                                                   Idx, "idx"),
1800                           ExprTy);
1801   }
1802   case UO_PreInc:
1803   case UO_PreDec: {
1804     LValue LV = EmitLValue(E->getSubExpr());
1805     bool isInc = E->getOpcode() == UO_PreInc;
1806 
1807     if (E->getType()->isAnyComplexType())
1808       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1809     else
1810       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1811     return LV;
1812   }
1813   }
1814 }
1815 
1816 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1817   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1818                         E->getType());
1819 }
1820 
1821 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1822   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1823                         E->getType());
1824 }
1825 
1826 static llvm::Constant*
1827 GetAddrOfConstantWideString(StringRef Str,
1828                             const char *GlobalName,
1829                             ASTContext &Context,
1830                             QualType Ty, SourceLocation Loc,
1831                             CodeGenModule &CGM) {
1832 
1833   StringLiteral *SL = StringLiteral::Create(Context,
1834                                             Str,
1835                                             StringLiteral::Wide,
1836                                             /*Pascal = */false,
1837                                             Ty, Loc);
1838   llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL);
1839   llvm::GlobalVariable *GV =
1840     new llvm::GlobalVariable(CGM.getModule(), C->getType(),
1841                              !CGM.getLangOpts().WritableStrings,
1842                              llvm::GlobalValue::PrivateLinkage,
1843                              C, GlobalName);
1844   const unsigned WideAlignment =
1845     Context.getTypeAlignInChars(Ty).getQuantity();
1846   GV->setAlignment(WideAlignment);
1847   return GV;
1848 }
1849 
1850 static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
1851                                     SmallString<32>& Target) {
1852   Target.resize(CharByteWidth * (Source.size() + 1));
1853   char *ResultPtr = &Target[0];
1854   const UTF8 *ErrorPtr;
1855   bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
1856   (void)success;
1857   assert(success);
1858   Target.resize(ResultPtr - &Target[0]);
1859 }
1860 
1861 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1862   switch (E->getIdentType()) {
1863   default:
1864     return EmitUnsupportedLValue(E, "predefined expression");
1865 
1866   case PredefinedExpr::Func:
1867   case PredefinedExpr::Function:
1868   case PredefinedExpr::LFunction:
1869   case PredefinedExpr::PrettyFunction: {
1870     unsigned IdentType = E->getIdentType();
1871     std::string GlobalVarName;
1872 
1873     switch (IdentType) {
1874     default: llvm_unreachable("Invalid type");
1875     case PredefinedExpr::Func:
1876       GlobalVarName = "__func__.";
1877       break;
1878     case PredefinedExpr::Function:
1879       GlobalVarName = "__FUNCTION__.";
1880       break;
1881     case PredefinedExpr::LFunction:
1882       GlobalVarName = "L__FUNCTION__.";
1883       break;
1884     case PredefinedExpr::PrettyFunction:
1885       GlobalVarName = "__PRETTY_FUNCTION__.";
1886       break;
1887     }
1888 
1889     StringRef FnName = CurFn->getName();
1890     if (FnName.startswith("\01"))
1891       FnName = FnName.substr(1);
1892     GlobalVarName += FnName;
1893 
1894     const Decl *CurDecl = CurCodeDecl;
1895     if (CurDecl == 0)
1896       CurDecl = getContext().getTranslationUnitDecl();
1897 
1898     std::string FunctionName =
1899         (isa<BlockDecl>(CurDecl)
1900          ? FnName.str()
1901          : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
1902                                        CurDecl));
1903 
1904     const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
1905     llvm::Constant *C;
1906     if (ElemType->isWideCharType()) {
1907       SmallString<32> RawChars;
1908       ConvertUTF8ToWideString(
1909           getContext().getTypeSizeInChars(ElemType).getQuantity(),
1910           FunctionName, RawChars);
1911       C = GetAddrOfConstantWideString(RawChars,
1912                                       GlobalVarName.c_str(),
1913                                       getContext(),
1914                                       E->getType(),
1915                                       E->getLocation(),
1916                                       CGM);
1917     } else {
1918       C = CGM.GetAddrOfConstantCString(FunctionName,
1919                                        GlobalVarName.c_str(),
1920                                        1);
1921     }
1922     return MakeAddrLValue(C, E->getType());
1923   }
1924   }
1925 }
1926 
/// Emit a type description suitable for use by a runtime sanitizer library.
/// The format of a type descriptor is
1929 ///
1930 /// \code
1931 ///   { i16 TypeKind, i16 TypeInfo }
1932 /// \endcode
1933 ///
1934 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
1935 /// integer, 1 for a floating point value, and -1 for anything else.
1936 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
1937   // FIXME: Only emit each type's descriptor once.
1938   uint16_t TypeKind = -1;
1939   uint16_t TypeInfo = 0;
1940 
1941   if (T->isIntegerType()) {
1942     TypeKind = 0;
1943     TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
1944                T->isSignedIntegerType();
1945   } else if (T->isFloatingType()) {
1946     TypeKind = 1;
1947     TypeInfo = getContext().getTypeSize(T);
1948   }
1949 
1950   // Format the type name as if for a diagnostic, including quotes and
1951   // optionally an 'aka'.
1952   llvm::SmallString<32> Buffer;
1953   CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
1954                                     (intptr_t)T.getAsOpaquePtr(),
1955                                     0, 0, 0, 0, 0, 0, Buffer,
1956                                     ArrayRef<intptr_t>());
1957 
1958   llvm::Constant *Components[] = {
1959     Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
1960     llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
1961   };
1962   llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
1963 
1964   llvm::GlobalVariable *GV =
1965     new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(),
1966                              /*isConstant=*/true,
1967                              llvm::GlobalVariable::PrivateLinkage,
1968                              Descriptor);
1969   GV->setUnnamedAddr(true);
1970   return GV;
1971 }
1972 
1973 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
1974   llvm::Type *TargetTy = IntPtrTy;
1975 
1976   // Integers which fit in intptr_t are zero-extended and passed directly.
1977   if (V->getType()->isIntegerTy() &&
1978       V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
1979     return Builder.CreateZExt(V, TargetTy);
1980 
1981   // Pointers are passed directly, everything else is passed by address.
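  // E.g. a 'double' fits neither case, so it is spilled (a sketch, assuming
  // a 64-bit intptr_t):
  //   %tmp = alloca double
  //   store double %v, double* %tmp
  //   %arg = ptrtoint double* %tmp to i64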
1982   if (!V->getType()->isPointerTy()) {
1983     llvm::Value *Ptr = Builder.CreateAlloca(V->getType());
1984     Builder.CreateStore(V, Ptr);
1985     V = Ptr;
1986   }
1987   return Builder.CreatePtrToInt(V, TargetTy);
1988 }
1989 
1990 /// \brief Emit a representation of a SourceLocation for passing to a handler
1991 /// in a sanitizer runtime library. The format for this data is:
1992 /// \code
1993 ///   struct SourceLocation {
1994 ///     const char *Filename;
1995 ///     int32_t Line, Column;
1996 ///   };
1997 /// \endcode
1998 /// For an invalid SourceLocation, the Filename pointer is null.
1999 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
2000   PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
2001 
2002   llvm::Constant *Data[] = {
2003     // FIXME: Only emit each file name once.
2004     PLoc.isValid() ? cast<llvm::Constant>(
2005                        Builder.CreateGlobalStringPtr(PLoc.getFilename()))
2006                    : llvm::Constant::getNullValue(Int8PtrTy),
2007     Builder.getInt32(PLoc.getLine()),
2008     Builder.getInt32(PLoc.getColumn())
2009   };
2010 
2011   return llvm::ConstantStruct::getAnon(Data);
2012 }
2013 
2014 void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
2015                                 llvm::ArrayRef<llvm::Constant *> StaticArgs,
2016                                 llvm::ArrayRef<llvm::Value *> DynamicArgs,
2017                                 bool Recoverable) {
2018   llvm::BasicBlock *Cont = createBasicBlock("cont");
2019 
2020   llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
2021   Builder.CreateCondBr(Checked, Cont, Handler);
2022   EmitBlock(Handler);
2023 
2024   llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
2025   llvm::GlobalValue *InfoPtr =
2026       new llvm::GlobalVariable(CGM.getModule(), Info->getType(), true,
2027                                llvm::GlobalVariable::PrivateLinkage, Info);
2028   InfoPtr->setUnnamedAddr(true);
2029 
2030   llvm::SmallVector<llvm::Value *, 4> Args;
2031   llvm::SmallVector<llvm::Type *, 4> ArgTypes;
2032   Args.reserve(DynamicArgs.size() + 1);
2033   ArgTypes.reserve(DynamicArgs.size() + 1);
2034 
2035   // Handler functions take an i8* pointing to the (handler-specific) static
2036   // information block, followed by a sequence of intptr_t arguments
2037   // representing operand values.
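  // A typical emitted call therefore looks like (a sketch, assuming a 64-bit
  // intptr_t):
  //   call void @__ubsan_handle_<check>(i8* %info, i64 %arg0, i64 %arg1)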
2038   Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
2039   ArgTypes.push_back(Int8PtrTy);
2040   for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
2041     Args.push_back(EmitCheckValue(DynamicArgs[i]));
2042     ArgTypes.push_back(IntPtrTy);
2043   }
2044 
2045   llvm::FunctionType *FnType =
2046     llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
2047   llvm::AttrBuilder B;
2048   if (!Recoverable) {
2049     B.addAttribute(llvm::Attributes::NoReturn)
2050      .addAttribute(llvm::Attributes::NoUnwind);
2051   }
2052   B.addAttribute(llvm::Attributes::UWTable);
2053   llvm::Value *Fn = CGM.CreateRuntimeFunction(FnType,
2054                                           ("__ubsan_handle_" + CheckName).str(),
2055                                          llvm::Attributes::get(getLLVMContext(),
2056                                                                B));
2057   llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args);
2058   if (Recoverable) {
2059     Builder.CreateBr(Cont);
2060   } else {
2061     HandlerCall->setDoesNotReturn();
2062     HandlerCall->setDoesNotThrow();
2063     Builder.CreateUnreachable();
2064   }
2065 
2066   EmitBlock(Cont);
2067 }
2068 
2069 void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) {
2070   llvm::BasicBlock *Cont = createBasicBlock("cont");
2071 
2072   // If we're optimizing, collapse all calls to trap down to just one per
2073   // function to save on code size.
2074   if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
2075     TrapBB = createBasicBlock("trap");
2076     Builder.CreateCondBr(Checked, Cont, TrapBB);
2077     EmitBlock(TrapBB);
2078     llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
2079     llvm::CallInst *TrapCall = Builder.CreateCall(F);
2080     TrapCall->setDoesNotReturn();
2081     TrapCall->setDoesNotThrow();
2082     Builder.CreateUnreachable();
2083   } else {
2084     Builder.CreateCondBr(Checked, Cont, TrapBB);
2085   }
2086 
2087   EmitBlock(Cont);
2088 }
2089 
2090 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
2091 /// array to pointer, return the array subexpression.
2092 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
2093   // If this isn't just an array->pointer decay, bail out.
2094   const CastExpr *CE = dyn_cast<CastExpr>(E);
2095   if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
2096     return 0;
2097 
  // If this is a decay from a variable-length array, bail out.
2099   const Expr *SubExpr = CE->getSubExpr();
2100   if (SubExpr->getType()->isVariableArrayType())
2101     return 0;
2102 
2103   return SubExpr;
2104 }
2105 
2106 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
2107   // The index must always be an integer, which is not an aggregate.  Emit it.
2108   llvm::Value *Idx = EmitScalarExpr(E->getIdx());
2109   QualType IdxTy  = E->getIdx()->getType();
2110   bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
2111 
2112   // If the base is a vector type, then we are forming a vector element lvalue
2113   // with this subscript.
2114   if (E->getBase()->getType()->isVectorType()) {
2115     // Emit the vector as an lvalue to get its address.
2116     LValue LHS = EmitLValue(E->getBase());
2117     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
2118     Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
2119     return LValue::MakeVectorElt(LHS.getAddress(), Idx,
2120                                  E->getBase()->getType(), LHS.getAlignment());
2121   }
2122 
  // Extend or truncate the index type to 32 or 64 bits.
2124   if (Idx->getType() != IntPtrTy)
2125     Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
2126 
  // We know that the pointer points to a type of the correct size, unless
  // the type is a VLA or an Objective-C interface.
2129   llvm::Value *Address = 0;
2130   CharUnits ArrayAlignment;
2131   if (const VariableArrayType *vla =
2132         getContext().getAsVariableArrayType(E->getType())) {
2133     // The base must be a pointer, which is not an aggregate.  Emit
2134     // it.  It needs to be emitted first in case it's what captures
2135     // the VLA bounds.
2136     Address = EmitScalarExpr(E->getBase());
2137 
2138     // The element count here is the total number of non-VLA elements.
2139     llvm::Value *numElements = getVLASize(vla).first;
2140 
2141     // Effectively, the multiply by the VLA size is part of the GEP.
2142     // GEP indexes are signed, and scaling an index isn't permitted to
2143     // signed-overflow, so we use the same semantics for our explicit
2144     // multiply.  We suppress this if overflow is not undefined behavior.
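    // E.g. for 'int A[n][m]; ... A[i] ...' the element count is 'm', so on a
    // 64-bit target this emits roughly:
    //   %idx  = mul nsw i64 %i, %m
    //   %addr = getelementptr inbounds i32* %A, i64 %idx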
2145     if (getLangOpts().isSignedOverflowDefined()) {
2146       Idx = Builder.CreateMul(Idx, numElements);
2147       Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2148     } else {
2149       Idx = Builder.CreateNSWMul(Idx, numElements);
2150       Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
2151     }
2152   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
2153     // Indexing over an interface, as in "NSString *P; P[4];"
2154     llvm::Value *InterfaceSize =
2155       llvm::ConstantInt::get(Idx->getType(),
2156           getContext().getTypeSizeInChars(OIT).getQuantity());
2157 
2158     Idx = Builder.CreateMul(Idx, InterfaceSize);
2159 
2160     // The base must be a pointer, which is not an aggregate.  Emit it.
2161     llvm::Value *Base = EmitScalarExpr(E->getBase());
2162     Address = EmitCastToVoidPtr(Base);
2163     Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2164     Address = Builder.CreateBitCast(Address, Base->getType());
2165   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
2170     assert(Array->getType()->isArrayType() &&
2171            "Array to pointer decay must have array source type!");
2172     LValue ArrayLV = EmitLValue(Array);
2173     llvm::Value *ArrayPtr = ArrayLV.getAddress();
2174     llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2175     llvm::Value *Args[] = { Zero, Idx };
2176 
2177     // Propagate the alignment from the array itself to the result.
2178     ArrayAlignment = ArrayLV.getAlignment();
2179 
2180     if (getLangOpts().isSignedOverflowDefined())
2181       Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
2182     else
2183       Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
2184   } else {
2185     // The base must be a pointer, which is not an aggregate.  Emit it.
2186     llvm::Value *Base = EmitScalarExpr(E->getBase());
2187     if (getLangOpts().isSignedOverflowDefined())
2188       Address = Builder.CreateGEP(Base, Idx, "arrayidx");
2189     else
2190       Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
2191   }
2192 
2193   QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  // Limit the alignment to that of the result type.
2199   LValue LV;
2200   if (!ArrayAlignment.isZero()) {
2201     CharUnits Align = getContext().getTypeAlignInChars(T);
2202     ArrayAlignment = std::min(Align, ArrayAlignment);
2203     LV = MakeAddrLValue(Address, T, ArrayAlignment);
2204   } else {
2205     LV = MakeNaturalAlignAddrLValue(Address, T);
2206   }
2207 
2208   LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
2209 
2210   if (getLangOpts().ObjC1 &&
2211       getLangOpts().getGC() != LangOptions::NonGC) {
2212     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2213     setObjCGCLValueClass(getContext(), E, LV);
2214   }
2215   return LV;
2216 }
2217 
2218 static
2219 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
2220                                        SmallVector<unsigned, 4> &Elts) {
2221   SmallVector<llvm::Constant*, 4> CElts;
2222   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
2223     CElts.push_back(Builder.getInt32(Elts[i]));
2224 
2225   return llvm::ConstantVector::get(CElts);
2226 }
2227 
2228 LValue CodeGenFunction::
2229 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
2230   // Emit the base vector as an l-value.
2231   LValue Base;
2232 
2233   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
2234   if (E->isArrow()) {
2235     // If it is a pointer to a vector, emit the address and form an lvalue with
2236     // it.
2237     llvm::Value *Ptr = EmitScalarExpr(E->getBase());
2238     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
2239     Base = MakeAddrLValue(Ptr, PT->getPointeeType());
2240     Base.getQuals().removeObjCGCAttr();
2241   } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
2244     assert(E->getBase()->getType()->isVectorType());
2245     Base = EmitLValue(E->getBase());
2246   } else {
2247     // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
2248     assert(E->getBase()->getType()->isVectorType() &&
2249            "Result must be a vector");
2250     llvm::Value *Vec = EmitScalarExpr(E->getBase());
2251 
2252     // Store the vector to memory (because LValue wants an address).
2253     llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
2254     Builder.CreateStore(Vec, VecMem);
2255     Base = MakeAddrLValue(VecMem, E->getBase()->getType());
2256   }
2257 
2258   QualType type =
2259     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2260 
2261   // Encode the element access list into a vector of unsigned indices.
2262   SmallVector<unsigned, 4> Indices;
2263   E->getEncodedElementAccess(Indices);
2264 
2265   if (Base.isSimple()) {
2266     llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
2267     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
2268                                     Base.getAlignment());
2269   }
2270   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
2271 
2272   llvm::Constant *BaseElts = Base.getExtVectorElts();
2273   SmallVector<llvm::Constant *, 4> CElts;
2274 
2275   for (unsigned i = 0, e = Indices.size(); i != e; ++i)
2276     CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
2277   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2278   return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
2279                                   Base.getAlignment());
2280 }
2281 
2282 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
2283   Expr *BaseExpr = E->getBase();
2284 
2285   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
2286   LValue BaseLV;
2287   if (E->isArrow()) {
2288     llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
2289     QualType PtrTy = BaseExpr->getType()->getPointeeType();
2290     EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
2291     BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
2292   } else
2293     BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
2294 
2295   NamedDecl *ND = E->getMemberDecl();
2296   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
2297     LValue LV = EmitLValueForField(BaseLV, Field);
2298     setObjCGCLValueClass(getContext(), E, LV);
2299     return LV;
2300   }
2301 
2302   if (VarDecl *VD = dyn_cast<VarDecl>(ND))
2303     return EmitGlobalVarDeclLValue(*this, E, VD);
2304 
2305   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
2306     return EmitFunctionDeclLValue(*this, E, FD);
2307 
2308   llvm_unreachable("Unhandled member declaration!");
2309 }
2310 
2311 LValue CodeGenFunction::EmitLValueForField(LValue base,
2312                                            const FieldDecl *field) {
2313   if (field->isBitField()) {
2314     const CGRecordLayout &RL =
2315       CGM.getTypes().getCGRecordLayout(field->getParent());
2316     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
2317     QualType fieldType =
2318       field->getType().withCVRQualifiers(base.getVRQualifiers());
2319     return LValue::MakeBitfield(base.getAddress(), Info, fieldType,
2320                                 base.getAlignment());
2321   }
2322 
2323   const RecordDecl *rec = field->getParent();
2324   QualType type = field->getType();
2325   CharUnits alignment = getContext().getDeclAlign(field);
2326 
2327   // FIXME: It should be impossible to have an LValue without alignment for a
2328   // complete type.
2329   if (!base.getAlignment().isZero())
2330     alignment = std::min(alignment, base.getAlignment());
2331 
2332   bool mayAlias = rec->hasAttr<MayAliasAttr>();
2333 
2334   llvm::Value *addr = base.getAddress();
2335   unsigned cvr = base.getVRQualifiers();
2336   if (rec->isUnion()) {
2337     // For unions, there is no pointer adjustment.
2338     assert(!type->isReferenceType() && "union has reference member");
2339   } else {
2340     // For structs, we GEP to the field that the record layout suggests.
2341     unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
2342     addr = Builder.CreateStructGEP(addr, idx, field->getName());
2343 
2344     // If this is a reference field, load the reference right now.
2345     if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
2346       llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
2347       if (cvr & Qualifiers::Volatile) load->setVolatile(true);
2348       load->setAlignment(alignment.getQuantity());
2349 
2350       if (CGM.shouldUseTBAA()) {
2351         llvm::MDNode *tbaa;
2352         if (mayAlias)
2353           tbaa = CGM.getTBAAInfo(getContext().CharTy);
2354         else
2355           tbaa = CGM.getTBAAInfo(type);
2356         CGM.DecorateInstruction(load, tbaa);
2357       }
2358 
2359       addr = load;
2360       mayAlias = false;
2361       type = refType->getPointeeType();
2362       if (type->isIncompleteType())
2363         alignment = CharUnits();
2364       else
2365         alignment = getContext().getTypeAlignInChars(type);
2366       cvr = 0; // qualifiers don't recursively apply to referencee
2367     }
2368   }
2369 
  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs: a union needs a bitcast, and a
  // struct element will need a bitcast if the LLVM type laid out doesn't
  // match the desired type.
2374   addr = EmitBitCastOfLValueToProperType(*this, addr,
2375                                          CGM.getTypes().ConvertTypeForMem(type),
2376                                          field->getName());
2377 
2378   if (field->hasAttr<AnnotateAttr>())
2379     addr = EmitFieldAnnotations(field, addr);
2380 
2381   LValue LV = MakeAddrLValue(addr, type, alignment);
2382   LV.getQuals().addCVRQualifiers(cvr);
2383 
2384   // __weak attribute on a field is ignored.
2385   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
2386     LV.getQuals().removeObjCGCAttr();
2387 
2388   // Fields of may_alias structs act like 'char' for TBAA purposes.
2389   // FIXME: this should get propagated down through anonymous structs
2390   // and unions.
2391   if (mayAlias && LV.getTBAAInfo())
2392     LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
2393 
2394   return LV;
2395 }
2396 
2397 LValue
2398 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
2399                                                   const FieldDecl *Field) {
2400   QualType FieldType = Field->getType();
2401 
2402   if (!FieldType->isReferenceType())
2403     return EmitLValueForField(Base, Field);
2404 
2405   const CGRecordLayout &RL =
2406     CGM.getTypes().getCGRecordLayout(Field->getParent());
2407   unsigned idx = RL.getLLVMFieldNo(Field);
2408   llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx);
2409   assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
2410 
  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs: a union needs a bitcast, and a
  // struct element will need a bitcast if the LLVM type laid out doesn't
  // match the desired type.
2415   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
2416   V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName());
2417 
2418   CharUnits Alignment = getContext().getDeclAlign(Field);
2419 
2420   // FIXME: It should be impossible to have an LValue without alignment for a
2421   // complete type.
2422   if (!Base.getAlignment().isZero())
2423     Alignment = std::min(Alignment, Base.getAlignment());
2424 
2425   return MakeAddrLValue(V, FieldType, Alignment);
2426 }
2427 
2428 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
2429   if (E->isFileScope()) {
2430     llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
2431     return MakeAddrLValue(GlobalPtr, E->getType());
2432   }
2433   if (E->getType()->isVariablyModifiedType())
    // Make sure to emit the VLA size.
2435     EmitVariablyModifiedType(E->getType());
2436 
2437   llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
2438   const Expr *InitExpr = E->getInitializer();
2439   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
2440 
2441   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
2442                    /*Init*/ true);
2443 
2444   return Result;
2445 }
2446 
2447 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
2448   if (!E->isGLValue())
2449     // Initializing an aggregate temporary in C++11: T{...}.
2450     return EmitAggExprToLValue(E);
2451 
2452   // An lvalue initializer list must be initializing a reference.
2453   assert(E->getNumInits() == 1 && "reference init with multiple values");
2454   return EmitLValue(E->getInit(0));
2455 }
2456 
2457 LValue CodeGenFunction::
2458 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
2459   if (!expr->isGLValue()) {
2460     // ?: here should be an aggregate.
2461     assert((hasAggregateLLVMType(expr->getType()) &&
2462             !expr->getType()->isAnyComplexType()) &&
2463            "Unexpected conditional operator!");
2464     return EmitAggExprToLValue(expr);
2465   }
2466 
2467   OpaqueValueMapping binding(*this, expr);
2468 
2469   const Expr *condExpr = expr->getCond();
2470   bool CondExprBool;
2471   if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
2472     const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
2473     if (!CondExprBool) std::swap(live, dead);
2474 
2475     if (!ContainsLabel(dead))
2476       return EmitLValue(live);
2477   }
2478 
2479   llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
2480   llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
2481   llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
2482 
2483   ConditionalEvaluation eval(*this);
2484   EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
2485 
2486   // Any temporaries created here are conditional.
2487   EmitBlock(lhsBlock);
2488   eval.begin(*this);
2489   LValue lhs = EmitLValue(expr->getTrueExpr());
2490   eval.end(*this);
2491 
2492   if (!lhs.isSimple())
2493     return EmitUnsupportedLValue(expr, "conditional operator");
2494 
2495   lhsBlock = Builder.GetInsertBlock();
2496   Builder.CreateBr(contBlock);
2497 
2498   // Any temporaries created here are conditional.
2499   EmitBlock(rhsBlock);
2500   eval.begin(*this);
2501   LValue rhs = EmitLValue(expr->getFalseExpr());
2502   eval.end(*this);
2503   if (!rhs.isSimple())
2504     return EmitUnsupportedLValue(expr, "conditional operator");
2505   rhsBlock = Builder.GetInsertBlock();
2506 
2507   EmitBlock(contBlock);
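  // The branches rejoin here; the result l-value is the phi of the two
  // addresses computed above, e.g. for '(cond ? x : y) = 0' (a sketch):
  //   %cond-lvalue = phi i32* [ %x.addr, %cond.true ], [ %y.addr, %cond.false ]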
2508 
2509   llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
2510                                          "cond-lvalue");
2511   phi->addIncoming(lhs.getAddress(), lhsBlock);
2512   phi->addIncoming(rhs.getAddress(), rhsBlock);
2513   return MakeAddrLValue(phi, expr->getType());
2514 }
2515 
2516 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
2517 /// type. If the cast is to a reference, we can have the usual lvalue result,
2518 /// otherwise if a cast is needed by the code generator in an lvalue context,
2519 /// then it must mean that we need the address of an aggregate in order to
2520 /// access one of its members.  This can happen for all the reasons that casts
2521 /// are permitted with aggregate result, including noop aggregate casts, and
/// casts from scalar to union.
2523 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
2524   switch (E->getCastKind()) {
2525   case CK_ToVoid:
2526     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
2527 
2528   case CK_Dependent:
2529     llvm_unreachable("dependent cast kind in IR gen!");
2530 
2531   case CK_BuiltinFnToFnPtr:
2532     llvm_unreachable("builtin functions are handled elsewhere");
2533 
2534   // These two casts are currently treated as no-ops, although they could
2535   // potentially be real operations depending on the target's ABI.
2536   case CK_NonAtomicToAtomic:
2537   case CK_AtomicToNonAtomic:
2538 
2539   case CK_NoOp:
2540   case CK_LValueToRValue:
2541     if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2542         || E->getType()->isRecordType())
2543       return EmitLValue(E->getSubExpr());
2544     // Fall through to synthesize a temporary.
2545 
2546   case CK_BitCast:
2547   case CK_ArrayToPointerDecay:
2548   case CK_FunctionToPointerDecay:
2549   case CK_NullToMemberPointer:
2550   case CK_NullToPointer:
2551   case CK_IntegralToPointer:
2552   case CK_PointerToIntegral:
2553   case CK_PointerToBoolean:
2554   case CK_VectorSplat:
2555   case CK_IntegralCast:
2556   case CK_IntegralToBoolean:
2557   case CK_IntegralToFloating:
2558   case CK_FloatingToIntegral:
2559   case CK_FloatingToBoolean:
2560   case CK_FloatingCast:
2561   case CK_FloatingRealToComplex:
2562   case CK_FloatingComplexToReal:
2563   case CK_FloatingComplexToBoolean:
2564   case CK_FloatingComplexCast:
2565   case CK_FloatingComplexToIntegralComplex:
2566   case CK_IntegralRealToComplex:
2567   case CK_IntegralComplexToReal:
2568   case CK_IntegralComplexToBoolean:
2569   case CK_IntegralComplexCast:
2570   case CK_IntegralComplexToFloatingComplex:
2571   case CK_DerivedToBaseMemberPointer:
2572   case CK_BaseToDerivedMemberPointer:
2573   case CK_MemberPointerToBoolean:
2574   case CK_ReinterpretMemberPointer:
2575   case CK_AnyPointerToBlockPointerCast:
2576   case CK_ARCProduceObject:
2577   case CK_ARCConsumeObject:
2578   case CK_ARCReclaimReturnedObject:
2579   case CK_ARCExtendBlockObject:
2580   case CK_CopyAndAutoreleaseBlockObject: {
2581     // These casts only produce lvalues when we're binding a reference to a
2582     // temporary realized from a (converted) pure rvalue. Emit the expression
2583     // as a value, copy it into a temporary, and return an lvalue referring to
2584     // that temporary.
2585     llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2586     EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2587     return MakeAddrLValue(V, E->getType());
2588   }
2589 
2590   case CK_Dynamic: {
2591     LValue LV = EmitLValue(E->getSubExpr());
2592     llvm::Value *V = LV.getAddress();
2593     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2594     return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2595   }
2596 
2597   case CK_ConstructorConversion:
2598   case CK_UserDefinedConversion:
2599   case CK_CPointerToObjCPointerCast:
2600   case CK_BlockPointerToObjCPointerCast:
2601     return EmitLValue(E->getSubExpr());
2602 
2603   case CK_UncheckedDerivedToBase:
2604   case CK_DerivedToBase: {
2605     const RecordType *DerivedClassTy =
2606       E->getSubExpr()->getType()->getAs<RecordType>();
2607     CXXRecordDecl *DerivedClassDecl =
2608       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2609 
2610     LValue LV = EmitLValue(E->getSubExpr());
2611     llvm::Value *This = LV.getAddress();
2612 
2613     // Perform the derived-to-base conversion
2614     llvm::Value *Base =
2615       GetAddressOfBaseClass(This, DerivedClassDecl,
2616                             E->path_begin(), E->path_end(),
2617                             /*NullCheckValue=*/false);
2618 
2619     return MakeAddrLValue(Base, E->getType());
2620   }
2621   case CK_ToUnion:
2622     return EmitAggExprToLValue(E);
2623   case CK_BaseToDerived: {
2624     const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2625     CXXRecordDecl *DerivedClassDecl =
2626       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2627 
2628     LValue LV = EmitLValue(E->getSubExpr());
2629 
2630     // Perform the base-to-derived conversion
2631     llvm::Value *Derived =
2632       GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2633                                E->path_begin(), E->path_end(),
2634                                /*NullCheckValue=*/false);
2635 
2636     return MakeAddrLValue(Derived, E->getType());
2637   }
2638   case CK_LValueBitCast: {
2639     // This must be a reinterpret_cast (or c-style equivalent).
2640     const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2641 
2642     LValue LV = EmitLValue(E->getSubExpr());
2643     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2644                                            ConvertType(CE->getTypeAsWritten()));
2645     return MakeAddrLValue(V, E->getType());
2646   }
2647   case CK_ObjCObjectLValueCast: {
2648     LValue LV = EmitLValue(E->getSubExpr());
2649     QualType ToType = getContext().getLValueReferenceType(E->getType());
2650     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2651                                            ConvertType(ToType));
2652     return MakeAddrLValue(V, E->getType());
2653   }
2654   }
2655 
2656   llvm_unreachable("Unhandled lvalue cast kind?");
2657 }
2658 
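/// EmitNullInitializationLValue - Emit an l-value for a scalar
/// value-initialization such as 'T()': allocate a temporary, zero-initialize
/// it, and return its address.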
2659 LValue CodeGenFunction::EmitNullInitializationLValue(
2660                                               const CXXScalarValueInitExpr *E) {
2661   QualType Ty = E->getType();
2662   LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2663   EmitNullInitialization(LV.getAddress(), Ty);
2664   return LV;
2665 }
2666 
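/// EmitOpaqueValueLValue - An opaque value bound as an l-value is emitted by
/// simply returning the l-value recorded for it in the opaque value mapping.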
2667 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2668   assert(OpaqueValueMappingData::shouldBindAsLValue(e));
2669   return getOpaqueLValueMapping(e);
2670 }
2671 
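/// EmitMaterializeTemporaryExpr - Materialize a temporary by emitting a
/// reference binding to the wrapped expression; the resulting address is the
/// l-value of the temporary.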
2672 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2673                                            const MaterializeTemporaryExpr *E) {
2674   RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
2675   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2676 }
2677 
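/// EmitRValueForField - Load the value of the given field of the object
/// designated by LV, dispatching on whether the field has complex, aggregate,
/// or scalar type.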
2678 RValue CodeGenFunction::EmitRValueForField(LValue LV,
2679                                            const FieldDecl *FD) {
2680   QualType FT = FD->getType();
2681   LValue FieldLV = EmitLValueForField(LV, FD);
2682   if (FT->isAnyComplexType())
2683     return RValue::getComplex(
2684         LoadComplexFromAddr(FieldLV.getAddress(),
2685                             FieldLV.isVolatileQualified()));
2686   else if (CodeGenFunction::hasAggregateLLVMType(FT))
2687     return FieldLV.asAggregateRValue();
2688 
2689   return EmitLoadOfLValue(FieldLV);
2690 }
2691 
2692 //===--------------------------------------------------------------------===//
2693 //                             Expression Emission
2694 //===--------------------------------------------------------------------===//
2695 
2696 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
2697                                      ReturnValueSlot ReturnValue) {
2698   if (CGDebugInfo *DI = getDebugInfo())
2699     DI->EmitLocation(Builder, E->getLocStart());
2700 
  // Builtins never have block type, so if the callee is a block pointer this
  // cannot be a builtin call; handle block calls before the builtin check.
2702   if (E->getCallee()->getType()->isBlockPointerType())
2703     return EmitBlockCallExpr(E, ReturnValue);
2704 
2705   if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
2706     return EmitCXXMemberCallExpr(CE, ReturnValue);
2707 
2708   if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
2709     return EmitCUDAKernelCallExpr(CE, ReturnValue);
2710 
2711   const Decl *TargetDecl = E->getCalleeDecl();
2712   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2713     if (unsigned builtinID = FD->getBuiltinID())
2714       return EmitBuiltinExpr(FD, builtinID, E);
2715   }
2716 
2717   if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
2718     if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
2719       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
2720 
2721   if (const CXXPseudoDestructorExpr *PseudoDtor
2722           = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
2723     QualType DestroyedType = PseudoDtor->getDestroyedType();
2724     if (getLangOpts().ObjCAutoRefCount &&
2725         DestroyedType->isObjCLifetimeType() &&
2726         (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
2727          DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
2728       // Automatic Reference Counting:
2729       //   If the pseudo-expression names a retainable object with weak or
2730       //   strong lifetime, the object shall be released.
2731       Expr *BaseExpr = PseudoDtor->getBase();
      llvm::Value *BaseValue = 0;
2733       Qualifiers BaseQuals;
2734 
2735       // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2736       if (PseudoDtor->isArrow()) {
2737         BaseValue = EmitScalarExpr(BaseExpr);
2738         const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
2739         BaseQuals = PTy->getPointeeType().getQualifiers();
2740       } else {
2741         LValue BaseLV = EmitLValue(BaseExpr);
2742         BaseValue = BaseLV.getAddress();
2743         QualType BaseTy = BaseExpr->getType();
2744         BaseQuals = BaseTy.getQualifiers();
2745       }
2746 
2747       switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
2748       case Qualifiers::OCL_None:
2749       case Qualifiers::OCL_ExplicitNone:
2750       case Qualifiers::OCL_Autoreleasing:
2751         break;
2752 
2753       case Qualifiers::OCL_Strong:
2754         EmitARCRelease(Builder.CreateLoad(BaseValue,
2755                           PseudoDtor->getDestroyedType().isVolatileQualified()),
2756                        /*precise*/ true);
2757         break;
2758 
2759       case Qualifiers::OCL_Weak:
2760         EmitARCDestroyWeak(BaseValue);
2761         break;
2762       }
2763     } else {
2764       // C++ [expr.pseudo]p1:
2765       //   The result shall only be used as the operand for the function call
2766       //   operator (), and the result of such a call has type void. The only
2767       //   effect is the evaluation of the postfix-expression before the dot or
2768       //   arrow.
2769       EmitScalarExpr(E->getCallee());
2770     }
2771 
2772     return RValue::get(0);
2773   }
2774 
2775   llvm::Value *Callee = EmitScalarExpr(E->getCallee());
2776   return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
2777                   E->arg_begin(), E->arg_end(), TargetDecl);
2778 }
2779 
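/// EmitBinaryOperatorLValue - Only a few binary operators can yield an
/// l-value: comma, the pointer-to-member operators, and (C++) assignment,
/// whose result refers to the left-hand side.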
2780 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
2781   // Comma expressions just emit their LHS then their RHS as an l-value.
2782   if (E->getOpcode() == BO_Comma) {
2783     EmitIgnoredExpr(E->getLHS());
2784     EnsureInsertPoint();
2785     return EmitLValue(E->getRHS());
2786   }
2787 
2788   if (E->getOpcode() == BO_PtrMemD ||
2789       E->getOpcode() == BO_PtrMemI)
2790     return EmitPointerToDataMemberBinaryExpr(E);
2791 
2792   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
2793 
2794   // Note that in all of these cases, __block variables need the RHS
2795   // evaluated first just in case the variable gets moved by the RHS.
2796 
2797   if (!hasAggregateLLVMType(E->getType())) {
2798     switch (E->getLHS()->getType().getObjCLifetime()) {
2799     case Qualifiers::OCL_Strong:
2800       return EmitARCStoreStrong(E, /*ignored*/ false).first;
2801 
2802     case Qualifiers::OCL_Autoreleasing:
2803       return EmitARCStoreAutoreleasing(E).first;
2804 
2805     // No reason to do any of these differently.
2806     case Qualifiers::OCL_None:
2807     case Qualifiers::OCL_ExplicitNone:
2808     case Qualifiers::OCL_Weak:
2809       break;
2810     }
2811 
2812     RValue RV = EmitAnyExpr(E->getRHS());
2813     LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
2814     EmitStoreThroughLValue(RV, LV);
2815     return LV;
2816   }
2817 
2818   if (E->getType()->isAnyComplexType())
2819     return EmitComplexAssignmentLValue(E);
2820 
2821   return EmitAggExprToLValue(E);
2822 }
2823 
2824 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
2825   RValue RV = EmitCallExpr(E);
2826 
2827   if (!RV.isScalar())
2828     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2829 
2830   assert(E->getCallReturnType()->isReferenceType() &&
2831          "Can't have a scalar return unless the return type is a "
2832          "reference type!");
2833 
2834   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2835 }
2836 
2837 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
2838   // FIXME: This shouldn't require another copy.
2839   return EmitAggExprToLValue(E);
2840 }
2841 
2842 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
2843   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
2844          && "binding l-value to type which needs a temporary");
2845   AggValueSlot Slot = CreateAggTemp(E->getType());
2846   EmitCXXConstructExpr(E, Slot);
2847   return MakeAddrLValue(Slot.getAddr(), E->getType());
2848 }
2849 
2850 LValue
2851 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
2852   return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
2853 }
2854 
2855 llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
2856   return CGM.GetAddrOfUuidDescriptor(E);
2857 }
2858 
2859 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
2860   return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
2861 }
2862 
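/// EmitCXXBindTemporaryLValue - Emit the sub-expression into a temporary slot
/// and register the temporary's destructor.  The slot is marked externally
/// destructed because EmitCXXTemporary pushes the destructor cleanup itself.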
2863 LValue
2864 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
2865   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2866   Slot.setExternallyDestructed();
2867   EmitAggExpr(E->getSubExpr(), Slot);
2868   EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
2869   return MakeAddrLValue(Slot.getAddr(), E->getType());
2870 }
2871 
2872 LValue
2873 CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
2874   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2875   EmitLambdaExpr(E, Slot);
2876   return MakeAddrLValue(Slot.getAddr(), E->getType());
2877 }
2878 
2879 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
2880   RValue RV = EmitObjCMessageExpr(E);
2881 
2882   if (!RV.isScalar())
2883     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2884 
2885   assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
2886          "Can't have a scalar return unless the return type is a "
2887          "reference type!");
2888 
2889   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2890 }
2891 
2892 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
2893   llvm::Value *V =
2894     CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
2895   return MakeAddrLValue(V, E->getType());
2896 }
2897 
2898 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2899                                              const ObjCIvarDecl *Ivar) {
2900   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
2901 }
2902 
2903 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
2904                                           llvm::Value *BaseValue,
2905                                           const ObjCIvarDecl *Ivar,
2906                                           unsigned CVRQualifiers) {
2907   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
2908                                                    Ivar, CVRQualifiers);
2909 }
2910 
2911 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
2912   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
2913   llvm::Value *BaseValue = 0;
2914   const Expr *BaseExpr = E->getBase();
2915   Qualifiers BaseQuals;
2916   QualType ObjectTy;
2917   if (E->isArrow()) {
2918     BaseValue = EmitScalarExpr(BaseExpr);
2919     ObjectTy = BaseExpr->getType()->getPointeeType();
2920     BaseQuals = ObjectTy.getQualifiers();
2921   } else {
2922     LValue BaseLV = EmitLValue(BaseExpr);
2923     // FIXME: this isn't right for bitfields.
2924     BaseValue = BaseLV.getAddress();
2925     ObjectTy = BaseExpr->getType();
2926     BaseQuals = ObjectTy.getQualifiers();
2927   }
2928 
2929   LValue LV =
2930     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
2931                       BaseQuals.getCVRQualifiers());
2932   setObjCGCLValueClass(getContext(), E, LV);
2933   return LV;
2934 }
2935 
2936 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // We can only get an l-value for a statement expression that returns an
  // aggregate type.
2938   RValue RV = EmitAnyExprToTemp(E);
2939   return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2940 }
2941 
2942 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
2943                                  ReturnValueSlot ReturnValue,
2944                                  CallExpr::const_arg_iterator ArgBeg,
2945                                  CallExpr::const_arg_iterator ArgEnd,
2946                                  const Decl *TargetDecl) {
  // Get the actual function type.  The callee type will always be a pointer
  // to function type; block calls never reach here (they are emitted via
  // EmitBlockCallExpr).
2949   assert(CalleeType->isFunctionPointerType() &&
2950          "Call must have function pointer type!");
2951 
2952   CalleeType = getContext().getCanonicalType(CalleeType);
2953 
2954   const FunctionType *FnType
2955     = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
2956 
2957   CallArgList Args;
2958   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
2959 
2960   const CGFunctionInfo &FnInfo =
2961     CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
2962 
2963   // C99 6.5.2.2p6:
2964   //   If the expression that denotes the called function has a type
2965   //   that does not include a prototype, [the default argument
2966   //   promotions are performed]. If the number of arguments does not
2967   //   equal the number of parameters, the behavior is undefined. If
2968   //   the function is defined with a type that includes a prototype,
2969   //   and either the prototype ends with an ellipsis (, ...) or the
2970   //   types of the arguments after promotion are not compatible with
2971   //   the types of the parameters, the behavior is undefined. If the
2972   //   function is defined with a type that does not include a
2973   //   prototype, and the types of the arguments after promotion are
2974   //   not compatible with those of the parameters after promotion,
2975   //   the behavior is undefined [except in some trivial cases].
2976   // That is, in the general case, we should assume that a call
2977   // through an unprototyped function type works like a *non-variadic*
2978   // call.  The way we make this work is to cast to the exact type
2979   // of the promoted arguments.
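  // For example, given 'void f(); f(1, 2.0);' the promoted argument types are
  // 'int' and 'double', so the call is emitted through a pointer to
  // 'void (int, double)'.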
2980   if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
2981     llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
2982     CalleeTy = CalleeTy->getPointerTo();
2983     Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
2984   }
2985 
2986   return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
2987 }
2988 
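// EmitPointerToDataMemberBinaryExpr - Emit '.*' or '->*' applied to a pointer
// to data member: evaluate the base (as a scalar pointer for '->*', as an
// l-value for '.*') and the member pointer, then let the C++ ABI compute the
// member's address.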
2989 LValue CodeGenFunction::
2990 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
2991   llvm::Value *BaseV;
2992   if (E->getOpcode() == BO_PtrMemI)
2993     BaseV = EmitScalarExpr(E->getLHS());
2994   else
2995     BaseV = EmitLValue(E->getLHS()).getAddress();
2996 
2997   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
2998 
2999   const MemberPointerType *MPT
3000     = E->getRHS()->getType()->getAs<MemberPointerType>();
3001 
3002   llvm::Value *AddV =
3003     CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
3004 
3005   return MakeAddrLValue(AddV, MPT->getPointeeType());
3006 }
3007 
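// EmitAtomicOp - Emit one atomic operation with a known ordering.  Operands
// are passed indirectly: Ptr is the atomic object, Val1 and Val2 are
// temporaries holding the operands (where present), and Dest is a temporary
// receiving the result (where the operation produces one).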
3008 static void
3009 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
3010              llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
3011              uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
3012   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
3013   llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
3014 
3015   switch (E->getOp()) {
3016   case AtomicExpr::AO__c11_atomic_init:
3017     llvm_unreachable("Already handled!");
3018 
3019   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3020   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3021   case AtomicExpr::AO__atomic_compare_exchange:
3022   case AtomicExpr::AO__atomic_compare_exchange_n: {
3023     // Note that cmpxchg only supports specifying one ordering and
3024     // doesn't support weak cmpxchg, at least at the moment.
3025     llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
3026     LoadVal1->setAlignment(Align);
3027     llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
3028     LoadVal2->setAlignment(Align);
3029     llvm::AtomicCmpXchgInst *CXI =
3030         CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
3031     CXI->setVolatile(E->isVolatile());
3032     llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
3033     StoreVal1->setAlignment(Align);
3034     llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
3035     CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
3036     return;
3037   }
3038 
3039   case AtomicExpr::AO__c11_atomic_load:
3040   case AtomicExpr::AO__atomic_load_n:
3041   case AtomicExpr::AO__atomic_load: {
3042     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
3043     Load->setAtomic(Order);
3044     Load->setAlignment(Size);
3045     Load->setVolatile(E->isVolatile());
3046     llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
3047     StoreDest->setAlignment(Align);
3048     return;
3049   }
3050 
3051   case AtomicExpr::AO__c11_atomic_store:
3052   case AtomicExpr::AO__atomic_store:
3053   case AtomicExpr::AO__atomic_store_n: {
3054     assert(!Dest && "Store does not return a value");
3055     llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
3056     LoadVal1->setAlignment(Align);
3057     llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
3058     Store->setAtomic(Order);
3059     Store->setAlignment(Size);
3060     Store->setVolatile(E->isVolatile());
3061     return;
3062   }
3063 
3064   case AtomicExpr::AO__c11_atomic_exchange:
3065   case AtomicExpr::AO__atomic_exchange_n:
3066   case AtomicExpr::AO__atomic_exchange:
3067     Op = llvm::AtomicRMWInst::Xchg;
3068     break;
3069 
3070   case AtomicExpr::AO__atomic_add_fetch:
3071     PostOp = llvm::Instruction::Add;
3072     // Fall through.
3073   case AtomicExpr::AO__c11_atomic_fetch_add:
3074   case AtomicExpr::AO__atomic_fetch_add:
3075     Op = llvm::AtomicRMWInst::Add;
3076     break;
3077 
3078   case AtomicExpr::AO__atomic_sub_fetch:
3079     PostOp = llvm::Instruction::Sub;
3080     // Fall through.
3081   case AtomicExpr::AO__c11_atomic_fetch_sub:
3082   case AtomicExpr::AO__atomic_fetch_sub:
3083     Op = llvm::AtomicRMWInst::Sub;
3084     break;
3085 
3086   case AtomicExpr::AO__atomic_and_fetch:
3087     PostOp = llvm::Instruction::And;
3088     // Fall through.
3089   case AtomicExpr::AO__c11_atomic_fetch_and:
3090   case AtomicExpr::AO__atomic_fetch_and:
3091     Op = llvm::AtomicRMWInst::And;
3092     break;
3093 
3094   case AtomicExpr::AO__atomic_or_fetch:
3095     PostOp = llvm::Instruction::Or;
3096     // Fall through.
3097   case AtomicExpr::AO__c11_atomic_fetch_or:
3098   case AtomicExpr::AO__atomic_fetch_or:
3099     Op = llvm::AtomicRMWInst::Or;
3100     break;
3101 
3102   case AtomicExpr::AO__atomic_xor_fetch:
3103     PostOp = llvm::Instruction::Xor;
3104     // Fall through.
3105   case AtomicExpr::AO__c11_atomic_fetch_xor:
3106   case AtomicExpr::AO__atomic_fetch_xor:
3107     Op = llvm::AtomicRMWInst::Xor;
3108     break;
3109 
3110   case AtomicExpr::AO__atomic_nand_fetch:
3111     PostOp = llvm::Instruction::And;
3112     // Fall through.
3113   case AtomicExpr::AO__atomic_fetch_nand:
3114     Op = llvm::AtomicRMWInst::Nand;
3115     break;
3116   }
3117 
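  // All the remaining operations are read-modify-writes: load the operand
  // from its temporary and emit a single atomicrmw instruction.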
3118   llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
3119   LoadVal1->setAlignment(Align);
3120   llvm::AtomicRMWInst *RMWI =
3121       CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
3122   RMWI->setVolatile(E->isVolatile());
3123 
3124   // For __atomic_*_fetch operations, perform the operation again to
3125   // determine the value which was written.
3126   llvm::Value *Result = RMWI;
3127   if (PostOp)
3128     Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
3129   if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
3130     Result = CGF.Builder.CreateNot(Result);
3131   llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
3132   StoreDest->setAlignment(Align);
3133 }
3134 
3135 // This function emits any expression (scalar, complex, or aggregate)
3136 // into a temporary alloca.
3137 static llvm::Value *
3138 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
3139   llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
3140   CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
3141                        /*Init*/ true);
3142   return DeclPtr;
3143 }
3144 
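// ConvertTempToRValue - Load an r-value of the given type back out of a
// temporary written by an atomic operation.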
3145 static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
3146                                   llvm::Value *Dest) {
3147   if (Ty->isAnyComplexType())
3148     return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
3149   if (CGF.hasAggregateLLVMType(Ty))
3150     return RValue::getAggregate(Dest);
3151   return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
3152 }
3153 
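// EmitAtomicExpr - Emit a C11 _Atomic or GNU __atomic builtin.  Accesses the
// target can perform inline are lowered to LLVM atomic instructions; the rest
// become calls into the __atomic_* runtime library.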
3154 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
3155   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
3156   QualType MemTy = AtomicTy;
3157   if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
3158     MemTy = AT->getValueType();
3159   CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
3160   uint64_t Size = sizeChars.getQuantity();
3161   CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
3162   unsigned Align = alignChars.getQuantity();
3163   unsigned MaxInlineWidth =
3164       getContext().getTargetInfo().getMaxAtomicInlineWidth();
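  // The operation must go through a library call if the object's size and
  // alignment disagree or if it is wider than the largest atomic operation
  // the target can inline.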
3165   bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
3166 
3169   llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
3170   Ptr = EmitScalarExpr(E->getPtr());
3171 
3172   if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
3173     assert(!Dest && "Init does not return a value");
3174     if (!hasAggregateLLVMType(E->getVal1()->getType())) {
3175       QualType PointeeType
3176         = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
3177       EmitScalarInit(EmitScalarExpr(E->getVal1()),
3178                      LValue::MakeAddr(Ptr, PointeeType, alignChars,
3179                                       getContext()));
3180     } else if (E->getType()->isAnyComplexType()) {
3181       EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
3182     } else {
3183       AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
3184                                         AtomicTy.getQualifiers(),
3185                                         AggValueSlot::IsNotDestructed,
3186                                         AggValueSlot::DoesNotNeedGCBarriers,
3187                                         AggValueSlot::IsNotAliased);
3188       EmitAggExpr(E->getVal1(), Slot);
3189     }
3190     return RValue::get(0);
3191   }
3192 
3193   Order = EmitScalarExpr(E->getOrder());
3194 
3195   switch (E->getOp()) {
3196   case AtomicExpr::AO__c11_atomic_init:
3197     llvm_unreachable("Already handled!");
3198 
3199   case AtomicExpr::AO__c11_atomic_load:
3200   case AtomicExpr::AO__atomic_load_n:
3201     break;
3202 
3203   case AtomicExpr::AO__atomic_load:
3204     Dest = EmitScalarExpr(E->getVal1());
3205     break;
3206 
3207   case AtomicExpr::AO__atomic_store:
3208     Val1 = EmitScalarExpr(E->getVal1());
3209     break;
3210 
3211   case AtomicExpr::AO__atomic_exchange:
3212     Val1 = EmitScalarExpr(E->getVal1());
3213     Dest = EmitScalarExpr(E->getVal2());
3214     break;
3215 
3216   case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3217   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3218   case AtomicExpr::AO__atomic_compare_exchange_n:
3219   case AtomicExpr::AO__atomic_compare_exchange:
3220     Val1 = EmitScalarExpr(E->getVal1());
3221     if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
3222       Val2 = EmitScalarExpr(E->getVal2());
3223     else
3224       Val2 = EmitValToTemp(*this, E->getVal2());
3225     OrderFail = EmitScalarExpr(E->getOrderFail());
3226     // Evaluate and discard the 'weak' argument.
3227     if (E->getNumSubExprs() == 6)
3228       EmitScalarExpr(E->getWeak());
3229     break;
3230 
3231   case AtomicExpr::AO__c11_atomic_fetch_add:
3232   case AtomicExpr::AO__c11_atomic_fetch_sub:
3233     if (MemTy->isPointerType()) {
3234       // For pointer arithmetic, we're required to do a bit of math:
3235       // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
3236       // ... but only for the C11 builtins. The GNU builtins expect the
3237       // user to multiply by sizeof(T).
3238       QualType Val1Ty = E->getVal1()->getType();
3239       llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
3240       CharUnits PointeeIncAmt =
3241           getContext().getTypeSizeInChars(MemTy->getPointeeType());
3242       Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
3243       Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
3244       EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
3245       break;
3246     }
3247     // Fall through.
3248   case AtomicExpr::AO__atomic_fetch_add:
3249   case AtomicExpr::AO__atomic_fetch_sub:
3250   case AtomicExpr::AO__atomic_add_fetch:
3251   case AtomicExpr::AO__atomic_sub_fetch:
3252   case AtomicExpr::AO__c11_atomic_store:
3253   case AtomicExpr::AO__c11_atomic_exchange:
3254   case AtomicExpr::AO__atomic_store_n:
3255   case AtomicExpr::AO__atomic_exchange_n:
3256   case AtomicExpr::AO__c11_atomic_fetch_and:
3257   case AtomicExpr::AO__c11_atomic_fetch_or:
3258   case AtomicExpr::AO__c11_atomic_fetch_xor:
3259   case AtomicExpr::AO__atomic_fetch_and:
3260   case AtomicExpr::AO__atomic_fetch_or:
3261   case AtomicExpr::AO__atomic_fetch_xor:
3262   case AtomicExpr::AO__atomic_fetch_nand:
3263   case AtomicExpr::AO__atomic_and_fetch:
3264   case AtomicExpr::AO__atomic_or_fetch:
3265   case AtomicExpr::AO__atomic_xor_fetch:
3266   case AtomicExpr::AO__atomic_nand_fetch:
3267     Val1 = EmitValToTemp(*this, E->getVal1());
3268     break;
3269   }
3270 
3271   if (!E->getType()->isVoidType() && !Dest)
3272     Dest = CreateMemTemp(E->getType(), ".atomicdst");
3273 
3274   // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
3275   if (UseLibcall) {
3276 
3277     llvm::SmallVector<QualType, 5> Params;
3278     CallArgList Args;
    // Size is always the first parameter.
3280     Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
3281              getContext().getSizeType());
    // Atomic address is always the second parameter.
3283     Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
3284              getContext().VoidPtrTy);
3285 
3286     const char* LibCallName;
3287     QualType RetTy = getContext().VoidTy;
3288     switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimization benefit possible from a libcall version of a weak compare
    // and exchange.
3292     // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
3293     //                                void *desired, int success, int failure)
3294     case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
3295     case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
3296     case AtomicExpr::AO__atomic_compare_exchange:
3297     case AtomicExpr::AO__atomic_compare_exchange_n:
3298       LibCallName = "__atomic_compare_exchange";
3299       RetTy = getContext().BoolTy;
3300       Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3301                getContext().VoidPtrTy);
3302       Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
3303                getContext().VoidPtrTy);
3304       Args.add(RValue::get(Order),
3305                getContext().IntTy);
3306       Order = OrderFail;
3307       break;
3308     // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
3309     //                        int order)
3310     case AtomicExpr::AO__c11_atomic_exchange:
3311     case AtomicExpr::AO__atomic_exchange_n:
3312     case AtomicExpr::AO__atomic_exchange:
3313       LibCallName = "__atomic_exchange";
3314       Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3315                getContext().VoidPtrTy);
3316       Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
3317                getContext().VoidPtrTy);
3318       break;
3319     // void __atomic_store(size_t size, void *mem, void *val, int order)
3320     case AtomicExpr::AO__c11_atomic_store:
3321     case AtomicExpr::AO__atomic_store:
3322     case AtomicExpr::AO__atomic_store_n:
3323       LibCallName = "__atomic_store";
3324       Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
3325                getContext().VoidPtrTy);
3326       break;
3327     // void __atomic_load(size_t size, void *mem, void *return, int order)
3328     case AtomicExpr::AO__c11_atomic_load:
3329     case AtomicExpr::AO__atomic_load:
3330     case AtomicExpr::AO__atomic_load_n:
3331       LibCallName = "__atomic_load";
3332       Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
3333                getContext().VoidPtrTy);
3334       break;
3335 #if 0
3336     // These are only defined for 1-16 byte integers.  It is not clear what
3337     // their semantics would be on anything else...
3338     case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
3339     case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
3340     case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
3341     case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
3342     case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
3343 #endif
3344     default: return EmitUnsupportedRValue(E, "atomic library call");
3345     }
    // Order is always the last parameter.
3347     Args.add(RValue::get(Order),
3348              getContext().IntTy);
3349 
3350     const CGFunctionInfo &FuncInfo =
3351         CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
3352             FunctionType::ExtInfo(), RequiredArgs::All);
3353     llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3354     llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3355     RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
3356     if (E->isCmpXChg())
3357       return Res;
3358     if (E->getType()->isVoidType())
3359       return RValue::get(0);
3360     return ConvertTempToRValue(*this, E->getType(), Dest);
3361   }
3362 
3363   bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
3364                  E->getOp() == AtomicExpr::AO__atomic_store ||
3365                  E->getOp() == AtomicExpr::AO__atomic_store_n;
3366   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
3367                 E->getOp() == AtomicExpr::AO__atomic_load ||
3368                 E->getOp() == AtomicExpr::AO__atomic_load_n;
3369 
3370   llvm::Type *IPtrTy =
3371       llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
3372   llvm::Value *OrigDest = Dest;
3373   Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
3374   if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
3375   if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
3376   if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
3377 
3378   if (isa<llvm::ConstantInt>(Order)) {
3379     int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3380     switch (ord) {
3381     case 0:  // memory_order_relaxed
3382       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3383                    llvm::Monotonic);
3384       break;
3385     case 1:  // memory_order_consume
3386     case 2:  // memory_order_acquire
3387       if (IsStore)
3388         break; // Avoid crashing on code with undefined behavior
3389       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3390                    llvm::Acquire);
3391       break;
3392     case 3:  // memory_order_release
3393       if (IsLoad)
3394         break; // Avoid crashing on code with undefined behavior
3395       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3396                    llvm::Release);
3397       break;
3398     case 4:  // memory_order_acq_rel
3399       if (IsLoad || IsStore)
3400         break; // Avoid crashing on code with undefined behavior
3401       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3402                    llvm::AcquireRelease);
3403       break;
3404     case 5:  // memory_order_seq_cst
3405       EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3406                    llvm::SequentiallyConsistent);
3407       break;
3408     default: // invalid order
3409       // We should not ever get here normally, but it's hard to
3410       // enforce that in general.
3411       break;
3412     }
3413     if (E->getType()->isVoidType())
3414       return RValue::get(0);
3415     return ConvertTempToRValue(*this, E->getType(), OrigDest);
3416   }
3417 
3418   // Long case, when Order isn't obviously constant.
3419 
3420   // Create all the relevant BB's
3421   llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
3422                    *AcqRelBB = 0, *SeqCstBB = 0;
3423   MonotonicBB = createBasicBlock("monotonic", CurFn);
3424   if (!IsStore)
3425     AcquireBB = createBasicBlock("acquire", CurFn);
3426   if (!IsLoad)
3427     ReleaseBB = createBasicBlock("release", CurFn);
3428   if (!IsLoad && !IsStore)
3429     AcqRelBB = createBasicBlock("acqrel", CurFn);
3430   SeqCstBB = createBasicBlock("seqcst", CurFn);
3431   llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3432 
3433   // Create the switch for the split
3434   // MonotonicBB is arbitrarily chosen as the default case; in practice, this
3435   // doesn't matter unless someone is crazy enough to use something that
3436   // doesn't fold to a constant for the ordering.
3437   Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3438   llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
3439 
3440   // Emit all the different atomics
3441   Builder.SetInsertPoint(MonotonicBB);
3442   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3443                llvm::Monotonic);
3444   Builder.CreateBr(ContBB);
3445   if (!IsStore) {
3446     Builder.SetInsertPoint(AcquireBB);
3447     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3448                  llvm::Acquire);
3449     Builder.CreateBr(ContBB);
3450     SI->addCase(Builder.getInt32(1), AcquireBB);
3451     SI->addCase(Builder.getInt32(2), AcquireBB);
3452   }
3453   if (!IsLoad) {
3454     Builder.SetInsertPoint(ReleaseBB);
3455     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3456                  llvm::Release);
3457     Builder.CreateBr(ContBB);
3458     SI->addCase(Builder.getInt32(3), ReleaseBB);
3459   }
3460   if (!IsLoad && !IsStore) {
3461     Builder.SetInsertPoint(AcqRelBB);
3462     EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3463                  llvm::AcquireRelease);
3464     Builder.CreateBr(ContBB);
3465     SI->addCase(Builder.getInt32(4), AcqRelBB);
3466   }
3467   Builder.SetInsertPoint(SeqCstBB);
3468   EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
3469                llvm::SequentiallyConsistent);
3470   Builder.CreateBr(ContBB);
3471   SI->addCase(Builder.getInt32(5), SeqCstBB);
3472 
3473   // Cleanup and return
3474   Builder.SetInsertPoint(ContBB);
3475   if (E->getType()->isVoidType())
3476     return RValue::get(0);
3477   return ConvertTempToRValue(*this, E->getType(), OrigDest);
3478 }
3479 
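/// SetFPAccuracy - Attach !fpmath metadata to the given floating-point
/// instruction, bounding the permitted error of its result in ULPs.  An
/// accuracy of zero requests full precision, which needs no metadata.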
3480 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
3481   assert(Val->getType()->isFPOrFPVectorTy());
3482   if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
3483     return;
3484 
3485   llvm::MDBuilder MDHelper(getLLVMContext());
3486   llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
3487 
3488   cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
3489 }
3490 
3491 namespace {
3492   struct LValueOrRValue {
3493     LValue LV;
3494     RValue RV;
3495   };
3496 }
3497 
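// emitPseudoObjectExpr - Emit a pseudo-object expression by walking its
// semantic-form subexpressions: bind each opaque value to the result of its
// source expression, emit the designated result expression as an l-value or
// r-value as requested, and emit everything else in an ignored context.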
3498 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
3499                                            const PseudoObjectExpr *E,
3500                                            bool forLValue,
3501                                            AggValueSlot slot) {
3502   llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
3503 
3504   // Find the result expression, if any.
3505   const Expr *resultExpr = E->getResultExpr();
3506   LValueOrRValue result;
3507 
3508   for (PseudoObjectExpr::const_semantics_iterator
3509          i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
3510     const Expr *semantic = *i;
3511 
3512     // If this semantic expression is an opaque value, bind it
3513     // to the result of its source expression.
3514     if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
3515 
3516       // If this is the result expression, we may need to evaluate
3517       // directly into the slot.
3518       typedef CodeGenFunction::OpaqueValueMappingData OVMA;
3519       OVMA opaqueData;
3520       if (ov == resultExpr && ov->isRValue() && !forLValue &&
3521           CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
3522           !ov->getType()->isAnyComplexType()) {
3523         CGF.EmitAggExpr(ov->getSourceExpr(), slot);
3524 
3525         LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
3526         opaqueData = OVMA::bind(CGF, ov, LV);
3527         result.RV = slot.asRValue();
3528 
3529       // Otherwise, emit as normal.
3530       } else {
3531         opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
3532 
3533         // If this is the result, also evaluate the result now.
3534         if (ov == resultExpr) {
3535           if (forLValue)
3536             result.LV = CGF.EmitLValue(ov);
3537           else
3538             result.RV = CGF.EmitAnyExpr(ov, slot);
3539         }
3540       }
3541 
3542       opaques.push_back(opaqueData);
3543 
3544     // Otherwise, if the expression is the result, evaluate it
3545     // and remember the result.
3546     } else if (semantic == resultExpr) {
3547       if (forLValue)
3548         result.LV = CGF.EmitLValue(semantic);
3549       else
3550         result.RV = CGF.EmitAnyExpr(semantic, slot);
3551 
3552     // Otherwise, evaluate the expression in an ignored context.
3553     } else {
3554       CGF.EmitIgnoredExpr(semantic);
3555     }
3556   }
3557 
3558   // Unbind all the opaques now.
3559   for (unsigned i = 0, e = opaques.size(); i != e; ++i)
3560     opaques[i].unbind(CGF);
3561 
3562   return result;
3563 }
3564 
3565 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
3566                                                AggValueSlot slot) {
3567   return emitPseudoObjectExpr(*this, E, false, slot).RV;
3568 }
3569 
3570 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
3571   return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
3572 }
3573