1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGCall.h"
17 #include "CGCXXABI.h"
18 #include "CGDebugInfo.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "clang/AST/ASTContext.h"
22 #include "clang/AST/DeclObjC.h"
23 #include "clang/Frontend/CodeGenOptions.h"
24 #include "llvm/Intrinsics.h"
25 #include "llvm/Target/TargetData.h"
26 using namespace clang;
27 using namespace CodeGen;
28 
29 //===--------------------------------------------------------------------===//
30 //                        Miscellaneous Helper Methods
31 //===--------------------------------------------------------------------===//
32 
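/// EmitCastToVoidPtr - Given a pointer value, cast it to an i8* in the same
/// address space; if it already has that type, return it unchanged.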
33 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
34   unsigned addressSpace =
35     cast<llvm::PointerType>(value->getType())->getAddressSpace();
36 
37   const llvm::PointerType *destType = Int8PtrTy;
38   if (addressSpace)
39     destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
40 
41   if (value->getType() == destType) return value;
42   return Builder.CreateBitCast(value, destType);
43 }
44 
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
46 /// block.
47 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
48                                                     const llvm::Twine &Name) {
49   if (!Builder.isNamePreserving())
50     return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
51   return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
52 }
53 
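/// InitTempAlloca - Provide an initial value for the given temporary by
/// emitting a store of Init immediately after the alloca insertion point, so
/// the temporary is initialized on entry to the function.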
54 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
55                                      llvm::Value *Init) {
56   llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
57   llvm::BasicBlock *Block = AllocaInsertPt->getParent();
58   Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
59 }
60 
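/// CreateIRTemp - Create a temporary alloca for the scalar/IR representation
/// of Ty (ConvertType), aligned to the type's natural alignment.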
61 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
62                                                 const llvm::Twine &Name) {
63   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
64   // FIXME: Should we prefer the preferred type alignment here?
65   CharUnits Align = getContext().getTypeAlignInChars(Ty);
66   Alloc->setAlignment(Align.getQuantity());
67   return Alloc;
68 }
69 
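/// CreateMemTemp - Create a temporary alloca for the in-memory representation
/// of Ty (ConvertTypeForMem), aligned to the type's natural alignment.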
70 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
71                                                  const llvm::Twine &Name) {
72   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
73   // FIXME: Should we prefer the preferred type alignment here?
74   CharUnits Align = getContext().getTypeAlignInChars(Ty);
75   Alloc->setAlignment(Align.getQuantity());
76   return Alloc;
77 }
78 
79 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
80 /// expression and compare the result against zero, returning an Int1Ty value.
81 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
82   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
83     llvm::Value *MemPtr = EmitScalarExpr(E);
84     return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
85   }
86 
87   QualType BoolTy = getContext().BoolTy;
88   if (!E->getType()->isAnyComplexType())
89     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
90 
91   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
92 }
93 
94 /// EmitIgnoredExpr - Emit code to compute the specified expression,
95 /// ignoring the result.
96 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
97   if (E->isRValue())
98     return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
99 
100   // Just emit it as an l-value and drop the result.
101   EmitLValue(E);
102 }
103 
104 /// EmitAnyExpr - Emit code to compute the specified expression which
105 /// can have any type.  The result is returned as an RValue struct.
106 /// If this is an aggregate expression, AggSlot indicates where the
107 /// result should be returned.
108 RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
109                                     bool IgnoreResult) {
110   if (!hasAggregateLLVMType(E->getType()))
111     return RValue::get(EmitScalarExpr(E, IgnoreResult));
112   else if (E->getType()->isAnyComplexType())
113     return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
114 
115   EmitAggExpr(E, AggSlot, IgnoreResult);
116   return AggSlot.asRValue();
117 }
118 
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
120 /// always be accessible even if no aggregate location is provided.
121 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
122   AggValueSlot AggSlot = AggValueSlot::ignored();
123 
124   if (hasAggregateLLVMType(E->getType()) &&
125       !E->getType()->isAnyComplexType())
126     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
127   return EmitAnyExpr(E, AggSlot);
128 }
129 
130 /// EmitAnyExprToMem - Evaluate an expression into a given memory
131 /// location.
132 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
133                                        llvm::Value *Location,
134                                        Qualifiers Quals,
135                                        bool IsInit) {
136   if (E->getType()->isAnyComplexType())
137     EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
138   else if (hasAggregateLLVMType(E->getType()))
139     EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals, IsInit));
140   else {
141     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
142     LValue LV = MakeAddrLValue(Location, E->getType());
143     EmitStoreThroughLValue(RV, LV);
144   }
145 }
146 
147 namespace {
148 /// \brief An adjustment to be made to the temporary created when emitting a
149 /// reference binding, which accesses a particular subobject of that temporary.
150   struct SubobjectAdjustment {
151     enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
152 
153     union {
154       struct {
155         const CastExpr *BasePath;
156         const CXXRecordDecl *DerivedClass;
157       } DerivedToBase;
158 
159       FieldDecl *Field;
160     };
161 
162     SubobjectAdjustment(const CastExpr *BasePath,
163                         const CXXRecordDecl *DerivedClass)
164       : Kind(DerivedToBaseAdjustment) {
165       DerivedToBase.BasePath = BasePath;
166       DerivedToBase.DerivedClass = DerivedClass;
167     }
168 
169     SubobjectAdjustment(FieldDecl *Field)
170       : Kind(FieldAdjustment) {
171       this->Field = Field;
172     }
173   };
174 }
175 
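// CreateReferenceTemporary - Create the temporary that a reference is bound
// to.  If the reference initializes a variable with global storage, this is a
// zero-initialized global with internal linkage; otherwise it is an ordinary
// memory temporary.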
176 static llvm::Value *
177 CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
178                          const NamedDecl *InitializedDecl) {
179   if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
180     if (VD->hasGlobalStorage()) {
181       llvm::SmallString<256> Name;
182       llvm::raw_svector_ostream Out(Name);
183       CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
184       Out.flush();
185 
186       const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
187 
188       // Create the reference temporary.
189       llvm::GlobalValue *RefTemp =
190         new llvm::GlobalVariable(CGF.CGM.getModule(),
191                                  RefTempTy, /*isConstant=*/false,
192                                  llvm::GlobalValue::InternalLinkage,
193                                  llvm::Constant::getNullValue(RefTempTy),
194                                  Name.str());
195       return RefTemp;
196     }
197   }
198 
199   return CGF.CreateMemTemp(Type, "ref.tmp");
200 }
201 
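// EmitExprForReferenceBinding - Emit the expression a reference is being
// bound to and return the address to bind.  On return, ReferenceTemporary
// holds any temporary that was created, ReferenceTemporaryDtor holds that
// temporary's non-trivial destructor (if any), and
// ObjCARCReferenceLifetimeType is set when the caller still needs to handle
// the ARC lifetime of the temporary.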
202 static llvm::Value *
203 EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
204                             llvm::Value *&ReferenceTemporary,
205                             const CXXDestructorDecl *&ReferenceTemporaryDtor,
206                             QualType &ObjCARCReferenceLifetimeType,
207                             const NamedDecl *InitializedDecl) {
208   // Look through expressions for materialized temporaries (for now).
209   if (const MaterializeTemporaryExpr *M
210                                       = dyn_cast<MaterializeTemporaryExpr>(E)) {
211     // Objective-C++ ARC:
212     //   If we are binding a reference to a temporary that has ownership, we
213     //   need to perform retain/release operations on the temporary.
214     if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
215         E->getType()->isObjCLifetimeType() &&
216         (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
217          E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
218          E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
219       ObjCARCReferenceLifetimeType = E->getType();
220 
221     E = M->GetTemporaryExpr();
222   }
223 
224   if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
225     E = DAE->getExpr();
226 
227   if (const ExprWithCleanups *TE = dyn_cast<ExprWithCleanups>(E)) {
228     CodeGenFunction::RunCleanupsScope Scope(CGF);
229 
230     return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
231                                        ReferenceTemporary,
232                                        ReferenceTemporaryDtor,
233                                        ObjCARCReferenceLifetimeType,
234                                        InitializedDecl);
235   }
236 
237   if (const ObjCPropertyRefExpr *PRE =
238       dyn_cast<ObjCPropertyRefExpr>(E->IgnoreParenImpCasts()))
239     if (PRE->getGetterResultType()->isReferenceType())
240       E = PRE;
241 
242   RValue RV;
243   if (E->isGLValue()) {
244     // Emit the expression as an lvalue.
245     LValue LV = CGF.EmitLValue(E);
246     if (LV.isPropertyRef()) {
247       RV = CGF.EmitLoadOfPropertyRefLValue(LV);
248       return RV.getScalarVal();
249     }
250 
251     if (LV.isSimple())
252       return LV.getAddress();
253 
254     // We have to load the lvalue.
255     RV = CGF.EmitLoadOfLValue(LV);
256   } else {
257     if (!ObjCARCReferenceLifetimeType.isNull()) {
258       ReferenceTemporary = CreateReferenceTemporary(CGF,
259                                                   ObjCARCReferenceLifetimeType,
260                                                     InitializedDecl);
261 
      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
264                                              ObjCARCReferenceLifetimeType);
265 
266       CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
267                          RefTempDst, false);
268 
269       bool ExtendsLifeOfTemporary = false;
270       if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
271         if (Var->extendsLifetimeOfTemporary())
272           ExtendsLifeOfTemporary = true;
273       } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
274         ExtendsLifeOfTemporary = true;
275       }
276 
277       if (!ExtendsLifeOfTemporary) {
278         // Since the lifetime of this temporary isn't going to be extended,
279         // we need to clean it up ourselves at the end of the full expression.
280         switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
281         case Qualifiers::OCL_None:
282         case Qualifiers::OCL_ExplicitNone:
283         case Qualifiers::OCL_Autoreleasing:
284           break;
285 
286         case Qualifiers::OCL_Strong:
287           CGF.PushARCReleaseCleanup(CGF.getARCCleanupKind(),
288                                     ObjCARCReferenceLifetimeType,
289                                     ReferenceTemporary,
290                                     /*Precise lifetime=*/false,
291                                     /*For full expression=*/true);
292           break;
293 
294         case Qualifiers::OCL_Weak:
295           CGF.PushARCWeakReleaseCleanup(NormalAndEHCleanup,
296                                         ObjCARCReferenceLifetimeType,
297                                         ReferenceTemporary,
298                                         /*For full expression=*/true);
299           break;
300         }
301 
302         ObjCARCReferenceLifetimeType = QualType();
303       }
304 
305       return ReferenceTemporary;
306     }
307 
308     llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
309     while (true) {
310       E = E->IgnoreParens();
311 
312       if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
313         if ((CE->getCastKind() == CK_DerivedToBase ||
314              CE->getCastKind() == CK_UncheckedDerivedToBase) &&
315             E->getType()->isRecordType()) {
316           E = CE->getSubExpr();
317           CXXRecordDecl *Derived
318             = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
319           Adjustments.push_back(SubobjectAdjustment(CE, Derived));
320           continue;
321         }
322 
323         if (CE->getCastKind() == CK_NoOp) {
324           E = CE->getSubExpr();
325           continue;
326         }
327       } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
328         if (!ME->isArrow() && ME->getBase()->isRValue()) {
329           assert(ME->getBase()->getType()->isRecordType());
330           if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
331             E = ME->getBase();
332             Adjustments.push_back(SubobjectAdjustment(Field));
333             continue;
334           }
335         }
336       }
337 
338       if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
339         if (opaque->getType()->isRecordType())
340           return CGF.EmitOpaqueValueLValue(opaque).getAddress();
341 
342       // Nothing changed.
343       break;
344     }
345 
346     // Create a reference temporary if necessary.
347     AggValueSlot AggSlot = AggValueSlot::ignored();
348     if (CGF.hasAggregateLLVMType(E->getType()) &&
349         !E->getType()->isAnyComplexType()) {
350       ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
351                                                     InitializedDecl);
352       AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
353                                       InitializedDecl != 0);
354     }
355 
356     if (InitializedDecl) {
357       // Get the destructor for the reference temporary.
358       if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
359         CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
360         if (!ClassDecl->hasTrivialDestructor())
361           ReferenceTemporaryDtor = ClassDecl->getDestructor();
362       }
363     }
364 
365     RV = CGF.EmitAnyExpr(E, AggSlot);
366 
    // Check if we need to perform derived-to-base casts and/or field accesses to
368     // get from the temporary object we created (and, potentially, for which we
369     // extended the lifetime) to the subobject we're binding the reference to.
370     if (!Adjustments.empty()) {
371       llvm::Value *Object = RV.getAggregateAddr();
372       for (unsigned I = Adjustments.size(); I != 0; --I) {
373         SubobjectAdjustment &Adjustment = Adjustments[I-1];
374         switch (Adjustment.Kind) {
375         case SubobjectAdjustment::DerivedToBaseAdjustment:
376           Object =
377               CGF.GetAddressOfBaseClass(Object,
378                                         Adjustment.DerivedToBase.DerivedClass,
379                               Adjustment.DerivedToBase.BasePath->path_begin(),
380                               Adjustment.DerivedToBase.BasePath->path_end(),
381                                         /*NullCheckValue=*/false);
382           break;
383 
384         case SubobjectAdjustment::FieldAdjustment: {
385           LValue LV =
386             CGF.EmitLValueForField(Object, Adjustment.Field, 0);
387           if (LV.isSimple()) {
388             Object = LV.getAddress();
389             break;
390           }
391 
392           // For non-simple lvalues, we actually have to create a copy of
393           // the object we're binding to.
394           QualType T = Adjustment.Field->getType().getNonReferenceType()
395                                                   .getUnqualifiedType();
396           Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
397           LValue TempLV = CGF.MakeAddrLValue(Object,
398                                              Adjustment.Field->getType());
399           CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
400           break;
401         }
402 
403         }
404       }
405 
406       return Object;
407     }
408   }
409 
410   if (RV.isAggregate())
411     return RV.getAggregateAddr();
412 
413   // Create a temporary variable that we can bind the reference to.
414   ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
415                                                 InitializedDecl);
416 
  unsigned Alignment =
419     CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
420   if (RV.isScalar())
421     CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
422                           /*Volatile=*/false, Alignment, E->getType());
423   else
424     CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
425                            /*Volatile=*/false);
426   return ReferenceTemporary;
427 }
428 
429 RValue
430 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
431                                             const NamedDecl *InitializedDecl) {
432   llvm::Value *ReferenceTemporary = 0;
433   const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
434   QualType ObjCARCReferenceLifetimeType;
435   llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
436                                                    ReferenceTemporaryDtor,
437                                                    ObjCARCReferenceLifetimeType,
438                                                    InitializedDecl);
439   if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
440     return RValue::get(Value);
441 
442   // Make sure to call the destructor for the reference temporary.
443   const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
444   if (VD && VD->hasGlobalStorage()) {
445     if (ReferenceTemporaryDtor) {
446       llvm::Constant *DtorFn =
447         CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
448       EmitCXXGlobalDtorRegistration(DtorFn,
449                                     cast<llvm::Constant>(ReferenceTemporary));
450     } else {
451       assert(!ObjCARCReferenceLifetimeType.isNull());
452       // Note: We intentionally do not register a global "destructor" to
453       // release the object.
454     }
455 
456     return RValue::get(Value);
457   }
458 
459   if (ReferenceTemporaryDtor)
460     PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
461   else {
462     switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
463     case Qualifiers::OCL_None:
464       assert(0 && "Not a reference temporary that needs to be deallocated");
465     case Qualifiers::OCL_ExplicitNone:
466     case Qualifiers::OCL_Autoreleasing:
467       // Nothing to do.
468       break;
469 
470     case Qualifiers::OCL_Strong:
471       PushARCReleaseCleanup(getARCCleanupKind(), ObjCARCReferenceLifetimeType,
472                             ReferenceTemporary,
473                             VD && VD->hasAttr<ObjCPreciseLifetimeAttr>());
474       break;
475 
476     case Qualifiers::OCL_Weak:
477       // __weak objects always get EH cleanups; otherwise, exceptions
478       // could cause really nasty crashes instead of mere leaks.
479       PushARCWeakReleaseCleanup(NormalAndEHCleanup,
480                                 ObjCARCReferenceLifetimeType,
481                                 ReferenceTemporary);
482       break;
483     }
484   }
485 
486   return RValue::get(Value);
487 }
488 
489 
490 /// getAccessedFieldNo - Given an encoded value and a result number, return the
491 /// input field number being accessed.
492 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
493                                              const llvm::Constant *Elts) {
494   if (isa<llvm::ConstantAggregateZero>(Elts))
495     return 0;
496 
497   return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
498 }
499 
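/// EmitCheck - If CatchUndefined checking is enabled, emit a run-time check
/// that at least Size bytes are addressable at Address, using the
/// llvm.objectsize intrinsic and branching to the trap block on failure.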
500 void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
501   if (!CatchUndefined)
502     return;
503 
  // This needs to be in the standard address space.
505   Address = Builder.CreateBitCast(Address, Int8PtrTy);
506 
507   llvm::Type *IntPtrT = IntPtrTy;
508   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
509 
510   // In time, people may want to control this and use a 1 here.
511   llvm::Value *Arg = Builder.getFalse();
512   llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
513   llvm::BasicBlock *Cont = createBasicBlock();
514   llvm::BasicBlock *Check = createBasicBlock();
515   llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
516   Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
517 
518   EmitBlock(Check);
519   Builder.CreateCondBr(Builder.CreateICmpUGE(C,
520                                         llvm::ConstantInt::get(IntPtrTy, Size)),
521                        Cont, getTrapBB());
522   EmitBlock(Cont);
523 }
524 
525 
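/// EmitComplexPrePostIncDec - Emit a pre/post increment or decrement of a
/// complex lvalue: adjust the real part by one, store the result back, and
/// return the updated value for pre operations or the original value for
/// post operations.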
526 CodeGenFunction::ComplexPairTy CodeGenFunction::
527 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
528                          bool isInc, bool isPre) {
529   ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
530                                             LV.isVolatileQualified());
531 
532   llvm::Value *NextVal;
533   if (isa<llvm::IntegerType>(InVal.first->getType())) {
534     uint64_t AmountVal = isInc ? 1 : -1;
535     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
536 
537     // Add the inc/dec to the real part.
538     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
539   } else {
540     QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
541     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
542     if (!isInc)
543       FVal.changeSign();
544     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
545 
546     // Add the inc/dec to the real part.
547     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
548   }
549 
550   ComplexPairTy IncVal(NextVal, InVal.second);
551 
552   // Store the updated result through the lvalue.
553   StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
554 
555   // If this is a postinc, return the value read from memory, otherwise use the
556   // updated value.
557   return isPre ? IncVal : InVal;
558 }
559 
560 
561 //===----------------------------------------------------------------------===//
562 //                         LValue Expression Emission
563 //===----------------------------------------------------------------------===//
564 
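/// GetUndefRValue - Produce an RValue of the given type whose value is
/// undefined.  Aggregates get a fresh temporary so the result still has a
/// valid address that can be taken and compared.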
565 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
566   if (Ty->isVoidType())
567     return RValue::get(0);
568 
569   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
570     const llvm::Type *EltTy = ConvertType(CTy->getElementType());
571     llvm::Value *U = llvm::UndefValue::get(EltTy);
572     return RValue::getComplex(std::make_pair(U, U));
573   }
574 
575   // If this is a use of an undefined aggregate type, the aggregate must have an
576   // identifiable address.  Just because the contents of the value are undefined
577   // doesn't mean that the address can't be taken and compared.
578   if (hasAggregateLLVMType(Ty)) {
579     llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
580     return RValue::getAggregate(DestPtr);
581   }
582 
583   return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
584 }
585 
586 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
587                                               const char *Name) {
588   ErrorUnsupported(E, Name);
589   return GetUndefRValue(E->getType());
590 }
591 
592 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
593                                               const char *Name) {
594   ErrorUnsupported(E, Name);
595   llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
596   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
597 }
598 
599 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
600   LValue LV = EmitLValue(E);
601   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
602     EmitCheck(LV.getAddress(),
603               getContext().getTypeSizeInChars(E->getType()).getQuantity());
604   return LV;
605 }
606 
607 /// EmitLValue - Emit code to compute a designator that specifies the location
608 /// of the expression.
609 ///
610 /// This can return one of two things: a simple address or a bitfield reference.
611 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
612 /// an LLVM pointer type.
613 ///
614 /// If this returns a bitfield reference, nothing about the pointee type of the
615 /// LLVM value is known: For example, it may not be a pointer to an integer.
616 ///
617 /// If this returns a normal address, and if the lvalue's C type is fixed size,
618 /// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type.  If the lvalue has a variable
620 /// length type, this is not possible.
621 ///
622 LValue CodeGenFunction::EmitLValue(const Expr *E) {
623   switch (E->getStmtClass()) {
624   default: return EmitUnsupportedLValue(E, "l-value expression");
625 
  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
628   case Expr::ObjCIsaExprClass:
629     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
630   case Expr::BinaryOperatorClass:
631     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
632   case Expr::CompoundAssignOperatorClass:
633     if (!E->getType()->isAnyComplexType())
634       return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
635     return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
636   case Expr::CallExprClass:
637   case Expr::CXXMemberCallExprClass:
638   case Expr::CXXOperatorCallExprClass:
639     return EmitCallExprLValue(cast<CallExpr>(E));
640   case Expr::VAArgExprClass:
641     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
642   case Expr::DeclRefExprClass:
643     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
645   case Expr::GenericSelectionExprClass:
646     return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
647   case Expr::PredefinedExprClass:
648     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
649   case Expr::StringLiteralClass:
650     return EmitStringLiteralLValue(cast<StringLiteral>(E));
651   case Expr::ObjCEncodeExprClass:
652     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
653 
654   case Expr::BlockDeclRefExprClass:
655     return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
656 
657   case Expr::CXXTemporaryObjectExprClass:
658   case Expr::CXXConstructExprClass:
659     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
660   case Expr::CXXBindTemporaryExprClass:
661     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
662   case Expr::ExprWithCleanupsClass:
663     return EmitExprWithCleanupsLValue(cast<ExprWithCleanups>(E));
664   case Expr::CXXScalarValueInitExprClass:
665     return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
666   case Expr::CXXDefaultArgExprClass:
667     return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
668   case Expr::CXXTypeidExprClass:
669     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
670 
671   case Expr::ObjCMessageExprClass:
672     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
673   case Expr::ObjCIvarRefExprClass:
674     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
675   case Expr::ObjCPropertyRefExprClass:
676     return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
677   case Expr::StmtExprClass:
678     return EmitStmtExprLValue(cast<StmtExpr>(E));
679   case Expr::UnaryOperatorClass:
680     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
681   case Expr::ArraySubscriptExprClass:
682     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
683   case Expr::ExtVectorElementExprClass:
684     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
685   case Expr::MemberExprClass:
686     return EmitMemberExpr(cast<MemberExpr>(E));
687   case Expr::CompoundLiteralExprClass:
688     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
689   case Expr::ConditionalOperatorClass:
690     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
691   case Expr::BinaryConditionalOperatorClass:
692     return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
693   case Expr::ChooseExprClass:
694     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
695   case Expr::OpaqueValueExprClass:
696     return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
697   case Expr::ImplicitCastExprClass:
698   case Expr::CStyleCastExprClass:
699   case Expr::CXXFunctionalCastExprClass:
700   case Expr::CXXStaticCastExprClass:
701   case Expr::CXXDynamicCastExprClass:
702   case Expr::CXXReinterpretCastExprClass:
703   case Expr::CXXConstCastExprClass:
704   case Expr::ObjCBridgedCastExprClass:
705     return EmitCastLValue(cast<CastExpr>(E));
706 
707   case Expr::MaterializeTemporaryExprClass:
708     return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
709   }
710 }
711 
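/// EmitLoadOfScalar - Load a scalar value from the given lvalue, honoring its
/// volatility, alignment, and TBAA information.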
712 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
713   return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
714                           lvalue.getAlignment(), lvalue.getType(),
715                           lvalue.getTBAAInfo());
716 }
717 
718 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
719                                               unsigned Alignment, QualType Ty,
720                                               llvm::MDNode *TBAAInfo) {
721   llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
722   if (Volatile)
723     Load->setVolatile(true);
724   if (Alignment)
725     Load->setAlignment(Alignment);
726   if (TBAAInfo)
727     CGM.DecorateInstruction(Load, TBAAInfo);
728 
729   return EmitFromMemory(Load, Ty);
730 }
731 
732 static bool isBooleanUnderlyingType(QualType Ty) {
733   if (const EnumType *ET = dyn_cast<EnumType>(Ty))
734     return ET->getDecl()->getIntegerType()->isBooleanType();
735   return false;
736 }
737 
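/// EmitToMemory - Convert a scalar value from its register representation to
/// its in-memory representation.  Currently this only affects bool (and enums
/// with a boolean underlying type), which widen from i1 to i8.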
738 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
739   // Bool has a different representation in memory than in registers.
740   if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
741     // This should really always be an i1, but sometimes it's already
742     // an i8, and it's awkward to track those cases down.
743     if (Value->getType()->isIntegerTy(1))
744       return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
745     assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
746   }
747 
748   return Value;
749 }
750 
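/// EmitFromMemory - The inverse of EmitToMemory: convert a scalar value from
/// its in-memory representation back to its register representation
/// (i8 to i1 for bool).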
751 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
752   // Bool has a different representation in memory than in registers.
753   if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
754     assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
755     return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
756   }
757 
758   return Value;
759 }
760 
761 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
762                                         bool Volatile, unsigned Alignment,
763                                         QualType Ty,
764                                         llvm::MDNode *TBAAInfo) {
765   Value = EmitToMemory(Value, Ty);
766 
767   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
768   if (Alignment)
769     Store->setAlignment(Alignment);
770   if (TBAAInfo)
771     CGM.DecorateInstruction(Store, TBAAInfo);
772 }
773 
774 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
775   EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
776                     lvalue.getAlignment(), lvalue.getType(),
777                     lvalue.getTBAAInfo());
778 }
779 
780 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
781 /// method emits the address of the lvalue, then loads the result as an rvalue,
782 /// returning the rvalue.
783 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
784   if (LV.isObjCWeak()) {
785     // load of a __weak object.
786     llvm::Value *AddrWeakObj = LV.getAddress();
787     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
788                                                              AddrWeakObj));
789   }
790   if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
791     return RValue::get(EmitARCLoadWeak(LV.getAddress()));
792 
793   if (LV.isSimple()) {
794     assert(!LV.getType()->isFunctionType());
795 
796     // Everything needs a load.
797     return RValue::get(EmitLoadOfScalar(LV));
798   }
799 
800   if (LV.isVectorElt()) {
801     llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
802                                           LV.isVolatileQualified(), "tmp");
803     return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
804                                                     "vecext"));
805   }
806 
807   // If this is a reference to a subset of the elements of a vector, either
808   // shuffle the input or extract/insert them as appropriate.
809   if (LV.isExtVectorElt())
810     return EmitLoadOfExtVectorElementLValue(LV);
811 
812   if (LV.isBitField())
813     return EmitLoadOfBitfieldLValue(LV);
814 
815   assert(LV.isPropertyRef() && "Unknown LValue type!");
816   return EmitLoadOfPropertyRefLValue(LV);
817 }
818 
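/// EmitLoadOfBitfieldLValue - Load the value of a bit-field lvalue by
/// performing each access described in its CGBitFieldInfo, shifting and
/// masking the pieces into place, OR-ing them together, and sign-extending
/// the result if the bit-field is signed.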
819 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
820   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
821 
822   // Get the output type.
823   const llvm::Type *ResLTy = ConvertType(LV.getType());
824   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
825 
826   // Compute the result as an OR of all of the individual component accesses.
827   llvm::Value *Res = 0;
828   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
829     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
830 
831     // Get the field pointer.
832     llvm::Value *Ptr = LV.getBitFieldBaseAddr();
833 
834     // Only offset by the field index if used, so that incoming values are not
835     // required to be structures.
836     if (AI.FieldIndex)
837       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
838 
839     // Offset by the byte offset, if used.
840     if (!AI.FieldByteOffset.isZero()) {
841       Ptr = EmitCastToVoidPtr(Ptr);
842       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
843                                        "bf.field.offs");
844     }
845 
846     // Cast to the access type.
847     const llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
848                                                      AI.AccessWidth,
849                        CGM.getContext().getTargetAddressSpace(LV.getType()));
850     Ptr = Builder.CreateBitCast(Ptr, PTy);
851 
852     // Perform the load.
853     llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
854     if (!AI.AccessAlignment.isZero())
855       Load->setAlignment(AI.AccessAlignment.getQuantity());
856 
857     // Shift out unused low bits and mask out unused high bits.
858     llvm::Value *Val = Load;
859     if (AI.FieldBitStart)
860       Val = Builder.CreateLShr(Load, AI.FieldBitStart);
861     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
862                                                             AI.TargetBitWidth),
863                             "bf.clear");
864 
865     // Extend or truncate to the target size.
866     if (AI.AccessWidth < ResSizeInBits)
867       Val = Builder.CreateZExt(Val, ResLTy);
868     else if (AI.AccessWidth > ResSizeInBits)
869       Val = Builder.CreateTrunc(Val, ResLTy);
870 
871     // Shift into place, and OR into the result.
872     if (AI.TargetBitOffset)
873       Val = Builder.CreateShl(Val, AI.TargetBitOffset);
874     Res = Res ? Builder.CreateOr(Res, Val) : Val;
875   }
876 
877   // If the bit-field is signed, perform the sign-extension.
878   //
879   // FIXME: This can easily be folded into the load of the high bits, which
880   // could also eliminate the mask of high bits in some situations.
881   if (Info.isSigned()) {
882     unsigned ExtraBits = ResSizeInBits - Info.getSize();
883     if (ExtraBits)
884       Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
885                                ExtraBits, "bf.val.sext");
886   }
887 
888   return RValue::get(Res);
889 }
890 
891 // If this is a reference to a subset of the elements of a vector, create an
892 // appropriate shufflevector.
893 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
894   llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
895                                         LV.isVolatileQualified(), "tmp");
896 
897   const llvm::Constant *Elts = LV.getExtVectorElts();
898 
899   // If the result of the expression is a non-vector type, we must be extracting
900   // a single element.  Just codegen as an extractelement.
901   const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
902   if (!ExprVT) {
903     unsigned InIdx = getAccessedFieldNo(0, Elts);
904     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
905     return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
906   }
907 
  // Always use a shufflevector to try to retain the original program structure.
909   unsigned NumResultElts = ExprVT->getNumElements();
910 
911   llvm::SmallVector<llvm::Constant*, 4> Mask;
912   for (unsigned i = 0; i != NumResultElts; ++i) {
913     unsigned InIdx = getAccessedFieldNo(i, Elts);
914     Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
915   }
916 
917   llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
918   Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
919                                     MaskV, "tmp");
920   return RValue::get(Vec);
921 }
922 
923 
924 
925 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
926 /// lvalue, where both are guaranteed to the have the same type, and that type
927 /// is 'Ty'.
928 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
929   if (!Dst.isSimple()) {
930     if (Dst.isVectorElt()) {
931       // Read/modify/write the vector, inserting the new element.
932       llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
933                                             Dst.isVolatileQualified(), "tmp");
934       Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
935                                         Dst.getVectorIdx(), "vecins");
936       Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
937       return;
938     }
939 
940     // If this is an update of extended vector elements, insert them as
941     // appropriate.
942     if (Dst.isExtVectorElt())
943       return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
944 
945     if (Dst.isBitField())
946       return EmitStoreThroughBitfieldLValue(Src, Dst);
947 
948     assert(Dst.isPropertyRef() && "Unknown LValue type");
949     return EmitStoreThroughPropertyRefLValue(Src, Dst);
950   }
951 
952   // There's special magic for assigning into an ARC-qualified l-value.
953   if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
954     switch (Lifetime) {
955     case Qualifiers::OCL_None:
956       llvm_unreachable("present but none");
957 
958     case Qualifiers::OCL_ExplicitNone:
959       // nothing special
960       break;
961 
962     case Qualifiers::OCL_Strong:
963       EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
964       return;
965 
966     case Qualifiers::OCL_Weak:
967       EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
968       return;
969 
970     case Qualifiers::OCL_Autoreleasing:
971       Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
972                                                      Src.getScalarVal()));
973       // fall into the normal path
974       break;
975     }
976   }
977 
  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assign to a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
984   }
985 
986   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assign to a __strong object.
988     llvm::Value *LvalueDst = Dst.getAddress();
989     llvm::Value *src = Src.getScalarVal();
990     if (Dst.isObjCIvar()) {
991       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
992       const llvm::Type *ResultType = ConvertType(getContext().LongTy);
993       llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
994       llvm::Value *dst = RHS;
995       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
996       llvm::Value *LHS =
997         Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
998       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
999       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1000                                               BytesBetween);
1001     } else if (Dst.isGlobalObjCRef()) {
1002       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1003                                                 Dst.isThreadLocalRef());
1004     }
1005     else
1006       CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
1007     return;
1008   }
1009 
1010   assert(Src.isScalar() && "Can't emit an agg store with this method");
1011   EmitStoreOfScalar(Src.getScalarVal(), Dst);
1012 }
1013 
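/// EmitStoreThroughBitfieldLValue - Store Src into the bit-field lvalue Dst,
/// writing each access component separately and using a read-modify-write to
/// preserve the bits outside the bit-field.  If Result is non-null, it
/// receives the value the bit-field holds after the store.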
1014 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1015                                                      llvm::Value **Result) {
1016   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1017 
1018   // Get the output type.
1019   const llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1020   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
1021 
1022   // Get the source value, truncated to the width of the bit-field.
1023   llvm::Value *SrcVal = Src.getScalarVal();
1024 
1025   if (Dst.getType()->isBooleanType())
1026     SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
1027 
1028   SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
1029                                                                 Info.getSize()),
1030                              "bf.value");
1031 
1032   // Return the new value of the bit-field, if requested.
1033   if (Result) {
1034     // Cast back to the proper type for result.
1035     const llvm::Type *SrcTy = Src.getScalarVal()->getType();
1036     llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
1037                                                    "bf.reload.val");
1038 
1039     // Sign extend if necessary.
1040     if (Info.isSigned()) {
1041       unsigned ExtraBits = ResSizeInBits - Info.getSize();
1042       if (ExtraBits)
1043         ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
1044                                        ExtraBits, "bf.reload.sext");
1045     }
1046 
1047     *Result = ReloadVal;
1048   }
1049 
1050   // Iterate over the components, writing each piece to memory.
1051   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
1052     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
1053 
1054     // Get the field pointer.
1055     llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
1056     unsigned addressSpace =
1057       cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
1058 
1059     // Only offset by the field index if used, so that incoming values are not
1060     // required to be structures.
1061     if (AI.FieldIndex)
1062       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
1063 
1064     // Offset by the byte offset, if used.
1065     if (!AI.FieldByteOffset.isZero()) {
1066       Ptr = EmitCastToVoidPtr(Ptr);
1067       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
1068                                        "bf.field.offs");
1069     }
1070 
1071     // Cast to the access type.
1072     const llvm::Type *AccessLTy =
1073       llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
1074 
1075     const llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
1076     Ptr = Builder.CreateBitCast(Ptr, PTy);
1077 
1078     // Extract the piece of the bit-field value to write in this access, limited
1079     // to the values that are part of this access.
1080     llvm::Value *Val = SrcVal;
1081     if (AI.TargetBitOffset)
1082       Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
1083     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
1084                                                             AI.TargetBitWidth));
1085 
1086     // Extend or truncate to the access size.
1087     if (ResSizeInBits < AI.AccessWidth)
1088       Val = Builder.CreateZExt(Val, AccessLTy);
1089     else if (ResSizeInBits > AI.AccessWidth)
1090       Val = Builder.CreateTrunc(Val, AccessLTy);
1091 
1092     // Shift into the position in memory.
1093     if (AI.FieldBitStart)
1094       Val = Builder.CreateShl(Val, AI.FieldBitStart);
1095 
1096     // If necessary, load and OR in bits that are outside of the bit-field.
1097     if (AI.TargetBitWidth != AI.AccessWidth) {
1098       llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
1099       if (!AI.AccessAlignment.isZero())
1100         Load->setAlignment(AI.AccessAlignment.getQuantity());
1101 
1102       // Compute the mask for zeroing the bits that are part of the bit-field.
1103       llvm::APInt InvMask =
1104         ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
1105                                  AI.FieldBitStart + AI.TargetBitWidth);
1106 
1107       // Apply the mask and OR in to the value to write.
1108       Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
1109     }
1110 
1111     // Write the value.
1112     llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
1113                                                  Dst.isVolatileQualified());
1114     if (!AI.AccessAlignment.isZero())
1115       Store->setAlignment(AI.AccessAlignment.getQuantity());
1116   }
1117 }
1118 
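/// EmitStoreThroughExtVectorComponentLValue - Store into a subset of a
/// vector's elements by loading the vector, shuffling or inserting the new
/// elements into place, and storing the whole vector back.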
1119 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1120                                                                LValue Dst) {
1121   // This access turns into a read/modify/write of the vector.  Load the input
1122   // value now.
1123   llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
1124                                         Dst.isVolatileQualified(), "tmp");
1125   const llvm::Constant *Elts = Dst.getExtVectorElts();
1126 
1127   llvm::Value *SrcVal = Src.getScalarVal();
1128 
1129   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1130     unsigned NumSrcElts = VTy->getNumElements();
1131     unsigned NumDstElts =
1132        cast<llvm::VectorType>(Vec->getType())->getNumElements();
1133     if (NumDstElts == NumSrcElts) {
      // Use a shuffle when the source and destination have the same number of
      // elements, rearranging the source elements into the positions selected
      // by the accessed-element mask so the result can be stored directly.
1137       llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1138       for (unsigned i = 0; i != NumSrcElts; ++i) {
1139         unsigned InIdx = getAccessedFieldNo(i, Elts);
1140         Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
1141       }
1142 
1143       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1144       Vec = Builder.CreateShuffleVector(SrcVal,
1145                                         llvm::UndefValue::get(Vec->getType()),
1146                                         MaskV, "tmp");
1147     } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
1149       // into the destination.
1150       // FIXME: since we're shuffling with undef, can we just use the indices
1151       //        into that?  This could be simpler.
1152       llvm::SmallVector<llvm::Constant*, 4> ExtMask;
1153       unsigned i;
1154       for (i = 0; i != NumSrcElts; ++i)
1155         ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
1156       for (; i != NumDstElts; ++i)
1157         ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
1158       llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1159       llvm::Value *ExtSrcVal =
1160         Builder.CreateShuffleVector(SrcVal,
1161                                     llvm::UndefValue::get(SrcVal->getType()),
1162                                     ExtMaskV, "tmp");
      // Build an identity mask over the destination elements.
1164       llvm::SmallVector<llvm::Constant*, 4> Mask;
1165       for (unsigned i = 0; i != NumDstElts; ++i)
1166         Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
1167 
      // Then overwrite the accessed positions with elements taken from the
      // extended source.
1169       for (unsigned i = 0; i != NumSrcElts; ++i) {
1170         unsigned Idx = getAccessedFieldNo(i, Elts);
1171         Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
1172       }
1173       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1174       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
1175     } else {
1176       // We should never shorten the vector
1177       assert(0 && "unexpected shorten vector length");
1178     }
1179   } else {
1180     // If the Src is a scalar (not a vector) it must be updating one element.
1181     unsigned InIdx = getAccessedFieldNo(0, Elts);
1182     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1183     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
1184   }
1185 
1186   Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
1187 }
1188 
// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
// generating the write-barrier API. It is currently a global, an ivar,
// or neither.
1192 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1193                                  LValue &LV) {
1194   if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
1195     return;
1196 
1197   if (isa<ObjCIvarRefExpr>(E)) {
1198     LV.setObjCIvar(true);
1199     ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1200     LV.setBaseIvarExp(Exp->getBase());
1201     LV.setObjCArray(E->getType()->isArrayType());
1202     return;
1203   }
1204 
1205   if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1206     if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1207       if (VD->hasGlobalStorage()) {
1208         LV.setGlobalObjCRef(true);
1209         LV.setThreadLocalRef(VD->isThreadSpecified());
1210       }
1211     }
1212     LV.setObjCArray(E->getType()->isArrayType());
1213     return;
1214   }
1215 
1216   if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1217     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1218     return;
1219   }
1220 
1221   if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1222     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1223     if (LV.isObjCIvar()) {
1224       // If cast is to a structure pointer, follow gcc's behavior and make it
1225       // a non-ivar write-barrier.
1226       QualType ExpTy = E->getType();
1227       if (ExpTy->isPointerType())
1228         ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1229       if (ExpTy->isRecordType())
1230         LV.setObjCIvar(false);
1231     }
1232     return;
1233   }
1234 
1235   if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1236     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1237     return;
1238   }
1239 
1240   if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1241     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1242     return;
1243   }
1244 
1245   if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1246     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1247     return;
1248   }
1249 
1250   if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1251     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1252     return;
1253   }
1254 
1255   if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1256     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
1265     return;
1266   }
1267 
1268   if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1269     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
1272     LV.setObjCArray(E->getType()->isArrayType());
1273     return;
1274   }
1275 }
1276 
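// EmitBitCastOfLValueToProperType - Bitcast the given pointer so that it
// points to IRType, preserving its address space.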
1277 static llvm::Value *
1278 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1279                                 llvm::Value *V, llvm::Type *IRType,
1280                                 llvm::StringRef Name = llvm::StringRef()) {
1281   unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1282   return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1283 }
1284 
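// EmitGlobalVarDeclLValue - Emit an lvalue referring to a global variable,
// loading through it first if the variable has reference type and tagging
// the result for Objective-C GC as needed.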
1285 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1286                                       const Expr *E, const VarDecl *VD) {
1287   assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1288          "Var decl must have external storage or be a file var decl!");
1289 
1290   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1291   if (VD->getType()->isReferenceType())
1292     V = CGF.Builder.CreateLoad(V, "tmp");
1293 
1294   V = EmitBitCastOfLValueToProperType(CGF, V,
1295                                 CGF.getTypes().ConvertTypeForMem(E->getType()));
1296 
1297   unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
1298   LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1299   setObjCGCLValueClass(CGF.getContext(), E, LV);
1300   return LV;
1301 }
1302 
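// EmitFunctionDeclLValue - Emit an lvalue referring to a function.  For a
// K&R-style definition whose declared type differs from the type of a use,
// bitcast the address to the unprototyped function type.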
1303 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1304                                      const Expr *E, const FunctionDecl *FD) {
1305   llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1306   if (!FD->hasPrototype()) {
1307     if (const FunctionProtoType *Proto =
1308             FD->getType()->getAs<FunctionProtoType>()) {
1309       // Ugly case: for a K&R-style definition, the type of the definition
1310       // isn't the same as the type of a use.  Correct for this with a
1311       // bitcast.
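      // For example, a definition written as:
      //   int f(x) int x; { return x; }
      // has the prototyped type 'int (int)', while an unprototyped use of 'f'
      // expects 'int ()'.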
1312       QualType NoProtoType =
1313           CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1314       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1315       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
1316     }
1317   }
1318   unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
1319   return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1320 }
1321 
1322 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1323   const NamedDecl *ND = E->getDecl();
1324   unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();
1325 
1326   if (ND->hasAttr<WeakRefAttr>()) {
1327     const ValueDecl *VD = cast<ValueDecl>(ND);
1328     llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1329     return MakeAddrLValue(Aliasee, E->getType(), Alignment);
1330   }
1331 
1332   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1333 
1334     // Check if this is a global variable.
1335     if (VD->hasExternalStorage() || VD->isFileVarDecl())
1336       return EmitGlobalVarDeclLValue(*this, E, VD);
1337 
1338     bool NonGCable = VD->hasLocalStorage() &&
1339                      !VD->getType()->isReferenceType() &&
1340                      !VD->hasAttr<BlocksAttr>();
1341 
1342     llvm::Value *V = LocalDeclMap[VD];
1343     if (!V && VD->isStaticLocal())
1344       V = CGM.getStaticLocalDeclAddress(VD);
1345     assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1346 
1347     if (VD->hasAttr<BlocksAttr>())
1348       V = BuildBlockByrefAddress(V, VD);
1349 
1350     if (VD->getType()->isReferenceType())
1351       V = Builder.CreateLoad(V, "tmp");
1352 
1353     V = EmitBitCastOfLValueToProperType(*this, V,
1354                                     getTypes().ConvertTypeForMem(E->getType()));
1355 
1356     LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
1357     if (NonGCable) {
1358       LV.getQuals().removeObjCGCAttr();
1359       LV.setNonGC(true);
1360     }
1361     setObjCGCLValueClass(getContext(), E, LV);
1362     return LV;
1363   }
1364 
1365   if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1366     return EmitFunctionDeclLValue(*this, E, fn);
1367 
1368   assert(false && "Unhandled DeclRefExpr");
1369 
  // Return an invalid LValue; the assert above ensures
  // that this point is never reached.
1372   return LValue();
1373 }
1374 
1375 LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
1376   unsigned Alignment =
1377     getContext().getDeclAlign(E->getDecl()).getQuantity();
1378   return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
1379 }
1380 
1381 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1382   // __extension__ doesn't affect lvalue-ness.
1383   if (E->getOpcode() == UO_Extension)
1384     return EmitLValue(E->getSubExpr());
1385 
1386   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1387   switch (E->getOpcode()) {
1388   default: assert(0 && "Unknown unary operator lvalue!");
1389   case UO_Deref: {
1390     QualType T = E->getSubExpr()->getType()->getPointeeType();
1391     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1392 
1393     LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1394     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1395 
    // We should not generate a __weak write barrier on an indirect reference
    // to a pointer to object; as in void foo (__weak id *param); *param = 0;
    // However, we continue to generate a __strong write barrier on an
    // indirect write into a pointer to object.
1400     if (getContext().getLangOptions().ObjC1 &&
1401         getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
1402         LV.isObjCWeak())
1403       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1404     return LV;
1405   }
1406   case UO_Real:
1407   case UO_Imag: {
1408     LValue LV = EmitLValue(E->getSubExpr());
1409     assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1410     llvm::Value *Addr = LV.getAddress();
1411 
1412     // real and imag are valid on scalars.  This is a faster way of
1413     // testing that.
1414     if (!cast<llvm::PointerType>(Addr->getType())
1415            ->getElementType()->isStructTy()) {
1416       assert(E->getSubExpr()->getType()->isArithmeticType());
1417       return LV;
1418     }
1419 
1420     assert(E->getSubExpr()->getType()->isAnyComplexType());
1421 
1422     unsigned Idx = E->getOpcode() == UO_Imag;
1423     return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1424                                                   Idx, "idx"),
1425                           ExprTy);
1426   }
1427   case UO_PreInc:
1428   case UO_PreDec: {
1429     LValue LV = EmitLValue(E->getSubExpr());
1430     bool isInc = E->getOpcode() == UO_PreInc;
1431 
1432     if (E->getType()->isAnyComplexType())
1433       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1434     else
1435       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1436     return LV;
1437   }
1438   }
1439 }
1440 
1441 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1442   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1443                         E->getType());
1444 }
1445 
1446 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1447   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1448                         E->getType());
1449 }
1450 
1451 
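/// EmitPredefinedLValue - Emit an l-value for a predefined identifier such as
/// __func__, __FUNCTION__, or __PRETTY_FUNCTION__; e.g. in
///   void f() { puts(__func__); }
/// the identifier evaluates to the constant string "f".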
1452 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1453   switch (E->getIdentType()) {
1454   default:
1455     return EmitUnsupportedLValue(E, "predefined expression");
1456 
1457   case PredefinedExpr::Func:
1458   case PredefinedExpr::Function:
1459   case PredefinedExpr::PrettyFunction: {
1460     unsigned Type = E->getIdentType();
1461     std::string GlobalVarName;
1462 
1463     switch (Type) {
1464     default: assert(0 && "Invalid type");
1465     case PredefinedExpr::Func:
1466       GlobalVarName = "__func__.";
1467       break;
1468     case PredefinedExpr::Function:
1469       GlobalVarName = "__FUNCTION__.";
1470       break;
1471     case PredefinedExpr::PrettyFunction:
1472       GlobalVarName = "__PRETTY_FUNCTION__.";
1473       break;
1474     }
1475 
1476     llvm::StringRef FnName = CurFn->getName();
1477     if (FnName.startswith("\01"))
1478       FnName = FnName.substr(1);
1479     GlobalVarName += FnName;
1480 
1481     const Decl *CurDecl = CurCodeDecl;
1482     if (CurDecl == 0)
1483       CurDecl = getContext().getTranslationUnitDecl();
1484 
1485     std::string FunctionName =
1486         (isa<BlockDecl>(CurDecl)
1487          ? FnName.str()
1488          : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
1489 
1490     llvm::Constant *C =
1491       CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
1492     return MakeAddrLValue(C, E->getType());
1493   }
1494   }
1495 }
1496 
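/// getTrapBB - Return a basic block that calls llvm.trap and is marked
/// unreachable; it is created lazily and, when optimizing, shared by all
/// trapping checks in the function.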
1497 llvm::BasicBlock *CodeGenFunction::getTrapBB() {
1498   const CodeGenOptions &GCO = CGM.getCodeGenOpts();
1499 
  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call; that way, in the debugger one can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap
  // down to just one per function to save on code size.
1504   if (GCO.OptimizationLevel && TrapBB)
1505     return TrapBB;
1506 
1507   llvm::BasicBlock *Cont = 0;
1508   if (HaveInsertPoint()) {
1509     Cont = createBasicBlock("cont");
1510     EmitBranch(Cont);
1511   }
1512   TrapBB = createBasicBlock("trap");
1513   EmitBlock(TrapBB);
1514 
1515   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
1516   llvm::CallInst *TrapCall = Builder.CreateCall(F);
1517   TrapCall->setDoesNotReturn();
1518   TrapCall->setDoesNotThrow();
1519   Builder.CreateUnreachable();
1520 
1521   if (Cont)
1522     EmitBlock(Cont);
1523   return TrapBB;
1524 }
1525 
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to a pointer, return the array subexpression.
1528 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
1529   // If this isn't just an array->pointer decay, bail out.
1530   const CastExpr *CE = dyn_cast<CastExpr>(E);
1531   if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
1532     return 0;
1533 
  // If this is a decay from a variable-length array, bail out.
1535   const Expr *SubExpr = CE->getSubExpr();
1536   if (SubExpr->getType()->isVariableArrayType())
1537     return 0;
1538 
1539   return SubExpr;
1540 }
1541 
1542 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1543   // The index must always be an integer, which is not an aggregate.  Emit it.
1544   llvm::Value *Idx = EmitScalarExpr(E->getIdx());
1545   QualType IdxTy  = E->getIdx()->getType();
1546   bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
1547 
1548   // If the base is a vector type, then we are forming a vector element lvalue
1549   // with this subscript.
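  // For example, given
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   float4 V;
  // the subscript V[i] forms a vector-element l-value rather than a plain
  // memory address.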
1550   if (E->getBase()->getType()->isVectorType()) {
1551     // Emit the vector as an lvalue to get its address.
1552     LValue LHS = EmitLValue(E->getBase());
1553     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
1554     Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
1555     return LValue::MakeVectorElt(LHS.getAddress(), Idx,
1556                                  E->getBase()->getType());
1557   }
1558 
  // Extend or truncate the index type to 32 or 64 bits.
1560   if (Idx->getType() != IntPtrTy)
1561     Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
1562 
  // FIXME: Once LLVM implements object-size checking, this can be removed.
1564   if (CatchUndefined) {
1565     if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
1566       if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
1567         if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1568           if (const ConstantArrayType *CAT
1569               = getContext().getAsConstantArrayType(DRE->getType())) {
1570             llvm::APInt Size = CAT->getSize();
1571             llvm::BasicBlock *Cont = createBasicBlock("cont");
1572             Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
1573                                   llvm::ConstantInt::get(Idx->getType(), Size)),
1574                                  Cont, getTrapBB());
1575             EmitBlock(Cont);
1576           }
1577         }
1578       }
1579     }
1580   }
1581 
  // We know that the pointer points to a type of the correct size, unless the
  // pointee type is a VLA or an Objective-C interface, whose sizes are not
  // statically known.
1584   llvm::Value *Address = 0;
1585   unsigned ArrayAlignment = 0;
1586   if (const VariableArrayType *vla =
1587         getContext().getAsVariableArrayType(E->getType())) {
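    // For example, given 'int a[n][m];', the l-value 'a[i]' has type
    // 'int [m]', which is itself a VLA; the index is scaled by that VLA's
    // element count so the GEP can be done in terms of the underlying
    // element type.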
1588     // The base must be a pointer, which is not an aggregate.  Emit
1589     // it.  It needs to be emitted first in case it's what captures
1590     // the VLA bounds.
1591     Address = EmitScalarExpr(E->getBase());
1592 
1593     // The element count here is the total number of non-VLA elements.
1594     llvm::Value *numElements = getVLASize(vla).first;
1595 
1596     // Effectively, the multiply by the VLA size is part of the GEP.
1597     // GEP indexes are signed, and scaling an index isn't permitted to
1598     // signed-overflow, so we use the same semantics for our explicit
1599     // multiply.  We suppress this if overflow is not undefined behavior.
1600     if (getLangOptions().isSignedOverflowDefined()) {
1601       Idx = Builder.CreateMul(Idx, numElements);
1602       Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1603     } else {
1604       Idx = Builder.CreateNSWMul(Idx, numElements);
1605       Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
1606     }
1607   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
1608     // Indexing over an interface, as in "NSString *P; P[4];"
1609     llvm::Value *InterfaceSize =
1610       llvm::ConstantInt::get(Idx->getType(),
1611           getContext().getTypeSizeInChars(OIT).getQuantity());
1612 
1613     Idx = Builder.CreateMul(Idx, InterfaceSize);
1614 
1615     // The base must be a pointer, which is not an aggregate.  Emit it.
1616     llvm::Value *Base = EmitScalarExpr(E->getBase());
1617     Address = EmitCastToVoidPtr(Base);
1618     Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1619     Address = Builder.CreateBitCast(Address, Base->getType());
1620   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
1621     // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
1623     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
1624     // "gep x, i" here.  Emit one "gep A, 0, i".
1625     assert(Array->getType()->isArrayType() &&
1626            "Array to pointer decay must have array source type!");
1627     LValue ArrayLV = EmitLValue(Array);
1628     llvm::Value *ArrayPtr = ArrayLV.getAddress();
1629     llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
1630     llvm::Value *Args[] = { Zero, Idx };
1631 
1632     // Propagate the alignment from the array itself to the result.
1633     ArrayAlignment = ArrayLV.getAlignment();
1634 
1635     if (getContext().getLangOptions().isSignedOverflowDefined())
1636       Address = Builder.CreateGEP(ArrayPtr, Args, Args+2, "arrayidx");
1637     else
1638       Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
1639   } else {
1640     // The base must be a pointer, which is not an aggregate.  Emit it.
1641     llvm::Value *Base = EmitScalarExpr(E->getBase());
1642     if (getContext().getLangOptions().isSignedOverflowDefined())
1643       Address = Builder.CreateGEP(Base, Idx, "arrayidx");
1644     else
1645       Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
1646   }
1647 
1648   QualType T = E->getBase()->getType()->getPointeeType();
1649   assert(!T.isNull() &&
1650          "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
1651 
1652   // Limit the alignment to that of the result type.
1653   if (ArrayAlignment) {
1654     unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
1655     ArrayAlignment = std::min(Align, ArrayAlignment);
1656   }
1657 
1658   LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
1659   LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
1660 
1661   if (getContext().getLangOptions().ObjC1 &&
1662       getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
1663     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1664     setObjCGCLValueClass(getContext(), E, LV);
1665   }
1666   return LV;
1667 }
1668 
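/// GenerateConstantVector - Build a constant vector of i32 values from the
/// given list of element indices; used as the encoded element list of an
/// ext-vector-element l-value.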
1669 static
1670 llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
1671                                        llvm::SmallVector<unsigned, 4> &Elts) {
1672   llvm::SmallVector<llvm::Constant*, 4> CElts;
1673 
1674   const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
1675   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
1676     CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
1677 
1678   return llvm::ConstantVector::get(CElts);
1679 }
1680 
1681 LValue CodeGenFunction::
1682 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
1683   // Emit the base vector as an l-value.
1684   LValue Base;
1685 
1686   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1687   if (E->isArrow()) {
1688     // If it is a pointer to a vector, emit the address and form an lvalue with
1689     // it.
1690     llvm::Value *Ptr = EmitScalarExpr(E->getBase());
1691     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
1692     Base = MakeAddrLValue(Ptr, PT->getPointeeType());
1693     Base.getQuals().removeObjCGCAttr();
1694   } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1696     // emit the base as an lvalue.
1697     assert(E->getBase()->getType()->isVectorType());
1698     Base = EmitLValue(E->getBase());
1699   } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
1701     assert(E->getBase()->getType()->isVectorType() &&
1702            "Result must be a vector");
1703     llvm::Value *Vec = EmitScalarExpr(E->getBase());
1704 
1705     // Store the vector to memory (because LValue wants an address).
1706     llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
1707     Builder.CreateStore(Vec, VecMem);
1708     Base = MakeAddrLValue(VecMem, E->getBase()->getType());
1709   }
1710 
1711   QualType type =
1712     E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
1713 
1714   // Encode the element access list into a vector of unsigned indices.
1715   llvm::SmallVector<unsigned, 4> Indices;
1716   E->getEncodedElementAccess(Indices);
1717 
1718   if (Base.isSimple()) {
1719     llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
1720     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
1721   }
1722   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
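  // For a swizzle of a swizzle such as V.xyzw.yx, the outer access indexes
  // into the inner access's element list, so compose the two index lists.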
1723 
1724   llvm::Constant *BaseElts = Base.getExtVectorElts();
1725   llvm::SmallVector<llvm::Constant *, 4> CElts;
1726 
1727   for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
1728     if (isa<llvm::ConstantAggregateZero>(BaseElts))
1729       CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
1730     else
1731       CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
1732   }
1733   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
1734   return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
1735 }
1736 
1737 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
1738   bool isNonGC = false;
1739   Expr *BaseExpr = E->getBase();
1740   llvm::Value *BaseValue = NULL;
1741   Qualifiers BaseQuals;
1742 
1743   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
1744   if (E->isArrow()) {
1745     BaseValue = EmitScalarExpr(BaseExpr);
1746     const PointerType *PTy =
1747       BaseExpr->getType()->getAs<PointerType>();
1748     BaseQuals = PTy->getPointeeType().getQualifiers();
1749   } else {
1750     LValue BaseLV = EmitLValue(BaseExpr);
1751     if (BaseLV.isNonGC())
1752       isNonGC = true;
1753     // FIXME: this isn't right for bitfields.
1754     BaseValue = BaseLV.getAddress();
1755     QualType BaseTy = BaseExpr->getType();
1756     BaseQuals = BaseTy.getQualifiers();
1757   }
1758 
1759   NamedDecl *ND = E->getMemberDecl();
1760   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
1761     LValue LV = EmitLValueForField(BaseValue, Field,
1762                                    BaseQuals.getCVRQualifiers());
1763     LV.setNonGC(isNonGC);
1764     setObjCGCLValueClass(getContext(), E, LV);
1765     return LV;
1766   }
1767 
1768   if (VarDecl *VD = dyn_cast<VarDecl>(ND))
1769     return EmitGlobalVarDeclLValue(*this, E, VD);
1770 
1771   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
1772     return EmitFunctionDeclLValue(*this, E, FD);
1773 
1774   assert(false && "Unhandled member declaration!");
1775   return LValue();
1776 }
1777 
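/// EmitLValueForBitfield - Form a bit-field l-value for the given field,
/// using the bit-field layout information computed for its parent record.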
1778 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
1779                                               const FieldDecl *Field,
1780                                               unsigned CVRQualifiers) {
1781   const CGRecordLayout &RL =
1782     CGM.getTypes().getCGRecordLayout(Field->getParent());
1783   const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
1784   return LValue::MakeBitfield(BaseValue, Info,
1785                           Field->getType().withCVRQualifiers(CVRQualifiers));
1786 }
1787 
1788 /// EmitLValueForAnonRecordField - Given that the field is a member of
1789 /// an anonymous struct or union buried inside a record, and given
1790 /// that the base value is a pointer to the enclosing record, derive
1791 /// an lvalue for the ultimate field.
1792 LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
1793                                              const IndirectFieldDecl *Field,
1794                                                      unsigned CVRQualifiers) {
1795   IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
1796     IEnd = Field->chain_end();
1797   while (true) {
1798     LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I),
1799                                    CVRQualifiers);
1800     if (++I == IEnd) return LV;
1801 
1802     assert(LV.isSimple());
1803     BaseValue = LV.getAddress();
1804     CVRQualifiers |= LV.getVRQualifiers();
1805   }
1806 }
1807 
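/// EmitLValueForField - Given the address of a struct or union, form an
/// l-value for one of its fields: GEP to the field for structs, bitcast for
/// unions, and load through the field first if it has reference type.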
1808 LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
1809                                            const FieldDecl *field,
1810                                            unsigned cvr) {
1811   if (field->isBitField())
1812     return EmitLValueForBitfield(baseAddr, field, cvr);
1813 
1814   const RecordDecl *rec = field->getParent();
1815   QualType type = field->getType();
1816 
1817   bool mayAlias = rec->hasAttr<MayAliasAttr>();
1818 
1819   llvm::Value *addr = baseAddr;
1820   if (rec->isUnion()) {
1821     // For unions, there is no pointer adjustment.
1822     assert(!type->isReferenceType() && "union has reference member");
1823   } else {
1824     // For structs, we GEP to the field that the record layout suggests.
1825     unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
1826     addr = Builder.CreateStructGEP(addr, idx, field->getName());
1827 
1828     // If this is a reference field, load the reference right now.
1829     if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
1830       llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
1831       if (cvr & Qualifiers::Volatile) load->setVolatile(true);
1832 
1833       if (CGM.shouldUseTBAA()) {
1834         llvm::MDNode *tbaa;
1835         if (mayAlias)
1836           tbaa = CGM.getTBAAInfo(getContext().CharTy);
1837         else
1838           tbaa = CGM.getTBAAInfo(type);
1839         CGM.DecorateInstruction(load, tbaa);
1840       }
1841 
1842       addr = load;
1843       mayAlias = false;
1844       type = refType->getPointeeType();
1845       cvr = 0; // qualifiers don't recursively apply to referencee
1846     }
1847   }
1848 
  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs.  A union needs a bitcast; a struct
  // element needs one if the LLVM type that was laid out doesn't match the
  // desired type.
1853   addr = EmitBitCastOfLValueToProperType(*this, addr,
1854                                          CGM.getTypes().ConvertTypeForMem(type),
1855                                          field->getName());
1856 
1857   unsigned alignment = getContext().getDeclAlign(field).getQuantity();
1858   LValue LV = MakeAddrLValue(addr, type, alignment);
1859   LV.getQuals().addCVRQualifiers(cvr);
1860 
1861   // __weak attribute on a field is ignored.
1862   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
1863     LV.getQuals().removeObjCGCAttr();
1864 
1865   // Fields of may_alias structs act like 'char' for TBAA purposes.
1866   // FIXME: this should get propagated down through anonymous structs
1867   // and unions.
1868   if (mayAlias && LV.getTBAAInfo())
1869     LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
1870 
1871   return LV;
1872 }
1873 
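/// EmitLValueForFieldInitialization - Like EmitLValueForField, but for use
/// when initializing the field itself: for a reference-typed field this
/// yields the address of the reference slot instead of loading through it.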
1874 LValue
1875 CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
1876                                                   const FieldDecl *Field,
1877                                                   unsigned CVRQualifiers) {
1878   QualType FieldType = Field->getType();
1879 
1880   if (!FieldType->isReferenceType())
1881     return EmitLValueForField(BaseValue, Field, CVRQualifiers);
1882 
1883   const CGRecordLayout &RL =
1884     CGM.getTypes().getCGRecordLayout(Field->getParent());
1885   unsigned idx = RL.getLLVMFieldNo(Field);
1886   llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");

  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs.  A union needs a bitcast; a struct
  // element needs one if the LLVM type that was laid out doesn't match the
  // desired type.
1894   const llvm::Type *llvmType = ConvertTypeForMem(FieldType);
1895   unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1896   V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
1897 
1898   unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
1899   return MakeAddrLValue(V, FieldType, Alignment);
1900 }
1901 
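/// EmitCompoundLiteralLValue - Emit a compound literal, e.g. (struct Pt){1, 2},
/// by evaluating its initializer into a stack temporary and returning an
/// l-value for that temporary.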
1902 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
1903   llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
1904   const Expr *InitExpr = E->getInitializer();
1905   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
1906 
1907   EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
1908                    /*Init*/ true);
1909 
1910   return Result;
1911 }
1912 
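/// EmitConditionalOperatorLValue - Emit an l-value for a conditional operator.
/// For a glvalue conditional such as the C++ expression '(cond ? a : b) = 0',
/// each arm is emitted in its own block and the two addresses are joined with
/// a PHI in the continuation block.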
1913 LValue CodeGenFunction::
1914 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
1915   if (!expr->isGLValue()) {
    // The ?: expression here should be an aggregate.
1917     assert((hasAggregateLLVMType(expr->getType()) &&
1918             !expr->getType()->isAnyComplexType()) &&
1919            "Unexpected conditional operator!");
1920     return EmitAggExprToLValue(expr);
1921   }
1922 
1923   const Expr *condExpr = expr->getCond();
1924   bool CondExprBool;
1925   if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
1926     const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
1927     if (!CondExprBool) std::swap(live, dead);
1928 
1929     if (!ContainsLabel(dead))
1930       return EmitLValue(live);
1931   }
1932 
1933   OpaqueValueMapping binding(*this, expr);
1934 
1935   llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
1936   llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
1937   llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
1938 
1939   ConditionalEvaluation eval(*this);
1940   EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
1941 
1942   // Any temporaries created here are conditional.
1943   EmitBlock(lhsBlock);
1944   eval.begin(*this);
1945   LValue lhs = EmitLValue(expr->getTrueExpr());
1946   eval.end(*this);
1947 
1948   if (!lhs.isSimple())
1949     return EmitUnsupportedLValue(expr, "conditional operator");
1950 
1951   lhsBlock = Builder.GetInsertBlock();
1952   Builder.CreateBr(contBlock);
1953 
1954   // Any temporaries created here are conditional.
1955   EmitBlock(rhsBlock);
1956   eval.begin(*this);
1957   LValue rhs = EmitLValue(expr->getFalseExpr());
1958   eval.end(*this);
1959   if (!rhs.isSimple())
1960     return EmitUnsupportedLValue(expr, "conditional operator");
1961   rhsBlock = Builder.GetInsertBlock();
1962 
1963   EmitBlock(contBlock);
1964 
1965   llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
1966                                          "cond-lvalue");
1967   phi->addIncoming(lhs.getAddress(), lhsBlock);
1968   phi->addIncoming(rhs.getAddress(), rhsBlock);
1969   return MakeAddrLValue(phi, expr->getType());
1970 }
1971 
/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
/// If the cast is a dynamic_cast, we can have the usual lvalue result.
/// Otherwise, if a cast is needed by the code generator in an lvalue context,
/// it must mean that we need the address of an aggregate in order to access
/// one of its fields.  This can happen for all the reasons that casts are
/// permitted with an aggregate result, including noop aggregate casts and
/// casts from scalar to union.
1979 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
1980   switch (E->getCastKind()) {
1981   case CK_ToVoid:
1982     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
1983 
1984   case CK_Dependent:
1985     llvm_unreachable("dependent cast kind in IR gen!");
1986 
1987   case CK_GetObjCProperty: {
1988     LValue LV = EmitLValue(E->getSubExpr());
1989     assert(LV.isPropertyRef());
1990     RValue RV = EmitLoadOfPropertyRefLValue(LV);
1991 
1992     // Property is an aggregate r-value.
1993     if (RV.isAggregate()) {
1994       return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
1995     }
1996 
1997     // Implicit property returns an l-value.
1998     assert(RV.isScalar());
1999     return MakeAddrLValue(RV.getScalarVal(), E->getSubExpr()->getType());
2000   }
2001 
2002   case CK_NoOp:
2003   case CK_LValueToRValue:
2004     if (!E->getSubExpr()->Classify(getContext()).isPRValue()
2005         || E->getType()->isRecordType())
2006       return EmitLValue(E->getSubExpr());
2007     // Fall through to synthesize a temporary.
2008 
2009   case CK_BitCast:
2010   case CK_ArrayToPointerDecay:
2011   case CK_FunctionToPointerDecay:
2012   case CK_NullToMemberPointer:
2013   case CK_NullToPointer:
2014   case CK_IntegralToPointer:
2015   case CK_PointerToIntegral:
2016   case CK_PointerToBoolean:
2017   case CK_VectorSplat:
2018   case CK_IntegralCast:
2019   case CK_IntegralToBoolean:
2020   case CK_IntegralToFloating:
2021   case CK_FloatingToIntegral:
2022   case CK_FloatingToBoolean:
2023   case CK_FloatingCast:
2024   case CK_FloatingRealToComplex:
2025   case CK_FloatingComplexToReal:
2026   case CK_FloatingComplexToBoolean:
2027   case CK_FloatingComplexCast:
2028   case CK_FloatingComplexToIntegralComplex:
2029   case CK_IntegralRealToComplex:
2030   case CK_IntegralComplexToReal:
2031   case CK_IntegralComplexToBoolean:
2032   case CK_IntegralComplexCast:
2033   case CK_IntegralComplexToFloatingComplex:
2034   case CK_DerivedToBaseMemberPointer:
2035   case CK_BaseToDerivedMemberPointer:
2036   case CK_MemberPointerToBoolean:
2037   case CK_AnyPointerToBlockPointerCast:
2038   case CK_ObjCProduceObject:
2039   case CK_ObjCConsumeObject:
2040   case CK_ObjCReclaimReturnedObject: {
2041     // These casts only produce lvalues when we're binding a reference to a
2042     // temporary realized from a (converted) pure rvalue. Emit the expression
2043     // as a value, copy it into a temporary, and return an lvalue referring to
2044     // that temporary.
2045     llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
2046     EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
2047     return MakeAddrLValue(V, E->getType());
2048   }
2049 
2050   case CK_Dynamic: {
2051     LValue LV = EmitLValue(E->getSubExpr());
2052     llvm::Value *V = LV.getAddress();
2053     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
2054     return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
2055   }
2056 
2057   case CK_ConstructorConversion:
2058   case CK_UserDefinedConversion:
2059   case CK_AnyPointerToObjCPointerCast:
2060     return EmitLValue(E->getSubExpr());
2061 
2062   case CK_UncheckedDerivedToBase:
2063   case CK_DerivedToBase: {
2064     const RecordType *DerivedClassTy =
2065       E->getSubExpr()->getType()->getAs<RecordType>();
2066     CXXRecordDecl *DerivedClassDecl =
2067       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2068 
2069     LValue LV = EmitLValue(E->getSubExpr());
2070     llvm::Value *This = LV.getAddress();
2071 
2072     // Perform the derived-to-base conversion
2073     llvm::Value *Base =
2074       GetAddressOfBaseClass(This, DerivedClassDecl,
2075                             E->path_begin(), E->path_end(),
2076                             /*NullCheckValue=*/false);
2077 
2078     return MakeAddrLValue(Base, E->getType());
2079   }
2080   case CK_ToUnion:
2081     return EmitAggExprToLValue(E);
2082   case CK_BaseToDerived: {
2083     const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
2084     CXXRecordDecl *DerivedClassDecl =
2085       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
2086 
2087     LValue LV = EmitLValue(E->getSubExpr());
2088 
2089     // Perform the base-to-derived conversion
2090     llvm::Value *Derived =
2091       GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
2092                                E->path_begin(), E->path_end(),
2093                                /*NullCheckValue=*/false);
2094 
2095     return MakeAddrLValue(Derived, E->getType());
2096   }
2097   case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or C-style equivalent).
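    // For example: reinterpret_cast<float &>(someInt) = 0.0f;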
2099     const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
2100 
2101     LValue LV = EmitLValue(E->getSubExpr());
2102     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2103                                            ConvertType(CE->getTypeAsWritten()));
2104     return MakeAddrLValue(V, E->getType());
2105   }
2106   case CK_ObjCObjectLValueCast: {
2107     LValue LV = EmitLValue(E->getSubExpr());
2108     QualType ToType = getContext().getLValueReferenceType(E->getType());
2109     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
2110                                            ConvertType(ToType));
2111     return MakeAddrLValue(V, E->getType());
2112   }
2113   }
2114 
2115   llvm_unreachable("Unhandled lvalue cast kind?");
2116 }
2117 
2118 LValue CodeGenFunction::EmitNullInitializationLValue(
2119                                               const CXXScalarValueInitExpr *E) {
2120   QualType Ty = E->getType();
2121   LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
2122   EmitNullInitialization(LV.getAddress(), Ty);
2123   return LV;
2124 }
2125 
2126 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
2127   assert(e->isGLValue() || e->getType()->isRecordType());
2128   return getOpaqueLValueMapping(e);
2129 }
2130 
2131 LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
2132                                            const MaterializeTemporaryExpr *E) {
2133   RValue RV = EmitReferenceBindingToExpr(E->GetTemporaryExpr(),
2134                                          /*InitializedDecl=*/0);
2135   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2136 }
2137 
2138 
2139 //===--------------------------------------------------------------------===//
2140 //                             Expression Emission
2141 //===--------------------------------------------------------------------===//
2142 
2143 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
2144                                      ReturnValueSlot ReturnValue) {
2145   if (CGDebugInfo *DI = getDebugInfo()) {
2146     DI->setLocation(E->getLocStart());
2147     DI->UpdateLineDirectiveRegion(Builder);
2148     DI->EmitStopPoint(Builder);
2149   }
2150 
  // Builtins never have block type, so it is safe to handle block calls
  // before checking for builtins.
2152   if (E->getCallee()->getType()->isBlockPointerType())
2153     return EmitBlockCallExpr(E, ReturnValue);
2154 
2155   if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
2156     return EmitCXXMemberCallExpr(CE, ReturnValue);
2157 
2158   const Decl *TargetDecl = 0;
2159   if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
2160     if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
2161       TargetDecl = DRE->getDecl();
2162       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
2163         if (unsigned builtinID = FD->getBuiltinID())
2164           return EmitBuiltinExpr(FD, builtinID, E);
2165     }
2166   }
2167 
2168   if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
2169     if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
2170       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
2171 
2172   if (const CXXPseudoDestructorExpr *PseudoDtor
2173           = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
2174     QualType DestroyedType = PseudoDtor->getDestroyedType();
2175     if (getContext().getLangOptions().ObjCAutoRefCount &&
2176         DestroyedType->isObjCLifetimeType() &&
2177         (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
2178          DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
2179       // Automatic Reference Counting:
2180       //   If the pseudo-expression names a retainable object with weak or
2181       //   strong lifetime, the object shall be released.
2182       Expr *BaseExpr = PseudoDtor->getBase();
2183       llvm::Value *BaseValue = NULL;
2184       Qualifiers BaseQuals;
2185 
2186       // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2187       if (PseudoDtor->isArrow()) {
2188         BaseValue = EmitScalarExpr(BaseExpr);
2189         const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
2190         BaseQuals = PTy->getPointeeType().getQualifiers();
2191       } else {
2192         LValue BaseLV = EmitLValue(BaseExpr);
2193         BaseValue = BaseLV.getAddress();
2194         QualType BaseTy = BaseExpr->getType();
2195         BaseQuals = BaseTy.getQualifiers();
2196       }
2197 
2198       switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
2199       case Qualifiers::OCL_None:
2200       case Qualifiers::OCL_ExplicitNone:
2201       case Qualifiers::OCL_Autoreleasing:
2202         break;
2203 
2204       case Qualifiers::OCL_Strong:
2205         EmitARCRelease(Builder.CreateLoad(BaseValue,
2206                           PseudoDtor->getDestroyedType().isVolatileQualified()),
2207                        /*precise*/ true);
2208         break;
2209 
2210       case Qualifiers::OCL_Weak:
2211         EmitARCDestroyWeak(BaseValue);
2212         break;
2213       }
2214     } else {
2215       // C++ [expr.pseudo]p1:
2216       //   The result shall only be used as the operand for the function call
2217       //   operator (), and the result of such a call has type void. The only
2218       //   effect is the evaluation of the postfix-expression before the dot or
2219       //   arrow.
2220       EmitScalarExpr(E->getCallee());
2221     }
2222 
2223     return RValue::get(0);
2224   }
2225 
2226   llvm::Value *Callee = EmitScalarExpr(E->getCallee());
2227   return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
2228                   E->arg_begin(), E->arg_end(), TargetDecl);
2229 }
2230 
2231 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
2232   // Comma expressions just emit their LHS then their RHS as an l-value.
2233   if (E->getOpcode() == BO_Comma) {
2234     EmitIgnoredExpr(E->getLHS());
2235     EnsureInsertPoint();
2236     return EmitLValue(E->getRHS());
2237   }
2238 
2239   if (E->getOpcode() == BO_PtrMemD ||
2240       E->getOpcode() == BO_PtrMemI)
2241     return EmitPointerToDataMemberBinaryExpr(E);
2242 
2243   assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
2244 
2245   // Note that in all of these cases, __block variables need the RHS
2246   // evaluated first just in case the variable gets moved by the RHS.
2247 
2248   if (!hasAggregateLLVMType(E->getType())) {
2249     switch (E->getLHS()->getType().getObjCLifetime()) {
2250     case Qualifiers::OCL_Strong:
2251       return EmitARCStoreStrong(E, /*ignored*/ false).first;
2252 
2253     case Qualifiers::OCL_Autoreleasing:
2254       return EmitARCStoreAutoreleasing(E).first;
2255 
2256     // No reason to do any of these differently.
2257     case Qualifiers::OCL_None:
2258     case Qualifiers::OCL_ExplicitNone:
2259     case Qualifiers::OCL_Weak:
2260       break;
2261     }
2262 
2263     RValue RV = EmitAnyExpr(E->getRHS());
2264     LValue LV = EmitLValue(E->getLHS());
2265     EmitStoreThroughLValue(RV, LV);
2266     return LV;
2267   }
2268 
2269   if (E->getType()->isAnyComplexType())
2270     return EmitComplexAssignmentLValue(E);
2271 
2272   return EmitAggExprToLValue(E);
2273 }
2274 
2275 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
2276   RValue RV = EmitCallExpr(E);
2277 
2278   if (!RV.isScalar())
2279     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2280 
2281   assert(E->getCallReturnType()->isReferenceType() &&
2282          "Can't have a scalar return unless the return type is a "
2283          "reference type!");
2284 
2285   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2286 }
2287 
2288 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
2289   // FIXME: This shouldn't require another copy.
2290   return EmitAggExprToLValue(E);
2291 }
2292 
2293 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
2294   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
2295          && "binding l-value to type which needs a temporary");
2296   AggValueSlot Slot = CreateAggTemp(E->getType(), "tmp");
2297   EmitCXXConstructExpr(E, Slot);
2298   return MakeAddrLValue(Slot.getAddr(), E->getType());
2299 }
2300 
2301 LValue
2302 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
2303   return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
2304 }
2305 
2306 LValue
2307 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
2308   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2309   Slot.setLifetimeExternallyManaged();
2310   EmitAggExpr(E->getSubExpr(), Slot);
2311   EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
2312   return MakeAddrLValue(Slot.getAddr(), E->getType());
2313 }
2314 
2315 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
2316   RValue RV = EmitObjCMessageExpr(E);
2317 
2318   if (!RV.isScalar())
2319     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2320 
2321   assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
2322          "Can't have a scalar return unless the return type is a "
2323          "reference type!");
2324 
2325   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2326 }
2327 
2328 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
2329   llvm::Value *V =
2330     CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
2331   return MakeAddrLValue(V, E->getType());
2332 }
2333 
2334 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2335                                              const ObjCIvarDecl *Ivar) {
2336   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
2337 }
2338 
2339 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
2340                                           llvm::Value *BaseValue,
2341                                           const ObjCIvarDecl *Ivar,
2342                                           unsigned CVRQualifiers) {
2343   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
2344                                                    Ivar, CVRQualifiers);
2345 }
2346 
2347 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
2348   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
2349   llvm::Value *BaseValue = 0;
2350   const Expr *BaseExpr = E->getBase();
2351   Qualifiers BaseQuals;
2352   QualType ObjectTy;
2353   if (E->isArrow()) {
2354     BaseValue = EmitScalarExpr(BaseExpr);
2355     ObjectTy = BaseExpr->getType()->getPointeeType();
2356     BaseQuals = ObjectTy.getQualifiers();
2357   } else {
2358     LValue BaseLV = EmitLValue(BaseExpr);
2359     // FIXME: this isn't right for bitfields.
2360     BaseValue = BaseLV.getAddress();
2361     ObjectTy = BaseExpr->getType();
2362     BaseQuals = ObjectTy.getQualifiers();
2363   }
2364 
2365   LValue LV =
2366     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
2367                       BaseQuals.getCVRQualifiers());
2368   setObjCGCLValueClass(getContext(), E, LV);
2369   return LV;
2370 }
2371 
2372 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // We can only get an l-value for a statement expression that returns an
  // aggregate type.
2374   RValue RV = EmitAnyExprToTemp(E);
2375   return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2376 }
2377 
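/// EmitCall - Emit a call through a pointer to a function: canonicalize the
/// callee type, emit the argument list, and dispatch to the
/// CGFunctionInfo-based EmitCall overload.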
2378 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
2379                                  ReturnValueSlot ReturnValue,
2380                                  CallExpr::const_arg_iterator ArgBeg,
2381                                  CallExpr::const_arg_iterator ArgEnd,
2382                                  const Decl *TargetDecl) {
2383   // Get the actual function type. The callee type will always be a pointer to
2384   // function type or a block pointer type.
2385   assert(CalleeType->isFunctionPointerType() &&
2386          "Call must have function pointer type!");
2387 
2388   CalleeType = getContext().getCanonicalType(CalleeType);
2389 
2390   const FunctionType *FnType
2391     = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
2392 
2393   CallArgList Args;
2394   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
2395 
2396   return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
2397                   Callee, ReturnValue, Args, TargetDecl);
2398 }
2399 
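/// EmitPointerToDataMemberBinaryExpr - Emit an l-value for a .* or ->*
/// expression whose RHS is a pointer to a data member, e.g. 'obj.*member';
/// the C++ ABI computes the adjusted field address from the base address and
/// the member-pointer value.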
2400 LValue CodeGenFunction::
2401 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
2402   llvm::Value *BaseV;
2403   if (E->getOpcode() == BO_PtrMemI)
2404     BaseV = EmitScalarExpr(E->getLHS());
2405   else
2406     BaseV = EmitLValue(E->getLHS()).getAddress();
2407 
2408   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
2409 
2410   const MemberPointerType *MPT
2411     = E->getRHS()->getType()->getAs<MemberPointerType>();
2412 
2413   llvm::Value *AddV =
2414     CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
2415 
2416   return MakeAddrLValue(AddV, MPT->getPointeeType());
2417 }
2418