1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CodeGenTBAA.h"
17 #include "CGCall.h"
18 #include "CGCXXABI.h"
19 #include "CGRecordLayout.h"
20 #include "CGObjCRuntime.h"
21 #include "clang/AST/ASTContext.h"
22 #include "clang/AST/DeclObjC.h"
23 #include "llvm/Intrinsics.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Target/TargetData.h"
26 using namespace clang;
27 using namespace CodeGen;
28 
29 //===--------------------------------------------------------------------===//
30 //                        Miscellaneous Helper Methods
31 //===--------------------------------------------------------------------===//
32 
33 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
34 /// block.
35 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
36                                                     const llvm::Twine &Name) {
37   if (!Builder.isNamePreserving())
38     return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
39   return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
40 }
41 
42 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
43                                      llvm::Value *Init) {
44   llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
45   llvm::BasicBlock *Block = AllocaInsertPt->getParent();
46   Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
47 }
48 
49 llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
50                                                 const llvm::Twine &Name) {
51   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
52   // FIXME: Should we prefer the preferred type alignment here?
53   CharUnits Align = getContext().getTypeAlignInChars(Ty);
54   Alloc->setAlignment(Align.getQuantity());
55   return Alloc;
56 }
57 
58 llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
59                                                  const llvm::Twine &Name) {
60   llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
61   // FIXME: Should we prefer the preferred type alignment here?
62   CharUnits Align = getContext().getTypeAlignInChars(Ty);
63   Alloc->setAlignment(Align.getQuantity());
64   return Alloc;
65 }
66 
67 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
68 /// expression and compare the result against zero, returning an Int1Ty value.
69 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
70   if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
71     llvm::Value *MemPtr = EmitScalarExpr(E);
72     return CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
73   }
74 
75   QualType BoolTy = getContext().BoolTy;
76   if (!E->getType()->isAnyComplexType())
77     return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
78 
79   return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
80 }
81 
82 /// EmitAnyExpr - Emit code to compute the specified expression which
83 /// can have any type.  The result is returned as an RValue struct.
84 /// If this is an aggregate expression, AggSlot indicates where the
85 /// result should be returned.
86 RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
87                                     bool IgnoreResult) {
88   if (!hasAggregateLLVMType(E->getType()))
89     return RValue::get(EmitScalarExpr(E, IgnoreResult));
90   else if (E->getType()->isAnyComplexType())
91     return RValue::getComplex(EmitComplexExpr(E, false, false,
92                                               IgnoreResult, IgnoreResult));
93 
94   EmitAggExpr(E, AggSlot, IgnoreResult);
95   return AggSlot.asRValue();
96 }
97 
98 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will
99 /// always be accessible even if no aggregate location is provided.
100 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
101   AggValueSlot AggSlot = AggValueSlot::ignored();
102 
103   if (hasAggregateLLVMType(E->getType()) &&
104       !E->getType()->isAnyComplexType())
105     AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
106   return EmitAnyExpr(E, AggSlot);
107 }
108 
109 /// EmitAnyExprToMem - Evaluate an expression into a given memory
110 /// location.
111 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
112                                        llvm::Value *Location,
113                                        bool IsLocationVolatile,
114                                        bool IsInit) {
115   if (E->getType()->isComplexType())
116     EmitComplexExprIntoAddr(E, Location, IsLocationVolatile);
117   else if (hasAggregateLLVMType(E->getType()))
118     EmitAggExpr(E, AggValueSlot::forAddr(Location, IsLocationVolatile, IsInit));
119   else {
120     RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
121     LValue LV = MakeAddrLValue(Location, E->getType());
122     EmitStoreThroughLValue(RV, LV, E->getType());
123   }
124 }
125 
126 /// \brief An adjustment to be made to the temporary created when emitting a
127 /// reference binding, which accesses a particular subobject of that temporary.
128 struct SubobjectAdjustment {
129   enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
130 
131   union {
132     struct {
133       const CastExpr *BasePath;
134       const CXXRecordDecl *DerivedClass;
135     } DerivedToBase;
136 
137     FieldDecl *Field;
138   };
139 
140   SubobjectAdjustment(const CastExpr *BasePath,
141                       const CXXRecordDecl *DerivedClass)
142     : Kind(DerivedToBaseAdjustment)
143   {
144     DerivedToBase.BasePath = BasePath;
145     DerivedToBase.DerivedClass = DerivedClass;
146   }
147 
148   SubobjectAdjustment(FieldDecl *Field)
149     : Kind(FieldAdjustment)
150   {
151     this->Field = Field;
152   }
153 };
154 
155 static llvm::Value *
156 CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type,
157                          const NamedDecl *InitializedDecl) {
158   if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
159     if (VD->hasGlobalStorage()) {
160       llvm::SmallString<256> Name;
161       CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Name);
162 
163       const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
164 
165       // Create the reference temporary.
166       llvm::GlobalValue *RefTemp =
167         new llvm::GlobalVariable(CGF.CGM.getModule(),
168                                  RefTempTy, /*isConstant=*/false,
169                                  llvm::GlobalValue::InternalLinkage,
170                                  llvm::Constant::getNullValue(RefTempTy),
171                                  Name.str());
172       return RefTemp;
173     }
174   }
175 
176   return CGF.CreateMemTemp(Type, "ref.tmp");
177 }
178 
179 static llvm::Value *
180 EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
181                             llvm::Value *&ReferenceTemporary,
182                             const CXXDestructorDecl *&ReferenceTemporaryDtor,
183                             const NamedDecl *InitializedDecl) {
184   if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
185     E = DAE->getExpr();
186 
187   if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
188     CodeGenFunction::RunCleanupsScope Scope(CGF);
189 
190     return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
191                                        ReferenceTemporary,
192                                        ReferenceTemporaryDtor,
193                                        InitializedDecl);
194   }
195 
196   RValue RV;
197   if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) {
198     // Emit the expression as an lvalue.
199     LValue LV = CGF.EmitLValue(E);
200     if (LV.isPropertyRef() || LV.isKVCRef()) {
201       QualType QT = E->getType();
202       RValue RV =
203         LV.isPropertyRef() ? CGF.EmitLoadOfPropertyRefLValue(LV, QT)
204                            : CGF.EmitLoadOfKVCRefLValue(LV, QT);
205       assert(RV.isScalar() && "EmitExprForReferenceBinding");
206       return RV.getScalarVal();
207     }
208 
209     if (LV.isSimple())
210       return LV.getAddress();
211 
212     // We have to load the lvalue.
213     RV = CGF.EmitLoadOfLValue(LV, E->getType());
214   } else {
215     QualType ResultTy = E->getType();
216 
217     llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
218     while (true) {
219       if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
220         E = PE->getSubExpr();
221         continue;
222       }
223 
224       if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
225         if ((CE->getCastKind() == CK_DerivedToBase ||
226              CE->getCastKind() == CK_UncheckedDerivedToBase) &&
227             E->getType()->isRecordType()) {
228           E = CE->getSubExpr();
229           CXXRecordDecl *Derived
230             = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
231           Adjustments.push_back(SubobjectAdjustment(CE, Derived));
232           continue;
233         }
234 
235         if (CE->getCastKind() == CK_NoOp) {
236           E = CE->getSubExpr();
237           continue;
238         }
239       } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
240         if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid &&
241             ME->getBase()->getType()->isRecordType()) {
242           if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
243             E = ME->getBase();
244             Adjustments.push_back(SubobjectAdjustment(Field));
245             continue;
246           }
247         }
248       }
249 
250       // Nothing changed.
251       break;
252     }
253 
254     // Create a reference temporary if necessary.
255     AggValueSlot AggSlot = AggValueSlot::ignored();
256     if (CGF.hasAggregateLLVMType(E->getType()) &&
257         !E->getType()->isAnyComplexType()) {
258       ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
259                                                     InitializedDecl);
260       AggSlot = AggValueSlot::forAddr(ReferenceTemporary, false,
261                                       InitializedDecl != 0);
262     }
263 
264     RV = CGF.EmitAnyExpr(E, AggSlot);
265 
266     if (InitializedDecl) {
267       // Get the destructor for the reference temporary.
268       if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
269         CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
270         if (!ClassDecl->hasTrivialDestructor())
271           ReferenceTemporaryDtor = ClassDecl->getDestructor();
272       }
273     }
274 
275     // Check if need to perform derived-to-base casts and/or field accesses, to
276     // get from the temporary object we created (and, potentially, for which we
277     // extended the lifetime) to the subobject we're binding the reference to.
278     if (!Adjustments.empty()) {
279       llvm::Value *Object = RV.getAggregateAddr();
280       for (unsigned I = Adjustments.size(); I != 0; --I) {
281         SubobjectAdjustment &Adjustment = Adjustments[I-1];
282         switch (Adjustment.Kind) {
283         case SubobjectAdjustment::DerivedToBaseAdjustment:
284           Object =
285               CGF.GetAddressOfBaseClass(Object,
286                                         Adjustment.DerivedToBase.DerivedClass,
287                               Adjustment.DerivedToBase.BasePath->path_begin(),
288                               Adjustment.DerivedToBase.BasePath->path_end(),
289                                         /*NullCheckValue=*/false);
290           break;
291 
292         case SubobjectAdjustment::FieldAdjustment: {
293           LValue LV =
294             CGF.EmitLValueForField(Object, Adjustment.Field, 0);
295           if (LV.isSimple()) {
296             Object = LV.getAddress();
297             break;
298           }
299 
300           // For non-simple lvalues, we actually have to create a copy of
301           // the object we're binding to.
302           QualType T = Adjustment.Field->getType().getNonReferenceType()
303                                                   .getUnqualifiedType();
304           Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
305           LValue TempLV = CGF.MakeAddrLValue(Object,
306                                              Adjustment.Field->getType());
307           CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T);
308           break;
309         }
310 
311         }
312       }
313 
314       const llvm::Type *ResultPtrTy = CGF.ConvertType(ResultTy)->getPointerTo();
315       return CGF.Builder.CreateBitCast(Object, ResultPtrTy, "temp");
316     }
317   }
318 
319   if (RV.isAggregate())
320     return RV.getAggregateAddr();
321 
322   // Create a temporary variable that we can bind the reference to.
323   ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
324                                                 InitializedDecl);
325 
326 
327   unsigned Alignment =
328     CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
329   if (RV.isScalar())
330     CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
331                           /*Volatile=*/false, Alignment, E->getType());
332   else
333     CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
334                            /*Volatile=*/false);
335   return ReferenceTemporary;
336 }
337 
338 RValue
339 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
340                                             const NamedDecl *InitializedDecl) {
341   llvm::Value *ReferenceTemporary = 0;
342   const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
343   llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
344                                                    ReferenceTemporaryDtor,
345                                                    InitializedDecl);
346   if (!ReferenceTemporaryDtor)
347     return RValue::get(Value);
348 
349   // Make sure to call the destructor for the reference temporary.
350   if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
351     if (VD->hasGlobalStorage()) {
352       llvm::Constant *DtorFn =
353         CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
354       CGF.EmitCXXGlobalDtorRegistration(DtorFn,
355                                       cast<llvm::Constant>(ReferenceTemporary));
356 
357       return RValue::get(Value);
358     }
359   }
360 
361   PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
362 
363   return RValue::get(Value);
364 }
365 
366 
367 /// getAccessedFieldNo - Given an encoded value and a result number, return the
368 /// input field number being accessed.
369 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
370                                              const llvm::Constant *Elts) {
371   if (isa<llvm::ConstantAggregateZero>(Elts))
372     return 0;
373 
374   return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
375 }
376 
377 void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
378   if (!CatchUndefined)
379     return;
380 
381   Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
382 
383   const llvm::Type *IntPtrT = IntPtrTy;
384   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
385   const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);
386 
387   // In time, people may want to control this and use a 1 here.
388   llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
389   llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
390   llvm::BasicBlock *Cont = createBasicBlock();
391   llvm::BasicBlock *Check = createBasicBlock();
392   llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
393   Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
394 
395   EmitBlock(Check);
396   Builder.CreateCondBr(Builder.CreateICmpUGE(C,
397                                         llvm::ConstantInt::get(IntPtrTy, Size)),
398                        Cont, getTrapBB());
399   EmitBlock(Cont);
400 }
401 
402 
403 CodeGenFunction::ComplexPairTy CodeGenFunction::
404 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
405                          bool isInc, bool isPre) {
406   ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
407                                             LV.isVolatileQualified());
408 
409   llvm::Value *NextVal;
410   if (isa<llvm::IntegerType>(InVal.first->getType())) {
411     uint64_t AmountVal = isInc ? 1 : -1;
412     NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
413 
414     // Add the inc/dec to the real part.
415     NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
416   } else {
417     QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
418     llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
419     if (!isInc)
420       FVal.changeSign();
421     NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
422 
423     // Add the inc/dec to the real part.
424     NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
425   }
426 
427   ComplexPairTy IncVal(NextVal, InVal.second);
428 
429   // Store the updated result through the lvalue.
430   StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
431 
432   // If this is a postinc, return the value read from memory, otherwise use the
433   // updated value.
434   return isPre ? IncVal : InVal;
435 }
436 
437 
438 //===----------------------------------------------------------------------===//
439 //                         LValue Expression Emission
440 //===----------------------------------------------------------------------===//
441 
442 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
443   if (Ty->isVoidType())
444     return RValue::get(0);
445 
446   if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
447     const llvm::Type *EltTy = ConvertType(CTy->getElementType());
448     llvm::Value *U = llvm::UndefValue::get(EltTy);
449     return RValue::getComplex(std::make_pair(U, U));
450   }
451 
452   // If this is a use of an undefined aggregate type, the aggregate must have an
453   // identifiable address.  Just because the contents of the value are undefined
454   // doesn't mean that the address can't be taken and compared.
455   if (hasAggregateLLVMType(Ty)) {
456     llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
457     return RValue::getAggregate(DestPtr);
458   }
459 
460   return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
461 }
462 
463 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
464                                               const char *Name) {
465   ErrorUnsupported(E, Name);
466   return GetUndefRValue(E->getType());
467 }
468 
469 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
470                                               const char *Name) {
471   ErrorUnsupported(E, Name);
472   llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
473   return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
474 }
475 
476 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
477   LValue LV = EmitLValue(E);
478   if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
479     EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
480   return LV;
481 }
482 
483 /// EmitLValue - Emit code to compute a designator that specifies the location
484 /// of the expression.
485 ///
486 /// This can return one of two things: a simple address or a bitfield reference.
487 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
488 /// an LLVM pointer type.
489 ///
490 /// If this returns a bitfield reference, nothing about the pointee type of the
491 /// LLVM value is known: For example, it may not be a pointer to an integer.
492 ///
493 /// If this returns a normal address, and if the lvalue's C type is fixed size,
494 /// this method guarantees that the returned pointer type will point to an LLVM
495 /// type of the same size of the lvalue's type.  If the lvalue has a variable
496 /// length type, this is not possible.
497 ///
498 LValue CodeGenFunction::EmitLValue(const Expr *E) {
499   llvm::DenseMap<const Expr *, LValue>::iterator I =
500                                       CGF.ConditionalSaveLValueExprs.find(E);
501   if (I != CGF.ConditionalSaveLValueExprs.end())
502     return I->second;
503 
504   switch (E->getStmtClass()) {
505   default: return EmitUnsupportedLValue(E, "l-value expression");
506 
507   case Expr::ObjCSelectorExprClass:
508   return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
509   case Expr::ObjCIsaExprClass:
510     return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
511   case Expr::BinaryOperatorClass:
512     return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
513   case Expr::CompoundAssignOperatorClass:
514     return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
515   case Expr::CallExprClass:
516   case Expr::CXXMemberCallExprClass:
517   case Expr::CXXOperatorCallExprClass:
518     return EmitCallExprLValue(cast<CallExpr>(E));
519   case Expr::VAArgExprClass:
520     return EmitVAArgExprLValue(cast<VAArgExpr>(E));
521   case Expr::DeclRefExprClass:
522     return EmitDeclRefLValue(cast<DeclRefExpr>(E));
523   case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
524   case Expr::PredefinedExprClass:
525     return EmitPredefinedLValue(cast<PredefinedExpr>(E));
526   case Expr::StringLiteralClass:
527     return EmitStringLiteralLValue(cast<StringLiteral>(E));
528   case Expr::ObjCEncodeExprClass:
529     return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
530 
531   case Expr::BlockDeclRefExprClass:
532     return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
533 
534   case Expr::CXXTemporaryObjectExprClass:
535   case Expr::CXXConstructExprClass:
536     return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
537   case Expr::CXXBindTemporaryExprClass:
538     return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
539   case Expr::CXXExprWithTemporariesClass:
540     return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
541   case Expr::CXXScalarValueInitExprClass:
542     return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
543   case Expr::CXXDefaultArgExprClass:
544     return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
545   case Expr::CXXTypeidExprClass:
546     return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
547 
548   case Expr::ObjCMessageExprClass:
549     return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
550   case Expr::ObjCIvarRefExprClass:
551     return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
552   case Expr::ObjCPropertyRefExprClass:
553     return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
554   case Expr::ObjCImplicitSetterGetterRefExprClass:
555     return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
556   case Expr::StmtExprClass:
557     return EmitStmtExprLValue(cast<StmtExpr>(E));
558   case Expr::UnaryOperatorClass:
559     return EmitUnaryOpLValue(cast<UnaryOperator>(E));
560   case Expr::ArraySubscriptExprClass:
561     return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
562   case Expr::ExtVectorElementExprClass:
563     return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
564   case Expr::MemberExprClass:
565     return EmitMemberExpr(cast<MemberExpr>(E));
566   case Expr::CompoundLiteralExprClass:
567     return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
568   case Expr::ConditionalOperatorClass:
569     return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
570   case Expr::ChooseExprClass:
571     return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
572   case Expr::ImplicitCastExprClass:
573   case Expr::CStyleCastExprClass:
574   case Expr::CXXFunctionalCastExprClass:
575   case Expr::CXXStaticCastExprClass:
576   case Expr::CXXDynamicCastExprClass:
577   case Expr::CXXReinterpretCastExprClass:
578   case Expr::CXXConstCastExprClass:
579     return EmitCastLValue(cast<CastExpr>(E));
580   }
581 }
582 
583 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
584                                               unsigned Alignment, QualType Ty,
585                                               llvm::MDNode *TBAAInfo) {
586   llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
587   if (Volatile)
588     Load->setVolatile(true);
589   if (Alignment)
590     Load->setAlignment(Alignment);
591   if (TBAAInfo)
592     CGM.DecorateInstruction(Load, TBAAInfo);
593 
594   // Bool can have different representation in memory than in registers.
595   llvm::Value *V = Load;
596   if (Ty->isBooleanType())
597     if (V->getType() != llvm::Type::getInt1Ty(VMContext))
598       V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");
599 
600   return V;
601 }
602 
603 static bool isBooleanUnderlyingType(QualType Ty) {
604   if (const EnumType *ET = dyn_cast<EnumType>(Ty))
605     return ET->getDecl()->getIntegerType()->isBooleanType();
606   return false;
607 }
608 
609 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
610                                         bool Volatile, unsigned Alignment,
611                                         QualType Ty,
612                                         llvm::MDNode *TBAAInfo) {
613 
614   if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
615     // Bool can have different representation in memory than in registers.
616     const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
617     Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
618   }
619 
620   llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
621   if (Alignment)
622     Store->setAlignment(Alignment);
623   if (TBAAInfo)
624     CGM.DecorateInstruction(Store, TBAAInfo);
625 }
626 
627 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
628 /// method emits the address of the lvalue, then loads the result as an rvalue,
629 /// returning the rvalue.
630 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
631   if (LV.isObjCWeak()) {
632     // load of a __weak object.
633     llvm::Value *AddrWeakObj = LV.getAddress();
634     return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
635                                                              AddrWeakObj));
636   }
637 
638   if (LV.isSimple()) {
639     llvm::Value *Ptr = LV.getAddress();
640 
641     // Functions are l-values that don't require loading.
642     if (ExprType->isFunctionType())
643       return RValue::get(Ptr);
644 
645     // Everything needs a load.
646     return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
647                                         LV.getAlignment(), ExprType,
648                                         LV.getTBAAInfo()));
649 
650   }
651 
652   if (LV.isVectorElt()) {
653     llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
654                                           LV.isVolatileQualified(), "tmp");
655     return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
656                                                     "vecext"));
657   }
658 
659   // If this is a reference to a subset of the elements of a vector, either
660   // shuffle the input or extract/insert them as appropriate.
661   if (LV.isExtVectorElt())
662     return EmitLoadOfExtVectorElementLValue(LV, ExprType);
663 
664   if (LV.isBitField())
665     return EmitLoadOfBitfieldLValue(LV, ExprType);
666 
667   if (LV.isPropertyRef())
668     return EmitLoadOfPropertyRefLValue(LV, ExprType);
669 
670   assert(LV.isKVCRef() && "Unknown LValue type!");
671   return EmitLoadOfKVCRefLValue(LV, ExprType);
672 }
673 
674 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
675                                                  QualType ExprType) {
676   const CGBitFieldInfo &Info = LV.getBitFieldInfo();
677 
678   // Get the output type.
679   const llvm::Type *ResLTy = ConvertType(ExprType);
680   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
681 
682   // Compute the result as an OR of all of the individual component accesses.
683   llvm::Value *Res = 0;
684   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
685     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
686 
687     // Get the field pointer.
688     llvm::Value *Ptr = LV.getBitFieldBaseAddr();
689 
690     // Only offset by the field index if used, so that incoming values are not
691     // required to be structures.
692     if (AI.FieldIndex)
693       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
694 
695     // Offset by the byte offset, if used.
696     if (AI.FieldByteOffset) {
697       const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
698       Ptr = Builder.CreateBitCast(Ptr, i8PTy);
699       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
700     }
701 
702     // Cast to the access type.
703     const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
704                                                     ExprType.getAddressSpace());
705     Ptr = Builder.CreateBitCast(Ptr, PTy);
706 
707     // Perform the load.
708     llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
709     if (AI.AccessAlignment)
710       Load->setAlignment(AI.AccessAlignment);
711 
712     // Shift out unused low bits and mask out unused high bits.
713     llvm::Value *Val = Load;
714     if (AI.FieldBitStart)
715       Val = Builder.CreateLShr(Load, AI.FieldBitStart);
716     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
717                                                             AI.TargetBitWidth),
718                             "bf.clear");
719 
720     // Extend or truncate to the target size.
721     if (AI.AccessWidth < ResSizeInBits)
722       Val = Builder.CreateZExt(Val, ResLTy);
723     else if (AI.AccessWidth > ResSizeInBits)
724       Val = Builder.CreateTrunc(Val, ResLTy);
725 
726     // Shift into place, and OR into the result.
727     if (AI.TargetBitOffset)
728       Val = Builder.CreateShl(Val, AI.TargetBitOffset);
729     Res = Res ? Builder.CreateOr(Res, Val) : Val;
730   }
731 
732   // If the bit-field is signed, perform the sign-extension.
733   //
734   // FIXME: This can easily be folded into the load of the high bits, which
735   // could also eliminate the mask of high bits in some situations.
736   if (Info.isSigned()) {
737     unsigned ExtraBits = ResSizeInBits - Info.getSize();
738     if (ExtraBits)
739       Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
740                                ExtraBits, "bf.val.sext");
741   }
742 
743   return RValue::get(Res);
744 }
745 
746 RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
747                                                     QualType ExprType) {
748   return EmitObjCPropertyGet(LV.getPropertyRefExpr());
749 }
750 
751 RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
752                                                QualType ExprType) {
753   return EmitObjCPropertyGet(LV.getKVCRefExpr());
754 }
755 
756 // If this is a reference to a subset of the elements of a vector, create an
757 // appropriate shufflevector.
758 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
759                                                          QualType ExprType) {
760   llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
761                                         LV.isVolatileQualified(), "tmp");
762 
763   const llvm::Constant *Elts = LV.getExtVectorElts();
764 
765   // If the result of the expression is a non-vector type, we must be extracting
766   // a single element.  Just codegen as an extractelement.
767   const VectorType *ExprVT = ExprType->getAs<VectorType>();
768   if (!ExprVT) {
769     unsigned InIdx = getAccessedFieldNo(0, Elts);
770     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
771     return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
772   }
773 
774   // Always use shuffle vector to try to retain the original program structure
775   unsigned NumResultElts = ExprVT->getNumElements();
776 
777   llvm::SmallVector<llvm::Constant*, 4> Mask;
778   for (unsigned i = 0; i != NumResultElts; ++i) {
779     unsigned InIdx = getAccessedFieldNo(i, Elts);
780     Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
781   }
782 
783   llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
784   Vec = Builder.CreateShuffleVector(Vec,
785                                     llvm::UndefValue::get(Vec->getType()),
786                                     MaskV, "tmp");
787   return RValue::get(Vec);
788 }
789 
790 
791 
792 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
793 /// lvalue, where both are guaranteed to the have the same type, and that type
794 /// is 'Ty'.
795 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
796                                              QualType Ty) {
797   if (!Dst.isSimple()) {
798     if (Dst.isVectorElt()) {
799       // Read/modify/write the vector, inserting the new element.
800       llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
801                                             Dst.isVolatileQualified(), "tmp");
802       Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
803                                         Dst.getVectorIdx(), "vecins");
804       Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
805       return;
806     }
807 
808     // If this is an update of extended vector elements, insert them as
809     // appropriate.
810     if (Dst.isExtVectorElt())
811       return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
812 
813     if (Dst.isBitField())
814       return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
815 
816     if (Dst.isPropertyRef())
817       return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
818 
819     assert(Dst.isKVCRef() && "Unknown LValue type");
820     return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
821   }
822 
823   if (Dst.isObjCWeak() && !Dst.isNonGC()) {
824     // load of a __weak object.
825     llvm::Value *LvalueDst = Dst.getAddress();
826     llvm::Value *src = Src.getScalarVal();
827      CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
828     return;
829   }
830 
831   if (Dst.isObjCStrong() && !Dst.isNonGC()) {
832     // load of a __strong object.
833     llvm::Value *LvalueDst = Dst.getAddress();
834     llvm::Value *src = Src.getScalarVal();
835     if (Dst.isObjCIvar()) {
836       assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
837       const llvm::Type *ResultType = ConvertType(getContext().LongTy);
838       llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
839       llvm::Value *dst = RHS;
840       RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
841       llvm::Value *LHS =
842         Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
843       llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
844       CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
845                                               BytesBetween);
846     } else if (Dst.isGlobalObjCRef()) {
847       CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
848                                                 Dst.isThreadLocalRef());
849     }
850     else
851       CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
852     return;
853   }
854 
855   assert(Src.isScalar() && "Can't emit an agg store with this method");
856   EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
857                     Dst.isVolatileQualified(), Dst.getAlignment(), Ty,
858                     Dst.getTBAAInfo());
859 }
860 
861 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
862                                                      QualType Ty,
863                                                      llvm::Value **Result) {
864   const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
865 
866   // Get the output type.
867   const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
868   unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
869 
870   // Get the source value, truncated to the width of the bit-field.
871   llvm::Value *SrcVal = Src.getScalarVal();
872 
873   if (Ty->isBooleanType())
874     SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
875 
876   SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
877                                                                 Info.getSize()),
878                              "bf.value");
879 
880   // Return the new value of the bit-field, if requested.
881   if (Result) {
882     // Cast back to the proper type for result.
883     const llvm::Type *SrcTy = Src.getScalarVal()->getType();
884     llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
885                                                    "bf.reload.val");
886 
887     // Sign extend if necessary.
888     if (Info.isSigned()) {
889       unsigned ExtraBits = ResSizeInBits - Info.getSize();
890       if (ExtraBits)
891         ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
892                                        ExtraBits, "bf.reload.sext");
893     }
894 
895     *Result = ReloadVal;
896   }
897 
898   // Iterate over the components, writing each piece to memory.
899   for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
900     const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
901 
902     // Get the field pointer.
903     llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
904 
905     // Only offset by the field index if used, so that incoming values are not
906     // required to be structures.
907     if (AI.FieldIndex)
908       Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
909 
910     // Offset by the byte offset, if used.
911     if (AI.FieldByteOffset) {
912       const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
913       Ptr = Builder.CreateBitCast(Ptr, i8PTy);
914       Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
915     }
916 
917     // Cast to the access type.
918     const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
919                                                      Ty.getAddressSpace());
920     Ptr = Builder.CreateBitCast(Ptr, PTy);
921 
922     // Extract the piece of the bit-field value to write in this access, limited
923     // to the values that are part of this access.
924     llvm::Value *Val = SrcVal;
925     if (AI.TargetBitOffset)
926       Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
927     Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
928                                                             AI.TargetBitWidth));
929 
930     // Extend or truncate to the access size.
931     const llvm::Type *AccessLTy =
932       llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
933     if (ResSizeInBits < AI.AccessWidth)
934       Val = Builder.CreateZExt(Val, AccessLTy);
935     else if (ResSizeInBits > AI.AccessWidth)
936       Val = Builder.CreateTrunc(Val, AccessLTy);
937 
938     // Shift into the position in memory.
939     if (AI.FieldBitStart)
940       Val = Builder.CreateShl(Val, AI.FieldBitStart);
941 
942     // If necessary, load and OR in bits that are outside of the bit-field.
943     if (AI.TargetBitWidth != AI.AccessWidth) {
944       llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
945       if (AI.AccessAlignment)
946         Load->setAlignment(AI.AccessAlignment);
947 
948       // Compute the mask for zeroing the bits that are part of the bit-field.
949       llvm::APInt InvMask =
950         ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
951                                  AI.FieldBitStart + AI.TargetBitWidth);
952 
953       // Apply the mask and OR in to the value to write.
954       Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
955     }
956 
957     // Write the value.
958     llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
959                                                  Dst.isVolatileQualified());
960     if (AI.AccessAlignment)
961       Store->setAlignment(AI.AccessAlignment);
962   }
963 }
964 
965 void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
966                                                         LValue Dst,
967                                                         QualType Ty) {
968   EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
969 }
970 
971 void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
972                                                    LValue Dst,
973                                                    QualType Ty) {
974   EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
975 }
976 
977 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
978                                                                LValue Dst,
979                                                                QualType Ty) {
980   // This access turns into a read/modify/write of the vector.  Load the input
981   // value now.
982   llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
983                                         Dst.isVolatileQualified(), "tmp");
984   const llvm::Constant *Elts = Dst.getExtVectorElts();
985 
986   llvm::Value *SrcVal = Src.getScalarVal();
987 
988   if (const VectorType *VTy = Ty->getAs<VectorType>()) {
989     unsigned NumSrcElts = VTy->getNumElements();
990     unsigned NumDstElts =
991        cast<llvm::VectorType>(Vec->getType())->getNumElements();
992     if (NumDstElts == NumSrcElts) {
993       // Use shuffle vector is the src and destination are the same number of
994       // elements and restore the vector mask since it is on the side it will be
995       // stored.
996       llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
997       for (unsigned i = 0; i != NumSrcElts; ++i) {
998         unsigned InIdx = getAccessedFieldNo(i, Elts);
999         Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
1000       }
1001 
1002       llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
1003       Vec = Builder.CreateShuffleVector(SrcVal,
1004                                         llvm::UndefValue::get(Vec->getType()),
1005                                         MaskV, "tmp");
1006     } else if (NumDstElts > NumSrcElts) {
1007       // Extended the source vector to the same length and then shuffle it
1008       // into the destination.
1009       // FIXME: since we're shuffling with undef, can we just use the indices
1010       //        into that?  This could be simpler.
1011       llvm::SmallVector<llvm::Constant*, 4> ExtMask;
1012       unsigned i;
1013       for (i = 0; i != NumSrcElts; ++i)
1014         ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
1015       for (; i != NumDstElts; ++i)
1016         ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
1017       llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
1018                                                         ExtMask.size());
1019       llvm::Value *ExtSrcVal =
1020         Builder.CreateShuffleVector(SrcVal,
1021                                     llvm::UndefValue::get(SrcVal->getType()),
1022                                     ExtMaskV, "tmp");
1023       // build identity
1024       llvm::SmallVector<llvm::Constant*, 4> Mask;
1025       for (unsigned i = 0; i != NumDstElts; ++i)
1026         Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
1027 
1028       // modify when what gets shuffled in
1029       for (unsigned i = 0; i != NumSrcElts; ++i) {
1030         unsigned Idx = getAccessedFieldNo(i, Elts);
1031         Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
1032       }
1033       llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
1034       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
1035     } else {
1036       // We should never shorten the vector
1037       assert(0 && "unexpected shorten vector length");
1038     }
1039   } else {
1040     // If the Src is a scalar (not a vector) it must be updating one element.
1041     unsigned InIdx = getAccessedFieldNo(0, Elts);
1042     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
1043     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
1044   }
1045 
1046   Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
1047 }
1048 
1049 // setObjCGCLValueClass - sets class of he lvalue for the purpose of
1050 // generating write-barries API. It is currently a global, ivar,
1051 // or neither.
1052 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1053                                  LValue &LV) {
1054   if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
1055     return;
1056 
1057   if (isa<ObjCIvarRefExpr>(E)) {
1058     LV.setObjCIvar(true);
1059     ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
1060     LV.setBaseIvarExp(Exp->getBase());
1061     LV.setObjCArray(E->getType()->isArrayType());
1062     return;
1063   }
1064 
1065   if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
1066     if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1067       if (VD->hasGlobalStorage()) {
1068         LV.setGlobalObjCRef(true);
1069         LV.setThreadLocalRef(VD->isThreadSpecified());
1070       }
1071     }
1072     LV.setObjCArray(E->getType()->isArrayType());
1073     return;
1074   }
1075 
1076   if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
1077     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1078     return;
1079   }
1080 
1081   if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
1082     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1083     if (LV.isObjCIvar()) {
1084       // If cast is to a structure pointer, follow gcc's behavior and make it
1085       // a non-ivar write-barrier.
1086       QualType ExpTy = E->getType();
1087       if (ExpTy->isPointerType())
1088         ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1089       if (ExpTy->isRecordType())
1090         LV.setObjCIvar(false);
1091     }
1092     return;
1093   }
1094   if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1095     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1096     return;
1097   }
1098 
1099   if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1100     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
1101     return;
1102   }
1103 
1104   if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1105     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1106     if (LV.isObjCIvar() && !LV.isObjCArray())
1107       // Using array syntax to assigning to what an ivar points to is not
1108       // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1109       LV.setObjCIvar(false);
1110     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
1111       // Using array syntax to assigning to what global points to is not
1112       // same as assigning to the global itself. {id *G;} G[i] = 0;
1113       LV.setGlobalObjCRef(false);
1114     return;
1115   }
1116 
1117   if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1118     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1119     // We don't know if member is an 'ivar', but this flag is looked at
1120     // only in the context of LV.isObjCIvar().
1121     LV.setObjCArray(E->getType()->isArrayType());
1122     return;
1123   }
1124 }
1125 
1126 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1127                                       const Expr *E, const VarDecl *VD) {
1128   assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1129          "Var decl must have external storage or be a file var decl!");
1130 
1131   llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1132   if (VD->getType()->isReferenceType())
1133     V = CGF.Builder.CreateLoad(V, "tmp");
1134   unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
1135   LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1136   setObjCGCLValueClass(CGF.getContext(), E, LV);
1137   return LV;
1138 }
1139 
1140 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1141                                       const Expr *E, const FunctionDecl *FD) {
1142   llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1143   if (!FD->hasPrototype()) {
1144     if (const FunctionProtoType *Proto =
1145             FD->getType()->getAs<FunctionProtoType>()) {
1146       // Ugly case: for a K&R-style definition, the type of the definition
1147       // isn't the same as the type of a use.  Correct for this with a
1148       // bitcast.
1149       QualType NoProtoType =
1150           CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
1151       NoProtoType = CGF.getContext().getPointerType(NoProtoType);
1152       V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
1153     }
1154   }
1155   unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
1156   return CGF.MakeAddrLValue(V, E->getType(), Alignment);
1157 }
1158 
1159 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
1160   const NamedDecl *ND = E->getDecl();
1161   unsigned Alignment = CGF.getContext().getDeclAlign(ND).getQuantity();
1162 
1163   if (ND->hasAttr<WeakRefAttr>()) {
1164     const ValueDecl *VD = cast<ValueDecl>(ND);
1165     llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
1166     return MakeAddrLValue(Aliasee, E->getType(), Alignment);
1167   }
1168 
1169   if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
1170 
1171     // Check if this is a global variable.
1172     if (VD->hasExternalStorage() || VD->isFileVarDecl())
1173       return EmitGlobalVarDeclLValue(*this, E, VD);
1174 
1175     bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
1176 
1177     llvm::Value *V = LocalDeclMap[VD];
1178     if (!V && VD->isStaticLocal())
1179       V = CGM.getStaticLocalDeclAddress(VD);
1180     assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1181 
1182     if (VD->hasAttr<BlocksAttr>()) {
1183       V = Builder.CreateStructGEP(V, 1, "forwarding");
1184       V = Builder.CreateLoad(V);
1185       V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
1186                                   VD->getNameAsString());
1187     }
1188     if (VD->getType()->isReferenceType())
1189       V = Builder.CreateLoad(V, "tmp");
1190 
1191     LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
1192     if (NonGCable) {
1193       LV.getQuals().removeObjCGCAttr();
1194       LV.setNonGC(true);
1195     }
1196     setObjCGCLValueClass(getContext(), E, LV);
1197     return LV;
1198   }
1199 
1200   // If we're emitting an instance method as an independent lvalue,
1201   // we're actually emitting a member pointer.
1202   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
1203     if (MD->isInstance()) {
1204       llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(MD);
1205       return MakeAddrLValue(V, MD->getType(), Alignment);
1206     }
1207   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
1208     return EmitFunctionDeclLValue(*this, E, FD);
1209 
1210   // If we're emitting a field as an independent lvalue, we're
1211   // actually emitting a member pointer.
1212   if (const FieldDecl *FD = dyn_cast<FieldDecl>(ND)) {
1213     llvm::Value *V = CGM.getCXXABI().EmitMemberPointer(FD);
1214     return MakeAddrLValue(V, FD->getType(), Alignment);
1215   }
1216 
1217   assert(false && "Unhandled DeclRefExpr");
1218 
1219   // an invalid LValue, but the assert will
1220   // ensure that this point is never reached.
1221   return LValue();
1222 }
1223 
1224 LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
1225   unsigned Alignment =
1226     CGF.getContext().getDeclAlign(E->getDecl()).getQuantity();
1227   return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
1228 }
1229 
1230 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1231   // __extension__ doesn't affect lvalue-ness.
1232   if (E->getOpcode() == UO_Extension)
1233     return EmitLValue(E->getSubExpr());
1234 
1235   QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1236   switch (E->getOpcode()) {
1237   default: assert(0 && "Unknown unary operator lvalue!");
1238   case UO_Deref: {
1239     QualType T = E->getSubExpr()->getType()->getPointeeType();
1240     assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1241 
1242     LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1243     LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1244 
1245     // We should not generate __weak write barrier on indirect reference
1246     // of a pointer to object; as in void foo (__weak id *param); *param = 0;
1247     // But, we continue to generate __strong write barrier on indirect write
1248     // into a pointer to object.
1249     if (getContext().getLangOptions().ObjC1 &&
1250         getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
1251         LV.isObjCWeak())
1252       LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1253     return LV;
1254   }
1255   case UO_Real:
1256   case UO_Imag: {
1257     LValue LV = EmitLValue(E->getSubExpr());
1258     unsigned Idx = E->getOpcode() == UO_Imag;
1259     return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
1260                                                     Idx, "idx"),
1261                           ExprTy);
1262   }
1263   case UO_PreInc:
1264   case UO_PreDec: {
1265     LValue LV = EmitLValue(E->getSubExpr());
1266     bool isInc = E->getOpcode() == UO_PreInc;
1267 
1268     if (E->getType()->isAnyComplexType())
1269       EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
1270     else
1271       EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
1272     return LV;
1273   }
1274   }
1275 }
1276 
1277 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
1278   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
1279                         E->getType());
1280 }
1281 
1282 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
1283   return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
1284                         E->getType());
1285 }
1286 
1287 
1288 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
1289   switch (E->getIdentType()) {
1290   default:
1291     return EmitUnsupportedLValue(E, "predefined expression");
1292 
1293   case PredefinedExpr::Func:
1294   case PredefinedExpr::Function:
1295   case PredefinedExpr::PrettyFunction: {
1296     unsigned Type = E->getIdentType();
1297     std::string GlobalVarName;
1298 
1299     switch (Type) {
1300     default: assert(0 && "Invalid type");
1301     case PredefinedExpr::Func:
1302       GlobalVarName = "__func__.";
1303       break;
1304     case PredefinedExpr::Function:
1305       GlobalVarName = "__FUNCTION__.";
1306       break;
1307     case PredefinedExpr::PrettyFunction:
1308       GlobalVarName = "__PRETTY_FUNCTION__.";
1309       break;
1310     }
1311 
1312     llvm::StringRef FnName = CurFn->getName();
1313     if (FnName.startswith("\01"))
1314       FnName = FnName.substr(1);
1315     GlobalVarName += FnName;
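    // Illustrative note: inside "void bar(void)", __func__ therefore maps to a
    // constant C string whose global is named "__func__.bar" (modulo uniquing).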
1316 
1317     const Decl *CurDecl = CurCodeDecl;
1318     if (CurDecl == 0)
1319       CurDecl = getContext().getTranslationUnitDecl();
1320 
1321     std::string FunctionName =
1322       PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl);
1323 
1324     llvm::Constant *C =
1325       CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
1326     return MakeAddrLValue(C, E->getType());
1327   }
1328   }
1329 }
1330 
1331 llvm::BasicBlock *CodeGenFunction::getTrapBB() {
1332   const CodeGenOptions &GCO = CGM.getCodeGenOpts();
1333 
1334   // If we are not optimizing, don't collapse all calls to trap in the function
1335   // into the same call; that way, in the debugger, one can see which operation
1336   // actually failed.  If we are optimizing, collapse all calls to trap down to
1337   // just one per function to save on code size.
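  // Illustrative sketch of the -O0 shape, one trap block per checked operation:
  //   br i1 %ok, label %cont, label %trap
  // trap:
  //   call void @llvm.trap()
  //   unreachable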
1338   if (GCO.OptimizationLevel && TrapBB)
1339     return TrapBB;
1340 
1341   llvm::BasicBlock *Cont = 0;
1342   if (HaveInsertPoint()) {
1343     Cont = createBasicBlock("cont");
1344     EmitBranch(Cont);
1345   }
1346   TrapBB = createBasicBlock("trap");
1347   EmitBlock(TrapBB);
1348 
1349   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
1350   llvm::CallInst *TrapCall = Builder.CreateCall(F);
1351   TrapCall->setDoesNotReturn();
1352   TrapCall->setDoesNotThrow();
1353   Builder.CreateUnreachable();
1354 
1355   if (Cont)
1356     EmitBlock(Cont);
1357   return TrapBB;
1358 }
1359 
1360 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
1361 /// array to pointer, return the array subexpression.
1362 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
1363   // If this isn't just an array->pointer decay, bail out.
1364   const CastExpr *CE = dyn_cast<CastExpr>(E);
1365   if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
1366     return 0;
1367 
1368   // If this is a decay from a variable-length array, bail out.
1369   const Expr *SubExpr = CE->getSubExpr();
1370   if (SubExpr->getType()->isVariableArrayType())
1371     return 0;
1372 
1373   return SubExpr;
1374 }
1375 
1376 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1377   // The index must always be an integer, which is not an aggregate.  Emit it.
1378   llvm::Value *Idx = EmitScalarExpr(E->getIdx());
1379   QualType IdxTy  = E->getIdx()->getType();
1380   bool IdxSigned = IdxTy->isSignedIntegerType();
1381 
1382   // If the base is a vector type, then we are forming a vector element lvalue
1383   // with this subscript.
1384   if (E->getBase()->getType()->isVectorType()) {
1385     // Emit the vector as an lvalue to get its address.
1386     LValue LHS = EmitLValue(E->getBase());
1387     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
1388     Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
1389     return LValue::MakeVectorElt(LHS.getAddress(), Idx,
1390                                  E->getBase()->getType().getCVRQualifiers());
1391   }
1392 
1393   // Extend or truncate the index type to the pointer width (32 or 64 bits).
1394   if (!Idx->getType()->isIntegerTy(LLVMPointerWidth))
1395     Idx = Builder.CreateIntCast(Idx, IntPtrTy,
1396                                 IdxSigned, "idxprom");
1397 
1398   // FIXME: Once LLVM implements object-size checking, this check can be removed.
1399   if (CatchUndefined) {
1400     if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
1401       if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
1402         if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1403           if (const ConstantArrayType *CAT
1404               = getContext().getAsConstantArrayType(DRE->getType())) {
1405             llvm::APInt Size = CAT->getSize();
1406             llvm::BasicBlock *Cont = createBasicBlock("cont");
1407             Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
1408                                   llvm::ConstantInt::get(Idx->getType(), Size)),
1409                                  Cont, getTrapBB());
1410             EmitBlock(Cont);
1411           }
1412         }
1413       }
1414     }
1415   }
1416 
1417   // We know that the pointer points to a type of the correct size, unless the
1418   // pointee is a VLA or an Objective-C interface.
1419   llvm::Value *Address = 0;
1420   if (const VariableArrayType *VAT =
1421         getContext().getAsVariableArrayType(E->getType())) {
1422     llvm::Value *VLASize = GetVLASize(VAT);
1423 
1424     Idx = Builder.CreateMul(Idx, VLASize);
1425 
1426     QualType BaseType = getContext().getBaseElementType(VAT);
1427 
1428     CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
1429     Idx = Builder.CreateUDiv(Idx,
1430                              llvm::ConstantInt::get(Idx->getType(),
1431                                  BaseTypeSize.getQuantity()));
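    // Illustrative example: for "int a[n][m];", "a[i]" has the VLA type int[m],
    // so Idx becomes i * sizeof(int[m]) / sizeof(int) == i * m, the number of
    // base elements by which the GEP below advances the decayed base pointer.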
1432 
1433     // The base must be a pointer, which is not an aggregate.  Emit it.
1434     llvm::Value *Base = EmitScalarExpr(E->getBase());
1435 
1436     Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
1437   } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
1438     // Indexing over an interface, as in "NSString *P; P[4];"
1439     llvm::Value *InterfaceSize =
1440       llvm::ConstantInt::get(Idx->getType(),
1441           getContext().getTypeSizeInChars(OIT).getQuantity());
1442 
1443     Idx = Builder.CreateMul(Idx, InterfaceSize);
1444 
1445     const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
1446 
1447     // The base must be a pointer, which is not an aggregate.  Emit it.
1448     llvm::Value *Base = EmitScalarExpr(E->getBase());
1449     Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
1450                                 Idx, "arrayidx");
1451     Address = Builder.CreateBitCast(Address, Base->getType());
1452   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
1453     // If this is A[i] where A is an array, the frontend will have decayed the
1454     // base to an ArrayToPointerDecay implicit cast.  While correct, it is
1455     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing the decay and
1456     // then a "gep x, i" here.  Emit a single "gep A, 0, i" instead.
1457     assert(Array->getType()->isArrayType() &&
1458            "Array to pointer decay must have array source type!");
1459     llvm::Value *ArrayPtr = EmitLValue(Array).getAddress();
1460     llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
1461     llvm::Value *Args[] = { Zero, Idx };
1462 
1463     Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx");
1464   } else {
1465     // The base must be a pointer, which is not an aggregate.  Emit it.
1466     llvm::Value *Base = EmitScalarExpr(E->getBase());
1467     Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
1468   }
1469 
1470   QualType T = E->getBase()->getType()->getPointeeType();
1471   assert(!T.isNull() &&
1472          "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
1473 
1474   LValue LV = MakeAddrLValue(Address, T);
1475   LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
1476 
1477   if (getContext().getLangOptions().ObjC1 &&
1478       getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
1479     LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1480     setObjCGCLValueClass(getContext(), E, LV);
1481   }
1482   return LV;
1483 }
1484 
1485 static
1486 llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
1487                                        llvm::SmallVector<unsigned, 4> &Elts) {
1488   llvm::SmallVector<llvm::Constant*, 4> CElts;
1489 
1490   const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
1491   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
1492     CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
1493 
1494   return llvm::ConstantVector::get(&CElts[0], CElts.size());
1495 }
1496 
1497 LValue CodeGenFunction::
1498 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
1499   // Emit the base vector as an l-value.
1500   LValue Base;
1501 
1502   // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1503   if (E->isArrow()) {
1504     // If it is a pointer to a vector, emit the address and form an lvalue with
1505     // it.
1506     llvm::Value *Ptr = EmitScalarExpr(E->getBase());
1507     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
1508     Base = MakeAddrLValue(Ptr, PT->getPointeeType());
1509     Base.getQuals().removeObjCGCAttr();
1510   } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
1511     // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1512     // emit the base as an lvalue.
1513     assert(E->getBase()->getType()->isVectorType());
1514     Base = EmitLValue(E->getBase());
1515   } else {
1516     // Otherwise, the base is a normal rvalue (as in (V+V).x), so emit it as such.
1517     assert(E->getBase()->getType()->getAs<VectorType>() &&
1518            "Result must be a vector");
1519     llvm::Value *Vec = EmitScalarExpr(E->getBase());
1520 
1521     // Store the vector to memory (because LValue wants an address).
1522     llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
1523     Builder.CreateStore(Vec, VecMem);
1524     Base = MakeAddrLValue(VecMem, E->getBase()->getType());
1525   }
1526 
1527   // Encode the element access list into a vector of unsigned indices.
1528   llvm::SmallVector<unsigned, 4> Indices;
1529   E->getEncodedElementAccess(Indices);
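  // Illustrative example: for an ext_vector_type(4) float vector v, the access
  // "v.yzx" encodes to the index list {1, 2, 0}.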
1530 
1531   if (Base.isSimple()) {
1532     llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
1533     return LValue::MakeExtVectorElt(Base.getAddress(), CV,
1534                                     Base.getVRQualifiers());
1535   }
1536   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
1537 
1538   llvm::Constant *BaseElts = Base.getExtVectorElts();
1539   llvm::SmallVector<llvm::Constant *, 4> CElts;
1540 
1541   for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
1542     if (isa<llvm::ConstantAggregateZero>(BaseElts))
1543       CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
1544     else
1545       CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
1546   }
1547   llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
1548   return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
1549                                   Base.getVRQualifiers());
1550 }
1551 
1552 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
1553   bool isNonGC = false;
1554   Expr *BaseExpr = E->getBase();
1555   llvm::Value *BaseValue = NULL;
1556   Qualifiers BaseQuals;
1557 
1558   // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
1559   if (E->isArrow()) {
1560     BaseValue = EmitScalarExpr(BaseExpr);
1561     const PointerType *PTy =
1562       BaseExpr->getType()->getAs<PointerType>();
1563     BaseQuals = PTy->getPointeeType().getQualifiers();
1564   } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
1565              isa<ObjCImplicitSetterGetterRefExpr>(
1566                BaseExpr->IgnoreParens())) {
1567     RValue RV = EmitObjCPropertyGet(BaseExpr);
1568     BaseValue = RV.getAggregateAddr();
1569     BaseQuals = BaseExpr->getType().getQualifiers();
1570   } else {
1571     LValue BaseLV = EmitLValue(BaseExpr);
1572     if (BaseLV.isNonGC())
1573       isNonGC = true;
1574     // FIXME: this isn't right for bitfields.
1575     BaseValue = BaseLV.getAddress();
1576     QualType BaseTy = BaseExpr->getType();
1577     BaseQuals = BaseTy.getQualifiers();
1578   }
1579 
1580   NamedDecl *ND = E->getMemberDecl();
1581   if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
1582     LValue LV = EmitLValueForField(BaseValue, Field,
1583                                    BaseQuals.getCVRQualifiers());
1584     LV.setNonGC(isNonGC);
1585     setObjCGCLValueClass(getContext(), E, LV);
1586     return LV;
1587   }
1588 
1589   if (VarDecl *VD = dyn_cast<VarDecl>(ND))
1590     return EmitGlobalVarDeclLValue(*this, E, VD);
1591 
1592   if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
1593     return EmitFunctionDeclLValue(*this, E, FD);
1594 
1595   assert(false && "Unhandled member declaration!");
1596   return LValue();
1597 }
1598 
1599 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
1600                                               const FieldDecl *Field,
1601                                               unsigned CVRQualifiers) {
1602   const CGRecordLayout &RL =
1603     CGM.getTypes().getCGRecordLayout(Field->getParent());
1604   const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
1605   return LValue::MakeBitfield(BaseValue, Info,
1606                              Field->getType().getCVRQualifiers()|CVRQualifiers);
1607 }
1608 
1609 /// EmitLValueForAnonRecordField - Given that the field is a member of
1610 /// an anonymous struct or union buried inside a record, and given
1611 /// that the base value is a pointer to the enclosing record, derive
1612 /// an lvalue for the ultimate field.
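/// Illustrative example: given "struct S { struct { int x; }; } s;", an access
/// to "s.x" first forms the lvalue of the implicit anonymous-struct member and
/// then of the field x inside it.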
1613 LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
1614                                                      const FieldDecl *Field,
1615                                                      unsigned CVRQualifiers) {
1616   llvm::SmallVector<const FieldDecl *, 8> Path;
1617   Path.push_back(Field);
1618 
1619   while (Field->getParent()->isAnonymousStructOrUnion()) {
1620     const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject();
1621     if (!isa<FieldDecl>(VD)) break;
1622     Field = cast<FieldDecl>(VD);
1623     Path.push_back(Field);
1624   }
1625 
1626   llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator
1627     I = Path.rbegin(), E = Path.rend();
1628   while (true) {
1629     LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers);
1630     if (++I == E) return LV;
1631 
1632     assert(LV.isSimple());
1633     BaseValue = LV.getAddress();
1634     CVRQualifiers |= LV.getVRQualifiers();
1635   }
1636 }
1637 
1638 LValue CodeGenFunction::EmitLValueForField(llvm::Value *BaseValue,
1639                                            const FieldDecl *Field,
1640                                            unsigned CVRQualifiers) {
1641   if (Field->isBitField())
1642     return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
1643 
1644   const CGRecordLayout &RL =
1645     CGM.getTypes().getCGRecordLayout(Field->getParent());
1646   unsigned idx = RL.getLLVMFieldNo(Field);
1647   llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
1648 
1649   // Match union field type.
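  // (The LLVM struct for a union typically models only one member plus padding,
  // so other members are accessed through a bitcast of the field address.)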
1650   if (Field->getParent()->isUnion()) {
1651     const llvm::Type *FieldTy =
1652       CGM.getTypes().ConvertTypeForMem(Field->getType());
1653     const llvm::PointerType *BaseTy =
1654       cast<llvm::PointerType>(BaseValue->getType());
1655     unsigned AS = BaseTy->getAddressSpace();
1656     V = Builder.CreateBitCast(V,
1657                               llvm::PointerType::get(FieldTy, AS),
1658                               "tmp");
1659   }
1660   if (Field->getType()->isReferenceType())
1661     V = Builder.CreateLoad(V, "tmp");
1662 
1663   unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
1664   LValue LV = MakeAddrLValue(V, Field->getType(), Alignment);
1665   LV.getQuals().addCVRQualifiers(CVRQualifiers);
1666 
1667   // __weak attribute on a field is ignored.
1668   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
1669     LV.getQuals().removeObjCGCAttr();
1670 
1671   return LV;
1672 }
1673 
1674 LValue
1675 CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
1676                                                   const FieldDecl *Field,
1677                                                   unsigned CVRQualifiers) {
1678   QualType FieldType = Field->getType();
1679 
1680   if (!FieldType->isReferenceType())
1681     return EmitLValueForField(BaseValue, Field, CVRQualifiers);
1682 
1683   const CGRecordLayout &RL =
1684     CGM.getTypes().getCGRecordLayout(Field->getParent());
1685   unsigned idx = RL.getLLVMFieldNo(Field);
1686   llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
1687 
1688   assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
1689 
1690   unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
1691   return MakeAddrLValue(V, FieldType, Alignment);
1692 }
1693 
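// Illustrative example: a C99 compound literal such as "(struct Point){1, 2}"
// is materialized into a stack temporary, and the resulting lvalue refers to
// that slot.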
1694 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
1695   llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
1696   const Expr *InitExpr = E->getInitializer();
1697   LValue Result = MakeAddrLValue(DeclPtr, E->getType());
1698 
1699   EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false, /*Init*/ true);
1700 
1701   return Result;
1702 }
1703 
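// Illustrative example: in C++ a conditional operator can itself be an lvalue,
// as in "(cond ? a : b) = 0;"; each arm is emitted on its own path and the
// selected address is merged through a temporary below.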
1704 LValue
1705 CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator *E) {
1706   if (E->isLvalue(getContext()) == Expr::LV_Valid) {
1707     if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
1708       Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
1709       if (Live)
1710         return EmitLValue(Live);
1711     }
1712 
1713     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1714     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1715     llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");
1716 
1717     if (E->getLHS())
1718       EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
1719     else {
1720       Expr *save = E->getSAVE();
1721       assert(save && "EmitConditionalOperatorLValue - save is null");
1722       // Intentionally not a direct assignment into ConditionalSaveLValueExprs[save].
1723       LValue SaveVal = EmitLValue(save);
1724       ConditionalSaveLValueExprs[save] = SaveVal;
1725       EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
1726     }
1727 
1728     // Any temporaries created here are conditional.
1729     BeginConditionalBranch();
1730     EmitBlock(LHSBlock);
1731     LValue LHS = EmitLValue(E->getTrueExpr());
1732 
1733     EndConditionalBranch();
1734 
1735     if (!LHS.isSimple())
1736       return EmitUnsupportedLValue(E, "conditional operator");
1737 
1738     // FIXME: We shouldn't need an alloca for this.
1739     llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
1740     Builder.CreateStore(LHS.getAddress(), Temp);
1741     EmitBranch(ContBlock);
1742 
1743     // Any temporaries created here are conditional.
1744     BeginConditionalBranch();
1745     EmitBlock(RHSBlock);
1746     LValue RHS = EmitLValue(E->getRHS());
1747     EndConditionalBranch();
1748     if (!RHS.isSimple())
1749       return EmitUnsupportedLValue(E, "conditional operator");
1750 
1751     Builder.CreateStore(RHS.getAddress(), Temp);
1752     EmitBranch(ContBlock);
1753 
1754     EmitBlock(ContBlock);
1755 
1756     Temp = Builder.CreateLoad(Temp, "lv");
1757     return MakeAddrLValue(Temp, E->getType());
1758   }
1759 
1760   // ?: here should be an aggregate.
1761   assert((hasAggregateLLVMType(E->getType()) &&
1762           !E->getType()->isAnyComplexType()) &&
1763          "Unexpected conditional operator!");
1764 
1765   return EmitAggExprToLValue(E);
1766 }
1767 
1768 /// EmitCastLValue - Casts are never lvalues unless the cast is a dynamic_cast.
1769 /// If the cast is a dynamic_cast, we produce the usual lvalue result.
1770 /// Otherwise, if a cast is needed by the code generator in an lvalue context,
1771 /// it must mean that we need the address of an aggregate in order to
1772 /// access one of its fields.  This can happen for all the reasons that casts
1773 /// are permitted with an aggregate result, including noop aggregate casts and
1774 /// casts from scalar to union.
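/// Illustrative example: an lvalue derived-to-base conversion, as in
/// "Derived d; Base &b = d;", is handled by the CK_DerivedToBase case below
/// via GetAddressOfBaseClass.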
1775 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
1776   switch (E->getCastKind()) {
1777   case CK_ToVoid:
1778     return EmitUnsupportedLValue(E, "unexpected cast lvalue");
1779 
1780   case CK_NoOp:
1781     if (E->getSubExpr()->Classify(getContext()).getKind()
1782                                           != Expr::Classification::CL_PRValue) {
1783       LValue LV = EmitLValue(E->getSubExpr());
1784       if (LV.isPropertyRef() || LV.isKVCRef()) {
1785         QualType QT = E->getSubExpr()->getType();
1786         RValue RV =
1787           LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
1788                              : EmitLoadOfKVCRefLValue(LV, QT);
1789         assert(!RV.isScalar() && "EmitCastLValue-scalar cast of property ref");
1790         llvm::Value *V = RV.getAggregateAddr();
1791         return MakeAddrLValue(V, QT);
1792       }
1793       return LV;
1794     }
1795     // Fall through to synthesize a temporary.
1796 
1797   case CK_Unknown:
1798   case CK_BitCast:
1799   case CK_ArrayToPointerDecay:
1800   case CK_FunctionToPointerDecay:
1801   case CK_NullToMemberPointer:
1802   case CK_IntegralToPointer:
1803   case CK_PointerToIntegral:
1804   case CK_VectorSplat:
1805   case CK_IntegralCast:
1806   case CK_IntegralToFloating:
1807   case CK_FloatingToIntegral:
1808   case CK_FloatingCast:
1809   case CK_DerivedToBaseMemberPointer:
1810   case CK_BaseToDerivedMemberPointer:
1811   case CK_MemberPointerToBoolean:
1812   case CK_AnyPointerToBlockPointerCast: {
1813     // These casts only produce lvalues when we're binding a reference to a
1814     // temporary realized from a (converted) pure rvalue. Emit the expression
1815     // as a value, copy it into a temporary, and return an lvalue referring to
1816     // that temporary.
1817     llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
1818     EmitAnyExprToMem(E, V, false, false);
1819     return MakeAddrLValue(V, E->getType());
1820   }
1821 
1822   case CK_Dynamic: {
1823     LValue LV = EmitLValue(E->getSubExpr());
1824     llvm::Value *V = LV.getAddress();
1825     const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
1826     return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
1827   }
1828 
1829   case CK_ConstructorConversion:
1830   case CK_UserDefinedConversion:
1831   case CK_AnyPointerToObjCPointerCast:
1832     return EmitLValue(E->getSubExpr());
1833 
1834   case CK_UncheckedDerivedToBase:
1835   case CK_DerivedToBase: {
1836     const RecordType *DerivedClassTy =
1837       E->getSubExpr()->getType()->getAs<RecordType>();
1838     CXXRecordDecl *DerivedClassDecl =
1839       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
1840 
1841     LValue LV = EmitLValue(E->getSubExpr());
1842     llvm::Value *This;
1843     if (LV.isPropertyRef() || LV.isKVCRef()) {
1844       QualType QT = E->getSubExpr()->getType();
1845       RValue RV =
1846         LV.isPropertyRef() ? EmitLoadOfPropertyRefLValue(LV, QT)
1847                            : EmitLoadOfKVCRefLValue(LV, QT);
1848       assert (!RV.isScalar() && "EmitCastLValue");
1849       This = RV.getAggregateAddr();
1850     }
1851     else
1852       This = LV.getAddress();
1853 
1854     // Perform the derived-to-base conversion
1855     llvm::Value *Base =
1856       GetAddressOfBaseClass(This, DerivedClassDecl,
1857                             E->path_begin(), E->path_end(),
1858                             /*NullCheckValue=*/false);
1859 
1860     return MakeAddrLValue(Base, E->getType());
1861   }
1862   case CK_ToUnion:
1863     return EmitAggExprToLValue(E);
1864   case CK_BaseToDerived: {
1865     const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
1866     CXXRecordDecl *DerivedClassDecl =
1867       cast<CXXRecordDecl>(DerivedClassTy->getDecl());
1868 
1869     LValue LV = EmitLValue(E->getSubExpr());
1870 
1871     // Perform the base-to-derived conversion
1872     llvm::Value *Derived =
1873       GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
1874                                E->path_begin(), E->path_end(),
1875                                /*NullCheckValue=*/false);
1876 
1877     return MakeAddrLValue(Derived, E->getType());
1878   }
1879   case CK_LValueBitCast: {
1880     // This must be a reinterpret_cast (or c-style equivalent).
1881     const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
1882 
1883     LValue LV = EmitLValue(E->getSubExpr());
1884     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
1885                                            ConvertType(CE->getTypeAsWritten()));
1886     return MakeAddrLValue(V, E->getType());
1887   }
1888   case CK_ObjCObjectLValueCast: {
1889     LValue LV = EmitLValue(E->getSubExpr());
1890     QualType ToType = getContext().getLValueReferenceType(E->getType());
1891     llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
1892                                            ConvertType(ToType));
1893     return MakeAddrLValue(V, E->getType());
1894   }
1895   }
1896 
1897   llvm_unreachable("Unhandled lvalue cast kind?");
1898 }
1899 
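// Illustrative example: "T()" for a scalar T (a CXXScalarValueInitExpr) emitted
// in an lvalue context is given a zero-initialized stack temporary here.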
1900 LValue CodeGenFunction::EmitNullInitializationLValue(
1901                                               const CXXScalarValueInitExpr *E) {
1902   QualType Ty = E->getType();
1903   LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
1904   EmitNullInitialization(LV.getAddress(), Ty);
1905   return LV;
1906 }
1907 
1908 //===--------------------------------------------------------------------===//
1909 //                             Expression Emission
1910 //===--------------------------------------------------------------------===//
1911 
1912 
1913 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
1914                                      ReturnValueSlot ReturnValue) {
1915   // Builtins never have block type, so it is safe to handle block calls first.
1916   if (E->getCallee()->getType()->isBlockPointerType())
1917     return EmitBlockCallExpr(E, ReturnValue);
1918 
1919   if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
1920     return EmitCXXMemberCallExpr(CE, ReturnValue);
1921 
1922   const Decl *TargetDecl = 0;
1923   if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
1924     if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
1925       TargetDecl = DRE->getDecl();
1926       if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
1927         if (unsigned builtinID = FD->getBuiltinID())
1928           return EmitBuiltinExpr(FD, builtinID, E);
1929     }
1930   }
1931 
1932   if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
1933     if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
1934       return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
1935 
1936   if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
1937     // C++ [expr.pseudo]p1:
1938     //   The result shall only be used as the operand for the function call
1939     //   operator (), and the result of such a call has type void. The only
1940     //   effect is the evaluation of the postfix-expression before the dot or
1941     //   arrow.
1942     EmitScalarExpr(E->getCallee());
1943     return RValue::get(0);
1944   }
1945 
1946   llvm::Value *Callee = EmitScalarExpr(E->getCallee());
1947   return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
1948                   E->arg_begin(), E->arg_end(), TargetDecl);
1949 }
1950 
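// Illustrative example: in C++ an assignment is itself an lvalue, so code such
// as "(a = b) = c;" (and, via the comma case below, "(x, y) = 0;") reaches
// this function.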
1951 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
1952   // Comma expressions just emit their LHS then their RHS as an l-value.
1953   if (E->getOpcode() == BO_Comma) {
1954     EmitAnyExpr(E->getLHS());
1955     EnsureInsertPoint();
1956     return EmitLValue(E->getRHS());
1957   }
1958 
1959   if (E->getOpcode() == BO_PtrMemD ||
1960       E->getOpcode() == BO_PtrMemI)
1961     return EmitPointerToDataMemberBinaryExpr(E);
1962 
1963   // We can only produce an l-value for binary operator expressions that are
1964   // simple assignments.
1965   if (E->getOpcode() != BO_Assign)
1966     return EmitUnsupportedLValue(E, "binary l-value expression");
1967 
1968   if (!hasAggregateLLVMType(E->getType())) {
1969     // Emit the LHS as an l-value.
1970     LValue LV = EmitLValue(E->getLHS());
1971     // Store the value through the l-value.
1972     EmitStoreThroughLValue(EmitAnyExpr(E->getRHS()), LV, E->getType());
1973     return LV;
1974   }
1975 
1976   return EmitAggExprToLValue(E);
1977 }
1978 
1979 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
1980   RValue RV = EmitCallExpr(E);
1981 
1982   if (!RV.isScalar())
1983     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
1984 
1985   assert(E->getCallReturnType()->isReferenceType() &&
1986          "Can't have a scalar return unless the return type is a "
1987          "reference type!");
1988 
1989   return MakeAddrLValue(RV.getScalarVal(), E->getType());
1990 }
1991 
1992 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
1993   // FIXME: This shouldn't require another copy.
1994   return EmitAggExprToLValue(E);
1995 }
1996 
1997 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
1998   assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
1999          && "binding l-value to type which needs a temporary");
2000   AggValueSlot Slot = CreateAggTemp(E->getType(), "tmp");
2001   EmitCXXConstructExpr(E, Slot);
2002   return MakeAddrLValue(Slot.getAddr(), E->getType());
2003 }
2004 
2005 LValue
2006 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
2007   return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
2008 }
2009 
2010 LValue
2011 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
2012   AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
2013   Slot.setLifetimeExternallyManaged();
2014   EmitAggExpr(E->getSubExpr(), Slot);
2015   EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
2016   return MakeAddrLValue(Slot.getAddr(), E->getType());
2017 }
2018 
2019 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
2020   RValue RV = EmitObjCMessageExpr(E);
2021 
2022   if (!RV.isScalar())
2023     return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2024 
2025   assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
2026          "Can't have a scalar return unless the return type is a "
2027          "reference type!");
2028 
2029   return MakeAddrLValue(RV.getScalarVal(), E->getType());
2030 }
2031 
2032 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
2033   llvm::Value *V =
2034     CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
2035   return MakeAddrLValue(V, E->getType());
2036 }
2037 
2038 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2039                                              const ObjCIvarDecl *Ivar) {
2040   return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
2041 }
2042 
2043 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
2044                                           llvm::Value *BaseValue,
2045                                           const ObjCIvarDecl *Ivar,
2046                                           unsigned CVRQualifiers) {
2047   return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
2048                                                    Ivar, CVRQualifiers);
2049 }
2050 
2051 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
2052   // FIXME: A lot of the code below could be shared with EmitMemberExpr.
2053   llvm::Value *BaseValue = 0;
2054   const Expr *BaseExpr = E->getBase();
2055   Qualifiers BaseQuals;
2056   QualType ObjectTy;
2057   if (E->isArrow()) {
2058     BaseValue = EmitScalarExpr(BaseExpr);
2059     ObjectTy = BaseExpr->getType()->getPointeeType();
2060     BaseQuals = ObjectTy.getQualifiers();
2061   } else {
2062     LValue BaseLV = EmitLValue(BaseExpr);
2063     // FIXME: this isn't right for bitfields.
2064     BaseValue = BaseLV.getAddress();
2065     ObjectTy = BaseExpr->getType();
2066     BaseQuals = ObjectTy.getQualifiers();
2067   }
2068 
2069   LValue LV =
2070     EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
2071                       BaseQuals.getCVRQualifiers());
2072   setObjCGCLValueClass(getContext(), E, LV);
2073   return LV;
2074 }
2075 
2076 LValue
2077 CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
2078   // This is a special l-value that just issues sends when we load or store
2079   // through it.
2080   return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
2081 }
2082 
2083 LValue CodeGenFunction::EmitObjCKVCRefLValue(
2084                                 const ObjCImplicitSetterGetterRefExpr *E) {
2085   // This is a special l-value that just issues sends when we load or store
2086   // through it.
2087   return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
2088 }
2089 
2090 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
2091   // Can only get an l-value for a statement expression returning an aggregate type.
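  // Illustrative example: a GNU statement expression such as
  // "({ struct S tmp = {0}; tmp; })" used where an aggregate lvalue is needed.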
2092   RValue RV = EmitAnyExprToTemp(E);
2093   return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
2094 }
2095 
2096 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
2097                                  ReturnValueSlot ReturnValue,
2098                                  CallExpr::const_arg_iterator ArgBeg,
2099                                  CallExpr::const_arg_iterator ArgEnd,
2100                                  const Decl *TargetDecl) {
2101   // Get the actual function type. The callee type will always be a pointer to
2102   // function type or a block pointer type.
2103   assert(CalleeType->isFunctionPointerType() &&
2104          "Call must have function pointer type!");
2105 
2106   CalleeType = getContext().getCanonicalType(CalleeType);
2107 
2108   const FunctionType *FnType
2109     = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
2110   QualType ResultType = FnType->getResultType();
2111 
2112   CallArgList Args;
2113   EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
2114 
2115   return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
2116                   Callee, ReturnValue, Args, TargetDecl);
2117 }
2118 
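// Illustrative example: ".*" / "->*" with a pointer to data member, as in
// "int S::*pm = &S::x; s.*pm = 1;", computes the member's address from the
// object base and the member-pointer offset via the C++ ABI.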
2119 LValue CodeGenFunction::
2120 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
2121   llvm::Value *BaseV;
2122   if (E->getOpcode() == BO_PtrMemI)
2123     BaseV = EmitScalarExpr(E->getLHS());
2124   else
2125     BaseV = EmitLValue(E->getLHS()).getAddress();
2126 
2127   llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
2128 
2129   const MemberPointerType *MPT
2130     = E->getRHS()->getType()->getAs<MemberPointerType>();
2131 
2132   llvm::Value *AddV =
2133     CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
2134 
2135   return MakeAddrLValue(AddV, MPT->getPointeeType());
2136 }
2137 
2138