//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Intrinsics.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block (at AllocaInsertPt), so the temporary is allocated exactly once no
/// matter how many times the containing code runs.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const llvm::Twine &Name) {
  // If value names are being discarded, avoid materializing the Twine at all.
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

/// InitTempAlloca - Store Init into Var, placing the store immediately after
/// the alloca insertion point so the initialization is performed in the entry
/// block, ahead of any user code.
void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

/// CreateIRTemp - Create a temporary using the IR-level (register) lowering of
/// Ty, aligned to the type's ABI alignment.
llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
                                           const llvm::Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// CreateMemTemp - Like CreateIRTemp, but uses the in-memory lowering of Ty
/// (ConvertTypeForMem), which may differ from the register form (e.g. bool).
llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty,
                                            const llvm::Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (E->getType()->isMemberFunctionPointerType()) {
    // Member function pointers are aggregates; evaluate to memory and test
    // the function-pointer field against null.
    LValue LV = EmitAggExprToLValue(E);

    // Get the pointer.
    llvm::Value *FuncPtr = Builder.CreateStructGEP(LV.getAddress(), 0,
                                                   "src.ptr");
    FuncPtr = Builder.CreateLoad(FuncPtr);

    llvm::Value *IsNotNull =
      Builder.CreateICmpNE(FuncPtr,
                           llvm::Constant::getNullValue(FuncPtr->getType()),
                           "tobool");

    return IsNotNull;
  }
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the
/// result should be returned.
94 RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, 95 bool IsAggLocVolatile, bool IgnoreResult, 96 bool IsInitializer) { 97 if (!hasAggregateLLVMType(E->getType())) 98 return RValue::get(EmitScalarExpr(E, IgnoreResult)); 99 else if (E->getType()->isAnyComplexType()) 100 return RValue::getComplex(EmitComplexExpr(E, false, false, 101 IgnoreResult, IgnoreResult)); 102 103 EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer); 104 return RValue::getAggregate(AggLoc, IsAggLocVolatile); 105 } 106 107 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will 108 /// always be accessible even if no aggregate location is provided. 109 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, 110 bool IsAggLocVolatile, 111 bool IsInitializer) { 112 llvm::Value *AggLoc = 0; 113 114 if (hasAggregateLLVMType(E->getType()) && 115 !E->getType()->isAnyComplexType()) 116 AggLoc = CreateMemTemp(E->getType(), "agg.tmp"); 117 return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false, 118 IsInitializer); 119 } 120 121 /// EmitAnyExprToMem - Evaluate an expression into a given memory 122 /// location. 123 void CodeGenFunction::EmitAnyExprToMem(const Expr *E, 124 llvm::Value *Location, 125 bool IsLocationVolatile, 126 bool IsInit) { 127 if (E->getType()->isComplexType()) 128 EmitComplexExprIntoAddr(E, Location, IsLocationVolatile); 129 else if (hasAggregateLLVMType(E->getType())) 130 EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit); 131 else { 132 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false)); 133 LValue LV = LValue::MakeAddr(Location, MakeQualifiers(E->getType())); 134 EmitStoreThroughLValue(RV, LV, E->getType()); 135 } 136 } 137 138 /// \brief An adjustment to be made to the temporary created when emitting a 139 /// reference binding, which accesses a particular subobject of that temporary. 
140 struct SubobjectAdjustment { 141 enum { DerivedToBaseAdjustment, FieldAdjustment } Kind; 142 143 union { 144 struct { 145 const CXXBaseSpecifierArray *BasePath; 146 const CXXRecordDecl *DerivedClass; 147 } DerivedToBase; 148 149 struct { 150 FieldDecl *Field; 151 unsigned CVRQualifiers; 152 } Field; 153 }; 154 155 SubobjectAdjustment(const CXXBaseSpecifierArray *BasePath, 156 const CXXRecordDecl *DerivedClass) 157 : Kind(DerivedToBaseAdjustment) 158 { 159 DerivedToBase.BasePath = BasePath; 160 DerivedToBase.DerivedClass = DerivedClass; 161 } 162 163 SubobjectAdjustment(FieldDecl *Field, unsigned CVRQualifiers) 164 : Kind(FieldAdjustment) 165 { 166 this->Field.Field = Field; 167 this->Field.CVRQualifiers = CVRQualifiers; 168 } 169 }; 170 171 static llvm::Value * 172 CreateReferenceTemporary(CodeGenFunction& CGF, QualType Type, 173 const NamedDecl *InitializedDecl) { 174 if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) { 175 if (VD->hasGlobalStorage()) { 176 llvm::SmallString<256> Name; 177 CGF.CGM.getMangleContext().mangleReferenceTemporary(VD, Name); 178 179 const llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type); 180 181 // Create the reference temporary. 
182 llvm::GlobalValue *RefTemp = 183 new llvm::GlobalVariable(CGF.CGM.getModule(), 184 RefTempTy, /*isConstant=*/false, 185 llvm::GlobalValue::InternalLinkage, 186 llvm::Constant::getNullValue(RefTempTy), 187 Name.str()); 188 return RefTemp; 189 } 190 } 191 192 return CGF.CreateMemTemp(Type, "ref.tmp"); 193 } 194 195 static llvm::Value * 196 EmitExprForReferenceBinding(CodeGenFunction& CGF, const Expr* E, 197 llvm::Value *&ReferenceTemporary, 198 const CXXDestructorDecl *&ReferenceTemporaryDtor, 199 const NamedDecl *InitializedDecl) { 200 if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E)) 201 E = DAE->getExpr(); 202 203 if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) { 204 CodeGenFunction::CXXTemporariesCleanupScope Scope(CGF); 205 206 return EmitExprForReferenceBinding(CGF, TE->getSubExpr(), 207 ReferenceTemporary, 208 ReferenceTemporaryDtor, 209 InitializedDecl); 210 } 211 212 RValue RV; 213 if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) { 214 // Emit the expression as an lvalue. 215 LValue LV = CGF.EmitLValue(E); 216 217 if (LV.isSimple()) 218 return LV.getAddress(); 219 220 // We have to load the lvalue. 
221 RV = CGF.EmitLoadOfLValue(LV, E->getType()); 222 } else { 223 QualType ResultTy = E->getType(); 224 225 llvm::SmallVector<SubobjectAdjustment, 2> Adjustments; 226 while (true) { 227 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) { 228 E = PE->getSubExpr(); 229 continue; 230 } 231 232 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { 233 if ((CE->getCastKind() == CastExpr::CK_DerivedToBase || 234 CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase) && 235 E->getType()->isRecordType()) { 236 E = CE->getSubExpr(); 237 CXXRecordDecl *Derived 238 = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl()); 239 Adjustments.push_back(SubobjectAdjustment(&CE->getBasePath(), 240 Derived)); 241 continue; 242 } 243 244 if (CE->getCastKind() == CastExpr::CK_NoOp) { 245 E = CE->getSubExpr(); 246 continue; 247 } 248 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { 249 if (ME->getBase()->isLvalue(CGF.getContext()) != Expr::LV_Valid && 250 ME->getBase()->getType()->isRecordType()) { 251 if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) { 252 E = ME->getBase(); 253 Adjustments.push_back(SubobjectAdjustment(Field, 254 E->getType().getCVRQualifiers())); 255 continue; 256 } 257 } 258 } 259 260 // Nothing changed. 261 break; 262 } 263 264 // Create a reference temporary if necessary. 265 if (CGF.hasAggregateLLVMType(E->getType()) && 266 !E->getType()->isAnyComplexType()) 267 ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(), 268 InitializedDecl); 269 270 RV = CGF.EmitAnyExpr(E, ReferenceTemporary, /*IsAggLocVolatile=*/false, 271 /*IgnoreResult=*/false, InitializedDecl); 272 273 if (InitializedDecl) { 274 // Get the destructor for the reference temporary. 
275 if (const RecordType *RT = E->getType()->getAs<RecordType>()) { 276 CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl()); 277 if (!ClassDecl->hasTrivialDestructor()) 278 ReferenceTemporaryDtor = ClassDecl->getDestructor(CGF.getContext()); 279 } 280 } 281 282 // Check if need to perform derived-to-base casts and/or field accesses, to 283 // get from the temporary object we created (and, potentially, for which we 284 // extended the lifetime) to the subobject we're binding the reference to. 285 if (!Adjustments.empty()) { 286 llvm::Value *Object = RV.getAggregateAddr(); 287 for (unsigned I = Adjustments.size(); I != 0; --I) { 288 SubobjectAdjustment &Adjustment = Adjustments[I-1]; 289 switch (Adjustment.Kind) { 290 case SubobjectAdjustment::DerivedToBaseAdjustment: 291 Object = 292 CGF.GetAddressOfBaseClass(Object, 293 Adjustment.DerivedToBase.DerivedClass, 294 *Adjustment.DerivedToBase.BasePath, 295 /*NullCheckValue=*/false); 296 break; 297 298 case SubobjectAdjustment::FieldAdjustment: { 299 unsigned CVR = Adjustment.Field.CVRQualifiers; 300 LValue LV = 301 CGF.EmitLValueForField(Object, Adjustment.Field.Field, CVR); 302 if (LV.isSimple()) { 303 Object = LV.getAddress(); 304 break; 305 } 306 307 // For non-simple lvalues, we actually have to create a copy of 308 // the object we're binding to. 309 QualType T = Adjustment.Field.Field->getType().getNonReferenceType() 310 .getUnqualifiedType(); 311 Object = CreateReferenceTemporary(CGF, T, InitializedDecl); 312 LValue TempLV = LValue::MakeAddr(Object, 313 Qualifiers::fromCVRMask(CVR)); 314 CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV, T), TempLV, T); 315 break; 316 } 317 318 } 319 } 320 321 const llvm::Type *ResultPtrTy = CGF.ConvertType(ResultTy)->getPointerTo(); 322 return CGF.Builder.CreateBitCast(Object, ResultPtrTy, "temp"); 323 } 324 } 325 326 if (RV.isAggregate()) 327 return RV.getAggregateAddr(); 328 329 // Create a temporary variable that we can bind the reference to. 
330 ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(), 331 InitializedDecl); 332 333 if (RV.isScalar()) 334 CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary, 335 /*Volatile=*/false, E->getType()); 336 else 337 CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary, 338 /*Volatile=*/false); 339 return ReferenceTemporary; 340 } 341 342 RValue 343 CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, 344 const NamedDecl *InitializedDecl) { 345 llvm::Value *ReferenceTemporary = 0; 346 const CXXDestructorDecl *ReferenceTemporaryDtor = 0; 347 llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary, 348 ReferenceTemporaryDtor, 349 InitializedDecl); 350 351 if (!ReferenceTemporaryDtor) 352 return RValue::get(Value); 353 354 // Make sure to call the destructor for the reference temporary. 355 if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) { 356 if (VD->hasGlobalStorage()) { 357 llvm::Constant *DtorFn = 358 CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete); 359 CGF.EmitCXXGlobalDtorRegistration(DtorFn, 360 cast<llvm::Constant>(ReferenceTemporary)); 361 362 return RValue::get(Value); 363 } 364 } 365 366 { 367 DelayedCleanupBlock Scope(*this); 368 EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete, 369 /*ForVirtualBase=*/false, ReferenceTemporary); 370 371 // Make sure to jump to the exit block. 372 EmitBranch(Scope.getCleanupExitBlock()); 373 } 374 375 if (Exceptions) { 376 EHCleanupBlock Cleanup(*this); 377 EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete, 378 /*ForVirtualBase=*/false, ReferenceTemporary); 379 } 380 381 return RValue::get(Value); 382 } 383 384 385 /// getAccessedFieldNo - Given an encoded value and a result number, return the 386 /// input field number being accessed. 
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  // A zero-initialized aggregate encodes all fields as 0.
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

/// EmitCheck - Emit a -fcatch-undefined-behavior style bounds check: verify,
/// via llvm.objectsize, that at least Size bytes are accessible at Address,
/// branching to the trap block otherwise. No-op unless CatchUndefined is set.
void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  Address = Builder.CreateBitCast(Address, PtrToInt8Ty);

  const llvm::Type *IntPtrT = IntPtrTy;
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
  const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  // objectsize returns -1 when the size is unknown; skip the check then.
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}


/// EmitComplexPrePostIncDec - Emit ++/-- (pre or post) on a _Complex lvalue:
/// the real part is incremented/decremented by one, the imaginary part is
/// unchanged, and the updated value is stored back.
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// GetUndefRValue - Produce an RValue of undef of the given type; used as a
/// recovery value for unsupported or erroneous expressions.
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  if (hasAggregateLLVMType(Ty)) {
    // Aggregates are represented by an (undef) address.
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

/// EmitUnsupportedRValue - Report that E is unsupported and return a
/// placeholder undef rvalue so codegen can continue.
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

/// EmitUnsupportedLValue - Report that E is unsupported and return a
/// placeholder lvalue at an undef address.
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          MakeQualifiers(E->getType()));
}

/// EmitCheckedLValue - Emit an lvalue and, when -fcatch-undefined-behavior is
/// enabled, a bounds check that its storage covers the expression's type.
/// DeclRefExprs and bit-fields are skipped.
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  // Dispatch purely on the statement class; each case forwards to a
  // specialized emitter.
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXExprWithTemporariesClass:
    return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
  case Expr::CXXZeroInitValueExprClass:
    return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCImplicitSetterGetterRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));

  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  }
}

/// EmitLoadOfScalar - Load a scalar value of type Ty from Addr, normalizing
/// bool from its in-memory form to i1.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               QualType Ty) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
  if (Volatile)
    Load->setVolatile(true);

  // Bool can have different representation in memory than in registers.
  llvm::Value *V = Load;
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::getInt1Ty(VMContext))
      V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");

  return V;
}

/// EmitStoreOfScalar - Store a scalar value of type Ty to Addr, widening bool
/// from i1 to its in-memory representation first.
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, QualType Ty) {

  if (Ty->isBooleanType()) {
    // Bool can have different representation in memory than in registers.
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
  }
  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    //
    // FIXME: We shouldn't have to use isSingleValueType here.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    // Functions are not loaded; their address is the value.
    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    // Load the whole vector, then extract the referenced element.
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitField())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

/// EmitLoadOfBitfieldLValue - Load a bit-field value by assembling it from the
/// (possibly multiple) memory accesses described by its CGBitFieldInfo.
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  const llvm::Type *ResLTy = ConvertType(ExprType);
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (AI.FieldByteOffset) {
      const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
      Ptr = Builder.CreateBitCast(Ptr, i8PTy);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
    }

    // Cast to the access type.
    const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
                                                    ExprType.getAddressSpace());
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (AI.AccessAlignment)
      Load->setAlignment(AI.AccessAlignment);

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    // Shift left then arithmetic-shift right to replicate the sign bit.
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  // Objective-C property read: turns into a getter message send.
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  // Implicit setter/getter (KVC-style) property read.
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitField())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    assert(Dst.isKVCRef() && "Unknown LValue type");
    return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object: goes through the runtime.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object: select the proper runtime write barrier.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      // The ivar barrier needs the byte offset of the ivar within the object.
      const llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

/// EmitStoreThroughBitfieldLValue - Store Src into the bit-field Dst via the
/// component accesses described by its CGBitFieldInfo. If \p Result is
/// non-null, it receives the new value of the bit-field (for expressions whose
/// value is the assignment result).
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Ty->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (AI.FieldByteOffset) {
      const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
      Ptr = Builder.CreateBitCast(Ptr, i8PTy);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
    }

    // Cast to the access type.
    const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
                                                     Ty.getAddressSpace());
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access, limited
    // to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    const llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (AI.AccessAlignment)
        Load->setAlignment(AI.AccessAlignment);

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (AI.AccessAlignment)
      Store->setAlignment(AI.AccessAlignment);
  }
}

void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  // Objective-C property write: turns into a setter message send.
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  // Implicit setter/getter (KVC-style) property write.
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector is the src and destination are the same number of
      // elements and restore the vector mask since it is on the side it will be
      // stored.
      llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        // Invert the accessor mapping: element InIdx of the destination comes
        // from element i of the source.
        Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extended the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?
This could be simpler. 1006 llvm::SmallVector<llvm::Constant*, 4> ExtMask; 1007 unsigned i; 1008 for (i = 0; i != NumSrcElts; ++i) 1009 ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 1010 for (; i != NumDstElts; ++i) 1011 ExtMask.push_back(llvm::UndefValue::get(Int32Ty)); 1012 llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0], 1013 ExtMask.size()); 1014 llvm::Value *ExtSrcVal = 1015 Builder.CreateShuffleVector(SrcVal, 1016 llvm::UndefValue::get(SrcVal->getType()), 1017 ExtMaskV, "tmp"); 1018 // build identity 1019 llvm::SmallVector<llvm::Constant*, 4> Mask; 1020 for (unsigned i = 0; i != NumDstElts; ++i) 1021 Mask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 1022 1023 // modify when what gets shuffled in 1024 for (unsigned i = 0; i != NumSrcElts; ++i) { 1025 unsigned Idx = getAccessedFieldNo(i, Elts); 1026 Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts); 1027 } 1028 llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); 1029 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp"); 1030 } else { 1031 // We should never shorten the vector 1032 assert(0 && "unexpected shorten vector length"); 1033 } 1034 } else { 1035 // If the Src is a scalar (not a vector) it must be updating one element. 1036 unsigned InIdx = getAccessedFieldNo(0, Elts); 1037 llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx); 1038 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); 1039 } 1040 1041 Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified()); 1042 } 1043 1044 // setObjCGCLValueClass - sets class of he lvalue for the purpose of 1045 // generating write-barries API. It is currently a global, ivar, 1046 // or neither. 
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV) {
  // Classify LV (ivar / global ObjC ref / neither) so that later stores can
  // choose the right GC write-barrier.  No-op outside GC mode.
  if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    LV.SetObjCIvar(LV, true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    // Remember the base so the ivar byte-offset can be computed at store time.
    LV.setBaseIvarExp(Exp->getBase());
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      // Globals and function-statics get the global write-barrier.
      if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
          VD->isFileVarDecl())
        LV.SetGlobalObjCRef(LV, true);
    }
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  // The remaining cases recurse through wrapper expressions, then adjust the
  // classification the recursion produced.
  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.SetObjCIvar(LV, false);
    }
    return;
  }
  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax for assigning to what an ivar points to is not
      // the same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.SetObjCIvar(LV, false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax for assigning to what a global points to is not
      // the same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.SetGlobalObjCRef(LV, false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }
}

/// EmitGlobalVarDeclLValue - Build an lvalue for the global variable VD
/// referenced by E, loading through it once if VD has reference type.
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  if (VD->getType()->isReferenceType())
    V = CGF.Builder.CreateLoad(V, "tmp");
  LValue LV = LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

/// EmitFunctionDeclLValue - Build an lvalue for the address of function FD
/// referenced by E, bitcasting a prototyped definition of an unprototyped
/// function back to the no-proto pointer type callers expect.
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
    }
  }
  return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
}

/// EmitDeclRefLValue - Build an lvalue for a reference to a declaration:
/// weakref aliases, global/local variables (including __block and C++ static
/// locals), functions, and (behind a qualifier) data-member pointers.
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl* VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);

    Qualifiers Quals = MakeQualifiers(E->getType());
    LValue LV = LValue::MakeAddr(Aliasee, Quals);

    return LV;
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    // C++ static locals may not be in the local map yet; ask the module.
    if (!V && getContext().getLangOptions().CPlusPlus &&
        VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    Qualifiers Quals = MakeQualifiers(E->getType());
    // local variables do not get their gc attribute set.
    // local static?
    if (NonGCable) Quals.removeObjCGCAttr();

    if (VD->hasAttr<BlocksAttr>()) {
      // __block variable: chase the forwarding pointer to the current copy,
      // then index to the variable's slot in the byref structure.
      V = Builder.CreateStructGEP(V, 1, "forwarding");
      V = Builder.CreateLoad(V);
      V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                  VD->getNameAsString());
    }
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, Quals);
    LValue::SetObjCNonGC(LV, NonGCable);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  // FIXME: the qualifier check does not seem sufficient here
  if (E->getQualifier()) {
    const FieldDecl *FD = cast<FieldDecl>(ND);
    llvm::Value *V = CGM.EmitPointerToDataMember(FD);

    return LValue::MakeAddr(V, MakeQualifiers(FD->getType()));
  }

  assert(false && "Unhandled DeclRefExpr");

  // an invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
}

/// EmitUnaryOpLValue - Build lvalues for the unary operators that can yield
/// one: __extension__, dereference, __real/__imag, and pre-inc/dec.
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    Qualifiers Quals = MakeQualifiers(T);
    Quals.setAddressSpace(ExprTy.getAddressSpace());

    LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UnaryOperator::Real:
  case UnaryOperator::Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    // A _Complex lowers to a two-element struct: field 0 is real, 1 is imag.
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            MakeQualifiers(ExprTy));
  }
  case UnaryOperator::PreInc:
  case UnaryOperator::PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UnaryOperator::PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
                          Qualifiers());
}
1272 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 1273 return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 1274 Qualifiers()); 1275 } 1276 1277 1278 LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) { 1279 std::string GlobalVarName; 1280 1281 switch (Type) { 1282 default: assert(0 && "Invalid type"); 1283 case PredefinedExpr::Func: 1284 GlobalVarName = "__func__."; 1285 break; 1286 case PredefinedExpr::Function: 1287 GlobalVarName = "__FUNCTION__."; 1288 break; 1289 case PredefinedExpr::PrettyFunction: 1290 GlobalVarName = "__PRETTY_FUNCTION__."; 1291 break; 1292 } 1293 1294 llvm::StringRef FnName = CurFn->getName(); 1295 if (FnName.startswith("\01")) 1296 FnName = FnName.substr(1); 1297 GlobalVarName += FnName; 1298 1299 std::string FunctionName = 1300 PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurCodeDecl); 1301 1302 llvm::Constant *C = 1303 CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str()); 1304 return LValue::MakeAddr(C, Qualifiers()); 1305 } 1306 1307 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 1308 switch (E->getIdentType()) { 1309 default: 1310 return EmitUnsupportedLValue(E, "predefined expression"); 1311 case PredefinedExpr::Func: 1312 case PredefinedExpr::Function: 1313 case PredefinedExpr::PrettyFunction: 1314 return EmitPredefinedFunctionName(E->getIdentType()); 1315 } 1316 } 1317 1318 llvm::BasicBlock *CodeGenFunction::getTrapBB() { 1319 const CodeGenOptions &GCO = CGM.getCodeGenOpts(); 1320 1321 // If we are not optimzing, don't collapse all calls to trap in the function 1322 // to the same call, that way, in the debugger they can see which operation 1323 // did in fact fail. If we are optimizing, we collpase all call to trap down 1324 // to just one per function to save on codesize. 
1325 if (GCO.OptimizationLevel 1326 && TrapBB) 1327 return TrapBB; 1328 1329 llvm::BasicBlock *Cont = 0; 1330 if (HaveInsertPoint()) { 1331 Cont = createBasicBlock("cont"); 1332 EmitBranch(Cont); 1333 } 1334 TrapBB = createBasicBlock("trap"); 1335 EmitBlock(TrapBB); 1336 1337 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0); 1338 llvm::CallInst *TrapCall = Builder.CreateCall(F); 1339 TrapCall->setDoesNotReturn(); 1340 TrapCall->setDoesNotThrow(); 1341 Builder.CreateUnreachable(); 1342 1343 if (Cont) 1344 EmitBlock(Cont); 1345 return TrapBB; 1346 } 1347 1348 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 1349 /// array to pointer, return the array subexpression. 1350 static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 1351 // If this isn't just an array->pointer decay, bail out. 1352 const CastExpr *CE = dyn_cast<CastExpr>(E); 1353 if (CE == 0 || CE->getCastKind() != CastExpr::CK_ArrayToPointerDecay) 1354 return 0; 1355 1356 // If this is a decay from variable width array, bail out. 1357 const Expr *SubExpr = CE->getSubExpr(); 1358 if (SubExpr->getType()->isVariableArrayType()) 1359 return 0; 1360 1361 return SubExpr; 1362 } 1363 1364 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { 1365 // The index must always be an integer, which is not an aggregate. Emit it. 1366 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 1367 QualType IdxTy = E->getIdx()->getType(); 1368 bool IdxSigned = IdxTy->isSignedIntegerType(); 1369 1370 // If the base is a vector type, then we are forming a vector element lvalue 1371 // with this subscript. 1372 if (E->getBase()->getType()->isVectorType()) { 1373 // Emit the vector as an lvalue to get its address. 
1374 LValue LHS = EmitLValue(E->getBase()); 1375 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 1376 Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vidx"); 1377 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 1378 E->getBase()->getType().getCVRQualifiers()); 1379 } 1380 1381 // Extend or truncate the index type to 32 or 64-bits. 1382 if (!Idx->getType()->isIntegerTy(LLVMPointerWidth)) 1383 Idx = Builder.CreateIntCast(Idx, IntPtrTy, 1384 IdxSigned, "idxprom"); 1385 1386 // FIXME: As llvm implements the object size checking, this can come out. 1387 if (CatchUndefined) { 1388 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){ 1389 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) { 1390 if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) { 1391 if (const ConstantArrayType *CAT 1392 = getContext().getAsConstantArrayType(DRE->getType())) { 1393 llvm::APInt Size = CAT->getSize(); 1394 llvm::BasicBlock *Cont = createBasicBlock("cont"); 1395 Builder.CreateCondBr(Builder.CreateICmpULE(Idx, 1396 llvm::ConstantInt::get(Idx->getType(), Size)), 1397 Cont, getTrapBB()); 1398 EmitBlock(Cont); 1399 } 1400 } 1401 } 1402 } 1403 } 1404 1405 // We know that the pointer points to a type of the correct size, unless the 1406 // size is a VLA or Objective-C interface. 1407 llvm::Value *Address = 0; 1408 if (const VariableArrayType *VAT = 1409 getContext().getAsVariableArrayType(E->getType())) { 1410 llvm::Value *VLASize = GetVLASize(VAT); 1411 1412 Idx = Builder.CreateMul(Idx, VLASize); 1413 1414 QualType BaseType = getContext().getBaseElementType(VAT); 1415 1416 CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType); 1417 Idx = Builder.CreateUDiv(Idx, 1418 llvm::ConstantInt::get(Idx->getType(), 1419 BaseTypeSize.getQuantity())); 1420 1421 // The base must be a pointer, which is not an aggregate. Emit it. 
1422 llvm::Value *Base = EmitScalarExpr(E->getBase()); 1423 1424 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 1425 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 1426 // Indexing over an interface, as in "NSString *P; P[4];" 1427 llvm::Value *InterfaceSize = 1428 llvm::ConstantInt::get(Idx->getType(), 1429 getContext().getTypeSizeInChars(OIT).getQuantity()); 1430 1431 Idx = Builder.CreateMul(Idx, InterfaceSize); 1432 1433 const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext); 1434 1435 // The base must be a pointer, which is not an aggregate. Emit it. 1436 llvm::Value *Base = EmitScalarExpr(E->getBase()); 1437 Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy), 1438 Idx, "arrayidx"); 1439 Address = Builder.CreateBitCast(Address, Base->getType()); 1440 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 1441 // If this is A[i] where A is an array, the frontend will have decayed the 1442 // base to be a ArrayToPointerDecay implicit cast. While correct, it is 1443 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 1444 // "gep x, i" here. Emit one "gep A, 0, i". 1445 assert(Array->getType()->isArrayType() && 1446 "Array to pointer decay must have array source type!"); 1447 llvm::Value *ArrayPtr = EmitLValue(Array).getAddress(); 1448 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 1449 llvm::Value *Args[] = { Zero, Idx }; 1450 1451 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, Args+2, "arrayidx"); 1452 } else { 1453 // The base must be a pointer, which is not an aggregate. Emit it. 
1454 llvm::Value *Base = EmitScalarExpr(E->getBase()); 1455 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 1456 } 1457 1458 QualType T = E->getBase()->getType()->getPointeeType(); 1459 assert(!T.isNull() && 1460 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); 1461 1462 Qualifiers Quals = MakeQualifiers(T); 1463 Quals.setAddressSpace(E->getBase()->getType().getAddressSpace()); 1464 1465 LValue LV = LValue::MakeAddr(Address, Quals); 1466 if (getContext().getLangOptions().ObjC1 && 1467 getContext().getLangOptions().getGCMode() != LangOptions::NonGC) { 1468 LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext())); 1469 setObjCGCLValueClass(getContext(), E, LV); 1470 } 1471 return LV; 1472 } 1473 1474 static 1475 llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext, 1476 llvm::SmallVector<unsigned, 4> &Elts) { 1477 llvm::SmallVector<llvm::Constant*, 4> CElts; 1478 1479 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 1480 for (unsigned i = 0, e = Elts.size(); i != e; ++i) 1481 CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i])); 1482 1483 return llvm::ConstantVector::get(&CElts[0], CElts.size()); 1484 } 1485 1486 LValue CodeGenFunction:: 1487 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { 1488 // Emit the base vector as an l-value. 1489 LValue Base; 1490 1491 // ExtVectorElementExpr's base can either be a vector or pointer to vector. 1492 if (E->isArrow()) { 1493 // If it is a pointer to a vector, emit the address and form an lvalue with 1494 // it. 1495 llvm::Value *Ptr = EmitScalarExpr(E->getBase()); 1496 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>(); 1497 Qualifiers Quals = MakeQualifiers(PT->getPointeeType()); 1498 Quals.removeObjCGCAttr(); 1499 Base = LValue::MakeAddr(Ptr, Quals); 1500 } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) { 1501 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), 1502 // emit the base as an lvalue. 
1503 assert(E->getBase()->getType()->isVectorType()); 1504 Base = EmitLValue(E->getBase()); 1505 } else { 1506 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. 1507 assert(E->getBase()->getType()->getAs<VectorType>() && 1508 "Result must be a vector"); 1509 llvm::Value *Vec = EmitScalarExpr(E->getBase()); 1510 1511 // Store the vector to memory (because LValue wants an address). 1512 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType()); 1513 Builder.CreateStore(Vec, VecMem); 1514 Base = LValue::MakeAddr(VecMem, Qualifiers()); 1515 } 1516 1517 // Encode the element access list into a vector of unsigned indices. 1518 llvm::SmallVector<unsigned, 4> Indices; 1519 E->getEncodedElementAccess(Indices); 1520 1521 if (Base.isSimple()) { 1522 llvm::Constant *CV = GenerateConstantVector(VMContext, Indices); 1523 return LValue::MakeExtVectorElt(Base.getAddress(), CV, 1524 Base.getVRQualifiers()); 1525 } 1526 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 1527 1528 llvm::Constant *BaseElts = Base.getExtVectorElts(); 1529 llvm::SmallVector<llvm::Constant *, 4> CElts; 1530 1531 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 1532 if (isa<llvm::ConstantAggregateZero>(BaseElts)) 1533 CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0)); 1534 else 1535 CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i]))); 1536 } 1537 llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size()); 1538 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, 1539 Base.getVRQualifiers()); 1540 } 1541 1542 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 1543 bool isNonGC = false; 1544 Expr *BaseExpr = E->getBase(); 1545 llvm::Value *BaseValue = NULL; 1546 Qualifiers BaseQuals; 1547 1548 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
1549 if (E->isArrow()) { 1550 BaseValue = EmitScalarExpr(BaseExpr); 1551 const PointerType *PTy = 1552 BaseExpr->getType()->getAs<PointerType>(); 1553 BaseQuals = PTy->getPointeeType().getQualifiers(); 1554 } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) || 1555 isa<ObjCImplicitSetterGetterRefExpr>( 1556 BaseExpr->IgnoreParens())) { 1557 RValue RV = EmitObjCPropertyGet(BaseExpr); 1558 BaseValue = RV.getAggregateAddr(); 1559 BaseQuals = BaseExpr->getType().getQualifiers(); 1560 } else { 1561 LValue BaseLV = EmitLValue(BaseExpr); 1562 if (BaseLV.isNonGC()) 1563 isNonGC = true; 1564 // FIXME: this isn't right for bitfields. 1565 BaseValue = BaseLV.getAddress(); 1566 QualType BaseTy = BaseExpr->getType(); 1567 BaseQuals = BaseTy.getQualifiers(); 1568 } 1569 1570 NamedDecl *ND = E->getMemberDecl(); 1571 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 1572 LValue LV = EmitLValueForField(BaseValue, Field, 1573 BaseQuals.getCVRQualifiers()); 1574 LValue::SetObjCNonGC(LV, isNonGC); 1575 setObjCGCLValueClass(getContext(), E, LV); 1576 return LV; 1577 } 1578 1579 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 1580 return EmitGlobalVarDeclLValue(*this, E, VD); 1581 1582 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 1583 return EmitFunctionDeclLValue(*this, E, FD); 1584 1585 assert(false && "Unhandled member declaration!"); 1586 return LValue(); 1587 } 1588 1589 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue, 1590 const FieldDecl* Field, 1591 unsigned CVRQualifiers) { 1592 const CGRecordLayout &RL = 1593 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1594 const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field); 1595 return LValue::MakeBitfield(BaseValue, Info, 1596 Field->getType().getCVRQualifiers()|CVRQualifiers); 1597 } 1598 1599 /// EmitLValueForAnonRecordField - Given that the field is a member of 1600 /// an anonymous struct or union buried inside a record, and given 1601 /// that the base value is a pointer to the 
enclosing record, derive 1602 /// an lvalue for the ultimate field. 1603 LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue, 1604 const FieldDecl *Field, 1605 unsigned CVRQualifiers) { 1606 llvm::SmallVector<const FieldDecl *, 8> Path; 1607 Path.push_back(Field); 1608 1609 while (Field->getParent()->isAnonymousStructOrUnion()) { 1610 const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject(); 1611 if (!isa<FieldDecl>(VD)) break; 1612 Field = cast<FieldDecl>(VD); 1613 Path.push_back(Field); 1614 } 1615 1616 llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator 1617 I = Path.rbegin(), E = Path.rend(); 1618 while (true) { 1619 LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers); 1620 if (++I == E) return LV; 1621 1622 assert(LV.isSimple()); 1623 BaseValue = LV.getAddress(); 1624 CVRQualifiers |= LV.getVRQualifiers(); 1625 } 1626 } 1627 1628 LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue, 1629 const FieldDecl* Field, 1630 unsigned CVRQualifiers) { 1631 if (Field->isBitField()) 1632 return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers); 1633 1634 const CGRecordLayout &RL = 1635 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1636 unsigned idx = RL.getLLVMFieldNo(Field); 1637 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp"); 1638 1639 // Match union field type. 1640 if (Field->getParent()->isUnion()) { 1641 const llvm::Type *FieldTy = 1642 CGM.getTypes().ConvertTypeForMem(Field->getType()); 1643 const llvm::PointerType * BaseTy = 1644 cast<llvm::PointerType>(BaseValue->getType()); 1645 unsigned AS = BaseTy->getAddressSpace(); 1646 V = Builder.CreateBitCast(V, 1647 llvm::PointerType::get(FieldTy, AS), 1648 "tmp"); 1649 } 1650 if (Field->getType()->isReferenceType()) 1651 V = Builder.CreateLoad(V, "tmp"); 1652 1653 Qualifiers Quals = MakeQualifiers(Field->getType()); 1654 Quals.addCVRQualifiers(CVRQualifiers); 1655 // __weak attribute on a field is ignored. 
1656 if (Quals.getObjCGCAttr() == Qualifiers::Weak) 1657 Quals.removeObjCGCAttr(); 1658 1659 return LValue::MakeAddr(V, Quals); 1660 } 1661 1662 LValue 1663 CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue, 1664 const FieldDecl* Field, 1665 unsigned CVRQualifiers) { 1666 QualType FieldType = Field->getType(); 1667 1668 if (!FieldType->isReferenceType()) 1669 return EmitLValueForField(BaseValue, Field, CVRQualifiers); 1670 1671 const CGRecordLayout &RL = 1672 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1673 unsigned idx = RL.getLLVMFieldNo(Field); 1674 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp"); 1675 1676 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 1677 1678 return LValue::MakeAddr(V, MakeQualifiers(FieldType)); 1679 } 1680 1681 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){ 1682 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 1683 const Expr* InitExpr = E->getInitializer(); 1684 LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType())); 1685 1686 EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false); 1687 1688 return Result; 1689 } 1690 1691 LValue 1692 CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) { 1693 if (E->isLvalue(getContext()) == Expr::LV_Valid) { 1694 if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) { 1695 Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS(); 1696 if (Live) 1697 return EmitLValue(Live); 1698 } 1699 1700 if (!E->getLHS()) 1701 return EmitUnsupportedLValue(E, "conditional operator with missing LHS"); 1702 1703 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true"); 1704 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false"); 1705 llvm::BasicBlock *ContBlock = createBasicBlock("cond.end"); 1706 1707 EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock); 1708 1709 // Any temporaries created here are conditional. 
1710 BeginConditionalBranch(); 1711 EmitBlock(LHSBlock); 1712 LValue LHS = EmitLValue(E->getLHS()); 1713 EndConditionalBranch(); 1714 1715 if (!LHS.isSimple()) 1716 return EmitUnsupportedLValue(E, "conditional operator"); 1717 1718 // FIXME: We shouldn't need an alloca for this. 1719 llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp"); 1720 Builder.CreateStore(LHS.getAddress(), Temp); 1721 EmitBranch(ContBlock); 1722 1723 // Any temporaries created here are conditional. 1724 BeginConditionalBranch(); 1725 EmitBlock(RHSBlock); 1726 LValue RHS = EmitLValue(E->getRHS()); 1727 EndConditionalBranch(); 1728 if (!RHS.isSimple()) 1729 return EmitUnsupportedLValue(E, "conditional operator"); 1730 1731 Builder.CreateStore(RHS.getAddress(), Temp); 1732 EmitBranch(ContBlock); 1733 1734 EmitBlock(ContBlock); 1735 1736 Temp = Builder.CreateLoad(Temp, "lv"); 1737 return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); 1738 } 1739 1740 // ?: here should be an aggregate. 1741 assert((hasAggregateLLVMType(E->getType()) && 1742 !E->getType()->isAnyComplexType()) && 1743 "Unexpected conditional operator!"); 1744 1745 return EmitAggExprToLValue(E); 1746 } 1747 1748 /// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast. 1749 /// If the cast is a dynamic_cast, we can have the usual lvalue result, 1750 /// otherwise if a cast is needed by the code generator in an lvalue context, 1751 /// then it must mean that we need the address of an aggregate in order to 1752 /// access one of its fields. This can happen for all the reasons that casts 1753 /// are permitted with aggregate result, including noop aggregate casts, and 1754 /// cast from scalar to union. 
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CastExpr::CK_Dynamic: {
    // dynamic_cast of an lvalue: emit the operand's address and cast it.
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return LValue::MakeAddr(EmitDynamicCast(V, DCE),
                            MakeQualifiers(E->getType()));
  }

  case CastExpr::CK_NoOp: {
    LValue LV = EmitLValue(E->getSubExpr());
    // An ObjC property reference must be loaded (through its getter) to
    // obtain an aggregate whose address can be used.
    if (LV.isPropertyRef()) {
      QualType QT = E->getSubExpr()->getType();
      RValue RV = EmitLoadOfPropertyRefLValue(LV, QT);
      assert(!RV.isScalar() && "EmitCastLValue - scalar cast of property ref");
      llvm::Value *V = RV.getAggregateAddr();
      return LValue::MakeAddr(V, MakeQualifiers(QT));
    }
    return LV;
  }
  case CastExpr::CK_ConstructorConversion:
  case CastExpr::CK_UserDefinedConversion:
  case CastExpr::CK_AnyPointerToObjCPointerCast:
    // These do not change the designated object; the subexpression's
    // lvalue is the result.
    return EmitLValue(E->getSubExpr());

  case CastExpr::CK_UncheckedDerivedToBase:
  case CastExpr::CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *This;
    if (LV.isPropertyRef()) {
      // Property references must be loaded to reach the aggregate address.
      RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getSubExpr()->getType());
      assert (!RV.isScalar() && "EmitCastLValue");
      This = RV.getAggregateAddr();
    }
    else
      This = LV.getAddress();

    // Perform the derived-to-base conversion
    llvm::Value *Base =
      GetAddressOfBaseClass(This, DerivedClassDecl,
                            E->getBasePath(), /*NullCheckValue=*/false);

    return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CastExpr::CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                               E->getBasePath(),/*NullCheckValue=*/false);

    return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_BitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    // Reinterpret the address as a pointer to the written-to type.
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  }
  }
}

/// EmitNullInitializationLValue - Emit `T()`: create a zero-initialized
/// temporary of type T and return its address as an lvalue.
LValue CodeGenFunction::EmitNullInitializationLValue(
                                              const CXXZeroInitValueExpr *E) {
  QualType Ty = E->getType();
  LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
  EmitNullInitialization(LV.getAddress(), Ty);
  return LV;
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//


RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  // Block calls carry a block-pointer callee type and dispatch separately.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  // Look through the implicit function-to-pointer decay for a direct decl
  // reference; builtin calls are intercepted here.
  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID())
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  // Overloaded operator implemented as a member function.
  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitScalarExpr(E->getCallee());
    return RValue::get(0);
  }

  // Ordinary call: evaluate the callee to a function pointer and call it.
  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

/// EmitBinaryOperatorLValue - Emit an lvalue for the binary operators that
/// can yield one: comma, pointer-to-member access, and simple assignment.
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    // The LHS may have left the builder without an insertion point.
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BinaryOperator::PtrMemD ||
      E->getOpcode() == BinaryOperator::PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  // Can only get l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  if (!hasAggregateLLVMType(E->getType())) {
    // Emit the LHS as an l-value.
    LValue LV = EmitLValue(E->getLHS());

    // Store the scalar RHS into it; the assignment's lvalue is the LHS.
    llvm::Value *RHS = EmitScalarExpr(E->getRHS());
    EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
                      E->getType());
    return LV;
  }

  return EmitAggExprToLValue(E);
}

/// EmitCallExprLValue - Emit a call used as an lvalue; only aggregate and
/// reference-typed returns carry an address.
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return LValue::MakeAddr(RV.getAggregateAddr(),MakeQualifiers(E->getType()));

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  // A reference return's scalar value is the referent's address.
  return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

/// EmitCXXConstructLValue - Construct a temporary of the expression's type
/// and return its address as an lvalue.
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
  EmitCXXConstructExpr(Temp, E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

/// EmitCXXTypeidLValue - typeid(...) used as an lvalue; EmitCXXTypeidExpr
/// yields the address used as the result.
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  llvm::Value *Temp = EmitCXXTypeidExpr(E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

/// EmitCXXBindTemporaryLValue - Emit the subexpression's lvalue and register
/// the bound temporary so its cleanup is pushed.
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  LValue LV = EmitLValue(E->getSubExpr());
  PushCXXTemporary(E->getTemporary(), LV.getAddress());
  return LV;
}

/// EmitObjCMessageExprLValue - A message send used as an lvalue: valid only
/// for aggregate results or reference-typed results.
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return LValue::MakeAddr(RV.getAggregateAddr(),
                            MakeQualifiers(E->getType()));

  assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  // A reference result's scalar value is the referent's address.
  return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}

/// EmitObjCSelectorLValue - @selector(...) as an lvalue; the ObjC runtime
/// supplies the selector's address.
LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  llvm::Value *V =
    CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
  return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
}

/// EmitIvarOffset - Ask the ObjC runtime for the offset value of an ivar.
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

/// EmitLValueForIvar - Delegate ivar lvalue computation to the ObjC runtime.
LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

/// EmitObjCIvarRefLValue - Emit an lvalue for `base->ivar` or `base.ivar`.
LValue
CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    // `ptr->ivar`: evaluate the pointer; the object type is the pointee.
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    // `obj.ivar`: take the address of the base lvalue.
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  // Classify the result for ObjC GC write-barrier purposes.
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

/// EmitObjCPropertyRefLValue - Property references are pseudo-lvalues whose
/// loads/stores become getter/setter message sends.
LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

/// EmitObjCKVCRefLValue - Implicit setter/getter references behave like
/// property references: loads and stores become message sends.
LValue CodeGenFunction::EmitObjCKVCRefLValue(
                                const ObjCImplicitSetterGetterRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
2023 return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers()); 2024 } 2025 2026 LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) { 2027 return EmitUnsupportedLValue(E, "use of super"); 2028 } 2029 2030 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2031 // Can only get l-value for message expression returning aggregate type 2032 RValue RV = EmitAnyExprToTemp(E); 2033 return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType())); 2034 } 2035 2036 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2037 ReturnValueSlot ReturnValue, 2038 CallExpr::const_arg_iterator ArgBeg, 2039 CallExpr::const_arg_iterator ArgEnd, 2040 const Decl *TargetDecl) { 2041 // Get the actual function type. The callee type will always be a pointer to 2042 // function type or a block pointer type. 2043 assert(CalleeType->isFunctionPointerType() && 2044 "Call must have function pointer type!"); 2045 2046 CalleeType = getContext().getCanonicalType(CalleeType); 2047 2048 const FunctionType *FnType 2049 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2050 QualType ResultType = FnType->getResultType(); 2051 2052 CallArgList Args; 2053 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2054 2055 return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType), 2056 Callee, ReturnValue, Args, TargetDecl); 2057 } 2058 2059 LValue CodeGenFunction:: 2060 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2061 llvm::Value *BaseV; 2062 if (E->getOpcode() == BinaryOperator::PtrMemI) 2063 BaseV = EmitScalarExpr(E->getLHS()); 2064 else 2065 BaseV = EmitLValue(E->getLHS()).getAddress(); 2066 const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext()); 2067 BaseV = Builder.CreateBitCast(BaseV, i8Ty); 2068 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 2069 llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr"); 2070 2071 QualType Ty = 
E->getRHS()->getType(); 2072 Ty = Ty->getAs<MemberPointerType>()->getPointeeType(); 2073 2074 const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty)); 2075 AddV = Builder.CreateBitCast(AddV, PType); 2076 return LValue::MakeAddr(AddV, MakeQualifiers(Ty)); 2077 } 2078 2079