1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code to emit Expr nodes as LLVM code. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CodeGenFunction.h" 15 #include "CodeGenModule.h" 16 #include "CGCall.h" 17 #include "CGRecordLayout.h" 18 #include "CGObjCRuntime.h" 19 #include "clang/AST/ASTContext.h" 20 #include "clang/AST/DeclObjC.h" 21 #include "llvm/Intrinsics.h" 22 #include "clang/CodeGen/CodeGenOptions.h" 23 #include "llvm/Target/TargetData.h" 24 using namespace clang; 25 using namespace CodeGen; 26 27 //===--------------------------------------------------------------------===// 28 // Miscellaneous Helper Methods 29 //===--------------------------------------------------------------------===// 30 31 /// CreateTempAlloca - This creates a alloca and inserts it into the entry 32 /// block. 33 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty, 34 const llvm::Twine &Name) { 35 if (!Builder.isNamePreserving()) 36 return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt); 37 return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt); 38 } 39 40 void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var, 41 llvm::Value *Init) { 42 llvm::StoreInst *Store = new llvm::StoreInst(Init, Var); 43 llvm::BasicBlock *Block = AllocaInsertPt->getParent(); 44 Block->getInstList().insertAfter(&*AllocaInsertPt, Store); 45 } 46 47 llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty, 48 const llvm::Twine &Name) { 49 llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name); 50 // FIXME: Should we prefer the preferred type alignment here? 
51 CharUnits Align = getContext().getTypeAlignInChars(Ty); 52 Alloc->setAlignment(Align.getQuantity()); 53 return Alloc; 54 } 55 56 llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty, 57 const llvm::Twine &Name) { 58 llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name); 59 // FIXME: Should we prefer the preferred type alignment here? 60 CharUnits Align = getContext().getTypeAlignInChars(Ty); 61 Alloc->setAlignment(Align.getQuantity()); 62 return Alloc; 63 } 64 65 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified 66 /// expression and compare the result against zero, returning an Int1Ty value. 67 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { 68 QualType BoolTy = getContext().BoolTy; 69 if (E->getType()->isMemberFunctionPointerType()) { 70 LValue LV = EmitAggExprToLValue(E); 71 72 // Get the pointer. 73 llvm::Value *FuncPtr = Builder.CreateStructGEP(LV.getAddress(), 0, 74 "src.ptr"); 75 FuncPtr = Builder.CreateLoad(FuncPtr); 76 77 llvm::Value *IsNotNull = 78 Builder.CreateICmpNE(FuncPtr, 79 llvm::Constant::getNullValue(FuncPtr->getType()), 80 "tobool"); 81 82 return IsNotNull; 83 } 84 if (!E->getType()->isAnyComplexType()) 85 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy); 86 87 return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy); 88 } 89 90 /// EmitAnyExpr - Emit code to compute the specified expression which can have 91 /// any type. The result is returned as an RValue struct. If this is an 92 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where the 93 /// result should be returned. 
94 RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, 95 bool IsAggLocVolatile, bool IgnoreResult, 96 bool IsInitializer) { 97 if (!hasAggregateLLVMType(E->getType())) 98 return RValue::get(EmitScalarExpr(E, IgnoreResult)); 99 else if (E->getType()->isAnyComplexType()) 100 return RValue::getComplex(EmitComplexExpr(E, false, false, 101 IgnoreResult, IgnoreResult)); 102 103 EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer); 104 return RValue::getAggregate(AggLoc, IsAggLocVolatile); 105 } 106 107 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will 108 /// always be accessible even if no aggregate location is provided. 109 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, 110 bool IsAggLocVolatile, 111 bool IsInitializer) { 112 llvm::Value *AggLoc = 0; 113 114 if (hasAggregateLLVMType(E->getType()) && 115 !E->getType()->isAnyComplexType()) 116 AggLoc = CreateMemTemp(E->getType(), "agg.tmp"); 117 return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false, 118 IsInitializer); 119 } 120 121 /// EmitAnyExprToMem - Evaluate an expression into a given memory 122 /// location. 123 void CodeGenFunction::EmitAnyExprToMem(const Expr *E, 124 llvm::Value *Location, 125 bool IsLocationVolatile, 126 bool IsInit) { 127 if (E->getType()->isComplexType()) 128 EmitComplexExprIntoAddr(E, Location, IsLocationVolatile); 129 else if (hasAggregateLLVMType(E->getType())) 130 EmitAggExpr(E, Location, IsLocationVolatile, /*Ignore*/ false, IsInit); 131 else { 132 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false)); 133 LValue LV = LValue::MakeAddr(Location, MakeQualifiers(E->getType())); 134 EmitStoreThroughLValue(RV, LV, E->getType()); 135 } 136 } 137 138 /// \brief An adjustment to be made to the temporary created when emitting a 139 /// reference binding, which accesses a particular subobject of that temporary. 
struct SubobjectAdjustment {
  // Which member of the union below is active.
  enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

  union {
    // Active when Kind == DerivedToBaseAdjustment.
    struct {
      const CXXBaseSpecifierArray *BasePath;
      const CXXRecordDecl *DerivedClass;
    } DerivedToBase;

    // Active when Kind == FieldAdjustment.
    struct {
      FieldDecl *Field;
      unsigned CVRQualifiers;
    } Field;
  };

  // Construct a derived-to-base adjustment.
  SubobjectAdjustment(const CXXBaseSpecifierArray *BasePath,
                      const CXXRecordDecl *DerivedClass)
    : Kind(DerivedToBaseAdjustment)
  {
    DerivedToBase.BasePath = BasePath;
    DerivedToBase.DerivedClass = DerivedClass;
  }

  // Construct a field-access adjustment.
  SubobjectAdjustment(FieldDecl *Field, unsigned CVRQualifiers)
    : Kind(FieldAdjustment)
  {
    this->Field.Field = Field;
    this->Field.CVRQualifiers = CVRQualifiers;
  }
};

/// EmitReferenceBindingToExpr - Emit E in a form suitable for binding a
/// reference to it. Every path returns an RValue holding the *address* of the
/// object the reference binds to, materializing E into a temporary when it is
/// not already a simple lvalue. When IsInitializer is set, non-trivial
/// destructors for the temporary are scheduled via cleanup blocks.
RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
                                                   bool IsInitializer) {
  bool ShouldDestroyTemporaries = false;
  unsigned OldNumLiveTemporaries = 0;

  // Look through default-argument expressions.
  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
    ShouldDestroyTemporaries = true;

    // Keep track of the current cleanup stack depth.
    OldNumLiveTemporaries = LiveTemporaries.size();

    E = TE->getSubExpr();
  }

  RValue Val;
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // Emit the expr as an lvalue.
    LValue LV = EmitLValue(E);
    if (LV.isSimple()) {
      if (ShouldDestroyTemporaries) {
        // Pop temporaries.
        while (LiveTemporaries.size() > OldNumLiveTemporaries)
          PopCXXTemporary();
      }

      // A simple lvalue already has a stable address; bind directly to it.
      return RValue::get(LV.getAddress());
    }

    // Non-simple lvalue (e.g. bitfield): load its value; a temporary is
    // created for it at the end of this function.
    Val = EmitLoadOfLValue(LV, E->getType());

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }
  } else {
    QualType ResultTy = E->getType();

    // Walk down the expression, stripping parens and no-op casts, and record
    // each derived-to-base cast and rvalue field access as an adjustment to
    // re-apply to the materialized temporary afterwards.
    llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
    do {
      if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
        E = PE->getSubExpr();
        continue;
      }

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CastExpr::CK_DerivedToBase ||
             CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(&CE->getBasePath(),
                                                    Derived));
          continue;
        }

        if (CE->getCastKind() == CastExpr::CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        // Only member accesses on rvalue record bases are peeled off here.
        if (ME->getBase()->isLvalue(getContext()) != Expr::LV_Valid &&
            ME->getBase()->getType()->isRecordType()) {
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field,
                                              E->getType().getCVRQualifiers()));
            continue;
          }
        }
      }

      // Nothing changed.
      break;
    } while (true);

    // Materialize the (possibly stripped) expression into a temporary.
    Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
                            IsInitializer);

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }

    if (IsInitializer) {
      // We might have to destroy the temporary variable.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
          if (!ClassDecl->hasTrivialDestructor()) {
            const CXXDestructorDecl *Dtor =
              ClassDecl->getDestructor(getContext());

            {
              // Schedule the destructor call on the normal cleanup path.
              DelayedCleanupBlock Scope(*this);
              EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                    /*ForVirtualBase=*/false,
                                    Val.getAggregateAddr());

              // Make sure to jump to the exit block.
              EmitBranch(Scope.getCleanupExitBlock());
            }
            if (Exceptions) {
              // Also destroy the temporary on the exceptional path.
              EHCleanupBlock Cleanup(*this);
              EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                    /*ForVirtualBase=*/false,
                                    Val.getAggregateAddr());
            }
          }
        }
      }
    }

    // Check if need to perform derived-to-base casts and/or field accesses, to
    // get from the temporary object we created (and, potentially, for which we
    // extended the lifetime) to the subobject we're binding the reference to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = Val.getAggregateAddr();
      // Apply the recorded adjustments innermost-first (reverse of the order
      // they were pushed while walking outward-in).
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object = GetAddressOfBaseClass(Object,
                                         Adjustment.DerivedToBase.DerivedClass,
                                         *Adjustment.DerivedToBase.BasePath,
                                         /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          unsigned CVR = Adjustment.Field.CVRQualifiers;
          LValue LV = EmitLValueForField(Object, Adjustment.Field.Field, CVR);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field.Field->getType().getNonReferenceType()
                                                        .getUnqualifiedType();
          Object = CreateTempAlloca(ConvertType(T), "lv");
          EmitStoreThroughLValue(EmitLoadOfLValue(LV, T),
                                 LValue::MakeAddr(Object,
                                                  Qualifiers::fromCVRMask(CVR)),
                                 T);
          break;
        }
        }
      }

      // Cast to a pointer to the original (outermost) result type.
      const llvm::Type *ResultPtrTy
        = llvm::PointerType::get(ConvertType(ResultTy), 0);
      Object = Builder.CreateBitCast(Object, ResultPtrTy, "temp");
      return RValue::get(Object);
    }
  }

  if (Val.isAggregate()) {
    // Aggregates already live in memory; bind to that address.
    Val = RValue::get(Val.getAggregateAddr());
  } else {
    // Create a temporary variable that we can bind the reference to.
    llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp");
    if (Val.isScalar())
      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
    else
      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
    Val = RValue::get(Temp);
  }

  return Val;
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  // An all-zero shuffle mask accesses element 0 regardless of Idx.
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

/// EmitCheck - Emit a run-time check (guarded by CatchUndefined) that the
/// object at Address is at least Size bytes, using the llvm.objectsize
/// intrinsic; out-of-bounds accesses branch to the trap block.
void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // size_t-width integer type for the objectsize result.
  const llvm::Type *Size_tTy
    = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
  Address = Builder.CreateBitCast(Address, PtrToInt8Ty);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1);
  const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  // objectsize returns -1 when the size is unknown; skip the check then.
  llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  // Known size >= Size means the access is in bounds; otherwise trap.
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(Size_tTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}


/// EmitScalarPrePostIncDec - Shared helper for scalar ++/-- (prefix and
/// postfix). Loads the current value from LV, computes the new value
/// (pointer GEP, bool promotion, integer add, or FP add by type), stores it
/// back through the lvalue, and returns the pre- or post-value per isPre.
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  QualType ValTy = E->getSubExpr()->getType();
  llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();

  int AmountVal = isInc ? 1 : -1;

  if (ValTy->isPointerType() &&
      ValTy->getAs<PointerType>()->isVariableArrayType()) {
    // The amount of the addition/subtraction needs to account for the VLA size
    ErrorUnsupported(E, "VLA pointer inc/dec");
  }

  llvm::Value *NextVal;
  if (const llvm::PointerType *PT =
        dyn_cast<llvm::PointerType>(InVal->getType())) {
    // Pointer arithmetic: step by +/-1 element via GEP.
    llvm::Constant *Inc =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
    if (!isa<llvm::FunctionType>(PT->getElementType())) {
      QualType PTEE = ValTy->getPointeeType();
      if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
        // Handle interface types, which are not represented with a concrete
        // type.
        // NOTE(review): bits-to-bytes via a hard-coded 8 — presumably fine
        // for all supported targets; confirm.
        int size = getContext().getTypeSize(OIT) / 8;
        if (!isInc)
          size = -size;
        Inc = llvm::ConstantInt::get(Inc->getType(), size);
        const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
        InVal = Builder.CreateBitCast(InVal, i8Ty);
        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
        llvm::Value *lhs = LV.getAddress();
        lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
        LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
      } else
        NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
    } else {
      // Function pointers: do the arithmetic on i8* (GEP on a function type
      // is not meaningful), then cast back.
      const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
    }
  } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
    // Bool++ is an interesting case, due to promotion rules, we get:
    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
    // Bool = ((int)Bool+1) != 0
    // An interesting aspect of this is that increment is always true.
    // Decrement does not have this property.
    NextVal = llvm::ConstantInt::getTrue(VMContext);
  } else if (isa<llvm::IntegerType>(InVal->getType())) {
    NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);

    // Signed integer overflow is undefined behavior.
    if (ValTy->isSignedIntegerType())
      NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
    else
      NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
  } else {
    // Add the inc/dec to the real part.
    if (InVal->getType()->isFloatTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<float>(AmountVal)));
    else if (InVal->getType()->isDoubleTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<double>(AmountVal)));
    else {
      // Remaining FP types (e.g. long double): build the constant in the
      // target's semantics.
      llvm::APFloat F(static_cast<float>(AmountVal));
      bool ignored;
      F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
                &ignored);
      NextVal = llvm::ConstantFP::get(VMContext, F);
    }
    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField())
    EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
  else
    EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? NextVal : InVal;
}


/// EmitComplexPrePostIncDec - ++/-- on a _Complex lvalue: only the real part
/// is incremented/decremented; the imaginary part is carried through.
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// GetUndefRValue - Return an RValue of undef values of the given type
/// (a null scalar for void), used for error recovery.
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  if (hasAggregateLLVMType(Ty)) {
    // Aggregates are represented by an (undef) pointer to their storage.
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

/// EmitUnsupportedRValue - Report that E is not supported and recover by
/// returning an undef rvalue of E's type.
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

/// EmitUnsupportedLValue - Report that E is not supported and recover by
/// returning an lvalue at an undef address.
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          MakeQualifiers(E->getType()));
}

/// EmitCheckedLValue - Like EmitLValue, but additionally emits an
/// object-size check (EmitCheck) on simple, non-bitfield lvalues;
/// DeclRefExprs are exempt.
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  // Dispatch purely on the statement class of the expression.
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    return EmitCompoundAssignOperatorLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXExprWithTemporariesClass:
    return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
  case Expr::CXXZeroInitValueExprClass:
    return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCImplicitSetterGetterRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));

  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  }
}

/// EmitLoadOfScalar - Load a scalar value of type Ty from Addr, truncating
/// the in-memory boolean representation down to i1 when needed.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               QualType Ty) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
  if (Volatile)
    Load->setVolatile(true);

  // Bool can have different representation in memory than in registers.
  llvm::Value *V = Load;
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::getInt1Ty(VMContext))
      V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");

  return V;
}

/// EmitStoreOfScalar - Store a scalar value of type Ty to Addr, widening a
/// boolean to its in-memory representation first when needed.
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, QualType Ty) {

  if (Ty->isBooleanType()) {
    // Bool can have different representation in memory than in registers.
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
  }
  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    // __weak loads must go through the Objective-C runtime.
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    //
    // FIXME: We shouldn't have to use isSingleValueType here.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    // Functions are the only remaining "scalar": their value is the address.
    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    // Load the whole vector and extract the addressed element.
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitField())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

/// EmitLoadOfBitfieldLValue - Load a bit-field value by performing each
/// component access described by the CGBitFieldInfo, masking/shifting each
/// piece into place and ORing them together, then sign-extending the result
/// if the field is signed.
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  const llvm::Type *ResLTy = ConvertType(ExprType);
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (AI.FieldByteOffset) {
      const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
      Ptr = Builder.CreateBitCast(Ptr, i8PTy);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
    }

    // Cast to the access type.
    const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
                                                    ExprType.getAddressSpace());
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (AI.AccessAlignment)
      Load->setAlignment(AI.AccessAlignment);

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    // Shift-left then arithmetic-shift-right sign-extends the field width.
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

/// EmitLoadOfPropertyRefLValue - Load an Objective-C property by invoking
/// its getter.
RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

/// EmitLoadOfKVCRefLValue - Load an implicit setter/getter reference by
/// invoking its getter.
RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  // Elts encodes which elements of the source vector are being referenced.
  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(
                                      llvm::Type::getInt32Ty(VMContext), InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  // Build the shuffle mask from the accessed field numbers.
  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitField())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    assert(Dst.isKVCRef() && "Unknown LValue type");
    return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    // __weak assignments go through the Objective-C runtime.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      // Compute the byte offset of the ivar within its base object so the
      // runtime assign entry point can be given the offset.
      const llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

/// EmitStoreThroughBitfieldLValue - Store Src into the bit-field described by
/// Dst's CGBitFieldInfo, writing each component access separately. If Result
/// is non-null, *Result receives the new value of the bit-field (truncated to
/// the field width and re-extended), as needed by ++/-- and chained
/// assignments.
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  const llvm::Type *ResLTy = ConvertTypeForMem(Ty);
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Ty->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (AI.FieldByteOffset) {
      const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
      Ptr = Builder.CreateBitCast(Ptr, i8PTy);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset,"bf.field.offs");
    }

    // Cast to the access type.
    const llvm::Type *PTy = llvm::Type::getIntNPtrTy(VMContext, AI.AccessWidth,
                                                     Ty.getAddressSpace());
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access, limited
    // to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    const llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(VMContext, AI.AccessWidth);
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (AI.AccessAlignment)
        Load->setAlignment(AI.AccessAlignment);

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
1006 llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr, 1007 Dst.isVolatileQualified()); 1008 if (AI.AccessAlignment) 1009 Store->setAlignment(AI.AccessAlignment); 1010 } 1011 } 1012 1013 void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src, 1014 LValue Dst, 1015 QualType Ty) { 1016 EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src); 1017 } 1018 1019 void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src, 1020 LValue Dst, 1021 QualType Ty) { 1022 EmitObjCPropertySet(Dst.getKVCRefExpr(), Src); 1023 } 1024 1025 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, 1026 LValue Dst, 1027 QualType Ty) { 1028 // This access turns into a read/modify/write of the vector. Load the input 1029 // value now. 1030 llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(), 1031 Dst.isVolatileQualified(), "tmp"); 1032 const llvm::Constant *Elts = Dst.getExtVectorElts(); 1033 1034 llvm::Value *SrcVal = Src.getScalarVal(); 1035 1036 if (const VectorType *VTy = Ty->getAs<VectorType>()) { 1037 unsigned NumSrcElts = VTy->getNumElements(); 1038 unsigned NumDstElts = 1039 cast<llvm::VectorType>(Vec->getType())->getNumElements(); 1040 if (NumDstElts == NumSrcElts) { 1041 // Use shuffle vector is the src and destination are the same number of 1042 // elements and restore the vector mask since it is on the side it will be 1043 // stored. 1044 llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts); 1045 for (unsigned i = 0; i != NumSrcElts; ++i) { 1046 unsigned InIdx = getAccessedFieldNo(i, Elts); 1047 Mask[InIdx] = llvm::ConstantInt::get( 1048 llvm::Type::getInt32Ty(VMContext), i); 1049 } 1050 1051 llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); 1052 Vec = Builder.CreateShuffleVector(SrcVal, 1053 llvm::UndefValue::get(Vec->getType()), 1054 MaskV, "tmp"); 1055 } else if (NumDstElts > NumSrcElts) { 1056 // Extended the source vector to the same length and then shuffle it 1057 // into the destination. 
1058 // FIXME: since we're shuffling with undef, can we just use the indices 1059 // into that? This could be simpler. 1060 llvm::SmallVector<llvm::Constant*, 4> ExtMask; 1061 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 1062 unsigned i; 1063 for (i = 0; i != NumSrcElts; ++i) 1064 ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 1065 for (; i != NumDstElts; ++i) 1066 ExtMask.push_back(llvm::UndefValue::get(Int32Ty)); 1067 llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0], 1068 ExtMask.size()); 1069 llvm::Value *ExtSrcVal = 1070 Builder.CreateShuffleVector(SrcVal, 1071 llvm::UndefValue::get(SrcVal->getType()), 1072 ExtMaskV, "tmp"); 1073 // build identity 1074 llvm::SmallVector<llvm::Constant*, 4> Mask; 1075 for (unsigned i = 0; i != NumDstElts; ++i) 1076 Mask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 1077 1078 // modify when what gets shuffled in 1079 for (unsigned i = 0; i != NumSrcElts; ++i) { 1080 unsigned Idx = getAccessedFieldNo(i, Elts); 1081 Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts); 1082 } 1083 llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); 1084 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp"); 1085 } else { 1086 // We should never shorten the vector 1087 assert(0 && "unexpected shorten vector length"); 1088 } 1089 } else { 1090 // If the Src is a scalar (not a vector) it must be updating one element. 1091 unsigned InIdx = getAccessedFieldNo(0, Elts); 1092 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 1093 llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx); 1094 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); 1095 } 1096 1097 Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified()); 1098 } 1099 1100 // setObjCGCLValueClass - sets class of he lvalue for the purpose of 1101 // generating write-barries API. It is currently a global, ivar, 1102 // or neither. 
/// setObjCGCLValueClass - Classify \p LV for ObjC GC write-barrier emission
/// (ivar, global, or neither) by structurally walking the expression \p E.
/// No-op when GC is off.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV) {
  if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    LV.SetObjCIvar(LV, true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      // Globals and static locals (block-scope vars without local storage)
      // take the global write-barrier.
      if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
          VD->isFileVarDecl())
        LV.SetGlobalObjCRef(LV, true);
    }
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      // NOTE(review): the comment talks about casts but this fires under
      // ParenExpr — presumably matching gcc here; confirm against test suite.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.SetObjCIvar(LV, false);
    }
    return;
  }
  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.SetObjCIvar(LV, false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.SetGlobalObjCRef(LV, false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }
}

/// EmitGlobalVarDeclLValue - Form an lvalue for a global (or file-scope)
/// variable, loading through references and tagging it for GC barriers.
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  // A reference variable's lvalue is its referent, so load the pointer.
  if (VD->getType()->isReferenceType())
    V = CGF.Builder.CreateLoad(V, "tmp");
  LValue LV = LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

/// EmitFunctionDeclLValue - Form an lvalue for a function reference.
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
    }
  }
  return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
}

/// EmitDeclRefLValue - Emit an lvalue for a reference to a declaration:
/// weakrefs, local/global variables, functions, and (via the qualifier
/// check) pointers to data members.
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl* VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);

    Qualifiers Quals = MakeQualifiers(E->getType());
    LValue LV = LValue::MakeAddr(Aliasee, Quals);

    return LV;
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    // In C++, a static local may not be in the map yet (it can be referenced
    // before its emission point); ask the module for its address.
    if (!V && getContext().getLangOptions().CPlusPlus &&
        VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    Qualifiers Quals = MakeQualifiers(E->getType());
    // local variables do not get their gc attribute set.
    // local static?
    if (NonGCable) Quals.removeObjCGCAttr();

    if (VD->hasAttr<BlocksAttr>()) {
      // __block variables live in a byref structure; chase the forwarding
      // pointer and index to the actual value slot.
      V = Builder.CreateStructGEP(V, 1, "forwarding");
      V = Builder.CreateLoad(V);
      V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                  VD->getNameAsString());
    }
    // A reference variable's lvalue is its referent.
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, Quals);
    LValue::SetObjCNonGC(LV, NonGCable);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  // FIXME: the qualifier check does not seem sufficient here
  if (E->getQualifier()) {
    const FieldDecl *FD = cast<FieldDecl>(ND);
    llvm::Value *V = CGM.EmitPointerToDataMember(FD);

    return LValue::MakeAddr(V, MakeQualifiers(FD->getType()));
  }

  assert(false && "Unhandled DeclRefExpr");

  // An invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}

/// EmitBlockDeclRefLValue - Emit an lvalue for a variable referenced from
/// inside a block.
LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
}

/// EmitUnaryOpLValue - Emit lvalues for the unary operators that can produce
/// them: __extension__, dereference, __real/__imag, and pre-inc/dec.
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    Qualifiers Quals = MakeQualifiers(T);
    // The address space comes from the pointer operand, not the pointee.
    Quals.setAddressSpace(ExprTy.getAddressSpace());

    LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UnaryOperator::Real:
  case UnaryOperator::Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    // _Complex is lowered as { real, imag }; index 0 = real, 1 = imag.
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            MakeQualifiers(ExprTy));
  }
  case UnaryOperator::PreInc:
  case UnaryOperator::PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UnaryOperator::PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    // The result of a pre-inc/dec is the updated lvalue itself.
    return LV;
  }
  }
}

/// EmitStringLiteralLValue - A string literal's lvalue is the address of its
/// constant global.
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
                          Qualifiers());
}
/// EmitObjCEncodeExprLValue - An @encode expression's lvalue is the address
/// of its constant encoding string.
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                          Qualifiers());
}


/// EmitPredefinedFunctionName - Build (or reuse) the constant string holding
/// the current function's name for __func__ / __FUNCTION__ /
/// __PRETTY_FUNCTION__ (selected by \p Type) and return it as an lvalue.
LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default: assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  llvm::StringRef FnName = CurFn->getName();
  // Drop the '\01' "do not mangle" marker when forming the global's name.
  if (FnName.startswith("\01"))
    FnName = FnName.substr(1);
  GlobalVarName += FnName;

  std::string FunctionName =
    PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurCodeDecl);

  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, Qualifiers());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

/// getTrapBB - Return a basic block that calls llvm.trap and is unreachable
/// afterwards, creating one if needed and resuming normal emission in a
/// continuation block.
llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the
  // function to the same call, that way, in the debugger they can see which
  // operation did in fact fail.  If we are optimizing, we collapse all calls
  // to trap down to just one per function to save on codesize.
  if (GCO.OptimizationLevel
      && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  // Resume emission in the continuation block, if we had an insert point.
  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}

/// EmitArraySubscriptExpr - Emit the lvalue for an array subscript, handling
/// vector-element subscripts, VLAs, ObjC interface element types, and
/// (optionally) -fcatch-undefined-behavior bounds checks.
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy  = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx,
                          llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
      E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate.  Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits.
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (IdxBitwidth != LLVMPointerWidth)
    Idx = Builder.CreateIntCast(Idx,
                            llvm::IntegerType::get(VMContext, LLVMPointerWidth),
                                IdxSigned, "idxprom");

  // FIXME: As llvm implements the object size checking, this can come out.
  if (CatchUndefined) {
    if (const ImplicitCastExpr *ICE=dyn_cast<ImplicitCastExpr>(E->getBase())) {
      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
        if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
          if (const ConstantArrayType *CAT
              = getContext().getAsConstantArrayType(DRE->getType())) {
            llvm::APInt Size = CAT->getSize();
            llvm::BasicBlock *Cont = createBasicBlock("cont");
            // Trap on Idx > Size.  Note the <= : Idx == Size (the
            // one-past-the-end address) is deliberately allowed here —
            // presumably to permit forming &a[N]; confirm before tightening.
            Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
                                  llvm::ConstantInt::get(Idx->getType(), Size)),
                                 Cont, getTrapBB());
            EmitBlock(Cont);
          }
        }
      }
    }
  }

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    // VLA element: scale the index by the runtime element count, then divide
    // by the base element size so the GEP (typed on the base element) lands
    // on the right element.
    llvm::Value *VLASize = GetVLASize(VAT);

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
    Idx = Builder.CreateUDiv(Idx,
                             llvm::ConstantInt::get(Idx->getType(),
                                 BaseTypeSize.getQuantity()));
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  } else if (const ObjCObjectType *OIT =
                 E->getType()->getAs<ObjCObjectType>()) {
    // ObjC interface element type: index by bytes (interface size), via i8*.
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
          getContext().getTypeSizeInChars(OIT).getQuantity());

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else {
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  Qualifiers Quals = MakeQualifiers(T);
  // The address space comes from the base pointer, not the element type.
  Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());

  LValue LV = LValue::MakeAddr(Address, Quals);
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

/// GenerateConstantVector - Build a constant <N x i32> vector from the given
/// list of element indices.
static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                       llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant*, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(VMContext), Elts[i]));

  return llvm::ConstantVector::get(&CElts[0], CElts.size());
}

/// EmitExtVectorElementExpr - Emit the lvalue for an ext-vector element
/// access (v.xyzw style), producing an ExtVectorElt lvalue that records
/// which components are selected.
LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);

  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
    Quals.removeObjCGCAttr();
    Base = LValue::MakeAddr(Ptr, Quals);
  } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->getAs<VectorType>() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = LValue::MakeAddr(VecMem, Qualifiers());
  }

  // Encode the element access list into a vector of unsigned indices.
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getVRQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  // The base is itself a component selection (foo.xy.x): compose the two
  // selections into a single element list.
  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
    else
      CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getVRQualifiers());
}

/// EmitMemberExpr - Emit the lvalue for a member access (s.x or s->x),
/// dispatching on what the member declaration actually is (field, global
/// variable, or function).
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  Qualifiers BaseQuals;

  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAs<PointerType>();
    BaseQuals = PTy->getPointeeType().getQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
             isa<ObjCImplicitSetterGetterRefExpr>(
               BaseExpr->IgnoreParens())) {
    // The base is a property access: call the getter and use the aggregate
    // result's address.
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    BaseQuals = BaseExpr->getType().getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    QualType BaseTy = BaseExpr->getType();
    BaseQuals = BaseTy.getQualifiers();
  }

  NamedDecl *ND = E->getMemberDecl();
  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseValue, Field,
                                   BaseQuals.getCVRQualifiers());
    LValue::SetObjCNonGC(LV, isNonGC);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
    return EmitGlobalVarDeclLValue(*this, E, VD);

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  assert(false && "Unhandled member declaration!");
  return LValue();
}

/// EmitLValueForBitfield - Form a bit-field lvalue from the base address and
/// the bit-field's layout info.
LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
                                              const FieldDecl* Field,
                                              unsigned CVRQualifiers) {
  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
  return LValue::MakeBitfield(BaseValue, Info,
                             Field->getType().getCVRQualifiers()|CVRQualifiers);
}

/// EmitLValueForAnonRecordField - Given that the field is a member of
/// an anonymous struct or union buried inside a record, and given
/// that the base value is a pointer to the
/// enclosing record, derive an lvalue for the ultimate field.
LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
                                                     const FieldDecl *Field,
                                                     unsigned CVRQualifiers) {
  // Collect the chain of anonymous-record fields leading to 'Field', from
  // innermost to outermost.
  llvm::SmallVector<const FieldDecl *, 8> Path;
  Path.push_back(Field);

  while (Field->getParent()->isAnonymousStructOrUnion()) {
    const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject();
    if (!isa<FieldDecl>(VD)) break;
    Field = cast<FieldDecl>(VD);
    Path.push_back(Field);
  }

  // Walk the chain outermost-first, emitting an lvalue for each level and
  // accumulating CVR qualifiers down to the ultimate field.
  llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator
    I = Path.rbegin(), E = Path.rend();
  while (true) {
    LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers);
    if (++I == E) return LV;

    assert(LV.isSimple());
    BaseValue = LV.getAddress();
    CVRQualifiers |= LV.getVRQualifiers();
  }
}

/// EmitLValueForField - Emit the lvalue for a (possibly bit-field) member of
/// the record at \p BaseValue, merging \p CVRQualifiers from the base.
LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
                                           const FieldDecl* Field,
                                           unsigned CVRQualifiers) {
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  unsigned idx = RL.getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (Field->getParent()->isUnion()) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType * BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
  }
  // A reference member's lvalue is its referent, so load the pointer.
  if (Field->getType()->isReferenceType())
    V = Builder.CreateLoad(V, "tmp");

  Qualifiers Quals = MakeQualifiers(Field->getType());
  Quals.addCVRQualifiers(CVRQualifiers);
  // __weak attribute on a field is ignored.
  if (Quals.getObjCGCAttr() == Qualifiers::Weak)
    Quals.removeObjCGCAttr();

  return LValue::MakeAddr(V, Quals);
}

/// EmitLValueForFieldInitialization - Like EmitLValueForField, but for use
/// when initializing the field: for a reference member this yields the
/// address of the reference slot itself rather than loading through it.
LValue
CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
                                                  const FieldDecl* Field,
                                                  unsigned CVRQualifiers) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(BaseValue, Field, CVRQualifiers);

  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  unsigned idx = RL.getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");

  return LValue::MakeAddr(V, MakeQualifiers(FieldType));
}

/// EmitCompoundLiteralLValue - Materialize a compound literal into a stack
/// temporary and return its address as an lvalue.
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
  llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr* InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));

  EmitAnyExprToMem(InitExpr, DeclPtr, /*Volatile*/ false);

  return Result;
}

/// EmitConditionalOperatorLValue - Emit the lvalue result of ?: by emitting
/// each arm's lvalue and phi-ing their addresses through a stack slot
/// (aggregate results fall through to EmitAggExprToLValue).
LValue
CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // If the condition constant-folds, emit only the live arm.
    if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
      Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
      if (Live)
        return EmitLValue(Live);
    }

    if (!E->getLHS())
      return EmitUnsupportedLValue(E, "conditional operator with missing LHS");

    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
    llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");

    EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

    // Any temporaries created here are conditional.
    BeginConditionalBranch();
    EmitBlock(LHSBlock);
    LValue LHS = EmitLValue(E->getLHS());
    EndConditionalBranch();

    if (!LHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    // FIXME: We shouldn't need an alloca for this.
    llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
    Builder.CreateStore(LHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    // Any temporaries created here are conditional.
    BeginConditionalBranch();
    EmitBlock(RHSBlock);
    LValue RHS = EmitLValue(E->getRHS());
    EndConditionalBranch();
    if (!RHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    Builder.CreateStore(RHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    EmitBlock(ContBlock);

    // Reload whichever arm's address was stored.
    Temp = Builder.CreateLoad(Temp, "lv");
    return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
  }

  // ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  return EmitAggExprToLValue(E);
}

/// EmitCastLValue - Casts are never lvalues unless that cast is a
/// dynamic_cast.  If the cast is a dynamic_cast, we can have the usual lvalue
/// result, otherwise if a cast is needed by the code generator in an lvalue
/// context, then it must mean that we need the address of an aggregate in
/// order to access one of its fields.  This can happen for all the reasons
/// that casts are permitted with aggregate result, including noop aggregate
/// casts, and cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CastExpr::CK_Dynamic: {
    // dynamic_cast of an l-value: run the dynamic cast on the address.
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return LValue::MakeAddr(EmitDynamicCast(V, DCE),
                            MakeQualifiers(E->getType()));
  }

  case CastExpr::CK_NoOp: {
    LValue LV = EmitLValue(E->getSubExpr());
    if (LV.isPropertyRef()) {
      // An ObjC property reference has no address of its own; load it
      // (which issues the getter send) and use the aggregate result's
      // address instead.
      QualType QT = E->getSubExpr()->getType();
      RValue RV = EmitLoadOfPropertyRefLValue(LV, QT);
      assert(!RV.isScalar() && "EmitCastLValue - scalar cast of property ref");
      llvm::Value *V = RV.getAggregateAddr();
      return LValue::MakeAddr(V, MakeQualifiers(QT));
    }
    return LV;
  }
  case CastExpr::CK_ConstructorConversion:
  case CastExpr::CK_UserDefinedConversion:
  case CastExpr::CK_AnyPointerToObjCPointerCast:
    // These casts do not change the address; reuse the subexpression's
    // l-value directly.
    return EmitLValue(E->getSubExpr());

  case CastExpr::CK_UncheckedDerivedToBase:
  case CastExpr::CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the derived-to-base conversion
    llvm::Value *Base =
      GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl,
                            E->getBasePath(), /*NullCheckValue=*/false);

    return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_ToUnion:
    // Scalar-to-union cast: materialize the aggregate and take its address.
    return EmitAggExprToLValue(E);
  case CastExpr::CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                               E->getBasePath(),/*NullCheckValue=*/false);

    return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_BitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    // Bitcast the address to the written-to type; the pointee bits are
    // reinterpreted, not converted.
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                    ConvertType(CE->getTypeAsWritten()));
    return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  }
  }
}

/// EmitNullInitializationLValue - Emit an l-value for a T() / zero-init
/// expression by zero-filling a fresh memory temporary of that type.
LValue CodeGenFunction::EmitNullInitializationLValue(
                                              const CXXZeroInitValueExpr *E) {
  QualType Ty = E->getType();
  LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
  EmitNullInitialization(LV.getAddress(), Ty);
  return LV;
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//


/// EmitCallExpr - Emit a call expression, dispatching to the specialized
/// emitters for block calls, C++ member calls, builtins, operator-member
/// calls, and pseudo-destructor calls before falling back to an indirect
/// call through the evaluated callee.
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  // Peek through the implicit function-to-pointer decay to find the callee
  // declaration, both for builtin dispatch and for later call emission.
  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID())
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  // An overloaded operator that resolved to a member function.
  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitScalarExpr(E->getCallee());
    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

/// EmitBinaryOperatorLValue - Emit an l-value for a binary operator: comma,
/// pointer-to-member access, or a simple assignment (whose result in C is
/// the LHS l-value).
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    // The LHS may have terminated the current block (e.g. via a branch);
    // make sure we have somewhere to emit the RHS.
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BinaryOperator::PtrMemD ||
      E->getOpcode() == BinaryOperator::PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  // Can only get l-value for binary operator expressions which are a
  // simple assignment of aggregate type.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  if (!hasAggregateLLVMType(E->getType())) {
    // Emit the LHS as an l-value.
    LValue LV = EmitLValue(E->getLHS());

    // Store the RHS into it; the assignment's l-value is the LHS.
    llvm::Value *RHS = EmitScalarExpr(E->getRHS());
    EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
                      E->getType());
    return LV;
  }

  return EmitAggExprToLValue(E);
}

/// EmitCallExprLValue - Emit an l-value for a call expression: the address
/// of its aggregate result, or, for reference-returning calls, the returned
/// reference itself.
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return LValue::MakeAddr(RV.getAggregateAddr(),MakeQualifiers(E->getType()));

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  // A reference return is represented as the referent's address.
  return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}

/// EmitVAArgExprLValue - Emit an l-value for a va_arg expression by
/// materializing its aggregate value into a temporary.
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

/// EmitCXXConstructLValue - Emit an l-value for a constructor call by
/// constructing the object into a fresh memory temporary.
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
  EmitCXXConstructExpr(Temp, E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

/// EmitCXXTypeidLValue - Emit an l-value for a typeid expression; the
/// referenced std::type_info object's address comes from EmitCXXTypeidExpr.
LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  llvm::Value *Temp = EmitCXXTypeidExpr(E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

/// EmitCXXBindTemporaryLValue - Emit the subexpression's l-value and
/// register its address as a temporary whose destructor must run later.
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  LValue LV = EmitLValue(E->getSubExpr());
  PushCXXTemporary(E->getTemporary(), LV.getAddress());
  return LV;
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}

/// EmitIvarOffset - Return the runtime-computed byte offset of an ivar,
/// delegating to the configured ObjC runtime.
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

/// EmitLValueForIvar - Build an l-value for an ivar access, delegating the
/// address computation to the configured ObjC runtime.
LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

/// EmitObjCIvarRefLValue - Emit an l-value for `obj->ivar` / `obj.ivar`:
/// evaluate the base, then form the ivar l-value with the base's CVR
/// qualifiers, and classify it for ObjC GC.
LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    // obj->ivar: the base is a pointer; the object type is its pointee.
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    // obj.ivar: the base is an l-value; use its address.
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue CodeGenFunction::EmitObjCKVCRefLValue(
                                const ObjCImplicitSetterGetterRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
  // 'super' cannot be used as an l-value here.
  return EmitUnsupportedLValue(E, "use of super");
}

/// EmitStmtExprLValue - Emit an l-value for a GNU statement expression by
/// materializing its aggregate result into a temporary.
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitAnyExprToTemp(E);
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}

/// EmitCall - Emit a call through the given callee value: canonicalize the
/// callee's function-pointer type, emit the argument list, and forward to
/// the CGCall-level EmitCall with the computed ABI function info.
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
  // NOTE(review): ResultType is unused in this function as visible here.
  QualType ResultType = FnType->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
                  Callee, ReturnValue, Args, TargetDecl);
}

/// EmitPointerToDataMemberBinaryExpr - Emit an l-value for `base .* ptr` or
/// `base ->* ptr` where the member pointer names a data member: the member
/// pointer's value is a byte offset applied to the base address.
LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  // ->* takes a pointer base; .* takes an l-value base.
  llvm::Value *BaseV;
  if (E->getOpcode() == BinaryOperator::PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();
  // Do the offset arithmetic on an i8* so the GEP is in bytes.
  const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
  BaseV = Builder.CreateBitCast(BaseV, i8Ty);
  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");

  // The result type is the member pointer's pointee type.
  QualType Ty = E->getRHS()->getType();
  Ty = Ty->getAs<MemberPointerType>()->getPointeeType();

  // Cast the computed byte address back to a pointer to the member type.
  const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty));
  AddV = Builder.CreateBitCast(AddV, PType);
  return LValue::MakeAddr(AddV, MakeQualifiers(Ty));
}