1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This contains code to emit Expr nodes as LLVM code. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CodeGenFunction.h" 15 #include "CodeGenModule.h" 16 #include "CGCall.h" 17 #include "CGObjCRuntime.h" 18 #include "clang/AST/ASTContext.h" 19 #include "clang/AST/DeclObjC.h" 20 #include "llvm/Target/TargetData.h" 21 using namespace clang; 22 using namespace CodeGen; 23 24 //===--------------------------------------------------------------------===// 25 // Miscellaneous Helper Methods 26 //===--------------------------------------------------------------------===// 27 28 /// CreateTempAlloca - This creates a alloca and inserts it into the entry 29 /// block. 30 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty, 31 const llvm::Twine &Name) { 32 if (!Builder.isNamePreserving()) 33 return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt); 34 return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt); 35 } 36 37 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified 38 /// expression and compare the result against zero, returning an Int1Ty value. 39 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { 40 QualType BoolTy = getContext().BoolTy; 41 if (E->getType()->isMemberFunctionPointerType()) { 42 llvm::Value *Ptr = CreateTempAlloca(ConvertType(E->getType())); 43 EmitAggExpr(E, Ptr, /*VolatileDest=*/false); 44 45 // Get the pointer. 46 llvm::Value *FuncPtr = Builder.CreateStructGEP(Ptr, 0, "src.ptr"); 47 FuncPtr = Builder.CreateLoad(FuncPtr); 48 49 llvm::Value *IsNotNull = 50 Builder.CreateICmpNE(FuncPtr, 51 llvm::Constant::getNullValue(FuncPtr->getType()), 52 "tobool"); 53 54 return IsNotNull; 55 } 56 if (!E->getType()->isAnyComplexType()) 57 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy); 58 59 return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy); 60 } 61 62 /// EmitAnyExpr - Emit code to compute the specified expression which can have 63 /// any type. The result is returned as an RValue struct. If this is an 64 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where the 65 /// result should be returned. 66 RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, 67 bool IsAggLocVolatile, bool IgnoreResult, 68 bool IsInitializer) { 69 if (!hasAggregateLLVMType(E->getType())) 70 return RValue::get(EmitScalarExpr(E, IgnoreResult)); 71 else if (E->getType()->isAnyComplexType()) 72 return RValue::getComplex(EmitComplexExpr(E, false, false, 73 IgnoreResult, IgnoreResult)); 74 75 EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer); 76 return RValue::getAggregate(AggLoc, IsAggLocVolatile); 77 } 78 79 /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will 80 /// always be accessible even if no aggregate location is provided. 
81 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, 82 bool IsAggLocVolatile, 83 bool IsInitializer) { 84 llvm::Value *AggLoc = 0; 85 86 if (hasAggregateLLVMType(E->getType()) && 87 !E->getType()->isAnyComplexType()) 88 AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp"); 89 return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false, 90 IsInitializer); 91 } 92 93 RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, 94 QualType DestType, 95 bool IsInitializer) { 96 bool ShouldDestroyTemporaries = false; 97 unsigned OldNumLiveTemporaries = 0; 98 99 if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) { 100 ShouldDestroyTemporaries = TE->shouldDestroyTemporaries(); 101 102 // Keep track of the current cleanup stack depth. 103 if (ShouldDestroyTemporaries) 104 OldNumLiveTemporaries = LiveTemporaries.size(); 105 106 E = TE->getSubExpr(); 107 } 108 109 RValue Val; 110 if (E->isLvalue(getContext()) == Expr::LV_Valid) { 111 // Emit the expr as an lvalue. 112 LValue LV = EmitLValue(E); 113 if (LV.isSimple()) 114 return RValue::get(LV.getAddress()); 115 Val = EmitLoadOfLValue(LV, E->getType()); 116 117 if (ShouldDestroyTemporaries) { 118 // Pop temporaries. 119 while (LiveTemporaries.size() > OldNumLiveTemporaries) 120 PopCXXTemporary(); 121 } 122 } else { 123 const CXXRecordDecl *BaseClassDecl = 0; 124 const CXXRecordDecl *DerivedClassDecl = 0; 125 126 if (const CastExpr *CE = 127 dyn_cast<CastExpr>(E->IgnoreParenNoopCasts(getContext()))) { 128 if (CE->getCastKind() == CastExpr::CK_DerivedToBase) { 129 E = CE->getSubExpr(); 130 131 BaseClassDecl = 132 cast<CXXRecordDecl>(CE->getType()->getAs<RecordType>()->getDecl()); 133 DerivedClassDecl = 134 cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl()); 135 } 136 } 137 138 Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false, 139 IsInitializer); 140 141 if (ShouldDestroyTemporaries) { 142 // Pop temporaries. 143 while (LiveTemporaries.size() > OldNumLiveTemporaries) 144 PopCXXTemporary(); 145 } 146 147 if (IsInitializer) { 148 // We might have to destroy the temporary variable. 149 if (const RecordType *RT = E->getType()->getAs<RecordType>()) { 150 if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) { 151 if (!ClassDecl->hasTrivialDestructor()) { 152 const CXXDestructorDecl *Dtor = 153 ClassDecl->getDestructor(getContext()); 154 155 { 156 DelayedCleanupBlock Scope(*this); 157 EmitCXXDestructorCall(Dtor, Dtor_Complete, 158 Val.getAggregateAddr()); 159 160 // Make sure to jump to the exit block. 161 EmitBranch(Scope.getCleanupExitBlock()); 162 } 163 if (Exceptions) { 164 EHCleanupBlock Cleanup(*this); 165 EmitCXXDestructorCall(Dtor, Dtor_Complete, 166 Val.getAggregateAddr()); 167 } 168 } 169 } 170 } 171 } 172 173 // Check if need to perform the derived-to-base cast. 174 if (BaseClassDecl) { 175 llvm::Value *Derived = Val.getAggregateAddr(); 176 llvm::Value *Base = 177 GetAddressOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl, 178 /*NullCheckValue=*/false); 179 return RValue::get(Base); 180 } 181 } 182 183 if (Val.isAggregate()) { 184 Val = RValue::get(Val.getAggregateAddr()); 185 } else { 186 // Create a temporary variable that we can bind the reference to. 
187 llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), 188 "reftmp"); 189 if (Val.isScalar()) 190 EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType()); 191 else 192 StoreComplexToAddr(Val.getComplexVal(), Temp, false); 193 Val = RValue::get(Temp); 194 } 195 196 return Val; 197 } 198 199 200 /// getAccessedFieldNo - Given an encoded value and a result number, return the 201 /// input field number being accessed. 202 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, 203 const llvm::Constant *Elts) { 204 if (isa<llvm::ConstantAggregateZero>(Elts)) 205 return 0; 206 207 return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue(); 208 } 209 210 211 //===----------------------------------------------------------------------===// 212 // LValue Expression Emission 213 //===----------------------------------------------------------------------===// 214 215 RValue CodeGenFunction::GetUndefRValue(QualType Ty) { 216 if (Ty->isVoidType()) 217 return RValue::get(0); 218 219 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) { 220 const llvm::Type *EltTy = ConvertType(CTy->getElementType()); 221 llvm::Value *U = llvm::UndefValue::get(EltTy); 222 return RValue::getComplex(std::make_pair(U, U)); 223 } 224 225 if (hasAggregateLLVMType(Ty)) { 226 const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty)); 227 return RValue::getAggregate(llvm::UndefValue::get(LTy)); 228 } 229 230 return RValue::get(llvm::UndefValue::get(ConvertType(Ty))); 231 } 232 233 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, 234 const char *Name) { 235 ErrorUnsupported(E, Name); 236 return GetUndefRValue(E->getType()); 237 } 238 239 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, 240 const char *Name) { 241 ErrorUnsupported(E, Name); 242 llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); 243 return LValue::MakeAddr(llvm::UndefValue::get(Ty), 244 MakeQualifiers(E->getType())); 245 } 246 247 /// EmitLValue - Emit code to compute a designator that specifies the location 248 /// of the expression. 249 /// 250 /// This can return one of two things: a simple address or a bitfield reference. 251 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be 252 /// an LLVM pointer type. 253 /// 254 /// If this returns a bitfield reference, nothing about the pointee type of the 255 /// LLVM value is known: For example, it may not be a pointer to an integer. 256 /// 257 /// If this returns a normal address, and if the lvalue's C type is fixed size, 258 /// this method guarantees that the returned pointer type will point to an LLVM 259 /// type of the same size of the lvalue's type. If the lvalue has a variable 260 /// length type, this is not possible. 
261 /// 262 LValue CodeGenFunction::EmitLValue(const Expr *E) { 263 switch (E->getStmtClass()) { 264 default: return EmitUnsupportedLValue(E, "l-value expression"); 265 266 case Expr::ObjCIsaExprClass: 267 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E)); 268 case Expr::BinaryOperatorClass: 269 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); 270 case Expr::CallExprClass: 271 case Expr::CXXMemberCallExprClass: 272 case Expr::CXXOperatorCallExprClass: 273 return EmitCallExprLValue(cast<CallExpr>(E)); 274 case Expr::VAArgExprClass: 275 return EmitVAArgExprLValue(cast<VAArgExpr>(E)); 276 case Expr::DeclRefExprClass: 277 return EmitDeclRefLValue(cast<DeclRefExpr>(E)); 278 case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); 279 case Expr::PredefinedExprClass: 280 return EmitPredefinedLValue(cast<PredefinedExpr>(E)); 281 case Expr::StringLiteralClass: 282 return EmitStringLiteralLValue(cast<StringLiteral>(E)); 283 case Expr::ObjCEncodeExprClass: 284 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); 285 286 case Expr::BlockDeclRefExprClass: 287 return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E)); 288 289 case Expr::CXXTemporaryObjectExprClass: 290 case Expr::CXXConstructExprClass: 291 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); 292 case Expr::CXXBindTemporaryExprClass: 293 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); 294 case Expr::CXXExprWithTemporariesClass: 295 return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E)); 296 case Expr::CXXZeroInitValueExprClass: 297 return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E)); 298 case Expr::CXXDefaultArgExprClass: 299 return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr()); 300 case Expr::CXXTypeidExprClass: 301 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E)); 302 303 case Expr::ObjCMessageExprClass: 304 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); 305 case Expr::ObjCIvarRefExprClass: 306 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); 307 case Expr::ObjCPropertyRefExprClass: 308 return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E)); 309 case Expr::ObjCImplicitSetterGetterRefExprClass: 310 return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E)); 311 case Expr::ObjCSuperExprClass: 312 return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E)); 313 314 case Expr::StmtExprClass: 315 return EmitStmtExprLValue(cast<StmtExpr>(E)); 316 case Expr::UnaryOperatorClass: 317 return EmitUnaryOpLValue(cast<UnaryOperator>(E)); 318 case Expr::ArraySubscriptExprClass: 319 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); 320 case Expr::ExtVectorElementExprClass: 321 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); 322 case Expr::MemberExprClass: 323 return EmitMemberExpr(cast<MemberExpr>(E)); 324 case Expr::CompoundLiteralExprClass: 325 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); 326 case Expr::ConditionalOperatorClass: 327 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E)); 328 case Expr::ChooseExprClass: 329 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext())); 330 case Expr::ImplicitCastExprClass: 331 case Expr::CStyleCastExprClass: 332 case Expr::CXXFunctionalCastExprClass: 333 case Expr::CXXStaticCastExprClass: 334 case Expr::CXXDynamicCastExprClass: 335 case Expr::CXXReinterpretCastExprClass: 336 case Expr::CXXConstCastExprClass: 337 return EmitCastLValue(cast<CastExpr>(E)); 338 } 339 } 340 341 llvm::Value 
*CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, 342 QualType Ty) { 343 llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp"); 344 if (Volatile) 345 Load->setVolatile(true); 346 347 // Bool can have different representation in memory than in registers. 348 llvm::Value *V = Load; 349 if (Ty->isBooleanType()) 350 if (V->getType() != llvm::Type::getInt1Ty(VMContext)) 351 V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool"); 352 353 return V; 354 } 355 356 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, 357 bool Volatile, QualType Ty) { 358 359 if (Ty->isBooleanType()) { 360 // Bool can have different representation in memory than in registers. 361 const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType()); 362 Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false); 363 } 364 Builder.CreateStore(Value, Addr, Volatile); 365 } 366 367 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this 368 /// method emits the address of the lvalue, then loads the result as an rvalue, 369 /// returning the rvalue. 370 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) { 371 if (LV.isObjCWeak()) { 372 // load of a __weak object. 373 llvm::Value *AddrWeakObj = LV.getAddress(); 374 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this, 375 AddrWeakObj)); 376 } 377 378 if (LV.isSimple()) { 379 llvm::Value *Ptr = LV.getAddress(); 380 const llvm::Type *EltTy = 381 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 382 383 // Simple scalar l-value. 384 if (EltTy->isSingleValueType()) 385 return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(), 386 ExprType)); 387 388 assert(ExprType->isFunctionType() && "Unknown scalar value"); 389 return RValue::get(Ptr); 390 } 391 392 if (LV.isVectorElt()) { 393 llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(), 394 LV.isVolatileQualified(), "tmp"); 395 return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(), 396 "vecext")); 397 } 398 399 // If this is a reference to a subset of the elements of a vector, either 400 // shuffle the input or extract/insert them as appropriate. 401 if (LV.isExtVectorElt()) 402 return EmitLoadOfExtVectorElementLValue(LV, ExprType); 403 404 if (LV.isBitfield()) 405 return EmitLoadOfBitfieldLValue(LV, ExprType); 406 407 if (LV.isPropertyRef()) 408 return EmitLoadOfPropertyRefLValue(LV, ExprType); 409 410 assert(LV.isKVCRef() && "Unknown LValue type!"); 411 return EmitLoadOfKVCRefLValue(LV, ExprType); 412 } 413 414 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, 415 QualType ExprType) { 416 unsigned StartBit = LV.getBitfieldStartBit(); 417 unsigned BitfieldSize = LV.getBitfieldSize(); 418 llvm::Value *Ptr = LV.getBitfieldAddr(); 419 420 const llvm::Type *EltTy = 421 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 422 unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy); 423 424 // In some cases the bitfield may straddle two memory locations. Currently we 425 // load the entire bitfield, then do the magic to sign-extend it if 426 // necessary. This results in somewhat more code than necessary for the common 427 // case (one load), since two shifts accomplish both the masking and sign 428 // extension. 429 unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit); 430 llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp"); 431 432 // Shift to proper location. 
433 if (StartBit) 434 Val = Builder.CreateLShr(Val, StartBit, "bf.lo"); 435 436 // Mask off unused bits. 437 llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext, 438 llvm::APInt::getLowBitsSet(EltTySize, LowBits)); 439 Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared"); 440 441 // Fetch the high bits if necessary. 442 if (LowBits < BitfieldSize) { 443 unsigned HighBits = BitfieldSize - LowBits; 444 llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get( 445 llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); 446 llvm::Value *HighVal = Builder.CreateLoad(HighPtr, 447 LV.isVolatileQualified(), 448 "tmp"); 449 450 // Mask off unused bits. 451 llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext, 452 llvm::APInt::getLowBitsSet(EltTySize, HighBits)); 453 HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared"); 454 455 // Shift to proper location and or in to bitfield value. 456 HighVal = Builder.CreateShl(HighVal, LowBits); 457 Val = Builder.CreateOr(Val, HighVal, "bf.val"); 458 } 459 460 // Sign extend if necessary. 461 if (LV.isBitfieldSigned()) { 462 llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy, 463 EltTySize - BitfieldSize); 464 Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits), 465 ExtraBits, "bf.val.sext"); 466 } 467 468 // The bitfield type and the normal type differ when the storage sizes differ 469 // (currently just _Bool). 470 Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp"); 471 472 return RValue::get(Val); 473 } 474 475 RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV, 476 QualType ExprType) { 477 return EmitObjCPropertyGet(LV.getPropertyRefExpr()); 478 } 479 480 RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV, 481 QualType ExprType) { 482 return EmitObjCPropertyGet(LV.getKVCRefExpr()); 483 } 484 485 // If this is a reference to a subset of the elements of a vector, create an 486 // appropriate shufflevector. 487 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, 488 QualType ExprType) { 489 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(), 490 LV.isVolatileQualified(), "tmp"); 491 492 const llvm::Constant *Elts = LV.getExtVectorElts(); 493 494 // If the result of the expression is a non-vector type, we must be extracting 495 // a single element. Just codegen as an extractelement. 496 const VectorType *ExprVT = ExprType->getAs<VectorType>(); 497 if (!ExprVT) { 498 unsigned InIdx = getAccessedFieldNo(0, Elts); 499 llvm::Value *Elt = llvm::ConstantInt::get( 500 llvm::Type::getInt32Ty(VMContext), InIdx); 501 return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp")); 502 } 503 504 // Always use shuffle vector to try to retain the original program structure 505 unsigned NumResultElts = ExprVT->getNumElements(); 506 507 llvm::SmallVector<llvm::Constant*, 4> Mask; 508 for (unsigned i = 0; i != NumResultElts; ++i) { 509 unsigned InIdx = getAccessedFieldNo(i, Elts); 510 Mask.push_back(llvm::ConstantInt::get( 511 llvm::Type::getInt32Ty(VMContext), InIdx)); 512 } 513 514 llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); 515 Vec = Builder.CreateShuffleVector(Vec, 516 llvm::UndefValue::get(Vec->getType()), 517 MaskV, "tmp"); 518 return RValue::get(Vec); 519 } 520 521 522 523 /// EmitStoreThroughLValue - Store the specified rvalue into the specified 524 /// lvalue, where both are guaranteed to the have the same type, and that type 525 /// is 'Ty'. 
526 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, 527 QualType Ty) { 528 if (!Dst.isSimple()) { 529 if (Dst.isVectorElt()) { 530 // Read/modify/write the vector, inserting the new element. 531 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(), 532 Dst.isVolatileQualified(), "tmp"); 533 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), 534 Dst.getVectorIdx(), "vecins"); 535 Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified()); 536 return; 537 } 538 539 // If this is an update of extended vector elements, insert them as 540 // appropriate. 541 if (Dst.isExtVectorElt()) 542 return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty); 543 544 if (Dst.isBitfield()) 545 return EmitStoreThroughBitfieldLValue(Src, Dst, Ty); 546 547 if (Dst.isPropertyRef()) 548 return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty); 549 550 assert(Dst.isKVCRef() && "Unknown LValue type"); 551 return EmitStoreThroughKVCRefLValue(Src, Dst, Ty); 552 } 553 554 if (Dst.isObjCWeak() && !Dst.isNonGC()) { 555 // load of a __weak object. 556 llvm::Value *LvalueDst = Dst.getAddress(); 557 llvm::Value *src = Src.getScalarVal(); 558 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); 559 return; 560 } 561 562 if (Dst.isObjCStrong() && !Dst.isNonGC()) { 563 // load of a __strong object. 564 llvm::Value *LvalueDst = Dst.getAddress(); 565 llvm::Value *src = Src.getScalarVal(); 566 if (Dst.isObjCIvar()) { 567 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); 568 const llvm::Type *ResultType = ConvertType(getContext().LongTy); 569 llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp()); 570 llvm::Value *dst = RHS; 571 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); 572 llvm::Value *LHS = 573 Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast"); 574 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); 575 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, 576 BytesBetween); 577 } else if (Dst.isGlobalObjCRef()) 578 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst); 579 else 580 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); 581 return; 582 } 583 584 assert(Src.isScalar() && "Can't emit an agg store with this method"); 585 EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(), 586 Dst.isVolatileQualified(), Ty); 587 } 588 589 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, 590 QualType Ty, 591 llvm::Value **Result) { 592 unsigned StartBit = Dst.getBitfieldStartBit(); 593 unsigned BitfieldSize = Dst.getBitfieldSize(); 594 llvm::Value *Ptr = Dst.getBitfieldAddr(); 595 596 const llvm::Type *EltTy = 597 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 598 unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy); 599 600 // Get the new value, cast to the appropriate type and masked to exactly the 601 // size of the bit-field. 602 llvm::Value *SrcVal = Src.getScalarVal(); 603 llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp"); 604 llvm::Constant *Mask = llvm::ConstantInt::get(VMContext, 605 llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize)); 606 NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value"); 607 608 // Return the new value of the bit-field, if requested. 609 if (Result) { 610 // Cast back to the proper type for result. 
611 const llvm::Type *SrcTy = SrcVal->getType(); 612 llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false, 613 "bf.reload.val"); 614 615 // Sign extend if necessary. 616 if (Dst.isBitfieldSigned()) { 617 unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy); 618 llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy, 619 SrcTySize - BitfieldSize); 620 SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits), 621 ExtraBits, "bf.reload.sext"); 622 } 623 624 *Result = SrcTrunc; 625 } 626 627 // In some cases the bitfield may straddle two memory locations. Emit the low 628 // part first and check to see if the high needs to be done. 629 unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit); 630 llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), 631 "bf.prev.low"); 632 633 // Compute the mask for zero-ing the low part of this bitfield. 634 llvm::Constant *InvMask = 635 llvm::ConstantInt::get(VMContext, 636 ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits)); 637 638 // Compute the new low part as 639 // LowVal = (LowVal & InvMask) | (NewVal << StartBit), 640 // with the shift of NewVal implicitly stripping the high bits. 641 llvm::Value *NewLowVal = 642 Builder.CreateShl(NewVal, StartBit, "bf.value.lo"); 643 LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared"); 644 LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo"); 645 646 // Write back. 647 Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified()); 648 649 // If the low part doesn't cover the bitfield emit a high part. 650 if (LowBits < BitfieldSize) { 651 unsigned HighBits = BitfieldSize - LowBits; 652 llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get( 653 llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); 654 llvm::Value *HighVal = Builder.CreateLoad(HighPtr, 655 Dst.isVolatileQualified(), 656 "bf.prev.hi"); 657 658 // Compute the mask for zero-ing the high part of this bitfield. 659 llvm::Constant *InvMask = 660 llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize, 661 HighBits)); 662 663 // Compute the new high part as 664 // HighVal = (HighVal & InvMask) | (NewVal lshr LowBits), 665 // where the high bits of NewVal have already been cleared and the 666 // shift stripping the low bits. 667 llvm::Value *NewHighVal = 668 Builder.CreateLShr(NewVal, LowBits, "bf.value.high"); 669 HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared"); 670 HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi"); 671 672 // Write back. 673 Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified()); 674 } 675 } 676 677 void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src, 678 LValue Dst, 679 QualType Ty) { 680 EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src); 681 } 682 683 void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src, 684 LValue Dst, 685 QualType Ty) { 686 EmitObjCPropertySet(Dst.getKVCRefExpr(), Src); 687 } 688 689 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, 690 LValue Dst, 691 QualType Ty) { 692 // This access turns into a read/modify/write of the vector. Load the input 693 // value now. 
694 llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(), 695 Dst.isVolatileQualified(), "tmp"); 696 const llvm::Constant *Elts = Dst.getExtVectorElts(); 697 698 llvm::Value *SrcVal = Src.getScalarVal(); 699 700 if (const VectorType *VTy = Ty->getAs<VectorType>()) { 701 unsigned NumSrcElts = VTy->getNumElements(); 702 unsigned NumDstElts = 703 cast<llvm::VectorType>(Vec->getType())->getNumElements(); 704 if (NumDstElts == NumSrcElts) { 705 // Use shuffle vector is the src and destination are the same number of 706 // elements and restore the vector mask since it is on the side it will be 707 // stored. 708 llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts); 709 for (unsigned i = 0; i != NumSrcElts; ++i) { 710 unsigned InIdx = getAccessedFieldNo(i, Elts); 711 Mask[InIdx] = llvm::ConstantInt::get( 712 llvm::Type::getInt32Ty(VMContext), i); 713 } 714 715 llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); 716 Vec = Builder.CreateShuffleVector(SrcVal, 717 llvm::UndefValue::get(Vec->getType()), 718 MaskV, "tmp"); 719 } else if (NumDstElts > NumSrcElts) { 720 // Extended the source vector to the same length and then shuffle it 721 // into the destination. 722 // FIXME: since we're shuffling with undef, can we just use the indices 723 // into that? This could be simpler. 724 llvm::SmallVector<llvm::Constant*, 4> ExtMask; 725 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 726 unsigned i; 727 for (i = 0; i != NumSrcElts; ++i) 728 ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 729 for (; i != NumDstElts; ++i) 730 ExtMask.push_back(llvm::UndefValue::get(Int32Ty)); 731 llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0], 732 ExtMask.size()); 733 llvm::Value *ExtSrcVal = 734 Builder.CreateShuffleVector(SrcVal, 735 llvm::UndefValue::get(SrcVal->getType()), 736 ExtMaskV, "tmp"); 737 // build identity 738 llvm::SmallVector<llvm::Constant*, 4> Mask; 739 for (unsigned i = 0; i != NumDstElts; ++i) 740 Mask.push_back(llvm::ConstantInt::get(Int32Ty, i)); 741 742 // modify when what gets shuffled in 743 for (unsigned i = 0; i != NumSrcElts; ++i) { 744 unsigned Idx = getAccessedFieldNo(i, Elts); 745 Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts); 746 } 747 llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); 748 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp"); 749 } else { 750 // We should never shorten the vector 751 assert(0 && "unexpected shorten vector length"); 752 } 753 } else { 754 // If the Src is a scalar (not a vector) it must be updating one element. 755 unsigned InIdx = getAccessedFieldNo(0, Elts); 756 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 757 llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx); 758 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); 759 } 760 761 Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified()); 762 } 763 764 // setObjCGCLValueClass - sets class of he lvalue for the purpose of 765 // generating write-barries API. It is currently a global, ivar, 766 // or neither. 
767 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, 768 LValue &LV) { 769 if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC) 770 return; 771 772 if (isa<ObjCIvarRefExpr>(E)) { 773 LV.SetObjCIvar(LV, true); 774 ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E)); 775 LV.setBaseIvarExp(Exp->getBase()); 776 LV.SetObjCArray(LV, E->getType()->isArrayType()); 777 return; 778 } 779 780 if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) { 781 if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) { 782 if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) || 783 VD->isFileVarDecl()) 784 LV.SetGlobalObjCRef(LV, true); 785 } 786 LV.SetObjCArray(LV, E->getType()->isArrayType()); 787 return; 788 } 789 790 if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) { 791 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV); 792 return; 793 } 794 795 if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) { 796 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV); 797 if (LV.isObjCIvar()) { 798 // If cast is to a structure pointer, follow gcc's behavior and make it 799 // a non-ivar write-barrier. 800 QualType ExpTy = E->getType(); 801 if (ExpTy->isPointerType()) 802 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType(); 803 if (ExpTy->isRecordType()) 804 LV.SetObjCIvar(LV, false); 805 } 806 return; 807 } 808 if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) { 809 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV); 810 return; 811 } 812 813 if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) { 814 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV); 815 return; 816 } 817 818 if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) { 819 setObjCGCLValueClass(Ctx, Exp->getBase(), LV); 820 if (LV.isObjCIvar() && !LV.isObjCArray()) 821 // Using array syntax to assigning to what an ivar points to is not 822 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0; 823 LV.SetObjCIvar(LV, false); 824 else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) 825 // Using array syntax to assigning to what global points to is not 826 // same as assigning to the global itself. {id *G;} G[i] = 0; 827 LV.SetGlobalObjCRef(LV, false); 828 return; 829 } 830 831 if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) { 832 setObjCGCLValueClass(Ctx, Exp->getBase(), LV); 833 // We don't know if member is an 'ivar', but this flag is looked at 834 // only in the context of LV.isObjCIvar(). 835 LV.SetObjCArray(LV, E->getType()->isArrayType()); 836 return; 837 } 838 } 839 840 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, 841 const Expr *E, const VarDecl *VD) { 842 assert((VD->hasExternalStorage() || VD->isFileVarDecl()) && 843 "Var decl must have external storage or be a file var decl!"); 844 845 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); 846 if (VD->getType()->isReferenceType()) 847 V = CGF.Builder.CreateLoad(V, "tmp"); 848 LValue LV = LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType())); 849 setObjCGCLValueClass(CGF.getContext(), E, LV); 850 return LV; 851 } 852 853 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, 854 const Expr *E, const FunctionDecl *FD) { 855 llvm::Value* V = CGF.CGM.GetAddrOfFunction(FD); 856 if (!FD->hasPrototype()) { 857 if (const FunctionProtoType *Proto = 858 FD->getType()->getAs<FunctionProtoType>()) { 859 // Ugly case: for a K&R-style definition, the type of the definition 860 // isn't the same as the type of a use. Correct for this with a 861 // bitcast. 
862 QualType NoProtoType = 863 CGF.getContext().getFunctionNoProtoType(Proto->getResultType()); 864 NoProtoType = CGF.getContext().getPointerType(NoProtoType); 865 V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp"); 866 } 867 } 868 return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType())); 869 } 870 871 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 872 const NamedDecl *ND = E->getDecl(); 873 874 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { 875 876 // Check if this is a global variable. 877 if (VD->hasExternalStorage() || VD->isFileVarDecl()) 878 return EmitGlobalVarDeclLValue(*this, E, VD); 879 880 bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>(); 881 882 llvm::Value *V = LocalDeclMap[VD]; 883 assert(V && "DeclRefExpr not entered in LocalDeclMap?"); 884 885 Qualifiers Quals = MakeQualifiers(E->getType()); 886 // local variables do not get their gc attribute set. 887 // local static? 888 if (NonGCable) Quals.removeObjCGCAttr(); 889 890 if (VD->hasAttr<BlocksAttr>()) { 891 V = Builder.CreateStructGEP(V, 1, "forwarding"); 892 V = Builder.CreateLoad(V); 893 V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD), 894 VD->getNameAsString()); 895 } 896 if (VD->getType()->isReferenceType()) 897 V = Builder.CreateLoad(V, "tmp"); 898 LValue LV = LValue::MakeAddr(V, Quals); 899 LValue::SetObjCNonGC(LV, NonGCable); 900 setObjCGCLValueClass(getContext(), E, LV); 901 return LV; 902 } 903 904 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 905 return EmitFunctionDeclLValue(*this, E, FD); 906 907 if (E->getQualifier()) { 908 // FIXME: the qualifier check does not seem sufficient here 909 return EmitPointerToDataMemberLValue(cast<FieldDecl>(ND)); 910 } 911 912 assert(false && "Unhandled DeclRefExpr"); 913 914 // an invalid LValue, but the assert will 915 // ensure that this point is never reached. 916 return LValue(); 917 } 918 919 LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) { 920 return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType())); 921 } 922 923 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 924 // __extension__ doesn't affect lvalue-ness. 925 if (E->getOpcode() == UnaryOperator::Extension) 926 return EmitLValue(E->getSubExpr()); 927 928 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 929 switch (E->getOpcode()) { 930 default: assert(0 && "Unknown unary operator lvalue!"); 931 case UnaryOperator::Deref: { 932 QualType T = E->getSubExpr()->getType()->getPointeeType(); 933 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 934 935 Qualifiers Quals = MakeQualifiers(T); 936 Quals.setAddressSpace(ExprTy.getAddressSpace()); 937 938 LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals); 939 // We should not generate __weak write barrier on indirect reference 940 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 941 // But, we continue to generate __strong write barrier on indirect write 942 // into a pointer to object. 
943 if (getContext().getLangOptions().ObjC1 && 944 getContext().getLangOptions().getGCMode() != LangOptions::NonGC && 945 LV.isObjCWeak()) 946 LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext())); 947 return LV; 948 } 949 case UnaryOperator::Real: 950 case UnaryOperator::Imag: { 951 LValue LV = EmitLValue(E->getSubExpr()); 952 unsigned Idx = E->getOpcode() == UnaryOperator::Imag; 953 return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(), 954 Idx, "idx"), 955 MakeQualifiers(ExprTy)); 956 } 957 case UnaryOperator::PreInc: 958 case UnaryOperator::PreDec: 959 return EmitUnsupportedLValue(E, "pre-inc/dec expression"); 960 } 961 } 962 963 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 964 return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 965 Qualifiers()); 966 } 967 968 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 969 return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 970 Qualifiers()); 971 } 972 973 974 LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) { 975 std::string GlobalVarName; 976 977 switch (Type) { 978 default: assert(0 && "Invalid type"); 979 case PredefinedExpr::Func: 980 GlobalVarName = "__func__."; 981 break; 982 case PredefinedExpr::Function: 983 GlobalVarName = "__FUNCTION__."; 984 break; 985 case PredefinedExpr::PrettyFunction: 986 GlobalVarName = "__PRETTY_FUNCTION__."; 987 break; 988 } 989 990 llvm::StringRef FnName = CurFn->getName(); 991 if (FnName.startswith("\01")) 992 FnName = FnName.substr(1); 993 GlobalVarName += FnName; 994 995 std::string FunctionName = 996 PredefinedExpr::ComputeName(getContext(), (PredefinedExpr::IdentType)Type, 997 CurCodeDecl); 998 999 llvm::Constant *C = 1000 CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str()); 1001 return LValue::MakeAddr(C, Qualifiers()); 1002 } 1003 1004 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 1005 switch (E->getIdentType()) { 1006 default: 1007 return EmitUnsupportedLValue(E, "predefined expression"); 1008 case PredefinedExpr::Func: 1009 case PredefinedExpr::Function: 1010 case PredefinedExpr::PrettyFunction: 1011 return EmitPredefinedFunctionName(E->getIdentType()); 1012 } 1013 } 1014 1015 static llvm::Constant *getAbortFn(CodeGenFunction &CGF) { 1016 // void abort(); 1017 1018 const llvm::FunctionType *FTy = 1019 llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false); 1020 1021 return CGF.CGM.CreateRuntimeFunction(FTy, "abort"); 1022 } 1023 1024 llvm::BasicBlock*CodeGenFunction::getAbortBB() { 1025 if (AbortBB) 1026 return AbortBB; 1027 1028 llvm::BasicBlock *Cont = 0; 1029 if (HaveInsertPoint()) { 1030 Cont = createBasicBlock("cont"); 1031 EmitBranch(Cont); 1032 } 1033 AbortBB = createBasicBlock("abort"); 1034 EmitBlock(AbortBB); 1035 llvm::CallInst *AbortCall = Builder.CreateCall(getAbortFn(*this)); 1036 AbortCall->setDoesNotReturn(); 1037 AbortCall->setDoesNotThrow(); 1038 Builder.CreateUnreachable(); 1039 1040 if (Cont) 1041 EmitBlock(Cont); 1042 return AbortBB; 1043 } 1044 1045 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { 1046 // The index must always be an integer, which is not an aggregate. Emit it. 1047 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 1048 QualType IdxTy = E->getIdx()->getType(); 1049 bool IdxSigned = IdxTy->isSignedIntegerType(); 1050 1051 // If the base is a vector type, then we are forming a vector element lvalue 1052 // with this subscript. 
1053 if (E->getBase()->getType()->isVectorType()) { 1054 // Emit the vector as an lvalue to get its address. 1055 LValue LHS = EmitLValue(E->getBase()); 1056 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 1057 Idx = Builder.CreateIntCast(Idx, 1058 llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx"); 1059 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 1060 E->getBase()->getType().getCVRQualifiers()); 1061 } 1062 1063 // The base must be a pointer, which is not an aggregate. Emit it. 1064 llvm::Value *Base = EmitScalarExpr(E->getBase()); 1065 1066 // Extend or truncate the index type to 32 or 64-bits. 1067 unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); 1068 if (IdxBitwidth != LLVMPointerWidth) 1069 Idx = Builder.CreateIntCast(Idx, 1070 llvm::IntegerType::get(VMContext, LLVMPointerWidth), 1071 IdxSigned, "idxprom"); 1072 1073 if (CatchUndefined) { 1074 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())) { 1075 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) { 1076 if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) { 1077 if (const ConstantArrayType *CAT 1078 = getContext().getAsConstantArrayType(DRE->getType())) { 1079 llvm::APInt Size = CAT->getSize(); 1080 llvm::BasicBlock *Cont = createBasicBlock("cont"); 1081 if (IdxSigned) { 1082 Builder.CreateCondBr(Builder.CreateICmpSGE(Idx, 1083 llvm::ConstantInt::get(Idx->getType(), 0)), 1084 Cont, getAbortBB()); 1085 EmitBlock(Cont); 1086 Cont = createBasicBlock("cont"); 1087 Builder.CreateCondBr(Builder.CreateICmpSLT(Idx, 1088 llvm::ConstantInt::get(Idx->getType(), Size)), 1089 Cont, getAbortBB()); 1090 EmitBlock(Cont); 1091 } else { 1092 llvm::BasicBlock *Cont = createBasicBlock("cont"); 1093 Builder.CreateCondBr(Builder.CreateICmpULT(Idx, 1094 llvm::ConstantInt::get(Idx->getType(), Size)), 1095 Cont, getAbortBB()); 1096 EmitBlock(Cont); 1097 } 1098 } 1099 } 1100 } 1101 } 1102 } 1103 1104 // We know that the pointer points to a type of the correct size, unless the 1105 // size is a VLA or Objective-C interface. 
1106 llvm::Value *Address = 0; 1107 if (const VariableArrayType *VAT = 1108 getContext().getAsVariableArrayType(E->getType())) { 1109 llvm::Value *VLASize = GetVLASize(VAT); 1110 1111 Idx = Builder.CreateMul(Idx, VLASize); 1112 1113 QualType BaseType = getContext().getBaseElementType(VAT); 1114 1115 uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8; 1116 Idx = Builder.CreateUDiv(Idx, 1117 llvm::ConstantInt::get(Idx->getType(), 1118 BaseTypeSize)); 1119 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 1120 } else if (const ObjCInterfaceType *OIT = 1121 dyn_cast<ObjCInterfaceType>(E->getType())) { 1122 llvm::Value *InterfaceSize = 1123 llvm::ConstantInt::get(Idx->getType(), 1124 getContext().getTypeSize(OIT) / 8); 1125 1126 Idx = Builder.CreateMul(Idx, InterfaceSize); 1127 1128 const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext); 1129 Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy), 1130 Idx, "arrayidx"); 1131 Address = Builder.CreateBitCast(Address, Base->getType()); 1132 } else { 1133 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 1134 } 1135 1136 QualType T = E->getBase()->getType()->getPointeeType(); 1137 assert(!T.isNull() && 1138 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); 1139 1140 Qualifiers Quals = MakeQualifiers(T); 1141 Quals.setAddressSpace(E->getBase()->getType().getAddressSpace()); 1142 1143 LValue LV = LValue::MakeAddr(Address, Quals); 1144 if (getContext().getLangOptions().ObjC1 && 1145 getContext().getLangOptions().getGCMode() != LangOptions::NonGC) { 1146 LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext())); 1147 setObjCGCLValueClass(getContext(), E, LV); 1148 } 1149 return LV; 1150 } 1151 1152 static 1153 llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext, 1154 llvm::SmallVector<unsigned, 4> &Elts) { 1155 llvm::SmallVector<llvm::Constant *, 4> CElts; 1156 1157 for (unsigned i = 0, e = Elts.size(); i != e; ++i) 1158 CElts.push_back(llvm::ConstantInt::get( 1159 llvm::Type::getInt32Ty(VMContext), Elts[i])); 1160 1161 return llvm::ConstantVector::get(&CElts[0], CElts.size()); 1162 } 1163 1164 LValue CodeGenFunction:: 1165 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { 1166 // Emit the base vector as an l-value. 1167 LValue Base; 1168 1169 // ExtVectorElementExpr's base can either be a vector or pointer to vector. 1170 if (!E->isArrow()) { 1171 assert(E->getBase()->getType()->isVectorType()); 1172 Base = EmitLValue(E->getBase()); 1173 } else { 1174 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>(); 1175 llvm::Value *Ptr = EmitScalarExpr(E->getBase()); 1176 Qualifiers Quals = MakeQualifiers(PT->getPointeeType()); 1177 Quals.removeObjCGCAttr(); 1178 Base = LValue::MakeAddr(Ptr, Quals); 1179 } 1180 1181 // Encode the element access list into a vector of unsigned indices. 
1182 llvm::SmallVector<unsigned, 4> Indices; 1183 E->getEncodedElementAccess(Indices); 1184 1185 if (Base.isSimple()) { 1186 llvm::Constant *CV = GenerateConstantVector(VMContext, Indices); 1187 return LValue::MakeExtVectorElt(Base.getAddress(), CV, 1188 Base.getVRQualifiers()); 1189 } 1190 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 1191 1192 llvm::Constant *BaseElts = Base.getExtVectorElts(); 1193 llvm::SmallVector<llvm::Constant *, 4> CElts; 1194 1195 const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext); 1196 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 1197 if (isa<llvm::ConstantAggregateZero>(BaseElts)) 1198 CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0)); 1199 else 1200 CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i]))); 1201 } 1202 llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size()); 1203 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, 1204 Base.getVRQualifiers()); 1205 } 1206 1207 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 1208 bool isUnion = false; 1209 bool isNonGC = false; 1210 Expr *BaseExpr = E->getBase(); 1211 llvm::Value *BaseValue = NULL; 1212 Qualifiers BaseQuals; 1213 1214 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 1215 if (E->isArrow()) { 1216 BaseValue = EmitScalarExpr(BaseExpr); 1217 const PointerType *PTy = 1218 BaseExpr->getType()->getAs<PointerType>(); 1219 if (PTy->getPointeeType()->isUnionType()) 1220 isUnion = true; 1221 BaseQuals = PTy->getPointeeType().getQualifiers(); 1222 } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) || 1223 isa<ObjCImplicitSetterGetterRefExpr>( 1224 BaseExpr->IgnoreParens())) { 1225 RValue RV = EmitObjCPropertyGet(BaseExpr); 1226 BaseValue = RV.getAggregateAddr(); 1227 if (BaseExpr->getType()->isUnionType()) 1228 isUnion = true; 1229 BaseQuals = BaseExpr->getType().getQualifiers(); 1230 } else { 1231 LValue BaseLV = EmitLValue(BaseExpr); 1232 if (BaseLV.isNonGC()) 1233 isNonGC = true; 1234 // FIXME: this isn't right for bitfields. 1235 BaseValue = BaseLV.getAddress(); 1236 QualType BaseTy = BaseExpr->getType(); 1237 if (BaseTy->isUnionType()) 1238 isUnion = true; 1239 BaseQuals = BaseTy.getQualifiers(); 1240 } 1241 1242 NamedDecl *ND = E->getMemberDecl(); 1243 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 1244 LValue LV = EmitLValueForField(BaseValue, Field, isUnion, 1245 BaseQuals.getCVRQualifiers()); 1246 LValue::SetObjCNonGC(LV, isNonGC); 1247 setObjCGCLValueClass(getContext(), E, LV); 1248 return LV; 1249 } 1250 1251 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 1252 return EmitGlobalVarDeclLValue(*this, E, VD); 1253 1254 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 1255 return EmitFunctionDeclLValue(*this, E, FD); 1256 1257 assert(false && "Unhandled member declaration!"); 1258 return LValue(); 1259 } 1260 1261 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue, 1262 const FieldDecl* Field, 1263 unsigned CVRQualifiers) { 1264 CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field); 1265 1266 // FIXME: CodeGenTypes should expose a method to get the appropriate type for 1267 // FieldTy (the appropriate type is ABI-dependent). 
1268 const llvm::Type *FieldTy = 1269 CGM.getTypes().ConvertTypeForMem(Field->getType()); 1270 const llvm::PointerType *BaseTy = 1271 cast<llvm::PointerType>(BaseValue->getType()); 1272 unsigned AS = BaseTy->getAddressSpace(); 1273 BaseValue = Builder.CreateBitCast(BaseValue, 1274 llvm::PointerType::get(FieldTy, AS), 1275 "tmp"); 1276 1277 llvm::Value *Idx = 1278 llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo); 1279 llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp"); 1280 1281 return LValue::MakeBitfield(V, Info.Start, Info.Size, 1282 Field->getType()->isSignedIntegerType(), 1283 Field->getType().getCVRQualifiers()|CVRQualifiers); 1284 } 1285 1286 LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue, 1287 const FieldDecl* Field, 1288 bool isUnion, 1289 unsigned CVRQualifiers) { 1290 if (Field->isBitField()) 1291 return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers); 1292 1293 unsigned idx = CGM.getTypes().getLLVMFieldNo(Field); 1294 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp"); 1295 1296 // Match union field type. 1297 if (isUnion) { 1298 const llvm::Type *FieldTy = 1299 CGM.getTypes().ConvertTypeForMem(Field->getType()); 1300 const llvm::PointerType * BaseTy = 1301 cast<llvm::PointerType>(BaseValue->getType()); 1302 unsigned AS = BaseTy->getAddressSpace(); 1303 V = Builder.CreateBitCast(V, 1304 llvm::PointerType::get(FieldTy, AS), 1305 "tmp"); 1306 } 1307 if (Field->getType()->isReferenceType()) 1308 V = Builder.CreateLoad(V, "tmp"); 1309 1310 Qualifiers Quals = MakeQualifiers(Field->getType()); 1311 Quals.addCVRQualifiers(CVRQualifiers); 1312 // __weak attribute on a field is ignored. 1313 if (Quals.getObjCGCAttr() == Qualifiers::Weak) 1314 Quals.removeObjCGCAttr(); 1315 1316 return LValue::MakeAddr(V, Quals); 1317 } 1318 1319 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){ 1320 const llvm::Type *LTy = ConvertType(E->getType()); 1321 llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral"); 1322 1323 const Expr* InitExpr = E->getInitializer(); 1324 LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType())); 1325 1326 if (E->getType()->isComplexType()) 1327 EmitComplexExprIntoAddr(InitExpr, DeclPtr, false); 1328 else if (hasAggregateLLVMType(E->getType())) 1329 EmitAnyExpr(InitExpr, DeclPtr, false); 1330 else 1331 EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType()); 1332 1333 return Result; 1334 } 1335 1336 LValue 1337 CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) { 1338 if (E->isLvalue(getContext()) == Expr::LV_Valid) { 1339 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true"); 1340 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false"); 1341 llvm::BasicBlock *ContBlock = createBasicBlock("cond.end"); 1342 1343 llvm::Value *Cond = EvaluateExprAsBool(E->getCond()); 1344 Builder.CreateCondBr(Cond, LHSBlock, RHSBlock); 1345 1346 EmitBlock(LHSBlock); 1347 1348 LValue LHS = EmitLValue(E->getLHS()); 1349 if (!LHS.isSimple()) 1350 return EmitUnsupportedLValue(E, "conditional operator"); 1351 1352 llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp"); 1353 Builder.CreateStore(LHS.getAddress(), Temp); 1354 EmitBranch(ContBlock); 1355 1356 EmitBlock(RHSBlock); 1357 LValue RHS = EmitLValue(E->getRHS()); 1358 if (!RHS.isSimple()) 1359 return EmitUnsupportedLValue(E, "conditional operator"); 1360 1361 Builder.CreateStore(RHS.getAddress(), Temp); 1362 EmitBranch(ContBlock); 1363 1364 
EmitBlock(ContBlock); 1365 1366 Temp = Builder.CreateLoad(Temp, "lv"); 1367 return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); 1368 } 1369 1370 // ?: here should be an aggregate. 1371 assert((hasAggregateLLVMType(E->getType()) && 1372 !E->getType()->isAnyComplexType()) && 1373 "Unexpected conditional operator!"); 1374 1375 llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType())); 1376 EmitAggExpr(E, Temp, false); 1377 1378 return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); 1379 } 1380 1381 /// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast. 1382 /// If the cast is a dynamic_cast, we can have the usual lvalue result, 1383 /// otherwise if a cast is needed by the code generator in an lvalue context, 1384 /// then it must mean that we need the address of an aggregate in order to 1385 /// access one of its fields. This can happen for all the reasons that casts 1386 /// are permitted with aggregate result, including noop aggregate casts, and 1387 /// cast from scalar to union. 1388 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 1389 switch (E->getCastKind()) { 1390 default: 1391 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 1392 1393 case CastExpr::CK_Dynamic: { 1394 LValue LV = EmitLValue(E->getSubExpr()); 1395 llvm::Value *V = LV.getAddress(); 1396 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E); 1397 return LValue::MakeAddr(EmitDynamicCast(V, DCE), 1398 MakeQualifiers(E->getType())); 1399 } 1400 1401 case CastExpr::CK_NoOp: 1402 case CastExpr::CK_ConstructorConversion: 1403 case CastExpr::CK_UserDefinedConversion: 1404 return EmitLValue(E->getSubExpr()); 1405 1406 case CastExpr::CK_DerivedToBase: { 1407 const RecordType *DerivedClassTy = 1408 E->getSubExpr()->getType()->getAs<RecordType>(); 1409 CXXRecordDecl *DerivedClassDecl = 1410 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 1411 1412 const RecordType *BaseClassTy = E->getType()->getAs<RecordType>(); 1413 CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl()); 1414 1415 LValue LV = EmitLValue(E->getSubExpr()); 1416 1417 // Perform the derived-to-base conversion 1418 llvm::Value *Base = 1419 GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl, 1420 BaseClassDecl, /*NullCheckValue=*/false); 1421 1422 return LValue::MakeAddr(Base, MakeQualifiers(E->getType())); 1423 } 1424 case CastExpr::CK_ToUnion: { 1425 llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType())); 1426 EmitAnyExpr(E->getSubExpr(), Temp, false); 1427 1428 return LValue::MakeAddr(Temp, MakeQualifiers(E->getType())); 1429 } 1430 case CastExpr::CK_BaseToDerived: { 1431 const RecordType *BaseClassTy = 1432 E->getSubExpr()->getType()->getAs<RecordType>(); 1433 CXXRecordDecl *BaseClassDecl = 1434 cast<CXXRecordDecl>(BaseClassTy->getDecl()); 1435 1436 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 1437 CXXRecordDecl *DerivedClassDecl = 1438 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 1439 1440 LValue LV = EmitLValue(E->getSubExpr()); 1441 1442 // Perform the base-to-derived conversion 1443 llvm::Value *Derived = 1444 GetAddressOfDerivedClass(LV.getAddress(), BaseClassDecl, 1445 DerivedClassDecl, /*NullCheckValue=*/false); 1446 1447 return LValue::MakeAddr(Derived, MakeQualifiers(E->getType())); 1448 } 1449 case CastExpr::CK_BitCast: { 1450 // This must be a reinterpret_cast (or c-style equivalent). 
1451 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 1452 1453 LValue LV = EmitLValue(E->getSubExpr()); 1454 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 1455 ConvertType(CE->getTypeAsWritten())); 1456 return LValue::MakeAddr(V, MakeQualifiers(E->getType())); 1457 } 1458 } 1459 } 1460 1461 LValue CodeGenFunction::EmitNullInitializationLValue( 1462 const CXXZeroInitValueExpr *E) { 1463 QualType Ty = E->getType(); 1464 const llvm::Type *LTy = ConvertTypeForMem(Ty); 1465 llvm::AllocaInst *Alloc = CreateTempAlloca(LTy); 1466 unsigned Align = getContext().getTypeAlign(Ty)/8; 1467 Alloc->setAlignment(Align); 1468 LValue lvalue = LValue::MakeAddr(Alloc, Qualifiers()); 1469 EmitMemSetToZero(lvalue.getAddress(), Ty); 1470 return lvalue; 1471 } 1472 1473 //===--------------------------------------------------------------------===// 1474 // Expression Emission 1475 //===--------------------------------------------------------------------===// 1476 1477 1478 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) { 1479 // Builtins never have block type. 1480 if (E->getCallee()->getType()->isBlockPointerType()) 1481 return EmitBlockCallExpr(E); 1482 1483 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 1484 return EmitCXXMemberCallExpr(CE); 1485 1486 const Decl *TargetDecl = 0; 1487 if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) { 1488 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) { 1489 TargetDecl = DRE->getDecl(); 1490 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl)) 1491 if (unsigned builtinID = FD->getBuiltinID()) 1492 return EmitBuiltinExpr(FD, builtinID, E); 1493 } 1494 } 1495 1496 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 1497 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 1498 return EmitCXXOperatorMemberCallExpr(CE, MD); 1499 1500 if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 1501 // C++ [expr.pseudo]p1: 1502 // The result shall only be used as the operand for the function call 1503 // operator (), and the result of such a call has type void. The only 1504 // effect is the evaluation of the postfix-expression before the dot or 1505 // arrow. 1506 EmitScalarExpr(E->getCallee()); 1507 return RValue::get(0); 1508 } 1509 1510 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 1511 return EmitCall(Callee, E->getCallee()->getType(), 1512 E->arg_begin(), E->arg_end(), TargetDecl); 1513 } 1514 1515 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 1516 // Comma expressions just emit their LHS then their RHS as an l-value. 1517 if (E->getOpcode() == BinaryOperator::Comma) { 1518 EmitAnyExpr(E->getLHS()); 1519 EnsureInsertPoint(); 1520 return EmitLValue(E->getRHS()); 1521 } 1522 1523 if (E->getOpcode() == BinaryOperator::PtrMemD || 1524 E->getOpcode() == BinaryOperator::PtrMemI) 1525 return EmitPointerToDataMemberBinaryExpr(E); 1526 1527 // Can only get l-value for binary operator expressions which are a 1528 // simple assignment of aggregate type. 1529 if (E->getOpcode() != BinaryOperator::Assign) 1530 return EmitUnsupportedLValue(E, "binary l-value expression"); 1531 1532 if (!hasAggregateLLVMType(E->getType())) { 1533 // Emit the LHS as an l-value. 
    LValue LV = EmitLValue(E->getLHS());

    llvm::Value *RHS = EmitScalarExpr(E->getRHS());
    EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
                      E->getType());
    return LV;
  }

  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  // FIXME: Are these qualifiers correct?
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return LValue::MakeAddr(RV.getAggregateAddr(),
                            MakeQualifiers(E->getType()));

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
  EmitAggExpr(E, Temp, false);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp");
  EmitCXXConstructExpr(Temp, E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  llvm::Value *Temp = EmitCXXTypeidExpr(E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  LValue LV = EmitLValue(E->getSubExpr());
  PushCXXTemporary(E->getTemporary(), LV.getAddress());
  return LV;
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get l-value for message expression returning aggregate type
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
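    // Added note (not from the original source): when the base is not a
    // pointer we emit it as an lvalue and take its address directly; per the
    // FIXME above this is not right if the base lvalue is a bit-field, since
    // a bit-field has no directly addressable storage for getAddress() to
    // return.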
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue CodeGenFunction::EmitObjCKVCRefLValue(
                                const ObjCImplicitSetterGetterRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
  return EmitUnsupportedLValue(E, "use of super");
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for a statement expression returning aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}


LValue CodeGenFunction::EmitPointerToDataMemberLValue(const FieldDecl *Field) {
  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Field->getDeclContext());
  QualType NNSpecTy =
    getContext().getCanonicalType(
      getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(ClassDecl)));
  NNSpecTy = getContext().getPointerType(NNSpecTy);
  llvm::Value *V = llvm::Constant::getNullValue(ConvertType(NNSpecTy));
  LValue MemExpLV = EmitLValueForField(V, Field, /*isUnion=*/false,
                                       /*Qualifiers=*/0);
  const llvm::Type *ResultType = ConvertType(getContext().getPointerDiffType());
  V = Builder.CreatePtrToInt(MemExpLV.getAddress(), ResultType, "datamember");
  return LValue::MakeAddr(V, MakeQualifiers(Field->getType()));
}

RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer
  // to function type, as enforced by the assert below.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  QualType FnType = cast<PointerType>(CalleeType)->getPointeeType();
  QualType ResultType = cast<FunctionType>(FnType)->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  // FIXME: We should not need to do this; the calling convention should be
  // part of the function type.
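  // Added note (not from the original source): until then, recover the
  // calling convention from the LLVM function when the callee is a direct
  // reference (after stripping pointer casts); for indirect callees it stays
  // 0, which is the default C calling convention.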
  unsigned CallingConvention = 0;
  if (const llvm::Function *F =
        dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CallingConvention = F->getCallingConv();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 CallingConvention),
                  Callee, Args, TargetDecl);
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BinaryOperator::PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();
  const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
  BaseV = Builder.CreateBitCast(BaseV, i8Ty);
  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");

  QualType Ty = E->getRHS()->getType();
  Ty = Ty->getAs<MemberPointerType>()->getPointeeType();

  const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty));
  AddV = Builder.CreateBitCast(AddV, PType);
  return LValue::MakeAddr(AddV, MakeQualifiers(Ty));
}
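// Illustrative example (not part of the original source) of the
// pointer-to-data-member expressions handled by
// EmitPointerToDataMemberBinaryExpr above:
//
//   struct S { int x; };
//   int S::*pm = &S::x;
//   void f(S &s, S *ps) {
//     s.*pm   = 1;  // PtrMemD: the LHS is emitted as an lvalue.
//     ps->*pm = 2;  // PtrMemI: the LHS is emitted as a scalar pointer.
//   }
//
// In both forms the member pointer value is treated as a byte offset, so the
// code above GEPs from the bitcast i8* base address and then bitcasts the
// result to the member's pointer type (here, 'int*').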