//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Intrinsics.h"
#include "clang/CodeGen/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
                                                    const llvm::Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
                                           const llvm::Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty,
                                            const llvm::Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  QualType BoolTy = getContext().BoolTy;
  if (E->getType()->isMemberFunctionPointerType()) {
    LValue LV = EmitAggExprToLValue(E);

    // Get the pointer.
    llvm::Value *FuncPtr = Builder.CreateStructGEP(LV.getAddress(), 0,
                                                   "src.ptr");
    FuncPtr = Builder.CreateLoad(FuncPtr);

    llvm::Value *IsNotNull =
      Builder.CreateICmpNE(FuncPtr,
                           llvm::Constant::getNullValue(FuncPtr->getType()),
                           "tobool");

    return IsNotNull;
  }
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type.  The result is returned as an RValue struct.  If this is an
/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
/// the result should be returned.
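///
/// For example (illustrative): an 'int' expression comes back as a scalar
/// RValue, a '_Complex double' expression as a (real, imag) pair, and an
/// expression of class type is evaluated directly into AggLoc.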
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
                                    bool IsAggLocVolatile, bool IgnoreResult,
                                    bool IsInitializer) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, false, false,
                                              IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
  return RValue::getAggregate(AggLoc, IsAggLocVolatile);
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
                                          bool IsAggLocVolatile,
                                          bool IsInitializer) {
  llvm::Value *AggLoc = 0;

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggLoc = CreateMemTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
                     IsInitializer);
}

RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
                                                   bool IsInitializer) {
  bool ShouldDestroyTemporaries = false;
  unsigned OldNumLiveTemporaries = 0;

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
    ShouldDestroyTemporaries = true;

    // Keep track of the current cleanup stack depth.
    OldNumLiveTemporaries = LiveTemporaries.size();

    E = TE->getSubExpr();
  }

  RValue Val;
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    // Emit the expr as an lvalue.
    LValue LV = EmitLValue(E);
    if (LV.isSimple()) {
      if (ShouldDestroyTemporaries) {
        // Pop temporaries.
        while (LiveTemporaries.size() > OldNumLiveTemporaries)
          PopCXXTemporary();
      }

      return RValue::get(LV.getAddress());
    }

    Val = EmitLoadOfLValue(LV, E->getType());

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }
  } else {
    const CXXRecordDecl *BaseClassDecl = 0;
    const CXXRecordDecl *DerivedClassDecl = 0;

    if (const CastExpr *CE =
          dyn_cast<CastExpr>(E->IgnoreParenNoopCasts(getContext()))) {
      if (CE->getCastKind() == CastExpr::CK_DerivedToBase) {
        E = CE->getSubExpr();

        BaseClassDecl =
          cast<CXXRecordDecl>(CE->getType()->getAs<RecordType>()->getDecl());
        DerivedClassDecl =
          cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
      }
    }

    Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
                            IsInitializer);

    if (ShouldDestroyTemporaries) {
      // Pop temporaries.
      while (LiveTemporaries.size() > OldNumLiveTemporaries)
        PopCXXTemporary();
    }

    if (IsInitializer) {
      // We might have to destroy the temporary variable.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
          if (!ClassDecl->hasTrivialDestructor()) {
            const CXXDestructorDecl *Dtor =
              ClassDecl->getDestructor(getContext());

            {
              DelayedCleanupBlock Scope(*this);
              EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                    Val.getAggregateAddr());

              // Make sure to jump to the exit block.
              EmitBranch(Scope.getCleanupExitBlock());
            }
            if (Exceptions) {
              EHCleanupBlock Cleanup(*this);
              EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                    Val.getAggregateAddr());
            }
          }
        }
      }
    }

    // Check if we need to perform the derived-to-base cast.
    if (BaseClassDecl) {
      llvm::Value *Derived = Val.getAggregateAddr();
      llvm::Value *Base =
        GetAddressOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl,
                              /*NullCheckValue=*/false);
      return RValue::get(Base);
    }
  }

  if (Val.isAggregate()) {
    Val = RValue::get(Val.getAggregateAddr());
  } else {
    // Create a temporary variable that we can bind the reference to.
    llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp");
    if (Val.isScalar())
      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
    else
      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
    Val = RValue::get(Temp);
  }

  return Val;
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  const llvm::IntegerType *Size_tTy
    = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
  Address = Builder.CreateBitCast(Address, PtrToInt8Ty);

  const llvm::Type *ResType[] = {
    Size_tTy
  };
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, ResType, 1);
  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    CGM.getTypes().ConvertType(CGM.getContext().IntTy));
  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = llvm::ConstantInt::get(IntTy, 0);
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(Size_tTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}


llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                        bool isInc, bool isPre) {
  QualType ValTy = E->getSubExpr()->getType();
  llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();

  int AmountVal = isInc ? 1 : -1;

  if (ValTy->isPointerType() &&
      ValTy->getAs<PointerType>()->isVariableArrayType()) {
    // The amount of the addition/subtraction needs to account for the VLA
    // size.
    ErrorUnsupported(E, "VLA pointer inc/dec");
  }

  llvm::Value *NextVal;
  if (const llvm::PointerType *PT =
        dyn_cast<llvm::PointerType>(InVal->getType())) {
    llvm::Constant *Inc =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
    if (!isa<llvm::FunctionType>(PT->getElementType())) {
      QualType PTEE = ValTy->getPointeeType();
      if (const ObjCInterfaceType *OIT =
            dyn_cast<ObjCInterfaceType>(PTEE)) {
        // Handle interface types, which are not represented with a concrete
        // type.
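        // getTypeSize() returns a size in bits; the division by 8 below
        // converts it to the byte stride used to advance the pointer.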
        int size = getContext().getTypeSize(OIT) / 8;
        if (!isInc)
          size = -size;
        Inc = llvm::ConstantInt::get(Inc->getType(), size);
        const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
        InVal = Builder.CreateBitCast(InVal, i8Ty);
        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
        llvm::Value *lhs = LV.getAddress();
        lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
        LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
      } else
        NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
    } else {
      const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
    }
  } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
    // Bool++ is an interesting case: due to promotion rules, we get
    //   Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
    //   Bool = ((int)Bool+1) != 0
    // An interesting aspect of this is that increment is always true.
    // Decrement does not have this property.
    NextVal = llvm::ConstantInt::getTrue(VMContext);
  } else if (isa<llvm::IntegerType>(InVal->getType())) {
    NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);

    // Signed integer overflow is undefined behavior.
    if (ValTy->isSignedIntegerType())
      NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
    else
      NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
  } else {
    // Add the inc/dec amount to the floating-point value.
    if (InVal->getType()->isFloatTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<float>(AmountVal)));
    else if (InVal->getType()->isDoubleTy())
      NextVal =
        llvm::ConstantFP::get(VMContext,
                              llvm::APFloat(static_cast<double>(AmountVal)));
    else {
      llvm::APFloat F(static_cast<float>(AmountVal));
      bool ignored;
      F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
                &ignored);
      NextVal = llvm::ConstantFP::get(VMContext, F);
    }
    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
  }

  // Store the updated result through the lvalue.
  if (LV.isBitfield())
    EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
  else
    EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? NextVal : InVal;
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  if (hasAggregateLLVMType(Ty)) {
    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
    return RValue::getAggregate(llvm::UndefValue::get(LTy));
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return LValue::MakeAddr(llvm::UndefValue::get(Ty),
                          MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitfield() && LV.isSimple())
    EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference.  In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
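///
/// For example, for a reference to a simple local variable the result is the
/// variable's alloca, i.e. a pointer to the variable's converted LLVM type.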
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXExprWithTemporariesClass:
    return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
  case Expr::CXXZeroInitValueExprClass:
    return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::ObjCPropertyRefExprClass:
    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
  case Expr::ObjCImplicitSetterGetterRefExprClass:
    return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
  case Expr::ObjCSuperExprClass:
    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));

  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));
  }
}

llvm::Value *
CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                  QualType Ty) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
  if (Volatile)
    Load->setVolatile(true);

  // Bool can have a different representation in memory than in registers.
  llvm::Value *V = Load;
  if (Ty->isBooleanType())
    if (V->getType() != llvm::Type::getInt1Ty(VMContext))
      V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");

  return V;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, QualType Ty) {

  if (Ty->isBooleanType()) {
    // Bool can have a different representation in memory than in registers.
    const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    Value = Builder.CreateIntCast(Value, DstPtr->getElementType(), false);
  }
  Builder.CreateStore(Value, Addr, Volatile);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
  if (LV.isObjCWeak()) {
    // Load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }

  if (LV.isSimple()) {
    llvm::Value *Ptr = LV.getAddress();
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();

    // Simple scalar l-value.
    //
    // FIXME: We shouldn't have to use isSingleValueType here.
    if (EltTy->isSingleValueType())
      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
                                          ExprType));

    assert(ExprType->isFunctionType() && "Unknown scalar value");
    return RValue::get(Ptr);
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified(), "tmp");
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV, ExprType);

  if (LV.isBitfield())
    return EmitLoadOfBitfieldLValue(LV, ExprType);

  if (LV.isPropertyRef())
    return EmitLoadOfPropertyRefLValue(LV, ExprType);

  assert(LV.isKVCRef() && "Unknown LValue type!");
  return EmitLoadOfKVCRefLValue(LV, ExprType);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
                                                 QualType ExprType) {
  unsigned StartBit = LV.getBitfieldStartBit();
  unsigned BitfieldSize = LV.getBitfieldSize();
  llvm::Value *Ptr = LV.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // In some cases the bitfield may straddle two memory locations.  Currently
  // we load the entire bitfield, then do the magic to sign-extend it if
  // necessary.  This results in somewhat more code than necessary for the
  // common case (one load), since two shifts accomplish both the masking and
  // sign extension.
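  //
  // Illustrative example (numbers assumed, not from the original source): with
  // a 32-bit access unit, StartBit == 30 and BitfieldSize == 4, LowBits is 2,
  // so two bits come from *Ptr and the remaining two high bits are fetched
  // from the following unit further down.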
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");

  // Shift to proper location.
  if (StartBit)
    Val = Builder.CreateLShr(Val, StartBit, "bf.lo");

  // Mask off unused bits.
  llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext,
                                llvm::APInt::getLowBitsSet(EltTySize, LowBits));
  Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");

  // Fetch the high bits if necessary.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
                            llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              LV.isVolatileQualified(),
                                              "tmp");

    // Mask off unused bits.
    llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext,
                               llvm::APInt::getLowBitsSet(EltTySize, HighBits));
    HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");

    // Shift to the proper location and OR into the bitfield value.
    HighVal = Builder.CreateShl(HighVal, LowBits);
    Val = Builder.CreateOr(Val, HighVal, "bf.val");
  }

  // Sign extend if necessary.
  if (LV.isBitfieldSigned()) {
    llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
                                                    EltTySize - BitfieldSize);
    Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
                             ExtraBits, "bf.val.sext");
  }

  // The bitfield type and the normal type differ when the storage sizes differ
  // (currently just _Bool).
  Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");

  return RValue::get(Val);
}

RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
                                                    QualType ExprType) {
  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
}

RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
                                               QualType ExprType) {
  return EmitObjCPropertyGet(LV.getKVCRefExpr());
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
                                                         QualType ExprType) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified(), "tmp");

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = ExprType->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  llvm::SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(
                                     llvm::Type::getInt32Ty(VMContext), InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
  Vec = Builder.CreateShuffleVector(Vec,
                                    llvm::UndefValue::get(Vec->getType()),
                                    MaskV, "tmp");
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type is
/// 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             QualType Ty) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified(), "tmp");
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);

    if (Dst.isBitfield())
      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);

    if (Dst.isPropertyRef())
      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);

    assert(Dst.isKVCRef() && "Unknown LValue type");
    return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      const llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
                    Dst.isVolatileQualified(), Ty);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     QualType Ty,
                                                     llvm::Value **Result) {
  unsigned StartBit = Dst.getBitfieldStartBit();
  unsigned BitfieldSize = Dst.getBitfieldSize();
  llvm::Value *Ptr = Dst.getBitfieldAddr();

  const llvm::Type *EltTy =
    cast<llvm::PointerType>(Ptr->getType())->getElementType();
  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);

  // Get the new value, cast to the appropriate type and masked to exactly the
  // size of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();
  llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
  llvm::Constant *Mask = llvm::ConstantInt::get(VMContext,
                           llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
  NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    const llvm::Type *SrcTy = SrcVal->getType();
    llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
                                                  "bf.reload.val");

    // Sign extend if necessary.
    if (Dst.isBitfieldSigned()) {
      unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
      llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
                                                      SrcTySize - BitfieldSize);
      SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
                                    ExtraBits, "bf.reload.sext");
    }

    *Result = SrcTrunc;
  }

  // In some cases the bitfield may straddle two memory locations.  Emit the
  // low part first and check to see if the high needs to be done.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                           "bf.prev.low");

  // Compute the mask for zero-ing the low part of this bitfield.
  llvm::Constant *InvMask =
    llvm::ConstantInt::get(VMContext,
             ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits));

  // Compute the new low part as
  //   LowVal = (LowVal & InvMask) | (NewVal << StartBit),
  // with the shift of NewVal implicitly stripping the high bits.
  llvm::Value *NewLowVal =
    Builder.CreateShl(NewVal, StartBit, "bf.value.lo");
  LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
  LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");

  // Write back.
  Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());

  // If the low part doesn't cover the bitfield, emit a high part.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
                            llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
                                              Dst.isVolatileQualified(),
                                              "bf.prev.hi");

    // Compute the mask for zero-ing the high part of this bitfield.
    llvm::Constant *InvMask =
      llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize,
                                                                    HighBits));

    // Compute the new high part as
    //   HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
    // where the high bits of NewVal have already been cleared and the shift
    // strips the low bits.
    llvm::Value *NewHighVal =
      Builder.CreateLShr(NewVal, LowBits, "bf.value.high");
    HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
    HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");

    // Write back.
    Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
  }
}

void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
                                                        LValue Dst,
                                                        QualType Ty) {
  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
                                                   LValue Dst,
                                                   QualType Ty) {
  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst,
                                                               QualType Ty) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified(), "tmp");
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Ty->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector when the src and destination have the same number
      // of elements, and invert the vector mask since it is relative to the
      // side it will be stored to.
      llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(
                                         llvm::Type::getInt32Ty(VMContext), i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV, "tmp");
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it into
      // the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      llvm::SmallVector<llvm::Constant*, 4> ExtMask;
      const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
                                                        ExtMask.size());
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV, "tmp");
      // Build the identity mask.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));

      // Modify the mask where elements get shuffled in from the source.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
    } else {
      // We should never shorten the vector.
      assert(0 && "unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV) {
  if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    LV.SetObjCIvar(LV, true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
          VD->isFileVarDecl())
        LV.SetGlobalObjCRef(LV, true);
    }
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    if (LV.isObjCIvar()) {
      // If the cast is to a structure pointer, follow gcc's behavior and make
      // it a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.SetObjCIvar(LV, false);
    }
    return;
  }
  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself.  {id *Names;} Names[i] = 0;
      LV.SetObjCIvar(LV, false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself.  {id *G;} G[i] = 0;
      LV.SetGlobalObjCRef(LV, false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.SetObjCArray(LV, E->getType()->isArrayType());
    return;
  }
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  if (VD->getType()->isReferenceType())
    V = CGF.Builder.CreateLoad(V, "tmp");
  LValue LV = LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
        CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType), "tmp");
    }
  }
  return LValue::MakeAddr(V, CGF.MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    Qualifiers Quals = MakeQualifiers(E->getType());
    // Local variables do not get their gc attribute set.
    // local static?
    if (NonGCable) Quals.removeObjCGCAttr();

    if (VD->hasAttr<BlocksAttr>()) {
      V = Builder.CreateStructGEP(V, 1, "forwarding");
      V = Builder.CreateLoad(V);
      V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                  VD->getNameAsString());
    }
    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V, "tmp");
    LValue LV = LValue::MakeAddr(V, Quals);
    LValue::SetObjCNonGC(LV, NonGCable);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  // FIXME: the qualifier check does not seem sufficient here.
  if (E->getQualifier()) {
    const FieldDecl *FD = cast<FieldDecl>(ND);
    llvm::Value *V = CGM.EmitPointerToDataMember(FD);

    return LValue::MakeAddr(V, MakeQualifiers(FD->getType()));
  }

  assert(false && "Unhandled DeclRefExpr");

  // An invalid LValue, but the assert will
  // ensure that this point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UnaryOperator::Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: assert(0 && "Unknown unary operator lvalue!");
  case UnaryOperator::Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    Qualifiers Quals = MakeQualifiers(T);
    Quals.setAddressSpace(ExprTy.getAddressSpace());

    LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
    // We should not generate a __weak write barrier on an indirect reference
    // of a pointer to an object, as in: void foo (__weak id *param); *param = 0;
    // But we continue to generate a __strong write barrier on indirect writes
    // into a pointer to an object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UnaryOperator::Real:
  case UnaryOperator::Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
                                                    Idx, "idx"),
                            MakeQualifiers(ExprTy));
  }
  case UnaryOperator::PreInc:
  case UnaryOperator::PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UnaryOperator::PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
                          Qualifiers());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                          Qualifiers());
}


LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
  std::string GlobalVarName;

  switch (Type) {
  default: assert(0 && "Invalid type");
  case PredefinedExpr::Func:
    GlobalVarName = "__func__.";
    break;
  case PredefinedExpr::Function:
    GlobalVarName = "__FUNCTION__.";
    break;
  case PredefinedExpr::PrettyFunction:
    GlobalVarName = "__PRETTY_FUNCTION__.";
    break;
  }

  llvm::StringRef FnName = CurFn->getName();
  if (FnName.startswith("\01"))
    FnName = FnName.substr(1);
  GlobalVarName += FnName;

  std::string FunctionName =
    PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurCodeDecl);

  llvm::Constant *C =
    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
  return LValue::MakeAddr(C, Qualifiers());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");
  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction:
    return EmitPredefinedFunctionName(E->getIdentType());
  }
}

llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call; that way, in the debugger they can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap
  // down to just one per function to save on code size.
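  //
  // In other words, at -O0 each call below creates a fresh "trap" block so the
  // debugger can attribute a failure to a specific operation; with
  // optimization enabled the block is created once and cached in TrapBB.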
  if (GCO.OptimizationLevel && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx,
                          llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType().getCVRQualifiers());
  }

  // The base must be a pointer, which is not an aggregate.  Emit it.
  llvm::Value *Base = EmitScalarExpr(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits.
  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (IdxBitwidth != LLVMPointerWidth)
    Idx = Builder.CreateIntCast(Idx,
                            llvm::IntegerType::get(VMContext, LLVMPointerWidth),
                            IdxSigned, "idxprom");

  // FIXME: As llvm implements the object size checking, this can come out.
  if (CatchUndefined) {
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())) {
      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
        if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
          if (const ConstantArrayType *CAT
                = getContext().getAsConstantArrayType(DRE->getType())) {
            llvm::APInt Size = CAT->getSize();
            llvm::BasicBlock *Cont = createBasicBlock("cont");
            Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
                                  llvm::ConstantInt::get(Idx->getType(), Size)),
                                 Cont, getTrapBB());
            EmitBlock(Cont);
          }
        }
      }
    }
  }

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
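  //
  // Note on the VLA path below: the index is multiplied by the VLA size and
  // then divided by the size of the base element type, yielding a count of
  // innermost elements, which is what the GEP expects since pointers to VLAs
  // are lowered to pointers to the innermost element type.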
  llvm::Value *Address = 0;
  if (const VariableArrayType *VAT =
        getContext().getAsVariableArrayType(E->getType())) {
    llvm::Value *VLASize = GetVLASize(VAT);

    Idx = Builder.CreateMul(Idx, VLASize);

    QualType BaseType = getContext().getBaseElementType(VAT);

    CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
    Idx = Builder.CreateUDiv(Idx,
                             llvm::ConstantInt::get(Idx->getType(),
                                                   BaseTypeSize.getQuantity()));
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  } else if (const ObjCInterfaceType *OIT =
               dyn_cast<ObjCInterfaceType>(E->getType())) {
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
                            getContext().getTypeSizeInChars(OIT).getQuantity());

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
                                Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else {
    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  Qualifiers Quals = MakeQualifiers(T);
  Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());

  LValue LV = LValue::MakeAddr(Address, Quals);
  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                       llvm::SmallVector<unsigned, 4> &Elts) {
  llvm::SmallVector<llvm::Constant*, 4> CElts;

  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(VMContext), Elts[i]));

  return llvm::ConstantVector::get(&CElts[0], CElts.size());
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);

  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
    Quals.removeObjCGCAttr();
    Base = LValue::MakeAddr(Ptr, Quals);
  } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x), emit
    // the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), so emit it as
    // such.
    assert(E->getBase()->getType()->getAs<VectorType>() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = LValue::MakeAddr(VecMem, Qualifiers());
  }

  // Encode the element access list into a vector of unsigned indices.
  llvm::SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
                                    Base.getVRQualifiers());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  llvm::SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
    else
      CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
                                  Base.getVRQualifiers());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  Qualifiers BaseQuals;

  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAs<PointerType>();
    BaseQuals = PTy->getPointeeType().getQualifiers();
  } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
             isa<ObjCImplicitSetterGetterRefExpr>(
               BaseExpr->IgnoreParens())) {
    RValue RV = EmitObjCPropertyGet(BaseExpr);
    BaseValue = RV.getAggregateAddr();
    BaseQuals = BaseExpr->getType().getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    QualType BaseTy = BaseExpr->getType();
    BaseQuals = BaseTy.getQualifiers();
  }

  NamedDecl *ND = E->getMemberDecl();
  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseValue, Field,
                                   BaseQuals.getCVRQualifiers());
    LValue::SetObjCNonGC(LV, isNonGC);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
    return EmitGlobalVarDeclLValue(*this, E, VD);

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  assert(false && "Unhandled member declaration!");
  return LValue();
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
                                              const FieldDecl* Field,
                                              unsigned CVRQualifiers) {
  CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);

  // FIXME: CodeGenTypes should expose a method to get the appropriate type for
  // FieldTy (the appropriate type is ABI-dependent).
  const llvm::Type *FieldTy =
    CGM.getTypes().ConvertTypeForMem(Field->getType());
  const llvm::PointerType *BaseTy =
    cast<llvm::PointerType>(BaseValue->getType());
  unsigned AS = BaseTy->getAddressSpace();
  BaseValue = Builder.CreateBitCast(BaseValue,
                                    llvm::PointerType::get(FieldTy, AS),
                                    "tmp");

  llvm::Value *Idx =
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo);
  llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp");

  return LValue::MakeBitfield(V, Info.Start, Info.Size,
                              Field->getType()->isSignedIntegerType(),
                             Field->getType().getCVRQualifiers()|CVRQualifiers);
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
                                           const FieldDecl* Field,
                                           unsigned CVRQualifiers) {
  if (Field->isBitField())
    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);

  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  // Match union field type.
  if (Field->getParent()->isUnion()) {
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(Field->getType());
    const llvm::PointerType *BaseTy =
      cast<llvm::PointerType>(BaseValue->getType());
    unsigned AS = BaseTy->getAddressSpace();
    V = Builder.CreateBitCast(V,
                              llvm::PointerType::get(FieldTy, AS),
                              "tmp");
  }
  if (Field->getType()->isReferenceType())
    V = Builder.CreateLoad(V, "tmp");

  Qualifiers Quals = MakeQualifiers(Field->getType());
  Quals.addCVRQualifiers(CVRQualifiers);
  // __weak attribute on a field is ignored.
  if (Quals.getObjCGCAttr() == Qualifiers::Weak)
    Quals.removeObjCGCAttr();

  return LValue::MakeAddr(V, Quals);
}

LValue
CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
                                                  const FieldDecl* Field,
                                                  unsigned CVRQualifiers) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(BaseValue, Field, CVRQualifiers);

  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");

  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");

  return LValue::MakeAddr(V, MakeQualifiers(FieldType));
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
  llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr* InitExpr = E->getInitializer();
  LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));

  if (E->getType()->isComplexType())
    EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
  else if (hasAggregateLLVMType(E->getType()))
    EmitAnyExpr(InitExpr, DeclPtr, false);
  else
    EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType());

  return Result;
}

LValue
CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
      Expr *Live = Cond == 1 ?

LValue
CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
    if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
      Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
      if (Live)
        return EmitLValue(Live);
    }

    if (!E->getLHS())
      return EmitUnsupportedLValue(E, "conditional operator with missing LHS");

    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
    llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");

    EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

    // Any temporaries created here are conditional.
    BeginConditionalBranch();
    EmitBlock(LHSBlock);
    LValue LHS = EmitLValue(E->getLHS());
    EndConditionalBranch();

    if (!LHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    // FIXME: We shouldn't need an alloca for this.
    llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(), "condtmp");
    Builder.CreateStore(LHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    // Any temporaries created here are conditional.
    BeginConditionalBranch();
    EmitBlock(RHSBlock);
    LValue RHS = EmitLValue(E->getRHS());
    EndConditionalBranch();
    if (!RHS.isSimple())
      return EmitUnsupportedLValue(E, "conditional operator");

    Builder.CreateStore(RHS.getAddress(), Temp);
    EmitBranch(ContBlock);

    EmitBlock(ContBlock);

    Temp = Builder.CreateLoad(Temp, "lv");
    return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
  }

  // ?: here should be an aggregate.
  assert((hasAggregateLLVMType(E->getType()) &&
          !E->getType()->isAnyComplexType()) &&
         "Unexpected conditional operator!");

  return EmitAggExprToLValue(E);
}
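
// Example (illustrative only, not part of the original source): in C++ the
// conditional operator can itself be an l-value, as in
//
//   int a, b; bool c;
//   (c ? a : b) = 42;
//
// Unless the condition folds to a constant, both branches are emitted, each
// branch stores its address into the "condtmp" alloca, and the reloaded
// pointer becomes the address of the result.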

/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
/// If the cast is a dynamic_cast, we can have the usual lvalue result;
/// otherwise, if a cast is needed by the code generator in an lvalue context,
/// it means we need the address of an aggregate in order to access one of its
/// fields. This can happen for all the reasons that casts are permitted with
/// aggregate results, including noop aggregate casts and casts from scalar to
/// union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  default:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CastExpr::CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return LValue::MakeAddr(EmitDynamicCast(V, DCE),
                            MakeQualifiers(E->getType()));
  }

  case CastExpr::CK_NoOp:
  case CastExpr::CK_ConstructorConversion:
  case CastExpr::CK_UserDefinedConversion:
  case CastExpr::CK_AnyPointerToObjCPointerCast:
    return EmitLValue(E->getSubExpr());

  case CastExpr::CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    const RecordType *BaseClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the derived-to-base conversion.
    llvm::Value *Base =
      GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl,
                            BaseClassDecl, /*NullCheckValue=*/false);

    return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CastExpr::CK_BaseToDerived: {
    const RecordType *BaseClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *BaseClassDecl =
      cast<CXXRecordDecl>(BaseClassTy->getDecl());

    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion.
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), BaseClassDecl,
                               DerivedClassDecl, /*NullCheckValue=*/false);

    return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
  }
  case CastExpr::CK_BitCast: {
    // This must be a reinterpret_cast (or C-style equivalent).
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
  }
  }
}
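
// Example (illustrative only, not part of the original source): for
//
//   struct Base { int b; };
//   struct Derived : Base { int d; };
//   Derived Obj;
//   static_cast<Base&>(Obj).b = 0;
//
// the CK_DerivedToBase case above adjusts the address of 'Obj' with
// GetAddressOfBaseClass before the member access is emitted.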

LValue CodeGenFunction::EmitNullInitializationLValue(
                                              const CXXZeroInitValueExpr *E) {
  QualType Ty = E->getType();
  LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
  EmitMemSetToZero(LV.getAddress(), Ty);
  return LV;
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  const Decl *TargetDecl = 0;
  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
      TargetDecl = DRE->getDecl();
      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
        if (unsigned builtinID = FD->getBuiltinID())
          return EmitBuiltinExpr(FD, builtinID, E);
    }
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    EmitScalarExpr(E->getCallee());
    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BinaryOperator::Comma) {
    EmitAnyExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BinaryOperator::PtrMemD ||
      E->getOpcode() == BinaryOperator::PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  // Beyond comma and pointer-to-member access, the only binary operator that
  // can yield an l-value is a simple assignment.
  if (E->getOpcode() != BinaryOperator::Assign)
    return EmitUnsupportedLValue(E, "binary l-value expression");

  if (!hasAggregateLLVMType(E->getType())) {
    // Emit the LHS as an l-value.
    LValue LV = EmitLValue(E->getLHS());

    llvm::Value *RHS = EmitScalarExpr(E->getRHS());
    EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(),
                      E->getType());
    return LV;
  }

  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}
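
// Example (illustrative only, not part of the original source): a call is an
// l-value when it returns a reference, e.g.
//
//   int &pick(int &x, int &y);
//   int i, j;
//   pick(i, j) = 7;
//
// EmitCallExprLValue above takes the returned reference (represented as a
// scalar pointer value) and uses it as the address of the resulting l-value.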

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  llvm::Value *Temp = CreateMemTemp(E->getType(), "tmp");
  EmitCXXConstructExpr(Temp, E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  llvm::Value *Temp = EmitCXXTypeidExpr(E);
  return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  LValue LV = EmitLValue(E->getSubExpr());
  PushCXXTemporary(E->getTemporary(), LV.getAddress());
  return LV;
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  // Can only get l-value for message expression returning aggregate type.
  RValue RV = EmitObjCMessageExpr(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}

LValue CodeGenFunction::EmitObjCKVCRefLValue(
                                const ObjCImplicitSetterGetterRefExpr *E) {
  // This is a special l-value that just issues sends when we load or store
  // through it.
  return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}

LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
  return EmitUnsupportedLValue(E, "use of super");
}
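
// Example (illustrative only, GNU extension; not part of the original source):
// EmitStmtExprLValue below covers the case where the address of a statement
// expression's aggregate result is needed, for instance
//
//   struct S { int x; };
//   int i = ({ struct S s = {1}; s; }).x;
//
// The result is forced into a temporary with EmitAnyExprToTemp and the
// temporary's address is returned.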

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for a statement expression returning aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  // FIXME: can this be volatile?
  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
  QualType ResultType = FnType->getResultType();

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
                  Callee, ReturnValue, Args, TargetDecl);
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BinaryOperator::PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();
  const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
  BaseV = Builder.CreateBitCast(BaseV, i8Ty);
  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
  llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");

  QualType Ty = E->getRHS()->getType();
  Ty = Ty->getAs<MemberPointerType>()->getPointeeType();

  const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty));
  AddV = Builder.CreateBitCast(AddV, PType);
  return LValue::MakeAddr(AddV, MakeQualifiers(Ty));
}
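
// Example (illustrative only, not part of the original source): for a pointer
// to a data member, e.g.
//
//   struct S { int i; };
//   int S::*pm = &S::i;
//   S s;
//   s.*pm = 3;
//
// EmitPointerToDataMemberBinaryExpr above treats the member pointer as a byte
// offset: the base address is cast to i8*, advanced by the offset, and then
// cast to a pointer to the member's type.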