//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
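/// Member pointers are handled through the C++ ABI object rather than by a
/// plain comparison against zero, since the representation of a null member
/// pointer is ABI-specific.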
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  if (E->getType()->isAnyComplexType())
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  else if (hasAggregateLLVMType(E->getType()))
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
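/// For example, binding 'const Base &r = Derived();' records a
/// derived-to-base adjustment, while binding a reference to a member of a
/// temporary records a field adjustment.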
  struct SubobjectAdjustment {
    enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

    union {
      struct {
        const CastExpr *BasePath;
        const CXXRecordDecl *DerivedClass;
      } DerivedToBase;

      FieldDecl *Field;
    };

    SubobjectAdjustment(const CastExpr *BasePath,
                        const CXXRecordDecl *DerivedClass)
      : Kind(DerivedToBaseAdjustment) {
      DerivedToBase.BasePath = BasePath;
      DerivedToBase.DerivedClass = DerivedClass;
    }

    SubobjectAdjustment(FieldDecl *Field)
      : Kind(FieldAdjustment) {
      this->Field = Field;
    }
  };
}

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      llvm::SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
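    // (A non-simple lvalue here is something like a bitfield or a vector
    // element, which cannot be bound to directly; its value is loaded and
    // then materialized into a temporary below.)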
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
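    // Aggregates are evaluated directly into the temporary slot; scalar and
    // complex values are evaluated as an RValue first and stored into a
    // temporary at the end of this function.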
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Qualifiers(),
                                      isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
            CGF.GetAddressOfBaseClass(Object,
                                      Adjustment.DerivedToBase.DerivedClass,
                                      Adjustment.DerivedToBase.BasePath->path_begin(),
                                      Adjustment.DerivedToBase.BasePath->path_end(),
                                      /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV =
            CGF.EmitLValueForField(Object, Adjustment.Field, 0);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
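  // A temporary bound by a reference with static storage duration must live
  // until program exit, so its destructor is registered globally; otherwise
  // a normal local cleanup is pushed.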
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
                                       &destroyARCStrongImprecise;
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  *destroyer, cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = &destroyARCWeak;
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, *destroyer, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be to the standard address space.
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address.  Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));

  case Expr::BlockDeclRefExprClass:
    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));

  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
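  // The GNU binary conditional operator ('x ?: y') produces the same lvalue
  // as the ordinary conditional operator.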
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment(), lvalue.getType(),
                          lvalue.getTBAAInfo());
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {
  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);

  return EmitFromMemory(Load, Ty);
}

static bool isBooleanUnderlyingType(QualType Ty) {
  if (const EnumType *ET = dyn_cast<EnumType>(Ty))
    return ET->getDecl()->getIntegerType()->isBooleanType();
  return false;
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
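  // In memory a bool is stored as an i8, while as an SSA value it is an i1,
  // so a load must be truncated back down.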
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment(), lvalue.getType(),
                    lvalue.getTBAAInfo());
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
                                               AI.AccessWidth,
                             CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified());
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
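    // For example, 'V.xy = W;' becomes a load of V, a shuffle that merges W
    // into the selected lanes, and a store of the whole vector back to V.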
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Dst.getType()->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access, limited
    // to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector when the src and destination have the same number
      // of elements, restoring the vector mask since it is on the side where
      // it will be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // Build the identity mask.
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));

      // Overwrite the elements that get shuffled in from the source.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of the
      // struct it points to follows gcc's behavior and conservatively
      // makes it a non-ivar write-barrier.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself.  {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself.  {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  if (VD->getType()->isReferenceType())
    V = CGF.Builder.CreateLoad(V);

  V = EmitBitCastOfLValueToProperType(CGF, V,
                                CGF.getTypes().ConvertTypeForMem(E->getType()));

  unsigned Alignment = CGF.getContext().getDeclAlign(VD).getQuantity();
  LValue LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
      QualType NoProtoType =
        CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  unsigned Alignment = CGF.getContext().getDeclAlign(FD).getQuantity();
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  unsigned Alignment = getContext().getDeclAlign(ND).getQuantity();

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (VD->hasAttr<BlocksAttr>())
      V = BuildBlockByrefAddress(V, VD);

    if (VD->getType()->isReferenceType())
      V = Builder.CreateLoad(V);

    V = EmitBitCastOfLValueToProperType(*this, V,
                                    getTypes().ConvertTypeForMem(E->getType()));

    LValue LV = MakeAddrLValue(V, E->getType(), Alignment);
    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");

  // An invalid LValue; the llvm_unreachable above ensures that this
  // point is never reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  unsigned Alignment =
    getContext().getDeclAlign(E->getDecl()).getQuantity();
  return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // real and imag are valid on scalars.  This is a faster way of
    // testing that.
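    // If the operand is a scalar rather than a _Complex value, there is no
    // struct to index into, so the lvalue is returned unchanged.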
    if (!cast<llvm::PointerType>(Addr->getType())
          ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
        (isa<BlockDecl>(CurDecl)
         ? FnName.str()
         : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type,
                                       CurDecl));

    llvm::Constant *C =
      CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
    return MakeAddrLValue(C, E->getType());
  }
  }
}

llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call, that way, in the debugger they can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap down
  // to just one per function to save on codesize.
  if (GCO.OptimizationLevel && TrapBB)
    return TrapBB;

  llvm::BasicBlock *Cont = 0;
  if (HaveInsertPoint()) {
    Cont = createBasicBlock("cont");
    EmitBranch(Cont);
  }
  TrapBB = createBasicBlock("trap");
  EmitBlock(TrapBB);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
  llvm::CallInst *TrapCall = Builder.CreateCall(F);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();

  if (Cont)
    EmitBlock(Cont);
  return TrapBB;
}

/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
/// array to pointer, return the array subexpression.
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
  // If this isn't just an array->pointer decay, bail out.
  const CastExpr *CE = dyn_cast<CastExpr>(E);
  if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
    return 0;

  // If this is a decay from variable width array, bail out.
  const Expr *SubExpr = CE->getSubExpr();
  if (SubExpr->getType()->isVariableArrayType())
    return 0;

  return SubExpr;
}

LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
  // The index must always be an integer, which is not an aggregate.  Emit it.
  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();
  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isVectorType()) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
                                 E->getBase()->getType());
  }

  // Extend or truncate the index type to 32 or 64-bits.
  if (Idx->getType() != IntPtrTy)
    Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

  // FIXME: As llvm implements the object size checking, this can come out.
  if (CatchUndefined) {
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())) {
      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
        if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
          if (const ConstantArrayType *CAT
              = getContext().getAsConstantArrayType(DRE->getType())) {
            llvm::APInt Size = CAT->getSize();
            llvm::BasicBlock *Cont = createBasicBlock("cont");
            Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
                                  llvm::ConstantInt::get(Idx->getType(), Size)),
                                 Cont, getTrapBB());
            EmitBlock(Cont);
          }
        }
      }
    }
  }

  // We know that the pointer points to a type of the correct size, unless the
  // size is a VLA or Objective-C interface.
  llvm::Value *Address = 0;
  unsigned ArrayAlignment = 0;
  if (const VariableArrayType *vla =
        getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address = EmitScalarExpr(E->getBase());

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).first;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOptions().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
      Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
      Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
    }
  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()) {
    // Indexing over an interface, as in "NSString *P; P[4];"
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
          getContext().getTypeSizeInChars(OIT).getQuantity());

    Idx = Builder.CreateMul(Idx, InterfaceSize);

    // The base must be a pointer, which is not an aggregate. Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    Address = EmitCastToVoidPtr(Base);
    Address = Builder.CreateGEP(Address, Idx, "arrayidx");
    Address = Builder.CreateBitCast(Address, Base->getType());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV = EmitLValue(Array);
    llvm::Value *ArrayPtr = ArrayLV.getAddress();
    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
    llvm::Value *Args[] = { Zero, Idx };

    // Propagate the alignment from the array itself to the result.
    ArrayAlignment = ArrayLV.getAlignment();

    if (getContext().getLangOptions().isSignedOverflowDefined())
      Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
  } else {
    // The base must be a pointer, which is not an aggregate. Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    if (getContext().getLangOptions().isSignedOverflowDefined())
      Address = Builder.CreateGEP(Base, Idx, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  // Limit the alignment to that of the result type.
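  // (An element at an arbitrary index is only guaranteed the alignment of the
  // element type itself, even if the array as a whole is more strictly
  // aligned, so take the minimum of the two.)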
  if (ArrayAlignment) {
    unsigned Align = getContext().getTypeAlignInChars(T).getQuantity();
    ArrayAlignment = std::min(Align, ArrayAlignment);
  }

  LValue LV = MakeAddrLValue(Address, T, ArrayAlignment);
  LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());

  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

static
llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                       SmallVector<unsigned, 4> &Elts) {
  SmallVector<llvm::Constant*, 4> CElts;

  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));

  return llvm::ConstantVector::get(CElts);
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType());
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType());
  }

  QualType type =
    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
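  // e.g. a swizzle such as 'V.yx' on a 4-element ext_vector encodes as {1, 0}.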
  SmallVector<unsigned, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type);
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    if (isa<llvm::ConstantAggregateZero>(BaseElts))
      CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
    else
      CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
  }
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type);
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  bool isNonGC = false;
  Expr *BaseExpr = E->getBase();
  llvm::Value *BaseValue = NULL;
  Qualifiers BaseQuals;

  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    const PointerType *PTy =
      BaseExpr->getType()->getAs<PointerType>();
    BaseQuals = PTy->getPointeeType().getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    if (BaseLV.isNonGC())
      isNonGC = true;
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    QualType BaseTy = BaseExpr->getType();
    BaseQuals = BaseTy.getQualifiers();
  }

  NamedDecl *ND = E->getMemberDecl();
  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseValue, Field,
                                   BaseQuals.getCVRQualifiers());
    LV.setNonGC(isNonGC);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
    return EmitGlobalVarDeclLValue(*this, E, VD);

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
                                              const FieldDecl *Field,
                                              unsigned CVRQualifiers) {
  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
  return LValue::MakeBitfield(BaseValue, Info,
                          Field->getType().withCVRQualifiers(CVRQualifiers));
}

/// EmitLValueForAnonRecordField - Given that the field is a member of
/// an anonymous struct or union buried inside a record, and given
/// that the base value is a pointer to the enclosing record, derive
/// an lvalue for the ultimate field.
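/// For example, given 'struct S { struct { int x; }; };', this walks the
/// chain of implicit anonymous members from an S* down to the field 'x'.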
LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
                                               const IndirectFieldDecl *Field,
                                                     unsigned CVRQualifiers) {
  IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
    IEnd = Field->chain_end();
  while (true) {
    LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I),
                                   CVRQualifiers);
    if (++I == IEnd) return LV;

    assert(LV.isSimple());
    BaseValue = LV.getAddress();
    CVRQualifiers |= LV.getVRQualifiers();
  }
}

LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
                                           const FieldDecl *field,
                                           unsigned cvr) {
  if (field->isBitField())
    return EmitLValueForBitfield(baseAddr, field, cvr);

  const RecordDecl *rec = field->getParent();
  QualType type = field->getType();

  bool mayAlias = rec->hasAttr<MayAliasAttr>();

  llvm::Value *addr = baseAddr;
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    assert(!type->isReferenceType() && "union has reference member");
  } else {
    // For structs, we GEP to the field that the record layout suggests.
    unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
    addr = Builder.CreateStructGEP(addr, idx, field->getName());

    // If this is a reference field, load the reference right now.
    if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
      llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
      if (cvr & Qualifiers::Volatile) load->setVolatile(true);

      if (CGM.shouldUseTBAA()) {
        llvm::MDNode *tbaa;
        if (mayAlias)
          tbaa = CGM.getTBAAInfo(getContext().CharTy);
        else
          tbaa = CGM.getTBAAInfo(type);
        CGM.DecorateInstruction(load, tbaa);
      }

      addr = load;
      mayAlias = false;
      type = refType->getPointeeType();
      cvr = 0; // qualifiers don't recursively apply to referencee
    }
  }

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs: a union needs a bitcast, and a struct element
  // needs one if the laid-out LLVM type doesn't match the desired type.
  addr = EmitBitCastOfLValueToProperType(*this, addr,
                                         CGM.getTypes().ConvertTypeForMem(type),
                                         field->getName());

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  unsigned alignment = getContext().getDeclAlign(field).getQuantity();
  LValue LV = MakeAddrLValue(addr, type, alignment);
  LV.getQuals().addCVRQualifiers(cvr);

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  // Fields of may_alias structs act like 'char' for TBAA purposes.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
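  // (That is, such a field gets the most general 'char' TBAA tag, so it is
  // assumed to potentially alias an access of any other type.)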
  if (mayAlias && LV.getTBAAInfo())
    LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));

  return LV;
}

LValue
CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
                                                  const FieldDecl *Field,
                                                  unsigned CVRQualifiers) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(BaseValue, Field, CVRQualifiers);

  const CGRecordLayout &RL =
    CGM.getTypes().getCGRecordLayout(Field->getParent());
  unsigned idx = RL.getLLVMFieldNo(Field);
  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx);
  assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs: a union needs a bitcast, and a struct element
  // needs one if the laid-out LLVM type doesn't match the desired type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));

  unsigned Alignment = getContext().getDeclAlign(Field).getQuantity();
  return MakeAddrLValue(V, FieldType, Alignment);
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
  llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType());

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  return Result;
}

LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert((hasAggregateLLVMType(expr->getType()) &&
            !expr->getType()->isAnyComplexType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  const Expr *condExpr = expr->getCond();
  bool CondExprBool;
  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
    if (!CondExprBool) std::swap(live, dead);

    if (!ContainsLabel(dead))
      return EmitLValue(live);
  }

  OpaqueValueMapping binding(*this, expr);

  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");

  ConditionalEvaluation eval(*this);
  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);

  // Any temporaries created here are conditional.
  EmitBlock(lhsBlock);
  eval.begin(*this);
  LValue lhs = EmitLValue(expr->getTrueExpr());
  eval.end(*this);

  if (!lhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");

  lhsBlock = Builder.GetInsertBlock();
  Builder.CreateBr(contBlock);

  // Any temporaries created here are conditional.
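  // (This false branch mirrors the true branch above; the two resulting
  // addresses are merged with a PHI at the continuation block below.)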
  EmitBlock(rhsBlock);
  eval.begin(*this);
  LValue rhs = EmitLValue(expr->getFalseExpr());
  eval.end(*this);
  if (!rhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");
  rhsBlock = Builder.GetInsertBlock();

  EmitBlock(contBlock);

  llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
                                         "cond-lvalue");
  phi->addIncoming(lhs.getAddress(), lhsBlock);
  phi->addIncoming(rhs.getAddress(), rhsBlock);
  return MakeAddrLValue(phi, expr->getType());
}

/// EmitCastLValue - Casts are never lvalues unless that cast is a
/// dynamic_cast. If the cast is a dynamic_cast, we can have the usual lvalue
/// result; otherwise, if a cast is needed by the code generator in an lvalue
/// context, it must mean that we need the address of an aggregate in order to
/// access one of its fields. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts and
/// casts from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_NoOp:
  case CK_LValueToRValue:
    if (!E->getSubExpr()->Classify(getContext()).isPRValue()
        || E->getType()->isRecordType())
      return EmitLValue(E->getSubExpr());
    // Fall through to synthesize a temporary.

  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject: {
    // These casts only produce lvalues when we're binding a reference to a
    // temporary realized from a (converted) pure rvalue. Emit the expression
    // as a value, copy it into a temporary, and return an lvalue referring to
    // that temporary.
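    // A sketch of what this produces: a "ref.temp" alloca, a store of the
    // evaluated value into it, and an lvalue whose address is that alloca.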
    llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
    EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
    return MakeAddrLValue(V, E->getType());
  }

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
    return EmitLValue(E->getSubExpr());

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *This = LV.getAddress();

    // Perform the derived-to-base conversion.
    llvm::Value *Base =
      GetAddressOfBaseClass(This, DerivedClassDecl,
                            E->path_begin(), E->path_end(),
                            /*NullCheckValue=*/false);

    return MakeAddrLValue(Base, E->getType());
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion.
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                               E->path_begin(), E->path_end(),
                               /*NullCheckValue=*/false);

    return MakeAddrLValue(Derived, E->getType());
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or C-style equivalent).
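    // e.g. 'reinterpret_cast<char &>(x)': the operand's address is reused and
    // simply bitcast to the type as written.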
    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return MakeAddrLValue(V, E->getType());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType ToType = getContext().getLValueReferenceType(E->getType());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(ToType));
    return MakeAddrLValue(V, E->getType());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

LValue CodeGenFunction::EmitNullInitializationLValue(
                                              const CXXScalarValueInitExpr *E) {
  QualType Ty = E->getType();
  LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
  EmitNullInitialization(LV.getAddress(), Ty);
  return LV;
}

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOpaqueLValueMapping(e);
}

LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
                                           const MaterializeTemporaryExpr *E) {
  RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, E->getLocStart());

  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  const Decl *TargetDecl = E->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (unsigned builtinID = FD->getBuiltinID())
      return EmitBuiltinExpr(FD, builtinID, E);
  }

  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (const CXXPseudoDestructorExpr *PseudoDtor
          = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    QualType DestroyedType = PseudoDtor->getDestroyedType();
    if (getContext().getLangOptions().ObjCAutoRefCount &&
        DestroyedType->isObjCLifetimeType() &&
        (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
         DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
      // Automatic Reference Counting:
      //   If the pseudo-expression names a retainable object with weak or
      //   strong lifetime, the object shall be released.
      Expr *BaseExpr = PseudoDtor->getBase();
      llvm::Value *BaseValue = NULL;
      Qualifiers BaseQuals;

      // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
      if (PseudoDtor->isArrow()) {
        BaseValue = EmitScalarExpr(BaseExpr);
        const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
        BaseQuals = PTy->getPointeeType().getQualifiers();
      } else {
        LValue BaseLV = EmitLValue(BaseExpr);
        BaseValue = BaseLV.getAddress();
        QualType BaseTy = BaseExpr->getType();
        BaseQuals = BaseTy.getQualifiers();
      }

      switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        break;

      case Qualifiers::OCL_Strong:
        EmitARCRelease(Builder.CreateLoad(BaseValue,
                          PseudoDtor->getDestroyedType().isVolatileQualified()),
                       /*precise*/ true);
        break;

      case Qualifiers::OCL_Weak:
        EmitARCDestroyWeak(BaseValue);
        break;
      }
    } else {
      // C++ [expr.pseudo]p1:
      //   The result shall only be used as the operand for the function call
      //   operator (), and the result of such a call has type void. The only
      //   effect is the evaluation of the postfix-expression before the dot or
      //   arrow.
      EmitScalarExpr(E->getCallee());
    }

    return RValue::get(0);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
                  E->arg_begin(), E->arg_end(), TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  if (!hasAggregateLLVMType(E->getType())) {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitLValue(E->getLHS());
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  if (E->getType()->isAnyComplexType())
    return EmitComplexAssignmentLValue(E);

  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
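  // (EmitAggExprToLValue evaluates the va_arg into a fresh temporary and
  // returns that temporary's address, hence the extra copy noted above.)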
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), Slot.getAddr());
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  llvm::Value *V =
    CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
  return MakeAddrLValue(V, E->getType());
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = 0;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get an l-value for a statement expression that returns an
  // aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 CallExpr::const_arg_iterator ArgBeg,
                                 CallExpr::const_arg_iterator ArgEnd,
                                 const Decl *TargetDecl) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const FunctionType *FnType
    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  CallArgList Args;
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);

  const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  if (isa<FunctionNoProtoType>(FnType) &&
      !getTargetHooks().isNoProtoCallVariadic(FnType->getCallConv())) {
    assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0))
             ->isVarArg());
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV =
    CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}

static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  if (E->isCmpXChg()) {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
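    // The sequence below loads the expected (Val1) and desired (Val2) values,
    // issues the cmpxchg, stores the old value back into Val1, and stores the
    // success flag (old == expected) into Dest.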
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
      CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  if (E->getOp() == AtomicExpr::Load) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  if (E->getOp() == AtomicExpr::Store) {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  switch (E->getOp()) {
  case AtomicExpr::CmpXchgWeak:
  case AtomicExpr::CmpXchgStrong:
  case AtomicExpr::Store:
  case AtomicExpr::Load:  assert(0 && "Already handled!");
  case AtomicExpr::Add:   Op = llvm::AtomicRMWInst::Add;  break;
  case AtomicExpr::Sub:   Op = llvm::AtomicRMWInst::Sub;  break;
  case AtomicExpr::And:   Op = llvm::AtomicRMWInst::And;  break;
  case AtomicExpr::Or:    Op = llvm::AtomicRMWInst::Or;   break;
  case AtomicExpr::Xor:   Op = llvm::AtomicRMWInst::Xor;  break;
  case AtomicExpr::Xchg:  Op = llvm::AtomicRMWInst::Xchg; break;
  }
  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
    CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
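// The atomic paths below use it to obtain an addressable copy of each operand
// value, e.g. the value argument of an atomic store.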
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidth =
    getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align || Size > MaxInlineWidth);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());
  Order = EmitScalarExpr(E->getOrder());
  if (E->isCmpXChg()) {
    Val1 = EmitScalarExpr(E->getVal1());
    Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    (void)OrderFail;  // OrderFail is unused at the moment.
  } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
             MemTy->isPointerType()) {
    // For pointers, we're required to do a bit of math: adding 1 to an int*
    // is not the same as adding 1 to a uintptr_t.
    QualType Val1Ty = E->getVal1()->getType();
    llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
    CharUnits PointeeIncAmt =
      getContext().getTypeSizeInChars(MemTy->getPointeeType());
    Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
    Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
    EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
  } else if (E->getOp() != AtomicExpr::Load) {
    Val1 = EmitValToTemp(*this, E->getVal1());
  }

  if (E->getOp() != AtomicExpr::Store && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  if (UseLibcall) {
    // FIXME: Finalize what the libcalls are actually supposed to look like.
    // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
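    // (We only get here when the atomic operand is misaligned or wider than
    // the target's maximum inline atomic width; see UseLibcall above.)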
    return EmitUnsupportedRValue(E, "atomic library call");
  }
#if 0
  if (UseLibcall) {
    const char* LibCallName;
    switch (E->getOp()) {
    case AtomicExpr::CmpXchgWeak:
      LibCallName = "__atomic_compare_exchange_generic"; break;
    case AtomicExpr::CmpXchgStrong:
      LibCallName = "__atomic_compare_exchange_generic"; break;
    case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
    case AtomicExpr::Xchg:  LibCallName = "__atomic_exchange_generic"; break;
    case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
    case AtomicExpr::Load:  LibCallName = "__atomic_load_generic"; break;
    }
    llvm::SmallVector<QualType, 4> Params;
    CallArgList Args;
    QualType RetTy = getContext().VoidTy;
    if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);
    if (E->getOp() != AtomicExpr::Load)
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
    if (E->isCmpXChg()) {
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      RetTy = getContext().IntTy;
    }
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    const CGFunctionInfo &FuncInfo =
      CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getOp() == AtomicExpr::Store)
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }
#endif
  llvm::Type *IPtrTy =
    llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default:  // invalid order
      // We should never get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getOp() == AtomicExpr::Store)
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (E->getOp() != AtomicExpr::Store)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (E->getOp() != AtomicExpr::Load)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (E->getOp() != AtomicExpr::Store) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (E->getOp() != AtomicExpr::Load) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (E->getOp() == AtomicExpr::Store)
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN,
                                    unsigned AccuracyD) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (!AccuracyN || !isa<llvm::Instruction>(Val))
    return;

  llvm::Value *Vals[2];
  Vals[0] = llvm::ConstantInt::get(Int32Ty, AccuracyN);
  Vals[1] = llvm::ConstantInt::get(Int32Ty, AccuracyD);
  llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), Vals);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy,
                                            Node);
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}