//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
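/// For example, for a pointer-typed expression the scalar conversion below
/// loads the pointer and compares it against the null pointer.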
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),
                                       BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
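///
/// For example, binding 'const Base &b = Derived()' requires a
/// derived-to-base adjustment to reach the Base subobject of the temporary,
/// and 'const int &i = S().field' requires a field adjustment.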
  struct SubobjectAdjustment {
    enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

    union {
      struct {
        const CastExpr *BasePath;
        const CXXRecordDecl *DerivedClass;
      } DerivedToBase;

      FieldDecl *Field;
    };

    SubobjectAdjustment(const CastExpr *BasePath,
                        const CXXRecordDecl *DerivedClass)
      : Kind(DerivedToBaseAdjustment) {
      DerivedToBase.BasePath = BasePath;
      DerivedToBase.DerivedClass = DerivedClass;
    }

    SubobjectAdjustment(FieldDecl *Field)
      : Kind(FieldAdjustment) {
      this->Field = Field;
    }
  };
}

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      llvm::SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through single-element init lists that claim to be lvalues. They're
  // just syntactic wrappers in this case.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits() == 1 && ILE->isGLValue())
      E = ILE->getInit(0);
  }

  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
                                      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
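    // (A non-simple lvalue, such as a bitfield or an ext_vector element,
    // has no directly addressable storage of its own, so the value is loaded
    // here and spilled into a reference temporary below.)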
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
            CGF.GetAddressOfBaseClass(Object,
                                      Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                      /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV =
            CGF.EmitLValueForField(Object, Adjustment.Field, 0);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
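  // (Only scalar and complex rvalues reach this point; aggregates were
  // returned above.)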
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
          "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = precise ? &destroyARCStrongPrecise :
                                       &destroyARCStrongImprecise;
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  *destroyer, cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // This local is a GCC and MSVC compiler workaround.
      Destroyer *destroyer = &destroyARCWeak;
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, *destroyer, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  if (isa<llvm::ConstantAggregateZero>(Elts))
    return 0;

  return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be in the standard address space.
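  // (The objectsize intrinsic is only defined on an i8* in the default
  // address space.)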
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address.  Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
684 "Only single-element init list can be lvalue."); 685 return EmitLValue(cast<InitListExpr>(E)->getInit(0)); 686 687 case Expr::BlockDeclRefExprClass: 688 return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E)); 689 690 case Expr::CXXTemporaryObjectExprClass: 691 case Expr::CXXConstructExprClass: 692 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); 693 case Expr::CXXBindTemporaryExprClass: 694 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); 695 696 case Expr::ExprWithCleanupsClass: { 697 const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E); 698 enterFullExpression(cleanups); 699 RunCleanupsScope Scope(*this); 700 return EmitLValue(cleanups->getSubExpr()); 701 } 702 703 case Expr::CXXScalarValueInitExprClass: 704 return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E)); 705 case Expr::CXXDefaultArgExprClass: 706 return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr()); 707 case Expr::CXXTypeidExprClass: 708 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E)); 709 710 case Expr::ObjCMessageExprClass: 711 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); 712 case Expr::ObjCIvarRefExprClass: 713 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); 714 case Expr::StmtExprClass: 715 return EmitStmtExprLValue(cast<StmtExpr>(E)); 716 case Expr::UnaryOperatorClass: 717 return EmitUnaryOpLValue(cast<UnaryOperator>(E)); 718 case Expr::ArraySubscriptExprClass: 719 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); 720 case Expr::ExtVectorElementExprClass: 721 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); 722 case Expr::MemberExprClass: 723 return EmitMemberExpr(cast<MemberExpr>(E)); 724 case Expr::CompoundLiteralExprClass: 725 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); 726 case Expr::ConditionalOperatorClass: 727 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E)); 728 case Expr::BinaryConditionalOperatorClass: 729 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E)); 730 case Expr::ChooseExprClass: 731 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext())); 732 case Expr::OpaqueValueExprClass: 733 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E)); 734 case Expr::SubstNonTypeTemplateParmExprClass: 735 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement()); 736 case Expr::ImplicitCastExprClass: 737 case Expr::CStyleCastExprClass: 738 case Expr::CXXFunctionalCastExprClass: 739 case Expr::CXXStaticCastExprClass: 740 case Expr::CXXDynamicCastExprClass: 741 case Expr::CXXReinterpretCastExprClass: 742 case Expr::CXXConstCastExprClass: 743 case Expr::ObjCBridgedCastExprClass: 744 return EmitCastLValue(cast<CastExpr>(E)); 745 746 case Expr::MaterializeTemporaryExprClass: 747 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E)); 748 } 749 } 750 751 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) { 752 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), 753 lvalue.getAlignment().getQuantity(), 754 lvalue.getType(), lvalue.getTBAAInfo()); 755 } 756 757 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, 758 unsigned Alignment, QualType Ty, 759 llvm::MDNode *TBAAInfo) { 760 llvm::LoadInst *Load = Builder.CreateLoad(Addr); 761 if (Volatile) 762 Load->setVolatile(true); 763 if (Alignment) 764 Load->setAlignment(Alignment); 765 if (TBAAInfo) 766 CGM.DecorateInstruction(Load, TBAAInfo); 767 // If this is an atomic type, all normal reads must be atomic 768 if 
    Load->setAtomic(llvm::SequentiallyConsistent);

  return EmitFromMemory(Load, Ty);
}

static bool isBooleanUnderlyingType(QualType Ty) {
  if (const EnumType *ET = dyn_cast<EnumType>(Ty))
    return ET->getDecl()->getIntegerType()->isBooleanType();
  return false;
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
                                               AI.AccessWidth,
                        CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
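  // For example, loading 'V.y' from an ext_vector 'float4 V' emits an
  // extractelement of element 1.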
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i) {
    unsigned InIdx = getAccessedFieldNo(i, Elts);
    Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
  }

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified());
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment to a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment to a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Dst.getType()->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access,
    // limited to the bits that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // If the source and destination have the same number of elements, use a
      // shufflevector, inverting the mask since it applies to the side being
      // stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned InIdx = getAccessedFieldNo(i, Elts);
        Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
      }

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length, then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // Build an identity mask.
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));

      // Then overwrite the slots that receive shuffled-in source elements.
      for (unsigned i = 0; i != NumSrcElts; ++i) {
        unsigned Idx = getAccessedFieldNo(i, Elts);
        Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
      }
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of
      // the struct follows gcc's behavior and conservatively makes it a
      // non-ivar write-barrier.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If the cast is to a structure pointer, follow gcc's behavior and make
      // it a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not
      // the same as assigning to the ivar itself.  {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not
      // the same as assigning to the global itself.  {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if the member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  QualType T = E->getType();
  LValue LV;
  if (VD->getType()->isReferenceType()) {
    llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
    LI->setAlignment(Alignment.getQuantity());
    V = LI;
    LV = CGF.MakeNaturalAlignAddrLValue(V, T);
  } else {
    LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
  }
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
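      // (For instance, a K&R definition 'int f(x) int x; { ... }' has the
      // prototyped type 'int (int)', while an unprototyped use of 'f'
      // expects 'int ()'.)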
      QualType NoProtoType =
        CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  CharUnits Alignment = getContext().getDeclAlign(ND);
  QualType T = E->getType();

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (VD->hasAttr<BlocksAttr>())
      V = BuildBlockByrefAddress(V, VD);

    LValue LV;
    if (VD->getType()->isReferenceType()) {
      llvm::LoadInst *LI = Builder.CreateLoad(V);
      LI->setAlignment(Alignment.getQuantity());
      V = LI;
      LV = MakeNaturalAlignAddrLValue(V, T);
    } else {
      LV = MakeAddrLValue(V, T, Alignment);
    }

    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");

  // Return an invalid LValue; the llvm_unreachable above ensures
  // that this point is never actually reached.
  return LValue();
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  CharUnits Alignment = getContext().getDeclAlign(E->getDecl());
  return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate a __weak write barrier on an indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate a __strong write barrier on an indirect
    // write into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // real and imag are valid on scalars.  This is a faster way of
    // testing that.
    if (!cast<llvm::PointerType>(Addr->getType())
           ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
        (isa<BlockDecl>(CurDecl)
         ? FnName.str()
         : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type,
                                       CurDecl));

    llvm::Constant *C =
      CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
    return MakeAddrLValue(C, E->getType());
  }
  }
}

llvm::BasicBlock *CodeGenFunction::getTrapBB() {
  const CodeGenOptions &GCO = CGM.getCodeGenOpts();

  // If we are not optimizing, don't collapse all calls to trap in the function
  // to the same call; that way, in the debugger they can see which operation
  // did in fact fail.  If we are optimizing, we collapse all calls to trap down
  // to just one per function to save on codesize.
1564 if (GCO.OptimizationLevel && TrapBB)
1565 return TrapBB;
1566
1567 llvm::BasicBlock *Cont = 0;
1568 if (HaveInsertPoint()) {
1569 Cont = createBasicBlock("cont");
1570 EmitBranch(Cont);
1571 }
1572 TrapBB = createBasicBlock("trap");
1573 EmitBlock(TrapBB);
1574
1575 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
1576 llvm::CallInst *TrapCall = Builder.CreateCall(F);
1577 TrapCall->setDoesNotReturn();
1578 TrapCall->setDoesNotThrow();
1579 Builder.CreateUnreachable();
1580
1581 if (Cont)
1582 EmitBlock(Cont);
1583 return TrapBB;
1584 }
1585
1586 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
1587 /// array to pointer, return the array subexpression.
1588 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
1589 // If this isn't just an array->pointer decay, bail out.
1590 const CastExpr *CE = dyn_cast<CastExpr>(E);
1591 if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
1592 return 0;
1593
1594 // If this is a decay from a variable-width array, bail out.
1595 const Expr *SubExpr = CE->getSubExpr();
1596 if (SubExpr->getType()->isVariableArrayType())
1597 return 0;
1598
1599 return SubExpr;
1600 }
1601
1602 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1603 // The index must always be an integer, which is not an aggregate. Emit it.
1604 llvm::Value *Idx = EmitScalarExpr(E->getIdx());
1605 QualType IdxTy = E->getIdx()->getType();
1606 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
1607
1608 // If the base is a vector type, then we are forming a vector element lvalue
1609 // with this subscript.
1610 if (E->getBase()->getType()->isVectorType()) {
1611 // Emit the vector as an lvalue to get its address.
1612 LValue LHS = EmitLValue(E->getBase());
1613 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
1614 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
1615 return LValue::MakeVectorElt(LHS.getAddress(), Idx,
1616 E->getBase()->getType());
1617 }
1618
1619 // Extend or truncate the index type to 32 or 64 bits.
1620 if (Idx->getType() != IntPtrTy)
1621 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
1622
1623 // FIXME: Once LLVM implements object-size checking, this can come out.
1624 if (CatchUndefined) {
1625 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
1626 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
1627 if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
1628 if (const ConstantArrayType *CAT
1629 = getContext().getAsConstantArrayType(DRE->getType())) {
1630 llvm::APInt Size = CAT->getSize();
1631 llvm::BasicBlock *Cont = createBasicBlock("cont");
1632 Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
1633 llvm::ConstantInt::get(Idx->getType(), Size)),
1634 Cont, getTrapBB());
1635 EmitBlock(Cont);
1636 }
1637 }
1638 }
1639 }
1640 }
1641
1642 // We know that the pointer points to a type of the correct size, unless
1643 // the type is a VLA or an Objective-C interface.
1644 llvm::Value *Address = 0;
1645 CharUnits ArrayAlignment;
1646 if (const VariableArrayType *vla =
1647 getContext().getAsVariableArrayType(E->getType())) {
1648 // The base must be a pointer, which is not an aggregate. Emit
1649 // it. It needs to be emitted first in case it's what captures
1650 // the VLA bounds.
1651 Address = EmitScalarExpr(E->getBase());
1652
1653 // The element count here is the total number of non-VLA elements.
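// (Hypothetical example for orientation: given
//    void f(int n, int (*p)[n], int i) { ... p[i] ... }
// each step of 'i' spans 'n' inner elements, so the index is scaled by the
// VLA element count before the GEP below.)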
1654 llvm::Value *numElements = getVLASize(vla).first;
1655
1656 // Effectively, the multiply by the VLA size is part of the GEP.
1657 // GEP indexes are signed, and scaling an index isn't permitted to
1658 // signed-overflow, so we use the same semantics for our explicit
1659 // multiply. We suppress this when signed overflow is defined behavior.
1660 if (getLangOptions().isSignedOverflowDefined()) {
1661 Idx = Builder.CreateMul(Idx, numElements);
1662 Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1663 } else {
1664 Idx = Builder.CreateNSWMul(Idx, numElements);
1665 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
1666 }
1667 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
1668 // Indexing over an interface, as in "NSString *P; P[4];"
1669 llvm::Value *InterfaceSize =
1670 llvm::ConstantInt::get(Idx->getType(),
1671 getContext().getTypeSizeInChars(OIT).getQuantity());
1672
1673 Idx = Builder.CreateMul(Idx, InterfaceSize);
1674
1675 // The base must be a pointer, which is not an aggregate. Emit it.
1676 llvm::Value *Base = EmitScalarExpr(E->getBase());
1677 Address = EmitCastToVoidPtr(Base);
1678 Address = Builder.CreateGEP(Address, Idx, "arrayidx");
1679 Address = Builder.CreateBitCast(Address, Base->getType());
1680 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
1681 // If this is A[i] where A is an array, the frontend will have decayed the
1682 // base to an ArrayToPointerDecay implicit cast. While correct, it is
1683 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
1684 // "gep x, i" here. Emit one "gep A, 0, i".
1685 assert(Array->getType()->isArrayType() &&
1686 "Array to pointer decay must have array source type!");
1687 LValue ArrayLV = EmitLValue(Array);
1688 llvm::Value *ArrayPtr = ArrayLV.getAddress();
1689 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
1690 llvm::Value *Args[] = { Zero, Idx };
1691
1692 // Propagate the alignment from the array itself to the result.
1693 ArrayAlignment = ArrayLV.getAlignment();
1694
1695 if (getContext().getLangOptions().isSignedOverflowDefined())
1696 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
1697 else
1698 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
1699 } else {
1700 // The base must be a pointer, which is not an aggregate. Emit it.
1701 llvm::Value *Base = EmitScalarExpr(E->getBase());
1702 if (getContext().getLangOptions().isSignedOverflowDefined())
1703 Address = Builder.CreateGEP(Base, Idx, "arrayidx");
1704 else
1705 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
1706 }
1707
1708 QualType T = E->getBase()->getType()->getPointeeType();
1709 assert(!T.isNull() &&
1710 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
1711
1712
1713 // Limit the alignment to that of the result type.
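// (Illustrative case, not from the original source: for
//    struct __attribute__((packed)) S { char c; int a[4]; } s;
// 's.a[i]' inherits the array's byte alignment from the packed struct,
// which is below the element type's natural alignment, so the smaller of
// the two values must win here.)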
1714 LValue LV;
1715 if (!ArrayAlignment.isZero()) {
1716 CharUnits Align = getContext().getTypeAlignInChars(T);
1717 ArrayAlignment = std::min(Align, ArrayAlignment);
1718 LV = MakeAddrLValue(Address, T, ArrayAlignment);
1719 } else {
1720 LV = MakeNaturalAlignAddrLValue(Address, T);
1721 }
1722
1723 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
1724
1725 if (getContext().getLangOptions().ObjC1 &&
1726 getContext().getLangOptions().getGC() != LangOptions::NonGC) {
1727 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1728 setObjCGCLValueClass(getContext(), E, LV);
1729 }
1730 return LV;
1731 }
1732
1733 static
1734 llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
1735 SmallVector<unsigned, 4> &Elts) {
1736 SmallVector<llvm::Constant*, 4> CElts;
1737
1738 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
1739 for (unsigned i = 0, e = Elts.size(); i != e; ++i)
1740 CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
1741
1742 return llvm::ConstantVector::get(CElts);
1743 }
1744
1745 LValue CodeGenFunction::
1746 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
1747 // Emit the base vector as an l-value.
1748 LValue Base;
1749
1750 // An ExtVectorElementExpr's base can be either a vector or a pointer to a vector.
1751 if (E->isArrow()) {
1752 // If it is a pointer to a vector, emit the address and form an lvalue with
1753 // it.
1754 llvm::Value *Ptr = EmitScalarExpr(E->getBase());
1755 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
1756 Base = MakeAddrLValue(Ptr, PT->getPointeeType());
1757 Base.getQuals().removeObjCGCAttr();
1758 } else if (E->getBase()->isGLValue()) {
1759 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1760 // emit the base as an lvalue.
1761 assert(E->getBase()->getType()->isVectorType());
1762 Base = EmitLValue(E->getBase());
1763 } else {
1764 // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
1765 assert(E->getBase()->getType()->isVectorType() &&
1766 "Result must be a vector");
1767 llvm::Value *Vec = EmitScalarExpr(E->getBase());
1768
1769 // Store the vector to memory (because LValue wants an address).
1770 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
1771 Builder.CreateStore(Vec, VecMem);
1772 Base = MakeAddrLValue(VecMem, E->getBase()->getType());
1773 }
1774
1775 QualType type =
1776 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
1777
1778 // Encode the element access list into a vector of unsigned indices.
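// (For orientation: with an ext_vector type such as
//    typedef float float4 __attribute__((ext_vector_type(4)));
// a swizzle like 'v.zyx' encodes the access list as the indices {2, 1, 0}.)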
1779 SmallVector<unsigned, 4> Indices; 1780 E->getEncodedElementAccess(Indices); 1781 1782 if (Base.isSimple()) { 1783 llvm::Constant *CV = GenerateConstantVector(getLLVMContext(), Indices); 1784 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type); 1785 } 1786 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 1787 1788 llvm::Constant *BaseElts = Base.getExtVectorElts(); 1789 SmallVector<llvm::Constant *, 4> CElts; 1790 1791 for (unsigned i = 0, e = Indices.size(); i != e; ++i) { 1792 if (isa<llvm::ConstantAggregateZero>(BaseElts)) 1793 CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0)); 1794 else 1795 CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i]))); 1796 } 1797 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 1798 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type); 1799 } 1800 1801 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 1802 bool isNonGC = false; 1803 Expr *BaseExpr = E->getBase(); 1804 llvm::Value *BaseValue = NULL; 1805 Qualifiers BaseQuals; 1806 1807 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 1808 if (E->isArrow()) { 1809 BaseValue = EmitScalarExpr(BaseExpr); 1810 const PointerType *PTy = 1811 BaseExpr->getType()->getAs<PointerType>(); 1812 BaseQuals = PTy->getPointeeType().getQualifiers(); 1813 } else { 1814 LValue BaseLV = EmitLValue(BaseExpr); 1815 if (BaseLV.isNonGC()) 1816 isNonGC = true; 1817 // FIXME: this isn't right for bitfields. 1818 BaseValue = BaseLV.getAddress(); 1819 QualType BaseTy = BaseExpr->getType(); 1820 BaseQuals = BaseTy.getQualifiers(); 1821 } 1822 1823 NamedDecl *ND = E->getMemberDecl(); 1824 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 1825 LValue LV = EmitLValueForField(BaseValue, Field, 1826 BaseQuals.getCVRQualifiers()); 1827 LV.setNonGC(isNonGC); 1828 setObjCGCLValueClass(getContext(), E, LV); 1829 return LV; 1830 } 1831 1832 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 1833 return EmitGlobalVarDeclLValue(*this, E, VD); 1834 1835 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 1836 return EmitFunctionDeclLValue(*this, E, FD); 1837 1838 llvm_unreachable("Unhandled member declaration!"); 1839 } 1840 1841 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue, 1842 const FieldDecl *Field, 1843 unsigned CVRQualifiers) { 1844 const CGRecordLayout &RL = 1845 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1846 const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field); 1847 return LValue::MakeBitfield(BaseValue, Info, 1848 Field->getType().withCVRQualifiers(CVRQualifiers)); 1849 } 1850 1851 /// EmitLValueForAnonRecordField - Given that the field is a member of 1852 /// an anonymous struct or union buried inside a record, and given 1853 /// that the base value is a pointer to the enclosing record, derive 1854 /// an lvalue for the ultimate field. 
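/// For example (hypothetical): in
///   struct S { struct { int x; }; } s;
/// 's.x' refers to an IndirectFieldDecl whose chain names the anonymous
/// struct member and then 'x'; the loop below peels one link per iteration.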
1855 LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue, 1856 const IndirectFieldDecl *Field, 1857 unsigned CVRQualifiers) { 1858 IndirectFieldDecl::chain_iterator I = Field->chain_begin(), 1859 IEnd = Field->chain_end(); 1860 while (true) { 1861 LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I), 1862 CVRQualifiers); 1863 if (++I == IEnd) return LV; 1864 1865 assert(LV.isSimple()); 1866 BaseValue = LV.getAddress(); 1867 CVRQualifiers |= LV.getVRQualifiers(); 1868 } 1869 } 1870 1871 LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr, 1872 const FieldDecl *field, 1873 unsigned cvr) { 1874 if (field->isBitField()) 1875 return EmitLValueForBitfield(baseAddr, field, cvr); 1876 1877 const RecordDecl *rec = field->getParent(); 1878 QualType type = field->getType(); 1879 CharUnits alignment = getContext().getDeclAlign(field); 1880 1881 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 1882 1883 llvm::Value *addr = baseAddr; 1884 if (rec->isUnion()) { 1885 // For unions, there is no pointer adjustment. 1886 assert(!type->isReferenceType() && "union has reference member"); 1887 } else { 1888 // For structs, we GEP to the field that the record layout suggests. 1889 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 1890 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 1891 1892 // If this is a reference field, load the reference right now. 1893 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 1894 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 1895 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 1896 load->setAlignment(alignment.getQuantity()); 1897 1898 if (CGM.shouldUseTBAA()) { 1899 llvm::MDNode *tbaa; 1900 if (mayAlias) 1901 tbaa = CGM.getTBAAInfo(getContext().CharTy); 1902 else 1903 tbaa = CGM.getTBAAInfo(type); 1904 CGM.DecorateInstruction(load, tbaa); 1905 } 1906 1907 addr = load; 1908 mayAlias = false; 1909 type = refType->getPointeeType(); 1910 if (type->isIncompleteType()) 1911 alignment = CharUnits(); 1912 else 1913 alignment = getContext().getTypeAlignInChars(type); 1914 cvr = 0; // qualifiers don't recursively apply to referencee 1915 } 1916 } 1917 1918 // Make sure that the address is pointing to the right type. This is critical 1919 // for both unions and structs. A union needs a bitcast, a struct element 1920 // will need a bitcast if the LLVM type laid out doesn't match the desired 1921 // type. 1922 addr = EmitBitCastOfLValueToProperType(*this, addr, 1923 CGM.getTypes().ConvertTypeForMem(type), 1924 field->getName()); 1925 1926 if (field->hasAttr<AnnotateAttr>()) 1927 addr = EmitFieldAnnotations(field, addr); 1928 1929 LValue LV = MakeAddrLValue(addr, type, alignment); 1930 LV.getQuals().addCVRQualifiers(cvr); 1931 1932 // __weak attribute on a field is ignored. 1933 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 1934 LV.getQuals().removeObjCGCAttr(); 1935 1936 // Fields of may_alias structs act like 'char' for TBAA purposes. 1937 // FIXME: this should get propagated down through anonymous structs 1938 // and unions. 
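// (Illustrative, not from the original source: for
//    struct __attribute__((may_alias)) M { int i; };
// loads and stores of 'm.i' are tagged as 'char' for TBAA, so the optimizer
// must assume they can alias accesses of any other type.)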
1939 if (mayAlias && LV.getTBAAInfo()) 1940 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 1941 1942 return LV; 1943 } 1944 1945 LValue 1946 CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue, 1947 const FieldDecl *Field, 1948 unsigned CVRQualifiers) { 1949 QualType FieldType = Field->getType(); 1950 1951 if (!FieldType->isReferenceType()) 1952 return EmitLValueForField(BaseValue, Field, CVRQualifiers); 1953 1954 const CGRecordLayout &RL = 1955 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1956 unsigned idx = RL.getLLVMFieldNo(Field); 1957 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx); 1958 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 1959 1960 1961 // Make sure that the address is pointing to the right type. This is critical 1962 // for both unions and structs. A union needs a bitcast, a struct element 1963 // will need a bitcast if the LLVM type laid out doesn't match the desired 1964 // type. 1965 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 1966 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1967 V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS)); 1968 1969 CharUnits Alignment = getContext().getDeclAlign(Field); 1970 return MakeAddrLValue(V, FieldType, Alignment); 1971 } 1972 1973 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 1974 if (E->isFileScope()) { 1975 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 1976 return MakeAddrLValue(GlobalPtr, E->getType()); 1977 } 1978 1979 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 1980 const Expr *InitExpr = E->getInitializer(); 1981 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 1982 1983 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 1984 /*Init*/ true); 1985 1986 return Result; 1987 } 1988 1989 LValue CodeGenFunction:: 1990 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 1991 if (!expr->isGLValue()) { 1992 // ?: here should be an aggregate. 1993 assert((hasAggregateLLVMType(expr->getType()) && 1994 !expr->getType()->isAnyComplexType()) && 1995 "Unexpected conditional operator!"); 1996 return EmitAggExprToLValue(expr); 1997 } 1998 1999 const Expr *condExpr = expr->getCond(); 2000 bool CondExprBool; 2001 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 2002 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 2003 if (!CondExprBool) std::swap(live, dead); 2004 2005 if (!ContainsLabel(dead)) 2006 return EmitLValue(live); 2007 } 2008 2009 OpaqueValueMapping binding(*this, expr); 2010 2011 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 2012 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 2013 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 2014 2015 ConditionalEvaluation eval(*this); 2016 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock); 2017 2018 // Any temporaries created here are conditional. 2019 EmitBlock(lhsBlock); 2020 eval.begin(*this); 2021 LValue lhs = EmitLValue(expr->getTrueExpr()); 2022 eval.end(*this); 2023 2024 if (!lhs.isSimple()) 2025 return EmitUnsupportedLValue(expr, "conditional operator"); 2026 2027 lhsBlock = Builder.GetInsertBlock(); 2028 Builder.CreateBr(contBlock); 2029 2030 // Any temporaries created here are conditional. 
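// (For orientation, a sketch of what a glvalue conditional lowers to: for
//    (cond ? x : y) = 0;
// each arm computes an address, and the continuation block below merges
// &x and &y through a phi before the store.)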
2031 EmitBlock(rhsBlock); 2032 eval.begin(*this); 2033 LValue rhs = EmitLValue(expr->getFalseExpr()); 2034 eval.end(*this); 2035 if (!rhs.isSimple()) 2036 return EmitUnsupportedLValue(expr, "conditional operator"); 2037 rhsBlock = Builder.GetInsertBlock(); 2038 2039 EmitBlock(contBlock); 2040 2041 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2, 2042 "cond-lvalue"); 2043 phi->addIncoming(lhs.getAddress(), lhsBlock); 2044 phi->addIncoming(rhs.getAddress(), rhsBlock); 2045 return MakeAddrLValue(phi, expr->getType()); 2046 } 2047 2048 /// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast. 2049 /// If the cast is a dynamic_cast, we can have the usual lvalue result, 2050 /// otherwise if a cast is needed by the code generator in an lvalue context, 2051 /// then it must mean that we need the address of an aggregate in order to 2052 /// access one of its fields. This can happen for all the reasons that casts 2053 /// are permitted with aggregate result, including noop aggregate casts, and 2054 /// cast from scalar to union. 2055 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 2056 switch (E->getCastKind()) { 2057 case CK_ToVoid: 2058 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 2059 2060 case CK_Dependent: 2061 llvm_unreachable("dependent cast kind in IR gen!"); 2062 2063 // These two casts are currently treated as no-ops, although they could 2064 // potentially be real operations depending on the target's ABI. 2065 case CK_NonAtomicToAtomic: 2066 case CK_AtomicToNonAtomic: 2067 2068 case CK_NoOp: 2069 case CK_LValueToRValue: 2070 if (!E->getSubExpr()->Classify(getContext()).isPRValue() 2071 || E->getType()->isRecordType()) 2072 return EmitLValue(E->getSubExpr()); 2073 // Fall through to synthesize a temporary. 2074 2075 case CK_BitCast: 2076 case CK_ArrayToPointerDecay: 2077 case CK_FunctionToPointerDecay: 2078 case CK_NullToMemberPointer: 2079 case CK_NullToPointer: 2080 case CK_IntegralToPointer: 2081 case CK_PointerToIntegral: 2082 case CK_PointerToBoolean: 2083 case CK_VectorSplat: 2084 case CK_IntegralCast: 2085 case CK_IntegralToBoolean: 2086 case CK_IntegralToFloating: 2087 case CK_FloatingToIntegral: 2088 case CK_FloatingToBoolean: 2089 case CK_FloatingCast: 2090 case CK_FloatingRealToComplex: 2091 case CK_FloatingComplexToReal: 2092 case CK_FloatingComplexToBoolean: 2093 case CK_FloatingComplexCast: 2094 case CK_FloatingComplexToIntegralComplex: 2095 case CK_IntegralRealToComplex: 2096 case CK_IntegralComplexToReal: 2097 case CK_IntegralComplexToBoolean: 2098 case CK_IntegralComplexCast: 2099 case CK_IntegralComplexToFloatingComplex: 2100 case CK_DerivedToBaseMemberPointer: 2101 case CK_BaseToDerivedMemberPointer: 2102 case CK_MemberPointerToBoolean: 2103 case CK_AnyPointerToBlockPointerCast: 2104 case CK_ARCProduceObject: 2105 case CK_ARCConsumeObject: 2106 case CK_ARCReclaimReturnedObject: 2107 case CK_ARCExtendBlockObject: { 2108 // These casts only produce lvalues when we're binding a reference to a 2109 // temporary realized from a (converted) pure rvalue. Emit the expression 2110 // as a value, copy it into a temporary, and return an lvalue referring to 2111 // that temporary. 
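// (Hypothetical example of a cast that lands here:
//    const long &r = someInt;   // CK_IntegralCast under reference binding
// The converted value is spilled into the "ref.temp" alloca below.)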
2112 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp"); 2113 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false); 2114 return MakeAddrLValue(V, E->getType()); 2115 } 2116 2117 case CK_Dynamic: { 2118 LValue LV = EmitLValue(E->getSubExpr()); 2119 llvm::Value *V = LV.getAddress(); 2120 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E); 2121 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 2122 } 2123 2124 case CK_ConstructorConversion: 2125 case CK_UserDefinedConversion: 2126 case CK_CPointerToObjCPointerCast: 2127 case CK_BlockPointerToObjCPointerCast: 2128 return EmitLValue(E->getSubExpr()); 2129 2130 case CK_UncheckedDerivedToBase: 2131 case CK_DerivedToBase: { 2132 const RecordType *DerivedClassTy = 2133 E->getSubExpr()->getType()->getAs<RecordType>(); 2134 CXXRecordDecl *DerivedClassDecl = 2135 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2136 2137 LValue LV = EmitLValue(E->getSubExpr()); 2138 llvm::Value *This = LV.getAddress(); 2139 2140 // Perform the derived-to-base conversion 2141 llvm::Value *Base = 2142 GetAddressOfBaseClass(This, DerivedClassDecl, 2143 E->path_begin(), E->path_end(), 2144 /*NullCheckValue=*/false); 2145 2146 return MakeAddrLValue(Base, E->getType()); 2147 } 2148 case CK_ToUnion: 2149 return EmitAggExprToLValue(E); 2150 case CK_BaseToDerived: { 2151 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 2152 CXXRecordDecl *DerivedClassDecl = 2153 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2154 2155 LValue LV = EmitLValue(E->getSubExpr()); 2156 2157 // Perform the base-to-derived conversion 2158 llvm::Value *Derived = 2159 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 2160 E->path_begin(), E->path_end(), 2161 /*NullCheckValue=*/false); 2162 2163 return MakeAddrLValue(Derived, E->getType()); 2164 } 2165 case CK_LValueBitCast: { 2166 // This must be a reinterpret_cast (or c-style equivalent). 
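// (Illustrative: something like
//    reinterpret_cast<float&>(someInt) = 1.0f;
// keeps the original storage but re-types the lvalue as the type written
// in the cast, which is the bitcast performed below.)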
2167 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 2168 2169 LValue LV = EmitLValue(E->getSubExpr()); 2170 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2171 ConvertType(CE->getTypeAsWritten())); 2172 return MakeAddrLValue(V, E->getType()); 2173 } 2174 case CK_ObjCObjectLValueCast: { 2175 LValue LV = EmitLValue(E->getSubExpr()); 2176 QualType ToType = getContext().getLValueReferenceType(E->getType()); 2177 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2178 ConvertType(ToType)); 2179 return MakeAddrLValue(V, E->getType()); 2180 } 2181 } 2182 2183 llvm_unreachable("Unhandled lvalue cast kind?"); 2184 } 2185 2186 LValue CodeGenFunction::EmitNullInitializationLValue( 2187 const CXXScalarValueInitExpr *E) { 2188 QualType Ty = E->getType(); 2189 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty); 2190 EmitNullInitialization(LV.getAddress(), Ty); 2191 return LV; 2192 } 2193 2194 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 2195 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 2196 return getOpaqueLValueMapping(e); 2197 } 2198 2199 LValue CodeGenFunction::EmitMaterializeTemporaryExpr( 2200 const MaterializeTemporaryExpr *E) { 2201 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); 2202 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2203 } 2204 2205 2206 //===--------------------------------------------------------------------===// 2207 // Expression Emission 2208 //===--------------------------------------------------------------------===// 2209 2210 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 2211 ReturnValueSlot ReturnValue) { 2212 if (CGDebugInfo *DI = getDebugInfo()) 2213 DI->EmitLocation(Builder, E->getLocStart()); 2214 2215 // Builtins never have block type. 2216 if (E->getCallee()->getType()->isBlockPointerType()) 2217 return EmitBlockCallExpr(E, ReturnValue); 2218 2219 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 2220 return EmitCXXMemberCallExpr(CE, ReturnValue); 2221 2222 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E)) 2223 return EmitCUDAKernelCallExpr(CE, ReturnValue); 2224 2225 const Decl *TargetDecl = E->getCalleeDecl(); 2226 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 2227 if (unsigned builtinID = FD->getBuiltinID()) 2228 return EmitBuiltinExpr(FD, builtinID, E); 2229 } 2230 2231 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 2232 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 2233 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 2234 2235 if (const CXXPseudoDestructorExpr *PseudoDtor 2236 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 2237 QualType DestroyedType = PseudoDtor->getDestroyedType(); 2238 if (getContext().getLangOptions().ObjCAutoRefCount && 2239 DestroyedType->isObjCLifetimeType() && 2240 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || 2241 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { 2242 // Automatic Reference Counting: 2243 // If the pseudo-expression names a retainable object with weak or 2244 // strong lifetime, the object shall be released. 2245 Expr *BaseExpr = PseudoDtor->getBase(); 2246 llvm::Value *BaseValue = NULL; 2247 Qualifiers BaseQuals; 2248 2249 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
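// (Hypothetical ARC example of the branch handled below: with
//    typedef __strong id StrongID;  StrongID *p;
// the pseudo-destructor call 'p->~StrongID()' emits a release of '*p'
// rather than invoking any real destructor.)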
2250 if (PseudoDtor->isArrow()) { 2251 BaseValue = EmitScalarExpr(BaseExpr); 2252 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); 2253 BaseQuals = PTy->getPointeeType().getQualifiers(); 2254 } else { 2255 LValue BaseLV = EmitLValue(BaseExpr); 2256 BaseValue = BaseLV.getAddress(); 2257 QualType BaseTy = BaseExpr->getType(); 2258 BaseQuals = BaseTy.getQualifiers(); 2259 } 2260 2261 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { 2262 case Qualifiers::OCL_None: 2263 case Qualifiers::OCL_ExplicitNone: 2264 case Qualifiers::OCL_Autoreleasing: 2265 break; 2266 2267 case Qualifiers::OCL_Strong: 2268 EmitARCRelease(Builder.CreateLoad(BaseValue, 2269 PseudoDtor->getDestroyedType().isVolatileQualified()), 2270 /*precise*/ true); 2271 break; 2272 2273 case Qualifiers::OCL_Weak: 2274 EmitARCDestroyWeak(BaseValue); 2275 break; 2276 } 2277 } else { 2278 // C++ [expr.pseudo]p1: 2279 // The result shall only be used as the operand for the function call 2280 // operator (), and the result of such a call has type void. The only 2281 // effect is the evaluation of the postfix-expression before the dot or 2282 // arrow. 2283 EmitScalarExpr(E->getCallee()); 2284 } 2285 2286 return RValue::get(0); 2287 } 2288 2289 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 2290 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue, 2291 E->arg_begin(), E->arg_end(), TargetDecl); 2292 } 2293 2294 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 2295 // Comma expressions just emit their LHS then their RHS as an l-value. 2296 if (E->getOpcode() == BO_Comma) { 2297 EmitIgnoredExpr(E->getLHS()); 2298 EnsureInsertPoint(); 2299 return EmitLValue(E->getRHS()); 2300 } 2301 2302 if (E->getOpcode() == BO_PtrMemD || 2303 E->getOpcode() == BO_PtrMemI) 2304 return EmitPointerToDataMemberBinaryExpr(E); 2305 2306 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 2307 2308 // Note that in all of these cases, __block variables need the RHS 2309 // evaluated first just in case the variable gets moved by the RHS. 2310 2311 if (!hasAggregateLLVMType(E->getType())) { 2312 switch (E->getLHS()->getType().getObjCLifetime()) { 2313 case Qualifiers::OCL_Strong: 2314 return EmitARCStoreStrong(E, /*ignored*/ false).first; 2315 2316 case Qualifiers::OCL_Autoreleasing: 2317 return EmitARCStoreAutoreleasing(E).first; 2318 2319 // No reason to do any of these differently. 2320 case Qualifiers::OCL_None: 2321 case Qualifiers::OCL_ExplicitNone: 2322 case Qualifiers::OCL_Weak: 2323 break; 2324 } 2325 2326 RValue RV = EmitAnyExpr(E->getRHS()); 2327 LValue LV = EmitLValue(E->getLHS()); 2328 EmitStoreThroughLValue(RV, LV); 2329 return LV; 2330 } 2331 2332 if (E->getType()->isAnyComplexType()) 2333 return EmitComplexAssignmentLValue(E); 2334 2335 return EmitAggExprToLValue(E); 2336 } 2337 2338 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 2339 RValue RV = EmitCallExpr(E); 2340 2341 if (!RV.isScalar()) 2342 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2343 2344 assert(E->getCallReturnType()->isReferenceType() && 2345 "Can't have a scalar return unless the return type is a " 2346 "reference type!"); 2347 2348 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2349 } 2350 2351 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 2352 // FIXME: This shouldn't require another copy. 
2353 return EmitAggExprToLValue(E); 2354 } 2355 2356 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 2357 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 2358 && "binding l-value to type which needs a temporary"); 2359 AggValueSlot Slot = CreateAggTemp(E->getType()); 2360 EmitCXXConstructExpr(E, Slot); 2361 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2362 } 2363 2364 LValue 2365 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 2366 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 2367 } 2368 2369 LValue 2370 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 2371 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2372 Slot.setExternallyDestructed(); 2373 EmitAggExpr(E->getSubExpr(), Slot); 2374 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr()); 2375 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2376 } 2377 2378 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 2379 RValue RV = EmitObjCMessageExpr(E); 2380 2381 if (!RV.isScalar()) 2382 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2383 2384 assert(E->getMethodDecl()->getResultType()->isReferenceType() && 2385 "Can't have a scalar return unless the return type is a " 2386 "reference type!"); 2387 2388 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2389 } 2390 2391 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 2392 llvm::Value *V = 2393 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); 2394 return MakeAddrLValue(V, E->getType()); 2395 } 2396 2397 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 2398 const ObjCIvarDecl *Ivar) { 2399 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 2400 } 2401 2402 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 2403 llvm::Value *BaseValue, 2404 const ObjCIvarDecl *Ivar, 2405 unsigned CVRQualifiers) { 2406 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 2407 Ivar, CVRQualifiers); 2408 } 2409 2410 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 2411 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 2412 llvm::Value *BaseValue = 0; 2413 const Expr *BaseExpr = E->getBase(); 2414 Qualifiers BaseQuals; 2415 QualType ObjectTy; 2416 if (E->isArrow()) { 2417 BaseValue = EmitScalarExpr(BaseExpr); 2418 ObjectTy = BaseExpr->getType()->getPointeeType(); 2419 BaseQuals = ObjectTy.getQualifiers(); 2420 } else { 2421 LValue BaseLV = EmitLValue(BaseExpr); 2422 // FIXME: this isn't right for bitfields. 2423 BaseValue = BaseLV.getAddress(); 2424 ObjectTy = BaseExpr->getType(); 2425 BaseQuals = ObjectTy.getQualifiers(); 2426 } 2427 2428 LValue LV = 2429 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 2430 BaseQuals.getCVRQualifiers()); 2431 setObjCGCLValueClass(getContext(), E, LV); 2432 return LV; 2433 } 2434 2435 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2436 // Can only get l-value for message expression returning aggregate type 2437 RValue RV = EmitAnyExprToTemp(E); 2438 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2439 } 2440 2441 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2442 ReturnValueSlot ReturnValue, 2443 CallExpr::const_arg_iterator ArgBeg, 2444 CallExpr::const_arg_iterator ArgEnd, 2445 const Decl *TargetDecl) { 2446 // Get the actual function type. 
The callee type will always be a pointer to 2447 // function type or a block pointer type. 2448 assert(CalleeType->isFunctionPointerType() && 2449 "Call must have function pointer type!"); 2450 2451 CalleeType = getContext().getCanonicalType(CalleeType); 2452 2453 const FunctionType *FnType 2454 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2455 2456 CallArgList Args; 2457 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2458 2459 const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType); 2460 2461 // C99 6.5.2.2p6: 2462 // If the expression that denotes the called function has a type 2463 // that does not include a prototype, [the default argument 2464 // promotions are performed]. If the number of arguments does not 2465 // equal the number of parameters, the behavior is undefined. If 2466 // the function is defined with a type that includes a prototype, 2467 // and either the prototype ends with an ellipsis (, ...) or the 2468 // types of the arguments after promotion are not compatible with 2469 // the types of the parameters, the behavior is undefined. If the 2470 // function is defined with a type that does not include a 2471 // prototype, and the types of the arguments after promotion are 2472 // not compatible with those of the parameters after promotion, 2473 // the behavior is undefined [except in some trivial cases]. 2474 // That is, in the general case, we should assume that a call 2475 // through an unprototyped function type works like a *non-variadic* 2476 // call. The way we make this work is to cast to the exact type 2477 // of the promoted arguments. 2478 if (isa<FunctionNoProtoType>(FnType) && 2479 !getTargetHooks().isNoProtoCallVariadic(FnInfo)) { 2480 assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0)) 2481 ->isVarArg()); 2482 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false); 2483 CalleeTy = CalleeTy->getPointerTo(); 2484 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast"); 2485 } 2486 2487 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl); 2488 } 2489 2490 LValue CodeGenFunction:: 2491 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2492 llvm::Value *BaseV; 2493 if (E->getOpcode() == BO_PtrMemI) 2494 BaseV = EmitScalarExpr(E->getLHS()); 2495 else 2496 BaseV = EmitLValue(E->getLHS()).getAddress(); 2497 2498 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 2499 2500 const MemberPointerType *MPT 2501 = E->getRHS()->getType()->getAs<MemberPointerType>(); 2502 2503 llvm::Value *AddV = 2504 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT); 2505 2506 return MakeAddrLValue(AddV, MPT->getPointeeType()); 2507 } 2508 2509 static void 2510 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, 2511 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, 2512 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { 2513 if (E->isCmpXChg()) { 2514 // Note that cmpxchg only supports specifying one ordering and 2515 // doesn't support weak cmpxchg, at least at the moment. 
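// (Sketch of the sequence emitted below; IR shapes approximate:
//    %expected = load %Val1
//    %desired  = load %Val2
//    %old      = cmpxchg %Ptr, %expected, %desired <order>
//    store %old, %Val1
//    %ok       = icmp eq %old, %expected
// The expected value is updated in place, and the comparison result
// becomes the boolean result of the builtin.)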
2516 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2517 LoadVal1->setAlignment(Align); 2518 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2); 2519 LoadVal2->setAlignment(Align); 2520 llvm::AtomicCmpXchgInst *CXI = 2521 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order); 2522 CXI->setVolatile(E->isVolatile()); 2523 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1); 2524 StoreVal1->setAlignment(Align); 2525 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1); 2526 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); 2527 return; 2528 } 2529 2530 if (E->getOp() == AtomicExpr::Load) { 2531 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); 2532 Load->setAtomic(Order); 2533 Load->setAlignment(Size); 2534 Load->setVolatile(E->isVolatile()); 2535 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); 2536 StoreDest->setAlignment(Align); 2537 return; 2538 } 2539 2540 if (E->getOp() == AtomicExpr::Store) { 2541 assert(!Dest && "Store does not return a value"); 2542 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2543 LoadVal1->setAlignment(Align); 2544 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); 2545 Store->setAtomic(Order); 2546 Store->setAlignment(Size); 2547 Store->setVolatile(E->isVolatile()); 2548 return; 2549 } 2550 2551 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; 2552 switch (E->getOp()) { 2553 case AtomicExpr::CmpXchgWeak: 2554 case AtomicExpr::CmpXchgStrong: 2555 case AtomicExpr::Store: 2556 case AtomicExpr::Init: 2557 case AtomicExpr::Load: assert(0 && "Already handled!"); 2558 case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break; 2559 case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break; 2560 case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break; 2561 case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break; 2562 case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break; 2563 case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break; 2564 } 2565 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2566 LoadVal1->setAlignment(Align); 2567 llvm::AtomicRMWInst *RMWI = 2568 CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order); 2569 RMWI->setVolatile(E->isVolatile()); 2570 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest); 2571 StoreDest->setAlignment(Align); 2572 } 2573 2574 // This function emits any expression (scalar, complex, or aggregate) 2575 // into a temporary alloca. 
2576 static llvm::Value * 2577 EmitValToTemp(CodeGenFunction &CGF, Expr *E) { 2578 llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp"); 2579 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), 2580 /*Init*/ true); 2581 return DeclPtr; 2582 } 2583 2584 static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty, 2585 llvm::Value *Dest) { 2586 if (Ty->isAnyComplexType()) 2587 return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false)); 2588 if (CGF.hasAggregateLLVMType(Ty)) 2589 return RValue::getAggregate(Dest); 2590 return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty))); 2591 } 2592 2593 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) { 2594 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 2595 QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType(); 2596 CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy); 2597 uint64_t Size = sizeChars.getQuantity(); 2598 CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy); 2599 unsigned Align = alignChars.getQuantity(); 2600 unsigned MaxInlineWidth = 2601 getContext().getTargetInfo().getMaxAtomicInlineWidth(); 2602 bool UseLibcall = (Size != Align || Size > MaxInlineWidth); 2603 2604 2605 2606 llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0; 2607 Ptr = EmitScalarExpr(E->getPtr()); 2608 2609 if (E->getOp() == AtomicExpr::Init) { 2610 assert(!Dest && "Init does not return a value"); 2611 Val1 = EmitScalarExpr(E->getVal1()); 2612 llvm::StoreInst *Store = Builder.CreateStore(Val1, Ptr); 2613 Store->setAlignment(Size); 2614 Store->setVolatile(E->isVolatile()); 2615 return RValue::get(0); 2616 } 2617 2618 Order = EmitScalarExpr(E->getOrder()); 2619 if (E->isCmpXChg()) { 2620 Val1 = EmitScalarExpr(E->getVal1()); 2621 Val2 = EmitValToTemp(*this, E->getVal2()); 2622 OrderFail = EmitScalarExpr(E->getOrderFail()); 2623 (void)OrderFail; // OrderFail is unused at the moment 2624 } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) && 2625 MemTy->isPointerType()) { 2626 // For pointers, we're required to do a bit of math: adding 1 to an int* 2627 // is not the same as adding 1 to a uintptr_t. 2628 QualType Val1Ty = E->getVal1()->getType(); 2629 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1()); 2630 CharUnits PointeeIncAmt = 2631 getContext().getTypeSizeInChars(MemTy->getPointeeType()); 2632 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt)); 2633 Val1 = CreateMemTemp(Val1Ty, ".atomictmp"); 2634 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty)); 2635 } else if (E->getOp() != AtomicExpr::Load) { 2636 Val1 = EmitValToTemp(*this, E->getVal1()); 2637 } 2638 2639 if (E->getOp() != AtomicExpr::Store && !Dest) 2640 Dest = CreateMemTemp(E->getType(), ".atomicdst"); 2641 2642 if (UseLibcall) { 2643 // FIXME: Finalize what the libcalls are actually supposed to look like. 2644 // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary . 
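// (Illustrative: the libcall path is reached when the operation cannot be
// inlined -- e.g. a 32-byte _Atomic struct on a target whose maximum inline
// atomic width is 8 bytes fails the Size/Align checks computed above.)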
2645 return EmitUnsupportedRValue(E, "atomic library call"); 2646 } 2647 #if 0 2648 if (UseLibcall) { 2649 const char* LibCallName; 2650 switch (E->getOp()) { 2651 case AtomicExpr::CmpXchgWeak: 2652 LibCallName = "__atomic_compare_exchange_generic"; break; 2653 case AtomicExpr::CmpXchgStrong: 2654 LibCallName = "__atomic_compare_exchange_generic"; break; 2655 case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break; 2656 case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break; 2657 case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break; 2658 case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break; 2659 case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break; 2660 case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break; 2661 case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break; 2662 case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break; 2663 } 2664 llvm::SmallVector<QualType, 4> Params; 2665 CallArgList Args; 2666 QualType RetTy = getContext().VoidTy; 2667 if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg()) 2668 Args.add(RValue::get(EmitCastToVoidPtr(Dest)), 2669 getContext().VoidPtrTy); 2670 Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), 2671 getContext().VoidPtrTy); 2672 if (E->getOp() != AtomicExpr::Load) 2673 Args.add(RValue::get(EmitCastToVoidPtr(Val1)), 2674 getContext().VoidPtrTy); 2675 if (E->isCmpXChg()) { 2676 Args.add(RValue::get(EmitCastToVoidPtr(Val2)), 2677 getContext().VoidPtrTy); 2678 RetTy = getContext().IntTy; 2679 } 2680 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)), 2681 getContext().getSizeType()); 2682 const CGFunctionInfo &FuncInfo = 2683 CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo()); 2684 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false); 2685 llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName); 2686 RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args); 2687 if (E->isCmpXChg()) 2688 return Res; 2689 if (E->getOp() == AtomicExpr::Store) 2690 return RValue::get(0); 2691 return ConvertTempToRValue(*this, E->getType(), Dest); 2692 } 2693 #endif 2694 llvm::Type *IPtrTy = 2695 llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo(); 2696 llvm::Value *OrigDest = Dest; 2697 Ptr = Builder.CreateBitCast(Ptr, IPtrTy); 2698 if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy); 2699 if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy); 2700 if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy); 2701 2702 if (isa<llvm::ConstantInt>(Order)) { 2703 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); 2704 switch (ord) { 2705 case 0: // memory_order_relaxed 2706 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2707 llvm::Monotonic); 2708 break; 2709 case 1: // memory_order_consume 2710 case 2: // memory_order_acquire 2711 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2712 llvm::Acquire); 2713 break; 2714 case 3: // memory_order_release 2715 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2716 llvm::Release); 2717 break; 2718 case 4: // memory_order_acq_rel 2719 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2720 llvm::AcquireRelease); 2721 break; 2722 case 5: // memory_order_seq_cst 2723 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2724 llvm::SequentiallyConsistent); 2725 break; 2726 default: // invalid order 2727 // We should not ever get here normally, but it's hard to 2728 // 
enforce that in general. 2729 break; 2730 } 2731 if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init) 2732 return RValue::get(0); 2733 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2734 } 2735 2736 // Long case, when Order isn't obviously constant. 2737 2738 // Create all the relevant BB's 2739 llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0, 2740 *AcqRelBB = 0, *SeqCstBB = 0; 2741 MonotonicBB = createBasicBlock("monotonic", CurFn); 2742 if (E->getOp() != AtomicExpr::Store) 2743 AcquireBB = createBasicBlock("acquire", CurFn); 2744 if (E->getOp() != AtomicExpr::Load) 2745 ReleaseBB = createBasicBlock("release", CurFn); 2746 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) 2747 AcqRelBB = createBasicBlock("acqrel", CurFn); 2748 SeqCstBB = createBasicBlock("seqcst", CurFn); 2749 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); 2750 2751 // Create the switch for the split 2752 // MonotonicBB is arbitrarily chosen as the default case; in practice, this 2753 // doesn't matter unless someone is crazy enough to use something that 2754 // doesn't fold to a constant for the ordering. 2755 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); 2756 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB); 2757 2758 // Emit all the different atomics 2759 Builder.SetInsertPoint(MonotonicBB); 2760 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2761 llvm::Monotonic); 2762 Builder.CreateBr(ContBB); 2763 if (E->getOp() != AtomicExpr::Store) { 2764 Builder.SetInsertPoint(AcquireBB); 2765 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2766 llvm::Acquire); 2767 Builder.CreateBr(ContBB); 2768 SI->addCase(Builder.getInt32(1), AcquireBB); 2769 SI->addCase(Builder.getInt32(2), AcquireBB); 2770 } 2771 if (E->getOp() != AtomicExpr::Load) { 2772 Builder.SetInsertPoint(ReleaseBB); 2773 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2774 llvm::Release); 2775 Builder.CreateBr(ContBB); 2776 SI->addCase(Builder.getInt32(3), ReleaseBB); 2777 } 2778 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) { 2779 Builder.SetInsertPoint(AcqRelBB); 2780 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2781 llvm::AcquireRelease); 2782 Builder.CreateBr(ContBB); 2783 SI->addCase(Builder.getInt32(4), AcqRelBB); 2784 } 2785 Builder.SetInsertPoint(SeqCstBB); 2786 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2787 llvm::SequentiallyConsistent); 2788 Builder.CreateBr(ContBB); 2789 SI->addCase(Builder.getInt32(5), SeqCstBB); 2790 2791 // Cleanup and return 2792 Builder.SetInsertPoint(ContBB); 2793 if (E->getOp() == AtomicExpr::Store) 2794 return RValue::get(0); 2795 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2796 } 2797 2798 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN, 2799 unsigned AccuracyD) { 2800 assert(Val->getType()->isFPOrFPVectorTy()); 2801 if (!AccuracyN || !isa<llvm::Instruction>(Val)) 2802 return; 2803 2804 llvm::Value *Vals[2]; 2805 Vals[0] = llvm::ConstantInt::get(Int32Ty, AccuracyN); 2806 Vals[1] = llvm::ConstantInt::get(Int32Ty, AccuracyD); 2807 llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), Vals); 2808 2809 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy, 2810 Node); 2811 } 2812 2813 namespace { 2814 struct LValueOrRValue { 2815 LValue LV; 2816 RValue RV; 2817 }; 2818 } 2819 2820 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, 2821 const 
PseudoObjectExpr *E, 2822 bool forLValue, 2823 AggValueSlot slot) { 2824 llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 2825 2826 // Find the result expression, if any. 2827 const Expr *resultExpr = E->getResultExpr(); 2828 LValueOrRValue result; 2829 2830 for (PseudoObjectExpr::const_semantics_iterator 2831 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 2832 const Expr *semantic = *i; 2833 2834 // If this semantic expression is an opaque value, bind it 2835 // to the result of its source expression. 2836 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 2837 2838 // If this is the result expression, we may need to evaluate 2839 // directly into the slot. 2840 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 2841 OVMA opaqueData; 2842 if (ov == resultExpr && ov->isRValue() && !forLValue && 2843 CodeGenFunction::hasAggregateLLVMType(ov->getType()) && 2844 !ov->getType()->isAnyComplexType()) { 2845 CGF.EmitAggExpr(ov->getSourceExpr(), slot); 2846 2847 LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType()); 2848 opaqueData = OVMA::bind(CGF, ov, LV); 2849 result.RV = slot.asRValue(); 2850 2851 // Otherwise, emit as normal. 2852 } else { 2853 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 2854 2855 // If this is the result, also evaluate the result now. 2856 if (ov == resultExpr) { 2857 if (forLValue) 2858 result.LV = CGF.EmitLValue(ov); 2859 else 2860 result.RV = CGF.EmitAnyExpr(ov, slot); 2861 } 2862 } 2863 2864 opaques.push_back(opaqueData); 2865 2866 // Otherwise, if the expression is the result, evaluate it 2867 // and remember the result. 2868 } else if (semantic == resultExpr) { 2869 if (forLValue) 2870 result.LV = CGF.EmitLValue(semantic); 2871 else 2872 result.RV = CGF.EmitAnyExpr(semantic, slot); 2873 2874 // Otherwise, evaluate the expression in an ignored context. 2875 } else { 2876 CGF.EmitIgnoredExpr(semantic); 2877 } 2878 } 2879 2880 // Unbind all the opaques now. 2881 for (unsigned i = 0, e = opaques.size(); i != e; ++i) 2882 opaques[i].unbind(CGF); 2883 2884 return result; 2885 } 2886 2887 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, 2888 AggValueSlot slot) { 2889 return emitPseudoObjectExpr(*this, E, false, slot).RV; 2890 } 2891 2892 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { 2893 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; 2894 } 2895
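// (For orientation, a hypothetical source form that reaches the helpers
// above: an Objective-C property compound assignment such as
//    obj.count += 1;
// is a PseudoObjectExpr whose semantic form binds 'obj' to an
// OpaqueValueExpr and then runs the getter, the addition, and the setter
// as separate semantic expressions, exactly the list walked above.)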