//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
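///
/// (Illustrative sketch, not from the original source: for a pointer-typed
/// expression this amounts to an "icmp ne" against the null pointer, while a
/// member pointer's null test is delegated to the C++ ABI below.)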
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
                                    bool IgnoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, IgnoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));

  EmitAggExpr(E, AggSlot, IgnoreResult);
  return AggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

namespace {
/// \brief An adjustment to be made to the temporary created when emitting a
/// reference binding, which accesses a particular subobject of that temporary.
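///
/// Illustrative examples (not from the original source): binding
/// "const Base &b = Derived();" records a derived-to-base adjustment, and
/// "const int &i = S().member;" records a field adjustment from the S
/// temporary to its member.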
struct SubobjectAdjustment {
  enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;

  union {
    struct {
      const CastExpr *BasePath;
      const CXXRecordDecl *DerivedClass;
    } DerivedToBase;

    FieldDecl *Field;
  };

  SubobjectAdjustment(const CastExpr *BasePath,
                      const CXXRecordDecl *DerivedClass)
    : Kind(DerivedToBaseAdjustment) {
    DerivedToBase.BasePath = BasePath;
    DerivedToBase.DerivedClass = DerivedClass;
  }

  SubobjectAdjustment(FieldDecl *Field)
    : Kind(FieldAdjustment) {
    this->Field = Field;
  }
};
}

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      llvm::SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  // Look through single-element init lists that claim to be lvalues. They're
  // just syntactic wrappers in this case.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits() == 1 && ILE->isGLValue())
      E = ILE->getInit(0);
  }

  // Look through expressions for materialized temporaries (for now).
  if (const MaterializeTemporaryExpr *M
      = dyn_cast<MaterializeTemporaryExpr>(E)) {
    // Objective-C++ ARC:
    //   If we are binding a reference to a temporary that has ownership, we
    //   need to perform retain/release operations on the temporary.
    if (CGF.getContext().getLangOptions().ObjCAutoRefCount &&
        E->getType()->isObjCLifetimeType() &&
        (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
         E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
      ObjCARCReferenceLifetimeType = E->getType();

    E = M->GetTemporaryExpr();
  }

  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
    E = DAE->getExpr();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
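    // (A non-simple glvalue here would be, e.g., a bit-field or a vector
    //  element, which has no directly addressable storage of its own.)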
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    while (true) {
      E = E->IgnoreParens();

      if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
        if ((CE->getCastKind() == CK_DerivedToBase ||
             CE->getCastKind() == CK_UncheckedDerivedToBase) &&
            E->getType()->isRecordType()) {
          E = CE->getSubExpr();
          CXXRecordDecl *Derived
            = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
          Adjustments.push_back(SubobjectAdjustment(CE, Derived));
          continue;
        }

        if (CE->getCastKind() == CK_NoOp) {
          E = CE->getSubExpr();
          continue;
        }
      } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
        if (!ME->isArrow() && ME->getBase()->isRValue()) {
          assert(ME->getBase()->getType()->isRecordType());
          if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
            E = ME->getBase();
            Adjustments.push_back(SubobjectAdjustment(Field));
            continue;
          }
        }
      }

      if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
        if (opaque->getType()->isRecordType())
          return CGF.EmitOpaqueValueLValue(opaque).getAddress();

      // Nothing changed.
      break;
    }

    // Create a reference temporary if necessary.
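    // (Sketch: for a non-complex aggregate E, the temporary created below
    //  doubles as the evaluation slot, so EmitAnyExpr constructs the result
    //  in place rather than copying it afterwards.)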
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV =
            CGF.EmitLValueForField(Object, Adjustment.Field, 0);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E,
                                                   ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      EmitCXXGlobalDtorRegistration(DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                     "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType,
                  precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}

/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
  if (!CatchUndefined)
    return;

  // This needs to be to the standard address space.
  Address = Builder.CreateBitCast(Address, Int8PtrTy);

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);

  // In time, people may want to control this and use a 1 here.
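  // An i1 'false' selects the conservative (maximum) variant of
  // llvm.objectsize, which returns -1 when the size is unknown; the compare
  // against -1 below skips the check in that case.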
  llvm::Value *Arg = Builder.getFalse();
  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
  llvm::BasicBlock *Cont = createBasicBlock();
  llvm::BasicBlock *Check = createBasicBlock();
  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);

  EmitBlock(Check);
  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
                                        llvm::ConstantInt::get(IntPtrTy, Size)),
                       Cont, getTrapBB());
  EmitBlock(Cont);
}

CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address.  Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitCheck(LV.getAddress(),
              getContext().getTypeSizeInChars(E->getType()).getQuantity());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield
/// reference.  In either case, the LLVM Value* in the LValue structure is
/// guaranteed to be an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
678 "Only single-element init list can be lvalue."); 679 return EmitLValue(cast<InitListExpr>(E)->getInit(0)); 680 681 case Expr::BlockDeclRefExprClass: 682 return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E)); 683 684 case Expr::CXXTemporaryObjectExprClass: 685 case Expr::CXXConstructExprClass: 686 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); 687 case Expr::CXXBindTemporaryExprClass: 688 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); 689 690 case Expr::ExprWithCleanupsClass: { 691 const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E); 692 enterFullExpression(cleanups); 693 RunCleanupsScope Scope(*this); 694 return EmitLValue(cleanups->getSubExpr()); 695 } 696 697 case Expr::CXXScalarValueInitExprClass: 698 return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E)); 699 case Expr::CXXDefaultArgExprClass: 700 return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr()); 701 case Expr::CXXTypeidExprClass: 702 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E)); 703 704 case Expr::ObjCMessageExprClass: 705 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); 706 case Expr::ObjCIvarRefExprClass: 707 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); 708 case Expr::StmtExprClass: 709 return EmitStmtExprLValue(cast<StmtExpr>(E)); 710 case Expr::UnaryOperatorClass: 711 return EmitUnaryOpLValue(cast<UnaryOperator>(E)); 712 case Expr::ArraySubscriptExprClass: 713 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); 714 case Expr::ExtVectorElementExprClass: 715 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); 716 case Expr::MemberExprClass: 717 return EmitMemberExpr(cast<MemberExpr>(E)); 718 case Expr::CompoundLiteralExprClass: 719 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); 720 case Expr::ConditionalOperatorClass: 721 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E)); 722 case Expr::BinaryConditionalOperatorClass: 723 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E)); 724 case Expr::ChooseExprClass: 725 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext())); 726 case Expr::OpaqueValueExprClass: 727 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E)); 728 case Expr::SubstNonTypeTemplateParmExprClass: 729 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement()); 730 case Expr::ImplicitCastExprClass: 731 case Expr::CStyleCastExprClass: 732 case Expr::CXXFunctionalCastExprClass: 733 case Expr::CXXStaticCastExprClass: 734 case Expr::CXXDynamicCastExprClass: 735 case Expr::CXXReinterpretCastExprClass: 736 case Expr::CXXConstCastExprClass: 737 case Expr::ObjCBridgedCastExprClass: 738 return EmitCastLValue(cast<CastExpr>(E)); 739 740 case Expr::MaterializeTemporaryExprClass: 741 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E)); 742 } 743 } 744 745 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) { 746 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), 747 lvalue.getAlignment().getQuantity(), 748 lvalue.getType(), lvalue.getTBAAInfo()); 749 } 750 751 llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, 752 unsigned Alignment, QualType Ty, 753 llvm::MDNode *TBAAInfo) { 754 llvm::LoadInst *Load = Builder.CreateLoad(Addr); 755 if (Volatile) 756 Load->setVolatile(true); 757 if (Alignment) 758 Load->setAlignment(Alignment); 759 if (TBAAInfo) 760 CGM.DecorateInstruction(Load, TBAAInfo); 761 // If this is an atomic type, all normal reads must be atomic 762 if 
    Load->setAtomic(llvm::SequentiallyConsistent);

  return EmitFromMemory(Load, Ty);
}

static bool isBooleanUnderlyingType(QualType Ty) {
  if (const EnumType *ET = dyn_cast<EnumType>(Ty))
    return ET->getDecl()->getIntegerType()->isBooleanType();
  return false;
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (Ty->isBooleanType() || isBooleanUnderlyingType(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {
  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
                                          LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
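  //
  // Illustrative example (hypothetical layout, not from the original source):
  // for
  //   struct S { unsigned a : 3; unsigned b : 9; };
  // a load of 'b' might be a single i16 access that is shifted right by 3
  // (FieldBitStart), masked down to 9 bits (TargetBitWidth), and then
  // zero-extended to the i32 result type by the loop below.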
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(),
                                               AI.AccessWidth,
                        CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Load->setAlignment(AI.AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
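  // (Illustrative: "v.y" on a float4 loads the whole vector and extracts
  //  element 1, while a multi-element access like "v.xz" takes the
  //  shufflevector path below.)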
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
                                            Dst.isVolatileQualified());
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      Builder.CreateStore(Vec, Dst.getVectorAddr(), Dst.isVolatileQualified());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Store into a __strong object.
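    // (Under GC this goes through the runtime's write barriers: an ivar
    //  assign, a global assign, or a strong-cast assign, selected below.)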
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    } else {
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    }
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (Dst.getType()->isBooleanType())
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access,
    // limited to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      if (!AI.AccessAlignment.isZero())
        Load->setAlignment(AI.AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    if (!AI.AccessAlignment.isZero())
      Store->setAlignment(AI.AccessAlignment.getQuantity());
  }
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                        Dst.isVolatileQualified());
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
      cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector if the src and destination have the same number
      // of elements, and restore the vector mask since it is on the side it
      // will be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      // into that?  This could be simpler.
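      //
      // Illustrative sketch (not from the original source): assigning a
      // float2 to "v.xz" of a float4 first widens the float2 to four lanes
      // with undef indices, then the mask built below routes those lanes
      // into positions 0 and 2 of the loaded vector 'Vec'.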
      SmallVector<llvm::Constant*, 4> ExtMask;
      unsigned i;
      for (i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      for (; i != NumDstElts; ++i)
        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // build identity mask
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // modify the identity mask with what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}

// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating write-barriers API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOptions().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to a field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not
      // the same as assigning to the ivar itself.  {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not
      // the same as assigning to the global itself.  {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
         "Var decl must have external storage or be a file var decl!");

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  QualType T = E->getType();
  LValue LV;
  if (VD->getType()->isReferenceType()) {
    llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
    LI->setAlignment(Alignment.getQuantity());
    V = LI;
    LV = CGF.MakeNaturalAlignAddrLValue(V, T);
  } else {
    LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
  }
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use.  Correct for this with a
      // bitcast.
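      //
      // (Hypothetical example: "int f();" later defined K&R-style as
      //  "int f(x) int x; { ... }" -- the definition has a prototyped type,
      //  while a use through the prototype-less declaration expects
      //  "int ()".)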
      QualType NoProtoType =
        CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
  const NamedDecl *ND = E->getDecl();
  CharUnits Alignment = getContext().getDeclAlign(ND);
  QualType T = E->getType();

  // FIXME: We should be able to assert this for FunctionDecls as well!
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
  // those with a valid source location.
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
          !E->getLocation().isValid()) &&
         "Should not use decl without marking it used!");

  if (ND->hasAttr<WeakRefAttr>()) {
    const ValueDecl *VD = cast<ValueDecl>(ND);
    llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
    return MakeAddrLValue(Aliasee, E->getType(), Alignment);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {

    // Check if this is a global variable.
    if (VD->hasExternalStorage() || VD->isFileVarDecl())
      return EmitGlobalVarDeclLValue(*this, E, VD);

    bool NonGCable = VD->hasLocalStorage() &&
                     !VD->getType()->isReferenceType() &&
                     !VD->hasAttr<BlocksAttr>();

    llvm::Value *V = LocalDeclMap[VD];
    if (!V && VD->isStaticLocal())
      V = CGM.getStaticLocalDeclAddress(VD);
    assert(V && "DeclRefExpr not entered in LocalDeclMap?");

    if (VD->hasAttr<BlocksAttr>())
      V = BuildBlockByrefAddress(V, VD);

    LValue LV;
    if (VD->getType()->isReferenceType()) {
      llvm::LoadInst *LI = Builder.CreateLoad(V);
      LI->setAlignment(Alignment.getQuantity());
      V = LI;
      LV = MakeNaturalAlignAddrLValue(V, T);
    } else {
      LV = MakeAddrLValue(V, T, Alignment);
    }

    if (NonGCable) {
      LV.getQuals().removeObjCGCAttr();
      LV.setNonGC(true);
    }
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, fn);

  llvm_unreachable("Unhandled DeclRefExpr");
}

LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
  CharUnits Alignment = getContext().getDeclAlign(E->getDecl());
  return MakeAddrLValue(GetAddrOfBlockDecl(E), E->getType(), Alignment);
}

LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
  // __extension__ doesn't affect lvalue-ness.
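  // (E.g. "__extension__ x = 1" assigns to 'x' just as plain "x = 1" would.)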
  if (E->getOpcode() == UO_Extension)
    return EmitLValue(E->getSubExpr());

  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
  switch (E->getOpcode()) {
  default: llvm_unreachable("Unknown unary operator lvalue!");
  case UO_Deref: {
    QualType T = E->getSubExpr()->getType()->getPointeeType();
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");

    LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());

    // We should not generate __weak write barrier on indirect reference
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
    // But, we continue to generate __strong write barrier on indirect write
    // into a pointer to object.
    if (getContext().getLangOptions().ObjC1 &&
        getContext().getLangOptions().getGC() != LangOptions::NonGC &&
        LV.isObjCWeak())
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    return LV;
  }
  case UO_Real:
  case UO_Imag: {
    LValue LV = EmitLValue(E->getSubExpr());
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
    llvm::Value *Addr = LV.getAddress();

    // real and imag are valid on scalars.  This is a faster way of
    // testing that.
    if (!cast<llvm::PointerType>(Addr->getType())
          ->getElementType()->isStructTy()) {
      assert(E->getSubExpr()->getType()->isArithmeticType());
      return LV;
    }

    assert(E->getSubExpr()->getType()->isAnyComplexType());

    unsigned Idx = E->getOpcode() == UO_Imag;
    return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
                                                  Idx, "idx"),
                          ExprTy);
  }
  case UO_PreInc:
  case UO_PreDec: {
    LValue LV = EmitLValue(E->getSubExpr());
    bool isInc = E->getOpcode() == UO_PreInc;

    if (E->getType()->isAnyComplexType())
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
    else
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
    return LV;
  }
  }
}

LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
                        E->getType());
}

LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
                        E->getType());
}

LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
  switch (E->getIdentType()) {
  default:
    return EmitUnsupportedLValue(E, "predefined expression");

  case PredefinedExpr::Func:
  case PredefinedExpr::Function:
  case PredefinedExpr::PrettyFunction: {
    unsigned Type = E->getIdentType();
    std::string GlobalVarName;

    switch (Type) {
    default: llvm_unreachable("Invalid type");
    case PredefinedExpr::Func:
      GlobalVarName = "__func__.";
      break;
    case PredefinedExpr::Function:
      GlobalVarName = "__FUNCTION__.";
      break;
    case PredefinedExpr::PrettyFunction:
      GlobalVarName = "__PRETTY_FUNCTION__.";
      break;
    }

    StringRef FnName = CurFn->getName();
    if (FnName.startswith("\01"))
      FnName = FnName.substr(1);
    GlobalVarName += FnName;

    const Decl *CurDecl = CurCodeDecl;
    if (CurDecl == 0)
      CurDecl = getContext().getTranslationUnitDecl();

    std::string FunctionName =
      (isa<BlockDecl>(CurDecl)
FnName.str() 1539 : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl)); 1540 1541 llvm::Constant *C = 1542 CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str()); 1543 return MakeAddrLValue(C, E->getType()); 1544 } 1545 } 1546 } 1547 1548 llvm::BasicBlock *CodeGenFunction::getTrapBB() { 1549 const CodeGenOptions &GCO = CGM.getCodeGenOpts(); 1550 1551 // If we are not optimzing, don't collapse all calls to trap in the function 1552 // to the same call, that way, in the debugger they can see which operation 1553 // did in fact fail. If we are optimizing, we collapse all calls to trap down 1554 // to just one per function to save on codesize. 1555 if (GCO.OptimizationLevel && TrapBB) 1556 return TrapBB; 1557 1558 llvm::BasicBlock *Cont = 0; 1559 if (HaveInsertPoint()) { 1560 Cont = createBasicBlock("cont"); 1561 EmitBranch(Cont); 1562 } 1563 TrapBB = createBasicBlock("trap"); 1564 EmitBlock(TrapBB); 1565 1566 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap); 1567 llvm::CallInst *TrapCall = Builder.CreateCall(F); 1568 TrapCall->setDoesNotReturn(); 1569 TrapCall->setDoesNotThrow(); 1570 Builder.CreateUnreachable(); 1571 1572 if (Cont) 1573 EmitBlock(Cont); 1574 return TrapBB; 1575 } 1576 1577 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 1578 /// array to pointer, return the array subexpression. 1579 static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 1580 // If this isn't just an array->pointer decay, bail out. 1581 const CastExpr *CE = dyn_cast<CastExpr>(E); 1582 if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay) 1583 return 0; 1584 1585 // If this is a decay from variable width array, bail out. 1586 const Expr *SubExpr = CE->getSubExpr(); 1587 if (SubExpr->getType()->isVariableArrayType()) 1588 return 0; 1589 1590 return SubExpr; 1591 } 1592 1593 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { 1594 // The index must always be an integer, which is not an aggregate. Emit it. 1595 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 1596 QualType IdxTy = E->getIdx()->getType(); 1597 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 1598 1599 // If the base is a vector type, then we are forming a vector element lvalue 1600 // with this subscript. 1601 if (E->getBase()->getType()->isVectorType()) { 1602 // Emit the vector as an lvalue to get its address. 1603 LValue LHS = EmitLValue(E->getBase()); 1604 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 1605 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx"); 1606 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 1607 E->getBase()->getType()); 1608 } 1609 1610 // Extend or truncate the index type to 32 or 64-bits. 1611 if (Idx->getType() != IntPtrTy) 1612 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 1613 1614 // FIXME: As llvm implements the object size checking, this can come out. 
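  // When -fcatch-undefined-behavior is enabled, subscripts of constant-size
  // arrays are guarded with an explicit bounds check: for 'int A[10]; ... A[i]'
  // we branch to the trap block unless i <= 10. Note that the comparison is an
  // unsigned <=, so forming the one-past-the-end address &A[10] is not trapped.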
1615 if (CatchUndefined) { 1616 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){ 1617 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) { 1618 if (ICE->getCastKind() == CK_ArrayToPointerDecay) { 1619 if (const ConstantArrayType *CAT 1620 = getContext().getAsConstantArrayType(DRE->getType())) { 1621 llvm::APInt Size = CAT->getSize(); 1622 llvm::BasicBlock *Cont = createBasicBlock("cont"); 1623 Builder.CreateCondBr(Builder.CreateICmpULE(Idx, 1624 llvm::ConstantInt::get(Idx->getType(), Size)), 1625 Cont, getTrapBB()); 1626 EmitBlock(Cont); 1627 } 1628 } 1629 } 1630 } 1631 } 1632 1633 // We know that the pointer points to a type of the correct size, unless the 1634 // size is a VLA or Objective-C interface. 1635 llvm::Value *Address = 0; 1636 CharUnits ArrayAlignment; 1637 if (const VariableArrayType *vla = 1638 getContext().getAsVariableArrayType(E->getType())) { 1639 // The base must be a pointer, which is not an aggregate. Emit 1640 // it. It needs to be emitted first in case it's what captures 1641 // the VLA bounds. 1642 Address = EmitScalarExpr(E->getBase()); 1643 1644 // The element count here is the total number of non-VLA elements. 1645 llvm::Value *numElements = getVLASize(vla).first; 1646 1647 // Effectively, the multiply by the VLA size is part of the GEP. 1648 // GEP indexes are signed, and scaling an index isn't permitted to 1649 // signed-overflow, so we use the same semantics for our explicit 1650 // multiply. We suppress this if overflow is not undefined behavior. 1651 if (getLangOptions().isSignedOverflowDefined()) { 1652 Idx = Builder.CreateMul(Idx, numElements); 1653 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 1654 } else { 1655 Idx = Builder.CreateNSWMul(Idx, numElements); 1656 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); 1657 } 1658 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 1659 // Indexing over an interface, as in "NSString *P; P[4];" 1660 llvm::Value *InterfaceSize = 1661 llvm::ConstantInt::get(Idx->getType(), 1662 getContext().getTypeSizeInChars(OIT).getQuantity()); 1663 1664 Idx = Builder.CreateMul(Idx, InterfaceSize); 1665 1666 // The base must be a pointer, which is not an aggregate. Emit it. 1667 llvm::Value *Base = EmitScalarExpr(E->getBase()); 1668 Address = EmitCastToVoidPtr(Base); 1669 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 1670 Address = Builder.CreateBitCast(Address, Base->getType()); 1671 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 1672 // If this is A[i] where A is an array, the frontend will have decayed the 1673 // base to be a ArrayToPointerDecay implicit cast. While correct, it is 1674 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 1675 // "gep x, i" here. Emit one "gep A, 0, i". 1676 assert(Array->getType()->isArrayType() && 1677 "Array to pointer decay must have array source type!"); 1678 LValue ArrayLV = EmitLValue(Array); 1679 llvm::Value *ArrayPtr = ArrayLV.getAddress(); 1680 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 1681 llvm::Value *Args[] = { Zero, Idx }; 1682 1683 // Propagate the alignment from the array itself to the result. 
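    // (This matters chiefly when the array is under-aligned, for example a
    // member of a packed struct; the value is clamped against the element
    // type's natural alignment further below.)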
    ArrayAlignment = ArrayLV.getAlignment();

    if (getContext().getLangOptions().isSignedOverflowDefined())
      Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
  } else {
    // The base must be a pointer, which is not an aggregate. Emit it.
    llvm::Value *Base = EmitScalarExpr(E->getBase());
    if (getContext().getLangOptions().isSignedOverflowDefined())
      Address = Builder.CreateGEP(Base, Idx, "arrayidx");
    else
      Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
  }

  QualType T = E->getBase()->getType()->getPointeeType();
  assert(!T.isNull() &&
         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");

  // Limit the alignment to that of the result type.
  LValue LV;
  if (!ArrayAlignment.isZero()) {
    CharUnits Align = getContext().getTypeAlignInChars(T);
    ArrayAlignment = std::min(Align, ArrayAlignment);
    LV = MakeAddrLValue(Address, T, ArrayAlignment);
  } else {
    LV = MakeNaturalAlignAddrLValue(Address, T);
  }

  LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());

  if (getContext().getLangOptions().ObjC1 &&
      getContext().getLangOptions().getGC() != LangOptions::NonGC) {
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
    setObjCGCLValueClass(getContext(), E, LV);
  }
  return LV;
}

static
llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
                                       SmallVector<unsigned, 4> &Elts) {
  SmallVector<llvm::Constant*, 4> CElts;
  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
    CElts.push_back(Builder.getInt32(Elts[i]));

  return llvm::ConstantVector::get(CElts);
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType());
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType());
  }

  QualType type =
    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
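  // For example, 'V.zyx' on a four-element vector encodes as {2, 1, 0},
  // and 'V.x' as {0}.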
1768 SmallVector<unsigned, 4> Indices; 1769 E->getEncodedElementAccess(Indices); 1770 1771 if (Base.isSimple()) { 1772 llvm::Constant *CV = GenerateConstantVector(Builder, Indices); 1773 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type); 1774 } 1775 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 1776 1777 llvm::Constant *BaseElts = Base.getExtVectorElts(); 1778 SmallVector<llvm::Constant *, 4> CElts; 1779 1780 for (unsigned i = 0, e = Indices.size(); i != e; ++i) 1781 CElts.push_back(BaseElts->getAggregateElement(Indices[i])); 1782 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 1783 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type); 1784 } 1785 1786 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 1787 bool isNonGC = false; 1788 Expr *BaseExpr = E->getBase(); 1789 llvm::Value *BaseValue = NULL; 1790 Qualifiers BaseQuals; 1791 1792 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 1793 if (E->isArrow()) { 1794 BaseValue = EmitScalarExpr(BaseExpr); 1795 const PointerType *PTy = 1796 BaseExpr->getType()->getAs<PointerType>(); 1797 BaseQuals = PTy->getPointeeType().getQualifiers(); 1798 } else { 1799 LValue BaseLV = EmitLValue(BaseExpr); 1800 if (BaseLV.isNonGC()) 1801 isNonGC = true; 1802 // FIXME: this isn't right for bitfields. 1803 BaseValue = BaseLV.getAddress(); 1804 QualType BaseTy = BaseExpr->getType(); 1805 BaseQuals = BaseTy.getQualifiers(); 1806 } 1807 1808 NamedDecl *ND = E->getMemberDecl(); 1809 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 1810 LValue LV = EmitLValueForField(BaseValue, Field, 1811 BaseQuals.getCVRQualifiers()); 1812 LV.setNonGC(isNonGC); 1813 setObjCGCLValueClass(getContext(), E, LV); 1814 return LV; 1815 } 1816 1817 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 1818 return EmitGlobalVarDeclLValue(*this, E, VD); 1819 1820 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 1821 return EmitFunctionDeclLValue(*this, E, FD); 1822 1823 llvm_unreachable("Unhandled member declaration!"); 1824 } 1825 1826 LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue, 1827 const FieldDecl *Field, 1828 unsigned CVRQualifiers) { 1829 const CGRecordLayout &RL = 1830 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1831 const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field); 1832 return LValue::MakeBitfield(BaseValue, Info, 1833 Field->getType().withCVRQualifiers(CVRQualifiers)); 1834 } 1835 1836 /// EmitLValueForAnonRecordField - Given that the field is a member of 1837 /// an anonymous struct or union buried inside a record, and given 1838 /// that the base value is a pointer to the enclosing record, derive 1839 /// an lvalue for the ultimate field. 
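/// For example, given 'struct S { struct { int x; }; } s;', an access to
/// 's.x' is resolved by walking the IndirectFieldDecl's chain through the
/// unnamed struct member, one EmitLValueForField step per link.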
1840 LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue, 1841 const IndirectFieldDecl *Field, 1842 unsigned CVRQualifiers) { 1843 IndirectFieldDecl::chain_iterator I = Field->chain_begin(), 1844 IEnd = Field->chain_end(); 1845 while (true) { 1846 LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I), 1847 CVRQualifiers); 1848 if (++I == IEnd) return LV; 1849 1850 assert(LV.isSimple()); 1851 BaseValue = LV.getAddress(); 1852 CVRQualifiers |= LV.getVRQualifiers(); 1853 } 1854 } 1855 1856 LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr, 1857 const FieldDecl *field, 1858 unsigned cvr) { 1859 if (field->isBitField()) 1860 return EmitLValueForBitfield(baseAddr, field, cvr); 1861 1862 const RecordDecl *rec = field->getParent(); 1863 QualType type = field->getType(); 1864 CharUnits alignment = getContext().getDeclAlign(field); 1865 1866 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 1867 1868 llvm::Value *addr = baseAddr; 1869 if (rec->isUnion()) { 1870 // For unions, there is no pointer adjustment. 1871 assert(!type->isReferenceType() && "union has reference member"); 1872 } else { 1873 // For structs, we GEP to the field that the record layout suggests. 1874 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 1875 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 1876 1877 // If this is a reference field, load the reference right now. 1878 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 1879 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 1880 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 1881 load->setAlignment(alignment.getQuantity()); 1882 1883 if (CGM.shouldUseTBAA()) { 1884 llvm::MDNode *tbaa; 1885 if (mayAlias) 1886 tbaa = CGM.getTBAAInfo(getContext().CharTy); 1887 else 1888 tbaa = CGM.getTBAAInfo(type); 1889 CGM.DecorateInstruction(load, tbaa); 1890 } 1891 1892 addr = load; 1893 mayAlias = false; 1894 type = refType->getPointeeType(); 1895 if (type->isIncompleteType()) 1896 alignment = CharUnits(); 1897 else 1898 alignment = getContext().getTypeAlignInChars(type); 1899 cvr = 0; // qualifiers don't recursively apply to referencee 1900 } 1901 } 1902 1903 // Make sure that the address is pointing to the right type. This is critical 1904 // for both unions and structs. A union needs a bitcast, a struct element 1905 // will need a bitcast if the LLVM type laid out doesn't match the desired 1906 // type. 1907 addr = EmitBitCastOfLValueToProperType(*this, addr, 1908 CGM.getTypes().ConvertTypeForMem(type), 1909 field->getName()); 1910 1911 if (field->hasAttr<AnnotateAttr>()) 1912 addr = EmitFieldAnnotations(field, addr); 1913 1914 LValue LV = MakeAddrLValue(addr, type, alignment); 1915 LV.getQuals().addCVRQualifiers(cvr); 1916 1917 // __weak attribute on a field is ignored. 1918 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 1919 LV.getQuals().removeObjCGCAttr(); 1920 1921 // Fields of may_alias structs act like 'char' for TBAA purposes. 1922 // FIXME: this should get propagated down through anonymous structs 1923 // and unions. 
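  // Such fields are given the most general TBAA tag, so the optimizer must
  // assume their loads and stores can alias accesses of any other type.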
1924 if (mayAlias && LV.getTBAAInfo()) 1925 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 1926 1927 return LV; 1928 } 1929 1930 LValue 1931 CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue, 1932 const FieldDecl *Field, 1933 unsigned CVRQualifiers) { 1934 QualType FieldType = Field->getType(); 1935 1936 if (!FieldType->isReferenceType()) 1937 return EmitLValueForField(BaseValue, Field, CVRQualifiers); 1938 1939 const CGRecordLayout &RL = 1940 CGM.getTypes().getCGRecordLayout(Field->getParent()); 1941 unsigned idx = RL.getLLVMFieldNo(Field); 1942 llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx); 1943 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 1944 1945 1946 // Make sure that the address is pointing to the right type. This is critical 1947 // for both unions and structs. A union needs a bitcast, a struct element 1948 // will need a bitcast if the LLVM type laid out doesn't match the desired 1949 // type. 1950 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 1951 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1952 V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS)); 1953 1954 CharUnits Alignment = getContext().getDeclAlign(Field); 1955 return MakeAddrLValue(V, FieldType, Alignment); 1956 } 1957 1958 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 1959 if (E->isFileScope()) { 1960 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 1961 return MakeAddrLValue(GlobalPtr, E->getType()); 1962 } 1963 1964 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 1965 const Expr *InitExpr = E->getInitializer(); 1966 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 1967 1968 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 1969 /*Init*/ true); 1970 1971 return Result; 1972 } 1973 1974 LValue CodeGenFunction:: 1975 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 1976 if (!expr->isGLValue()) { 1977 // ?: here should be an aggregate. 1978 assert((hasAggregateLLVMType(expr->getType()) && 1979 !expr->getType()->isAnyComplexType()) && 1980 "Unexpected conditional operator!"); 1981 return EmitAggExprToLValue(expr); 1982 } 1983 1984 OpaqueValueMapping binding(*this, expr); 1985 1986 const Expr *condExpr = expr->getCond(); 1987 bool CondExprBool; 1988 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 1989 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 1990 if (!CondExprBool) std::swap(live, dead); 1991 1992 if (!ContainsLabel(dead)) 1993 return EmitLValue(live); 1994 } 1995 1996 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 1997 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 1998 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 1999 2000 ConditionalEvaluation eval(*this); 2001 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock); 2002 2003 // Any temporaries created here are conditional. 2004 EmitBlock(lhsBlock); 2005 eval.begin(*this); 2006 LValue lhs = EmitLValue(expr->getTrueExpr()); 2007 eval.end(*this); 2008 2009 if (!lhs.isSimple()) 2010 return EmitUnsupportedLValue(expr, "conditional operator"); 2011 2012 lhsBlock = Builder.GetInsertBlock(); 2013 Builder.CreateBr(contBlock); 2014 2015 // Any temporaries created here are conditional. 
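  // (eval.begin/eval.end bracket the conditionally evaluated region so that
  // any cleanups entered inside it are marked as conditional.)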
  EmitBlock(rhsBlock);
  eval.begin(*this);
  LValue rhs = EmitLValue(expr->getFalseExpr());
  eval.end(*this);
  if (!rhs.isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");
  rhsBlock = Builder.GetInsertBlock();

  EmitBlock(contBlock);

  llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
                                         "cond-lvalue");
  phi->addIncoming(lhs.getAddress(), lhsBlock);
  phi->addIncoming(rhs.getAddress(), rhsBlock);
  return MakeAddrLValue(phi, expr->getType());
}

/// EmitCastLValue - Casts are never lvalues unless the cast is a
/// dynamic_cast. In that case we produce the usual lvalue result; otherwise,
/// if the code generator needs a cast in an lvalue context, it must be
/// because we need the address of an aggregate in order to access one of its
/// fields. This can happen for all the reasons that casts are permitted with
/// an aggregate result, including noop aggregate casts and casts from scalar
/// to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  // These two casts are currently treated as no-ops, although they could
  // potentially be real operations depending on the target's ABI.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:

  case CK_NoOp:
  case CK_LValueToRValue:
    if (!E->getSubExpr()->Classify(getContext()).isPRValue()
        || E->getType()->isRecordType())
      return EmitLValue(E->getSubExpr());
    // Fall through to synthesize a temporary.

  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject: {
    // These casts only produce lvalues when we're binding a reference to a
    // temporary realized from a (converted) pure rvalue. Emit the expression
    // as a value, copy it into a temporary, and return an lvalue referring to
    // that temporary.
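    // For example, binding 'const int &r = (long)n;' can reach this path:
    // the converted value is spilled into a 'ref.temp' alloca, and an lvalue
    // for that temporary is returned.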
    llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
    EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
    return MakeAddrLValue(V, E->getType());
  }

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = LV.getAddress();
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
    return EmitLValue(E->getSubExpr());

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
      E->getSubExpr()->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *This = LV.getAddress();

    // Perform the derived-to-base conversion.
    llvm::Value *Base =
      GetAddressOfBaseClass(This, DerivedClassDecl,
                            E->path_begin(), E->path_end(),
                            /*NullCheckValue=*/false);

    return MakeAddrLValue(Base, E->getType());
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    CXXRecordDecl *DerivedClassDecl =
      cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion.
    llvm::Value *Derived =
      GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                               E->path_begin(), E->path_end(),
                               /*NullCheckValue=*/false);

    return MakeAddrLValue(Derived, E->getType());
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or C-style equivalent).
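    // We simply reuse the operand's address, reinterpreted as the
    // destination type, as for 'reinterpret_cast<To&>(lv)'.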
2152 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 2153 2154 LValue LV = EmitLValue(E->getSubExpr()); 2155 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2156 ConvertType(CE->getTypeAsWritten())); 2157 return MakeAddrLValue(V, E->getType()); 2158 } 2159 case CK_ObjCObjectLValueCast: { 2160 LValue LV = EmitLValue(E->getSubExpr()); 2161 QualType ToType = getContext().getLValueReferenceType(E->getType()); 2162 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2163 ConvertType(ToType)); 2164 return MakeAddrLValue(V, E->getType()); 2165 } 2166 } 2167 2168 llvm_unreachable("Unhandled lvalue cast kind?"); 2169 } 2170 2171 LValue CodeGenFunction::EmitNullInitializationLValue( 2172 const CXXScalarValueInitExpr *E) { 2173 QualType Ty = E->getType(); 2174 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty); 2175 EmitNullInitialization(LV.getAddress(), Ty); 2176 return LV; 2177 } 2178 2179 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 2180 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 2181 return getOpaqueLValueMapping(e); 2182 } 2183 2184 LValue CodeGenFunction::EmitMaterializeTemporaryExpr( 2185 const MaterializeTemporaryExpr *E) { 2186 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); 2187 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2188 } 2189 2190 2191 //===--------------------------------------------------------------------===// 2192 // Expression Emission 2193 //===--------------------------------------------------------------------===// 2194 2195 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 2196 ReturnValueSlot ReturnValue) { 2197 if (CGDebugInfo *DI = getDebugInfo()) 2198 DI->EmitLocation(Builder, E->getLocStart()); 2199 2200 // Builtins never have block type. 2201 if (E->getCallee()->getType()->isBlockPointerType()) 2202 return EmitBlockCallExpr(E, ReturnValue); 2203 2204 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 2205 return EmitCXXMemberCallExpr(CE, ReturnValue); 2206 2207 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E)) 2208 return EmitCUDAKernelCallExpr(CE, ReturnValue); 2209 2210 const Decl *TargetDecl = E->getCalleeDecl(); 2211 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 2212 if (unsigned builtinID = FD->getBuiltinID()) 2213 return EmitBuiltinExpr(FD, builtinID, E); 2214 } 2215 2216 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 2217 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 2218 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 2219 2220 if (const CXXPseudoDestructorExpr *PseudoDtor 2221 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 2222 QualType DestroyedType = PseudoDtor->getDestroyedType(); 2223 if (getContext().getLangOptions().ObjCAutoRefCount && 2224 DestroyedType->isObjCLifetimeType() && 2225 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || 2226 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { 2227 // Automatic Reference Counting: 2228 // If the pseudo-expression names a retainable object with weak or 2229 // strong lifetime, the object shall be released. 2230 Expr *BaseExpr = PseudoDtor->getBase(); 2231 llvm::Value *BaseValue = NULL; 2232 Qualifiers BaseQuals; 2233 2234 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
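      // For example, for 'p->~T()' the base 'p' is just a scalar pointer,
      // while for 's.~T()' we need the address of 's' itself.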
2235 if (PseudoDtor->isArrow()) { 2236 BaseValue = EmitScalarExpr(BaseExpr); 2237 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); 2238 BaseQuals = PTy->getPointeeType().getQualifiers(); 2239 } else { 2240 LValue BaseLV = EmitLValue(BaseExpr); 2241 BaseValue = BaseLV.getAddress(); 2242 QualType BaseTy = BaseExpr->getType(); 2243 BaseQuals = BaseTy.getQualifiers(); 2244 } 2245 2246 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { 2247 case Qualifiers::OCL_None: 2248 case Qualifiers::OCL_ExplicitNone: 2249 case Qualifiers::OCL_Autoreleasing: 2250 break; 2251 2252 case Qualifiers::OCL_Strong: 2253 EmitARCRelease(Builder.CreateLoad(BaseValue, 2254 PseudoDtor->getDestroyedType().isVolatileQualified()), 2255 /*precise*/ true); 2256 break; 2257 2258 case Qualifiers::OCL_Weak: 2259 EmitARCDestroyWeak(BaseValue); 2260 break; 2261 } 2262 } else { 2263 // C++ [expr.pseudo]p1: 2264 // The result shall only be used as the operand for the function call 2265 // operator (), and the result of such a call has type void. The only 2266 // effect is the evaluation of the postfix-expression before the dot or 2267 // arrow. 2268 EmitScalarExpr(E->getCallee()); 2269 } 2270 2271 return RValue::get(0); 2272 } 2273 2274 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 2275 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue, 2276 E->arg_begin(), E->arg_end(), TargetDecl); 2277 } 2278 2279 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 2280 // Comma expressions just emit their LHS then their RHS as an l-value. 2281 if (E->getOpcode() == BO_Comma) { 2282 EmitIgnoredExpr(E->getLHS()); 2283 EnsureInsertPoint(); 2284 return EmitLValue(E->getRHS()); 2285 } 2286 2287 if (E->getOpcode() == BO_PtrMemD || 2288 E->getOpcode() == BO_PtrMemI) 2289 return EmitPointerToDataMemberBinaryExpr(E); 2290 2291 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 2292 2293 // Note that in all of these cases, __block variables need the RHS 2294 // evaluated first just in case the variable gets moved by the RHS. 2295 2296 if (!hasAggregateLLVMType(E->getType())) { 2297 switch (E->getLHS()->getType().getObjCLifetime()) { 2298 case Qualifiers::OCL_Strong: 2299 return EmitARCStoreStrong(E, /*ignored*/ false).first; 2300 2301 case Qualifiers::OCL_Autoreleasing: 2302 return EmitARCStoreAutoreleasing(E).first; 2303 2304 // No reason to do any of these differently. 2305 case Qualifiers::OCL_None: 2306 case Qualifiers::OCL_ExplicitNone: 2307 case Qualifiers::OCL_Weak: 2308 break; 2309 } 2310 2311 RValue RV = EmitAnyExpr(E->getRHS()); 2312 LValue LV = EmitLValue(E->getLHS()); 2313 EmitStoreThroughLValue(RV, LV); 2314 return LV; 2315 } 2316 2317 if (E->getType()->isAnyComplexType()) 2318 return EmitComplexAssignmentLValue(E); 2319 2320 return EmitAggExprToLValue(E); 2321 } 2322 2323 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 2324 RValue RV = EmitCallExpr(E); 2325 2326 if (!RV.isScalar()) 2327 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2328 2329 assert(E->getCallReturnType()->isReferenceType() && 2330 "Can't have a scalar return unless the return type is a " 2331 "reference type!"); 2332 2333 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2334 } 2335 2336 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 2337 // FIXME: This shouldn't require another copy. 
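  // (The va_arg result is copied into a fresh temporary purely so that we
  // have an address to hand back.)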
2338 return EmitAggExprToLValue(E); 2339 } 2340 2341 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 2342 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 2343 && "binding l-value to type which needs a temporary"); 2344 AggValueSlot Slot = CreateAggTemp(E->getType()); 2345 EmitCXXConstructExpr(E, Slot); 2346 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2347 } 2348 2349 LValue 2350 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 2351 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 2352 } 2353 2354 LValue 2355 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 2356 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2357 Slot.setExternallyDestructed(); 2358 EmitAggExpr(E->getSubExpr(), Slot); 2359 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr()); 2360 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2361 } 2362 2363 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 2364 RValue RV = EmitObjCMessageExpr(E); 2365 2366 if (!RV.isScalar()) 2367 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2368 2369 assert(E->getMethodDecl()->getResultType()->isReferenceType() && 2370 "Can't have a scalar return unless the return type is a " 2371 "reference type!"); 2372 2373 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2374 } 2375 2376 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 2377 llvm::Value *V = 2378 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); 2379 return MakeAddrLValue(V, E->getType()); 2380 } 2381 2382 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 2383 const ObjCIvarDecl *Ivar) { 2384 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 2385 } 2386 2387 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 2388 llvm::Value *BaseValue, 2389 const ObjCIvarDecl *Ivar, 2390 unsigned CVRQualifiers) { 2391 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 2392 Ivar, CVRQualifiers); 2393 } 2394 2395 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 2396 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 2397 llvm::Value *BaseValue = 0; 2398 const Expr *BaseExpr = E->getBase(); 2399 Qualifiers BaseQuals; 2400 QualType ObjectTy; 2401 if (E->isArrow()) { 2402 BaseValue = EmitScalarExpr(BaseExpr); 2403 ObjectTy = BaseExpr->getType()->getPointeeType(); 2404 BaseQuals = ObjectTy.getQualifiers(); 2405 } else { 2406 LValue BaseLV = EmitLValue(BaseExpr); 2407 // FIXME: this isn't right for bitfields. 2408 BaseValue = BaseLV.getAddress(); 2409 ObjectTy = BaseExpr->getType(); 2410 BaseQuals = ObjectTy.getQualifiers(); 2411 } 2412 2413 LValue LV = 2414 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 2415 BaseQuals.getCVRQualifiers()); 2416 setObjCGCLValueClass(getContext(), E, LV); 2417 return LV; 2418 } 2419 2420 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2421 // Can only get l-value for message expression returning aggregate type 2422 RValue RV = EmitAnyExprToTemp(E); 2423 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2424 } 2425 2426 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2427 ReturnValueSlot ReturnValue, 2428 CallExpr::const_arg_iterator ArgBeg, 2429 CallExpr::const_arg_iterator ArgEnd, 2430 const Decl *TargetDecl) { 2431 // Get the actual function type. 
The callee type will always be a pointer to 2432 // function type or a block pointer type. 2433 assert(CalleeType->isFunctionPointerType() && 2434 "Call must have function pointer type!"); 2435 2436 CalleeType = getContext().getCanonicalType(CalleeType); 2437 2438 const FunctionType *FnType 2439 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2440 2441 CallArgList Args; 2442 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2443 2444 const CGFunctionInfo &FnInfo = CGM.getTypes().getFunctionInfo(Args, FnType); 2445 2446 // C99 6.5.2.2p6: 2447 // If the expression that denotes the called function has a type 2448 // that does not include a prototype, [the default argument 2449 // promotions are performed]. If the number of arguments does not 2450 // equal the number of parameters, the behavior is undefined. If 2451 // the function is defined with a type that includes a prototype, 2452 // and either the prototype ends with an ellipsis (, ...) or the 2453 // types of the arguments after promotion are not compatible with 2454 // the types of the parameters, the behavior is undefined. If the 2455 // function is defined with a type that does not include a 2456 // prototype, and the types of the arguments after promotion are 2457 // not compatible with those of the parameters after promotion, 2458 // the behavior is undefined [except in some trivial cases]. 2459 // That is, in the general case, we should assume that a call 2460 // through an unprototyped function type works like a *non-variadic* 2461 // call. The way we make this work is to cast to the exact type 2462 // of the promoted arguments. 2463 if (isa<FunctionNoProtoType>(FnType) && 2464 !getTargetHooks().isNoProtoCallVariadic(FnInfo)) { 2465 assert(cast<llvm::FunctionType>(Callee->getType()->getContainedType(0)) 2466 ->isVarArg()); 2467 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo, false); 2468 CalleeTy = CalleeTy->getPointerTo(); 2469 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast"); 2470 } 2471 2472 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl); 2473 } 2474 2475 LValue CodeGenFunction:: 2476 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2477 llvm::Value *BaseV; 2478 if (E->getOpcode() == BO_PtrMemI) 2479 BaseV = EmitScalarExpr(E->getLHS()); 2480 else 2481 BaseV = EmitLValue(E->getLHS()).getAddress(); 2482 2483 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 2484 2485 const MemberPointerType *MPT 2486 = E->getRHS()->getType()->getAs<MemberPointerType>(); 2487 2488 llvm::Value *AddV = 2489 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT); 2490 2491 return MakeAddrLValue(AddV, MPT->getPointeeType()); 2492 } 2493 2494 static void 2495 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, 2496 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, 2497 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { 2498 if (E->isCmpXChg()) { 2499 // Note that cmpxchg only supports specifying one ordering and 2500 // doesn't support weak cmpxchg, at least at the moment. 
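    // The expansion: load the expected value from Val1 and the desired value
    // from Val2, cmpxchg against Ptr, write the old value back to Val1, and
    // store the i1 success flag (old == expected) to Dest.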
2501 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2502 LoadVal1->setAlignment(Align); 2503 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2); 2504 LoadVal2->setAlignment(Align); 2505 llvm::AtomicCmpXchgInst *CXI = 2506 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order); 2507 CXI->setVolatile(E->isVolatile()); 2508 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1); 2509 StoreVal1->setAlignment(Align); 2510 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1); 2511 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); 2512 return; 2513 } 2514 2515 if (E->getOp() == AtomicExpr::Load) { 2516 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); 2517 Load->setAtomic(Order); 2518 Load->setAlignment(Size); 2519 Load->setVolatile(E->isVolatile()); 2520 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); 2521 StoreDest->setAlignment(Align); 2522 return; 2523 } 2524 2525 if (E->getOp() == AtomicExpr::Store) { 2526 assert(!Dest && "Store does not return a value"); 2527 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2528 LoadVal1->setAlignment(Align); 2529 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); 2530 Store->setAtomic(Order); 2531 Store->setAlignment(Size); 2532 Store->setVolatile(E->isVolatile()); 2533 return; 2534 } 2535 2536 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; 2537 switch (E->getOp()) { 2538 case AtomicExpr::CmpXchgWeak: 2539 case AtomicExpr::CmpXchgStrong: 2540 case AtomicExpr::Store: 2541 case AtomicExpr::Init: 2542 case AtomicExpr::Load: assert(0 && "Already handled!"); 2543 case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break; 2544 case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break; 2545 case AtomicExpr::And: Op = llvm::AtomicRMWInst::And; break; 2546 case AtomicExpr::Or: Op = llvm::AtomicRMWInst::Or; break; 2547 case AtomicExpr::Xor: Op = llvm::AtomicRMWInst::Xor; break; 2548 case AtomicExpr::Xchg: Op = llvm::AtomicRMWInst::Xchg; break; 2549 } 2550 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 2551 LoadVal1->setAlignment(Align); 2552 llvm::AtomicRMWInst *RMWI = 2553 CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order); 2554 RMWI->setVolatile(E->isVolatile()); 2555 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest); 2556 StoreDest->setAlignment(Align); 2557 } 2558 2559 // This function emits any expression (scalar, complex, or aggregate) 2560 // into a temporary alloca. 
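// Spilling even scalar operands to memory lets the lowering below work
// uniformly on addresses (Val1/Val2/Dest), at the cost of extra loads and
// stores that the optimizer is expected to clean up.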
2561 static llvm::Value * 2562 EmitValToTemp(CodeGenFunction &CGF, Expr *E) { 2563 llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp"); 2564 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), 2565 /*Init*/ true); 2566 return DeclPtr; 2567 } 2568 2569 static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty, 2570 llvm::Value *Dest) { 2571 if (Ty->isAnyComplexType()) 2572 return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false)); 2573 if (CGF.hasAggregateLLVMType(Ty)) 2574 return RValue::getAggregate(Dest); 2575 return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty))); 2576 } 2577 2578 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) { 2579 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 2580 QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType(); 2581 CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy); 2582 uint64_t Size = sizeChars.getQuantity(); 2583 CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy); 2584 unsigned Align = alignChars.getQuantity(); 2585 unsigned MaxInlineWidth = 2586 getContext().getTargetInfo().getMaxAtomicInlineWidth(); 2587 bool UseLibcall = (Size != Align || Size > MaxInlineWidth); 2588 2589 2590 2591 llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0; 2592 Ptr = EmitScalarExpr(E->getPtr()); 2593 2594 if (E->getOp() == AtomicExpr::Init) { 2595 assert(!Dest && "Init does not return a value"); 2596 Val1 = EmitScalarExpr(E->getVal1()); 2597 llvm::StoreInst *Store = Builder.CreateStore(Val1, Ptr); 2598 Store->setAlignment(Size); 2599 Store->setVolatile(E->isVolatile()); 2600 return RValue::get(0); 2601 } 2602 2603 Order = EmitScalarExpr(E->getOrder()); 2604 if (E->isCmpXChg()) { 2605 Val1 = EmitScalarExpr(E->getVal1()); 2606 Val2 = EmitValToTemp(*this, E->getVal2()); 2607 OrderFail = EmitScalarExpr(E->getOrderFail()); 2608 (void)OrderFail; // OrderFail is unused at the moment 2609 } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) && 2610 MemTy->isPointerType()) { 2611 // For pointers, we're required to do a bit of math: adding 1 to an int* 2612 // is not the same as adding 1 to a uintptr_t. 2613 QualType Val1Ty = E->getVal1()->getType(); 2614 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1()); 2615 CharUnits PointeeIncAmt = 2616 getContext().getTypeSizeInChars(MemTy->getPointeeType()); 2617 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt)); 2618 Val1 = CreateMemTemp(Val1Ty, ".atomictmp"); 2619 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty)); 2620 } else if (E->getOp() != AtomicExpr::Load) { 2621 Val1 = EmitValToTemp(*this, E->getVal1()); 2622 } 2623 2624 if (E->getOp() != AtomicExpr::Store && !Dest) 2625 Dest = CreateMemTemp(E->getType(), ".atomicdst"); 2626 2627 if (UseLibcall) { 2628 // FIXME: Finalize what the libcalls are actually supposed to look like. 2629 // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary . 
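    // Until that interface is settled, reject the expression outright; a
    // sketch of the intended lowering is preserved under '#if 0' below.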
2630 return EmitUnsupportedRValue(E, "atomic library call"); 2631 } 2632 #if 0 2633 if (UseLibcall) { 2634 const char* LibCallName; 2635 switch (E->getOp()) { 2636 case AtomicExpr::CmpXchgWeak: 2637 LibCallName = "__atomic_compare_exchange_generic"; break; 2638 case AtomicExpr::CmpXchgStrong: 2639 LibCallName = "__atomic_compare_exchange_generic"; break; 2640 case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break; 2641 case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break; 2642 case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break; 2643 case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break; 2644 case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break; 2645 case AtomicExpr::Xchg: LibCallName = "__atomic_exchange_generic"; break; 2646 case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break; 2647 case AtomicExpr::Load: LibCallName = "__atomic_load_generic"; break; 2648 } 2649 llvm::SmallVector<QualType, 4> Params; 2650 CallArgList Args; 2651 QualType RetTy = getContext().VoidTy; 2652 if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg()) 2653 Args.add(RValue::get(EmitCastToVoidPtr(Dest)), 2654 getContext().VoidPtrTy); 2655 Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), 2656 getContext().VoidPtrTy); 2657 if (E->getOp() != AtomicExpr::Load) 2658 Args.add(RValue::get(EmitCastToVoidPtr(Val1)), 2659 getContext().VoidPtrTy); 2660 if (E->isCmpXChg()) { 2661 Args.add(RValue::get(EmitCastToVoidPtr(Val2)), 2662 getContext().VoidPtrTy); 2663 RetTy = getContext().IntTy; 2664 } 2665 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)), 2666 getContext().getSizeType()); 2667 const CGFunctionInfo &FuncInfo = 2668 CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo()); 2669 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false); 2670 llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName); 2671 RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args); 2672 if (E->isCmpXChg()) 2673 return Res; 2674 if (E->getOp() == AtomicExpr::Store) 2675 return RValue::get(0); 2676 return ConvertTempToRValue(*this, E->getType(), Dest); 2677 } 2678 #endif 2679 llvm::Type *IPtrTy = 2680 llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo(); 2681 llvm::Value *OrigDest = Dest; 2682 Ptr = Builder.CreateBitCast(Ptr, IPtrTy); 2683 if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy); 2684 if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy); 2685 if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy); 2686 2687 if (isa<llvm::ConstantInt>(Order)) { 2688 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue(); 2689 switch (ord) { 2690 case 0: // memory_order_relaxed 2691 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2692 llvm::Monotonic); 2693 break; 2694 case 1: // memory_order_consume 2695 case 2: // memory_order_acquire 2696 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2697 llvm::Acquire); 2698 break; 2699 case 3: // memory_order_release 2700 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2701 llvm::Release); 2702 break; 2703 case 4: // memory_order_acq_rel 2704 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2705 llvm::AcquireRelease); 2706 break; 2707 case 5: // memory_order_seq_cst 2708 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2709 llvm::SequentiallyConsistent); 2710 break; 2711 default: // invalid order 2712 // We should not ever get here normally, but it's hard to 2713 // 
enforce that in general. 2714 break; 2715 } 2716 if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init) 2717 return RValue::get(0); 2718 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2719 } 2720 2721 // Long case, when Order isn't obviously constant. 2722 2723 // Create all the relevant BB's 2724 llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0, 2725 *AcqRelBB = 0, *SeqCstBB = 0; 2726 MonotonicBB = createBasicBlock("monotonic", CurFn); 2727 if (E->getOp() != AtomicExpr::Store) 2728 AcquireBB = createBasicBlock("acquire", CurFn); 2729 if (E->getOp() != AtomicExpr::Load) 2730 ReleaseBB = createBasicBlock("release", CurFn); 2731 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) 2732 AcqRelBB = createBasicBlock("acqrel", CurFn); 2733 SeqCstBB = createBasicBlock("seqcst", CurFn); 2734 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); 2735 2736 // Create the switch for the split 2737 // MonotonicBB is arbitrarily chosen as the default case; in practice, this 2738 // doesn't matter unless someone is crazy enough to use something that 2739 // doesn't fold to a constant for the ordering. 2740 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); 2741 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB); 2742 2743 // Emit all the different atomics 2744 Builder.SetInsertPoint(MonotonicBB); 2745 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2746 llvm::Monotonic); 2747 Builder.CreateBr(ContBB); 2748 if (E->getOp() != AtomicExpr::Store) { 2749 Builder.SetInsertPoint(AcquireBB); 2750 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2751 llvm::Acquire); 2752 Builder.CreateBr(ContBB); 2753 SI->addCase(Builder.getInt32(1), AcquireBB); 2754 SI->addCase(Builder.getInt32(2), AcquireBB); 2755 } 2756 if (E->getOp() != AtomicExpr::Load) { 2757 Builder.SetInsertPoint(ReleaseBB); 2758 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2759 llvm::Release); 2760 Builder.CreateBr(ContBB); 2761 SI->addCase(Builder.getInt32(3), ReleaseBB); 2762 } 2763 if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) { 2764 Builder.SetInsertPoint(AcqRelBB); 2765 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2766 llvm::AcquireRelease); 2767 Builder.CreateBr(ContBB); 2768 SI->addCase(Builder.getInt32(4), AcqRelBB); 2769 } 2770 Builder.SetInsertPoint(SeqCstBB); 2771 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align, 2772 llvm::SequentiallyConsistent); 2773 Builder.CreateBr(ContBB); 2774 SI->addCase(Builder.getInt32(5), SeqCstBB); 2775 2776 // Cleanup and return 2777 Builder.SetInsertPoint(ContBB); 2778 if (E->getOp() == AtomicExpr::Store) 2779 return RValue::get(0); 2780 return ConvertTempToRValue(*this, E->getType(), OrigDest); 2781 } 2782 2783 void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, unsigned AccuracyN, 2784 unsigned AccuracyD) { 2785 assert(Val->getType()->isFPOrFPVectorTy()); 2786 if (!AccuracyN || !isa<llvm::Instruction>(Val)) 2787 return; 2788 2789 llvm::Value *Vals[2]; 2790 Vals[0] = llvm::ConstantInt::get(Int32Ty, AccuracyN); 2791 Vals[1] = llvm::ConstantInt::get(Int32Ty, AccuracyD); 2792 llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), Vals); 2793 2794 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy, 2795 Node); 2796 } 2797 2798 namespace { 2799 struct LValueOrRValue { 2800 LValue LV; 2801 RValue RV; 2802 }; 2803 } 2804 2805 static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, 2806 const 
PseudoObjectExpr *E, 2807 bool forLValue, 2808 AggValueSlot slot) { 2809 llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 2810 2811 // Find the result expression, if any. 2812 const Expr *resultExpr = E->getResultExpr(); 2813 LValueOrRValue result; 2814 2815 for (PseudoObjectExpr::const_semantics_iterator 2816 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 2817 const Expr *semantic = *i; 2818 2819 // If this semantic expression is an opaque value, bind it 2820 // to the result of its source expression. 2821 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 2822 2823 // If this is the result expression, we may need to evaluate 2824 // directly into the slot. 2825 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 2826 OVMA opaqueData; 2827 if (ov == resultExpr && ov->isRValue() && !forLValue && 2828 CodeGenFunction::hasAggregateLLVMType(ov->getType()) && 2829 !ov->getType()->isAnyComplexType()) { 2830 CGF.EmitAggExpr(ov->getSourceExpr(), slot); 2831 2832 LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType()); 2833 opaqueData = OVMA::bind(CGF, ov, LV); 2834 result.RV = slot.asRValue(); 2835 2836 // Otherwise, emit as normal. 2837 } else { 2838 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 2839 2840 // If this is the result, also evaluate the result now. 2841 if (ov == resultExpr) { 2842 if (forLValue) 2843 result.LV = CGF.EmitLValue(ov); 2844 else 2845 result.RV = CGF.EmitAnyExpr(ov, slot); 2846 } 2847 } 2848 2849 opaques.push_back(opaqueData); 2850 2851 // Otherwise, if the expression is the result, evaluate it 2852 // and remember the result. 2853 } else if (semantic == resultExpr) { 2854 if (forLValue) 2855 result.LV = CGF.EmitLValue(semantic); 2856 else 2857 result.RV = CGF.EmitAnyExpr(semantic, slot); 2858 2859 // Otherwise, evaluate the expression in an ignored context. 2860 } else { 2861 CGF.EmitIgnoredExpr(semantic); 2862 } 2863 } 2864 2865 // Unbind all the opaques now. 2866 for (unsigned i = 0, e = opaques.size(); i != e; ++i) 2867 opaques[i].unbind(CGF); 2868 2869 return result; 2870 } 2871 2872 RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, 2873 AggValueSlot slot) { 2874 return emitPseudoObjectExpr(*this, E, false, slot).RV; 2875 } 2876 2877 LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { 2878 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; 2879 } 2880