//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/ConvertUTF.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/MDBuilder.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/Hashing.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}
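// Illustrative note: CreateIRTemp and CreateMemTemp differ in which LLVM type
// they allocate. For a C++ 'bool', for example:
//
//   CreateIRTemp(BoolTy)  ->  %tmp = alloca i1   ; ConvertType (register form)
//   CreateMemTemp(BoolTy) ->  %tmp = alloca i8   ; ConvertTypeForMem (memory form)
//
// EmitToMemory/EmitFromMemory (later in this file) convert between the forms.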
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));

  if (!ignoreResult && aggSlot.isIgnored())
    aggSlot = CreateAggTemp(E->getType(), "agg-temp");
  EmitAggExpr(E, aggSlot);
  return aggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}
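// Illustrative note: EmitAnyExprToMem mirrors Clang's three evaluation kinds.
// For example (declarations assumed for illustration):
//
//   _Complex double c;   // complex path:   EmitComplexExprIntoAddr
//   struct Point p;      // aggregate path: EmitAggExpr into the slot
//   int i;               // scalar path:    EmitScalarExpr + store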
static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  const MaterializeTemporaryExpr *M = NULL;
  E = E->findMaterializedTemporary(M);
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  if (M && CGF.getLangOpts().ObjCAutoRefCount &&
      M->getType()->isObjCLifetimeType() &&
      (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
    ObjCARCReferenceLifetimeType = M->getType();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    E = E->skipRValueSubobjectAdjustments(Adjustments);
    if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
      if (opaque->getType()->isRecordType())
        return CGF.EmitOpaqueValueLValue(opaque).getAddress();

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV = CGF.MakeAddrLValue(Object, E->getType());
          LV = CGF.EmitLValueForField(LV, Adjustment.Field);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        case SubobjectAdjustment::MemberPointerAdjustment: {
          llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
          Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
                        CGF, Object, Ptr, Adjustment.Ptr.MPT);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}
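// Illustrative note: a typical lifetime-extended binding such as
//
//   const std::string &r = std::string("hi");
//
// materializes a "ref.tmp" via CreateReferenceTemporary, constructs the
// temporary in place, and (in EmitReferenceBindingToExpr below) pushes a
// destructor cleanup so ~std::string() runs when 'r' goes out of scope.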
RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}
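// For reference, the IR built by emitHash16Bytes computes the following
// scalar function (the mixing step used by llvm::hash_combine):
//
//   uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
//     const uint64_t kMul = 0x9ddfea08eb382d69ULL;
//     uint64_t a = (low ^ high) * kMul;
//     a ^= (a >> 47);
//     uint64_t b = (high ^ a) * kMul;
//     b ^= (b >> 47);
//     return b * kMul;
//   }
//
// EmitTypeCheck below uses this to hash (mangled type name, vptr) pairs; in
// C-like pseudocode the emitted vptr check is roughly
//
//   if (__ubsan_vptr_type_cache[Hash & (CacheSize - 1)] != Hash)
//     <call the dynamic_type_cache_miss runtime handler>;
//
// where the handler either validates the vptr and fills the cache slot, or
// produces a diagnostic (the exact entry point lives in compiler-rt).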
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Address,
                                    QualType Ty, CharUnits Alignment) {
  if (!SanitizePerformTypeCheck)
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Address->getType()->getPointerAddressSpace())
    return;

  llvm::Value *Cond = 0;

  if (getLangOpts().SanitizeNull) {
    // The glvalue must not be an empty glvalue.
    Cond = Builder.CreateICmpNE(
        Address, llvm::Constant::getNullValue(Address->getType()));
  }

  if (getLangOpts().SanitizeObjectSize && !Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();

    // The glvalue must refer to a large enough storage region.
    // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
    llvm::Value *LargeEnough =
        Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
                              llvm::ConstantInt::get(IntPtrTy, Size));
    Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
  }

  uint64_t AlignVal = 0;

  if (getLangOpts().SanitizeAlignment) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal) {
      llvm::Value *Align =
          Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
                            llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
        Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
    }
  }

  if (Cond) {
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      llvm::ConstantInt::get(SizeTy, AlignVal),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    EmitCheck(Cond, "type_mismatch", StaticData, Address);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (getLangOpts().SanitizeVptr && TCK != TCK_ConstructorCall &&
      RD && RD->hasDefinition() && RD->isDynamicClass()) {
    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    llvm::SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);
    llvm::hash_code TypeHash = hash_value(Out.str());

    // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
    llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
    llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
    llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
    llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
    llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

    llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
    Hash = Builder.CreateTrunc(Hash, IntPtrTy);

    // Look the hash up in our cache.
    const int CacheSize = 128;
    llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
    llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                   "__ubsan_vptr_type_cache");
    llvm::Value *Slot = Builder.CreateAnd(Hash,
                                          llvm::ConstantInt::get(IntPtrTy,
                                                                 CacheSize-1));
    llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
    llvm::Value *CacheVal =
      Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));

    // If the hash isn't in the cache, call a runtime handler to perform the
    // hard work of checking whether the vptr is for an object of the right
    // type. This will either fill in the cache and return, or produce a
    // diagnostic.
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    llvm::Value *DynamicData[] = { Address, Hash };
    EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
              "dynamic_type_cache_miss", StaticData, DynamicData, true);
  }
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
                  E->getType(), LV.getAlignment());
  return LV;
}
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const RecordType *RT = dyn_cast<RecordType>(type))
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type?  This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be
/// able to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
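// Illustrative note: this classification lets a block or lambda read a
// constant without capturing it, e.g.
//
//   const int n = 4;
//   auto f = [] { return n + 1; };  // 'n' folds to 4; no capture required
//
// tryEmitAsConstant below performs the fold when evaluation succeeds and the
// initializer has no side effects.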
/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), lvalue.getTBAAInfo());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}
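// Illustrative note for getRangeForLoadFromType below: the !range metadata
// attached to a load encodes a half-open interval [Min, End). For example:
//
//   bool b;                 // load i8 ..., !range !{i8 0, i8 2}
//   enum E { A, B, C } e;   // with -fstrict-enums in C++:
//                           //   load i32 ..., !range !{i32 0, i32 4}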
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
                                 CGM.getCodeGenOpts().StrictEnums &&
                                 !ET->getDecl()->isFixed());
  bool IsBool = hasBooleanRepresentation(Ty);
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return NULL;

  llvm::APInt Min;
  llvm::APInt End;
  if (IsBool) {
    Min = llvm::APInt(8, 0);
    End = llvm::APInt(8, 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {

  // For better performance, handle vector loads differently.
  if (Ty->isVectorType()) {
    llvm::Value *V;
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Addr->getType())->getElementType();

    const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);

    // Handle vectors of size 3 like size-4 vectors for better performance.
    if (VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
                                                       4);
      llvm::PointerType *ptVec4Ty =
        llvm::PointerType::get(vec4Ty,
                               (cast<llvm::PointerType>(
                                    Addr->getType()))->getAddressSpace());
      llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
                                                "castToVec4");
      // Now load value.
      llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      llvm::SmallVector<llvm::Constant*, 3> Mask;
      Mask.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(getLLVMContext()),
                                            0));
      Mask.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(getLLVMContext()),
                                            1));
      Mask.push_back(llvm::ConstantInt::get(
                                   llvm::Type::getInt32Ty(getLLVMContext()),
                                            2));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      V = Builder.CreateShuffleVector(LoadVal,
                                      llvm::UndefValue::get(vec4Ty),
                                      MaskV, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);
  // If this is an atomic type, all normal reads must be atomic.
  if (Ty->isAtomicType())
    Load->setAtomic(llvm::SequentiallyConsistent);

  if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
    assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}
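// Illustrative note: for a bool round trip, EmitToMemory/EmitFromMemory above
// produce IR along these lines:
//
//   %frombool = zext i1 %v to i8    ; EmitToMemory before a store
//   store i8 %frombool, i8* %b
//   %0 = load i8* %b
//   %tobool = trunc i8 %0 to i1     ; EmitFromMemory after a load
//
// EmitStoreOfScalar below additionally widens vec3 stores: a <3 x float>
// value is shuffled to <4 x float> (mask <0, 1, 2, undef>) and stored through
// a bitcast pointer ("storetmp"), matching the vec4 load path above.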
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {

  // Handle vectors differently to get better performance.
  if (Ty->isVectorType()) {
    llvm::Type *SrcTy = Value->getType();
    llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 specially.
    if (VecTy->getNumElements() == 3) {
      llvm::LLVMContext &VMContext = getLLVMContext();

      // Our source is a vec3, do a shuffle vector to make it a vec4.
      llvm::SmallVector<llvm::Constant*, 4> Mask;
      Mask.push_back(llvm::ConstantInt::get(
                                          llvm::Type::getInt32Ty(VMContext),
                                            0));
      Mask.push_back(llvm::ConstantInt::get(
                                          llvm::Type::getInt32Ty(VMContext),
                                            1));
      Mask.push_back(llvm::ConstantInt::get(
                                          llvm::Type::getInt32Ty(VMContext),
                                            2));
      Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Value = Builder.CreateShuffleVector(Value,
                                          llvm::UndefValue::get(VecTy),
                                          MaskV, "extractVec");
      SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
    }
    llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }

  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents an lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
    return RValue::get(EmitARCLoadWeak(LV.getAddress()));

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}
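// Illustrative note for EmitLoadOfBitfieldLValue below: for a simple signed
// bit-field such as
//
//   struct S { int b : 3; };   // 'b' stored in the low bits of a byte
//
// the emitted sequence is roughly: load the i8 access unit, mask away the
// unused high bits ("bf.clear"), zero-extend to i32, then sign-extend the
// 3-bit value with an shl/ashr pair ("bf.val.sext").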
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());
  unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);

  // Compute the result as an OR of all of the individual component accesses.
  llvm::Value *Res = 0;
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
    CharUnits AccessAlignment = AI.AccessAlignment;
    if (!LV.getAlignment().isZero())
      AccessAlignment = std::min(AccessAlignment, LV.getAlignment());

    // Get the field pointer.
    llvm::Value *Ptr = LV.getBitFieldBaseAddr();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
                        CGM.getContext().getTargetAddressSpace(LV.getType()));
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Perform the load.
    llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
    Load->setAlignment(AccessAlignment.getQuantity());

    // Shift out unused low bits and mask out unused high bits.
    llvm::Value *Val = Load;
    if (AI.FieldBitStart)
      Val = Builder.CreateLShr(Load, AI.FieldBitStart);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
                                                            AI.TargetBitWidth),
                            "bf.clear");

    // Extend or truncate to the target size.
    if (AI.AccessWidth < ResSizeInBits)
      Val = Builder.CreateZExt(Val, ResLTy);
    else if (AI.AccessWidth > ResSizeInBits)
      Val = Builder.CreateTrunc(Val, ResLTy);

    // Shift into place, and OR into the result.
    if (AI.TargetBitOffset)
      Val = Builder.CreateShl(Val, AI.TargetBitOffset);
    Res = Res ? Builder.CreateOr(Res, Val) : Val;
  }

  // If the bit-field is signed, perform the sign-extension.
  //
  // FIXME: This can easily be folded into the load of the high bits, which
  // could also eliminate the mask of high bits in some situations.
  if (Info.isSigned()) {
    unsigned ExtraBits = ResSizeInBits - Info.getSize();
    if (ExtraBits)
      Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
                               ExtraBits, "bf.val.sext");
  }

  return RValue::get(Res);
}
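// Illustrative note for EmitLoadOfExtVectorElementLValue below: a swizzle
// read such as
//
//   float4 v; float2 w = v.yx;   // OpenCL/ext_vector syntax
//
// loads the whole vector and emits a single shufflevector with mask <1, 0>
// rather than a pair of extractelement/insertelement operations.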
// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure.
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}
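// Illustrative note: for the vector-element case handled first in
// EmitStoreThroughLValue below, an assignment like 'v[i] = x' becomes a
// read/modify/write:
//
//   %load   = load <4 x float>* %v
//   %vecins = insertelement <4 x float> %load, float %x, i32 %i
//   store <4 x float> %vecins, <4 x float>* %v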
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
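// Illustrative note on the ARC path above: under -fobjc-arc, an assignment to
// a __strong l-value, e.g.
//
//   __strong id x;
//   x = y;   // EmitARCStoreStrong: retain y, store it, release the old value
//
// is handled by the runtime-aware helpers, while a __weak assignment is
// lowered to an objc_storeWeak runtime call.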
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  unsigned ResSizeInBits = CGM.getDataLayout().getTypeSizeInBits(ResLTy);

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  if (hasBooleanRepresentation(Dst.getType()))
    SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);

  SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                                Info.getSize()),
                             "bf.value");

  // Return the new value of the bit-field, if requested.
  if (Result) {
    // Cast back to the proper type for result.
    llvm::Type *SrcTy = Src.getScalarVal()->getType();
    llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
                                                   "bf.reload.val");

    // Sign extend if necessary.
    if (Info.isSigned()) {
      unsigned ExtraBits = ResSizeInBits - Info.getSize();
      if (ExtraBits)
        ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
                                       ExtraBits, "bf.reload.sext");
    }

    *Result = ReloadVal;
  }

  // Iterate over the components, writing each piece to memory.
  for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
    const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
    CharUnits AccessAlignment = AI.AccessAlignment;
    if (!Dst.getAlignment().isZero())
      AccessAlignment = std::min(AccessAlignment, Dst.getAlignment());

    // Get the field pointer.
    llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
    unsigned addressSpace =
      cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();

    // Only offset by the field index if used, so that incoming values are not
    // required to be structures.
    if (AI.FieldIndex)
      Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");

    // Offset by the byte offset, if used.
    if (!AI.FieldByteOffset.isZero()) {
      Ptr = EmitCastToVoidPtr(Ptr);
      Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
                                       "bf.field.offs");
    }

    // Cast to the access type.
    llvm::Type *AccessLTy =
      llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);

    llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
    Ptr = Builder.CreateBitCast(Ptr, PTy);

    // Extract the piece of the bit-field value to write in this access, limited
    // to the values that are part of this access.
    llvm::Value *Val = SrcVal;
    if (AI.TargetBitOffset)
      Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
    Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
                                                            AI.TargetBitWidth));

    // Extend or truncate to the access size.
    if (ResSizeInBits < AI.AccessWidth)
      Val = Builder.CreateZExt(Val, AccessLTy);
    else if (ResSizeInBits > AI.AccessWidth)
      Val = Builder.CreateTrunc(Val, AccessLTy);

    // Shift into the position in memory.
    if (AI.FieldBitStart)
      Val = Builder.CreateShl(Val, AI.FieldBitStart);

    // If necessary, load and OR in bits that are outside of the bit-field.
    if (AI.TargetBitWidth != AI.AccessWidth) {
      llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
      Load->setAlignment(AccessAlignment.getQuantity());

      // Compute the mask for zeroing the bits that are part of the bit-field.
      llvm::APInt InvMask =
        ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
                                 AI.FieldBitStart + AI.TargetBitWidth);

      // Apply the mask and OR in to the value to write.
      Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
    }

    // Write the value.
    llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
                                                 Dst.isVolatileQualified());
    Store->setAlignment(AccessAlignment.getQuantity());
  }
}
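// Illustrative note: when a component access is wider than the bit-field
// itself, the store above is a read/modify/write. For 's.b = v' with
// 'struct S { int b : 3; };' this is roughly:
//
//   %old  = load i8* %ptr
//   %keep = and i8 %old, -8    ; clear the low 3 bits (InvMask)
//   %new  = or i8 %keep, %val  ; merge in the masked source value
//   store i8 %new, i8* %ptr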
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                            Dst.isVolatileQualified());
  Load->setAlignment(Dst.getAlignment().getQuantity());
  llvm::Value *Vec = Load;
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
       cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector if the source and destination have the same
      // number of elements, and restore the vector mask since it is on the
      // side it will be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // build identity
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // modify the slots that get shuffled in from the source
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector.
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
                                               Dst.isVolatileQualified());
  Store->setAlignment(Dst.getAlignment().getQuantity());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating write-barrier API calls. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of this
      // struct follows gcc's behavior and makes it a non-ivar write-barrier
      // conservatively.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
1567 QualType ExpTy = E->getType();
1568 if (ExpTy->isPointerType())
1569 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1570 if (ExpTy->isRecordType())
1571 LV.setObjCIvar(false);
1572 }
1573 return;
1574 }
1575
1576 if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1577 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1578 return;
1579 }
1580
1581 if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1582 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1583 return;
1584 }
1585
1586 if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
1587 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1588 return;
1589 }
1590
1591 if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1592 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1593 return;
1594 }
1595
1596 if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1597 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1598 if (LV.isObjCIvar() && !LV.isObjCArray())
1599 // Using array syntax to assign to what an ivar points to is not the
1600 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1601 LV.setObjCIvar(false);
1602 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
1603 // Using array syntax to assign to what a global points to is not the
1604 // same as assigning to the global itself. {id *G;} G[i] = 0;
1605 LV.setGlobalObjCRef(false);
1606 return;
1607 }
1608
1609 if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
1610 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
1611 // We don't know whether the member is an 'ivar', but this flag is looked
1612 // at only in the context of LV.isObjCIvar().
1613 LV.setObjCArray(E->getType()->isArrayType());
1614 return;
1615 }
1616 }
1617
1618 static llvm::Value *
1619 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
1620 llvm::Value *V, llvm::Type *IRType,
1621 StringRef Name = StringRef()) {
1622 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
1623 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
1624 }
1625
1626 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
1627 const Expr *E, const VarDecl *VD) {
1628 assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
1629 "Var decl must have external storage or be a file var decl!");
1630
1631 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
1632 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
1633 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
1634 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
1635 QualType T = E->getType();
1636 LValue LV;
1637 if (VD->getType()->isReferenceType()) {
1638 llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
1639 LI->setAlignment(Alignment.getQuantity());
1640 V = LI;
1641 LV = CGF.MakeNaturalAlignAddrLValue(V, T);
1642 } else {
1643 LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
1644 }
1645 setObjCGCLValueClass(CGF.getContext(), E, LV);
1646 return LV;
1647 }
1648
1649 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
1650 const Expr *E, const FunctionDecl *FD) {
1651 llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
1652 if (!FD->hasPrototype()) {
1653 if (const FunctionProtoType *Proto =
1654 FD->getType()->getAs<FunctionProtoType>()) {
1655 // Ugly case: for a K&R-style definition, the type of the definition
1656 // isn't the same as the type of a use. Correct for this with a
1657 // bitcast.
1658 QualType NoProtoType = 1659 CGF.getContext().getFunctionNoProtoType(Proto->getResultType()); 1660 NoProtoType = CGF.getContext().getPointerType(NoProtoType); 1661 V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType)); 1662 } 1663 } 1664 CharUnits Alignment = CGF.getContext().getDeclAlign(FD); 1665 return CGF.MakeAddrLValue(V, E->getType(), Alignment); 1666 } 1667 1668 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 1669 const NamedDecl *ND = E->getDecl(); 1670 CharUnits Alignment = getContext().getDeclAlign(ND); 1671 QualType T = E->getType(); 1672 1673 // A DeclRefExpr for a reference initialized by a constant expression can 1674 // appear without being odr-used. Directly emit the constant initializer. 1675 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { 1676 const Expr *Init = VD->getAnyInitializer(VD); 1677 if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() && 1678 VD->isUsableInConstantExpressions(getContext()) && 1679 VD->checkInitIsICE()) { 1680 llvm::Constant *Val = 1681 CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this); 1682 assert(Val && "failed to emit reference constant expression"); 1683 // FIXME: Eventually we will want to emit vector element references. 1684 return MakeAddrLValue(Val, T, Alignment); 1685 } 1686 } 1687 1688 // FIXME: We should be able to assert this for FunctionDecls as well! 1689 // FIXME: We should be able to assert this for all DeclRefExprs, not just 1690 // those with a valid source location. 1691 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || 1692 !E->getLocation().isValid()) && 1693 "Should not use decl without marking it used!"); 1694 1695 if (ND->hasAttr<WeakRefAttr>()) { 1696 const ValueDecl *VD = cast<ValueDecl>(ND); 1697 llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD); 1698 return MakeAddrLValue(Aliasee, T, Alignment); 1699 } 1700 1701 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { 1702 // Check if this is a global variable. 1703 if (VD->hasExternalStorage() || VD->isFileVarDecl()) 1704 return EmitGlobalVarDeclLValue(*this, E, VD); 1705 1706 bool isBlockVariable = VD->hasAttr<BlocksAttr>(); 1707 1708 bool NonGCable = VD->hasLocalStorage() && 1709 !VD->getType()->isReferenceType() && 1710 !isBlockVariable; 1711 1712 llvm::Value *V = LocalDeclMap[VD]; 1713 if (!V && VD->isStaticLocal()) 1714 V = CGM.getStaticLocalDeclAddress(VD); 1715 1716 // Use special handling for lambdas. 
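// Note (added commentary): a variable captured by a lambda has no entry in
// LocalDeclMap inside the lambda body; the DeclRefExpr is lowered below as
// a field access into the closure object addressed by CXXABIThisValue.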
1717 if (!V) {
1718 if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
1719 QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
1720 LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
1721 LambdaTagType);
1722 return EmitLValueForField(LambdaLV, FD);
1723 }
1724
1725 assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
1726 return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
1727 T, Alignment);
1728 }
1729
1730 assert(V && "DeclRefExpr not entered in LocalDeclMap?");
1731
1732 if (isBlockVariable)
1733 V = BuildBlockByrefAddress(V, VD);
1734
1735 LValue LV;
1736 if (VD->getType()->isReferenceType()) {
1737 llvm::LoadInst *LI = Builder.CreateLoad(V);
1738 LI->setAlignment(Alignment.getQuantity());
1739 V = LI;
1740 LV = MakeNaturalAlignAddrLValue(V, T);
1741 } else {
1742 LV = MakeAddrLValue(V, T, Alignment);
1743 }
1744
1745 if (NonGCable) {
1746 LV.getQuals().removeObjCGCAttr();
1747 LV.setNonGC(true);
1748 }
1749 setObjCGCLValueClass(getContext(), E, LV);
1750 return LV;
1751 }
1752
1753 if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
1754 return EmitFunctionDeclLValue(*this, E, fn);
1755
1756 llvm_unreachable("Unhandled DeclRefExpr");
1757 }
1758
1759 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
1760 // __extension__ doesn't affect lvalue-ness.
1761 if (E->getOpcode() == UO_Extension)
1762 return EmitLValue(E->getSubExpr());
1763
1764 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
1765 switch (E->getOpcode()) {
1766 default: llvm_unreachable("Unknown unary operator lvalue!");
1767 case UO_Deref: {
1768 QualType T = E->getSubExpr()->getType()->getPointeeType();
1769 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
1770
1771 LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
1772 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
1773
1774 // We should not generate a __weak write barrier on an indirect reference
1775 // of a pointer to object; e.g. void foo (__weak id *param); *param = 0;
1776 // But we continue to generate a __strong write barrier on an indirect
1777 // write into a pointer to object.
1778 if (getLangOpts().ObjC1 &&
1779 getLangOpts().getGC() != LangOptions::NonGC &&
1780 LV.isObjCWeak())
1781 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
1782 return LV;
1783 }
1784 case UO_Real:
1785 case UO_Imag: {
1786 LValue LV = EmitLValue(E->getSubExpr());
1787 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
1788 llvm::Value *Addr = LV.getAddress();
1789
1790 // __real is valid on scalars. This is a faster way of testing that.
1791 // __imag can only produce an rvalue on scalars.
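// For example, given "float f;", "__real f" is simply the lvalue of f,
// while for "_Complex float c" both __real and __imag use a struct GEP
// into the {float, float} pair (element 0 for __real, 1 for __imag).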
1792 if (E->getOpcode() == UO_Real && 1793 !cast<llvm::PointerType>(Addr->getType()) 1794 ->getElementType()->isStructTy()) { 1795 assert(E->getSubExpr()->getType()->isArithmeticType()); 1796 return LV; 1797 } 1798 1799 assert(E->getSubExpr()->getType()->isAnyComplexType()); 1800 1801 unsigned Idx = E->getOpcode() == UO_Imag; 1802 return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(), 1803 Idx, "idx"), 1804 ExprTy); 1805 } 1806 case UO_PreInc: 1807 case UO_PreDec: { 1808 LValue LV = EmitLValue(E->getSubExpr()); 1809 bool isInc = E->getOpcode() == UO_PreInc; 1810 1811 if (E->getType()->isAnyComplexType()) 1812 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); 1813 else 1814 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); 1815 return LV; 1816 } 1817 } 1818 } 1819 1820 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 1821 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), 1822 E->getType()); 1823 } 1824 1825 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 1826 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), 1827 E->getType()); 1828 } 1829 1830 static llvm::Constant* 1831 GetAddrOfConstantWideString(StringRef Str, 1832 const char *GlobalName, 1833 ASTContext &Context, 1834 QualType Ty, SourceLocation Loc, 1835 CodeGenModule &CGM) { 1836 1837 StringLiteral *SL = StringLiteral::Create(Context, 1838 Str, 1839 StringLiteral::Wide, 1840 /*Pascal = */false, 1841 Ty, Loc); 1842 llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL); 1843 llvm::GlobalVariable *GV = 1844 new llvm::GlobalVariable(CGM.getModule(), C->getType(), 1845 !CGM.getLangOpts().WritableStrings, 1846 llvm::GlobalValue::PrivateLinkage, 1847 C, GlobalName); 1848 const unsigned WideAlignment = 1849 Context.getTypeAlignInChars(Ty).getQuantity(); 1850 GV->setAlignment(WideAlignment); 1851 return GV; 1852 } 1853 1854 static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source, 1855 SmallString<32>& Target) { 1856 Target.resize(CharByteWidth * (Source.size() + 1)); 1857 char *ResultPtr = &Target[0]; 1858 const UTF8 *ErrorPtr; 1859 bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr); 1860 (void)success; 1861 assert(success); 1862 Target.resize(ResultPtr - &Target[0]); 1863 } 1864 1865 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 1866 switch (E->getIdentType()) { 1867 default: 1868 return EmitUnsupportedLValue(E, "predefined expression"); 1869 1870 case PredefinedExpr::Func: 1871 case PredefinedExpr::Function: 1872 case PredefinedExpr::LFunction: 1873 case PredefinedExpr::PrettyFunction: { 1874 unsigned IdentType = E->getIdentType(); 1875 std::string GlobalVarName; 1876 1877 switch (IdentType) { 1878 default: llvm_unreachable("Invalid type"); 1879 case PredefinedExpr::Func: 1880 GlobalVarName = "__func__."; 1881 break; 1882 case PredefinedExpr::Function: 1883 GlobalVarName = "__FUNCTION__."; 1884 break; 1885 case PredefinedExpr::LFunction: 1886 GlobalVarName = "L__FUNCTION__."; 1887 break; 1888 case PredefinedExpr::PrettyFunction: 1889 GlobalVarName = "__PRETTY_FUNCTION__."; 1890 break; 1891 } 1892 1893 StringRef FnName = CurFn->getName(); 1894 if (FnName.startswith("\01")) 1895 FnName = FnName.substr(1); 1896 GlobalVarName += FnName; 1897 1898 const Decl *CurDecl = CurCodeDecl; 1899 if (CurDecl == 0) 1900 CurDecl = getContext().getTranslationUnitDecl(); 1901 1902 std::string FunctionName = 1903 (isa<BlockDecl>(CurDecl) 1904 ? 
FnName.str() 1905 : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType, 1906 CurDecl)); 1907 1908 const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual(); 1909 llvm::Constant *C; 1910 if (ElemType->isWideCharType()) { 1911 SmallString<32> RawChars; 1912 ConvertUTF8ToWideString( 1913 getContext().getTypeSizeInChars(ElemType).getQuantity(), 1914 FunctionName, RawChars); 1915 C = GetAddrOfConstantWideString(RawChars, 1916 GlobalVarName.c_str(), 1917 getContext(), 1918 E->getType(), 1919 E->getLocation(), 1920 CGM); 1921 } else { 1922 C = CGM.GetAddrOfConstantCString(FunctionName, 1923 GlobalVarName.c_str(), 1924 1); 1925 } 1926 return MakeAddrLValue(C, E->getType()); 1927 } 1928 } 1929 } 1930 1931 /// Emit a type description suitable for use by a runtime sanitizer library. The 1932 /// format of a type descriptor is 1933 /// 1934 /// \code 1935 /// { i16 TypeKind, i16 TypeInfo } 1936 /// \endcode 1937 /// 1938 /// followed by an array of i8 containing the type name. TypeKind is 0 for an 1939 /// integer, 1 for a floating point value, and -1 for anything else. 1940 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { 1941 // FIXME: Only emit each type's descriptor once. 1942 uint16_t TypeKind = -1; 1943 uint16_t TypeInfo = 0; 1944 1945 if (T->isIntegerType()) { 1946 TypeKind = 0; 1947 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 1948 T->isSignedIntegerType(); 1949 } else if (T->isFloatingType()) { 1950 TypeKind = 1; 1951 TypeInfo = getContext().getTypeSize(T); 1952 } 1953 1954 // Format the type name as if for a diagnostic, including quotes and 1955 // optionally an 'aka'. 1956 llvm::SmallString<32> Buffer; 1957 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 1958 (intptr_t)T.getAsOpaquePtr(), 1959 0, 0, 0, 0, 0, 0, Buffer, 1960 ArrayRef<intptr_t>()); 1961 1962 llvm::Constant *Components[] = { 1963 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 1964 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 1965 }; 1966 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 1967 1968 llvm::GlobalVariable *GV = 1969 new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(), 1970 /*isConstant=*/true, 1971 llvm::GlobalVariable::PrivateLinkage, 1972 Descriptor); 1973 GV->setUnnamedAddr(true); 1974 return GV; 1975 } 1976 1977 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 1978 llvm::Type *TargetTy = IntPtrTy; 1979 1980 // Integers which fit in intptr_t are zero-extended and passed directly. 1981 if (V->getType()->isIntegerTy() && 1982 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 1983 return Builder.CreateZExt(V, TargetTy); 1984 1985 // Pointers are passed directly, everything else is passed by address. 1986 if (!V->getType()->isPointerTy()) { 1987 llvm::Value *Ptr = Builder.CreateAlloca(V->getType()); 1988 Builder.CreateStore(V, Ptr); 1989 V = Ptr; 1990 } 1991 return Builder.CreatePtrToInt(V, TargetTy); 1992 } 1993 1994 /// \brief Emit a representation of a SourceLocation for passing to a handler 1995 /// in a sanitizer runtime library. The format for this data is: 1996 /// \code 1997 /// struct SourceLocation { 1998 /// const char *Filename; 1999 /// int32_t Line, Column; 2000 /// }; 2001 /// \endcode 2002 /// For an invalid SourceLocation, the Filename pointer is null. 
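/// For example (illustrative values), a check at "foo.c:10:7" would be
/// described by the constant { i8* <"foo.c">, i32 10, i32 7 }.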
2003 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { 2004 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); 2005 2006 llvm::Constant *Data[] = { 2007 // FIXME: Only emit each file name once. 2008 PLoc.isValid() ? cast<llvm::Constant>( 2009 Builder.CreateGlobalStringPtr(PLoc.getFilename())) 2010 : llvm::Constant::getNullValue(Int8PtrTy), 2011 Builder.getInt32(PLoc.getLine()), 2012 Builder.getInt32(PLoc.getColumn()) 2013 }; 2014 2015 return llvm::ConstantStruct::getAnon(Data); 2016 } 2017 2018 void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName, 2019 llvm::ArrayRef<llvm::Constant *> StaticArgs, 2020 llvm::ArrayRef<llvm::Value *> DynamicArgs, 2021 bool Recoverable) { 2022 llvm::BasicBlock *Cont = createBasicBlock("cont"); 2023 2024 llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName); 2025 Builder.CreateCondBr(Checked, Cont, Handler); 2026 EmitBlock(Handler); 2027 2028 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 2029 llvm::GlobalValue *InfoPtr = 2030 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), true, 2031 llvm::GlobalVariable::PrivateLinkage, Info); 2032 InfoPtr->setUnnamedAddr(true); 2033 2034 llvm::SmallVector<llvm::Value *, 4> Args; 2035 llvm::SmallVector<llvm::Type *, 4> ArgTypes; 2036 Args.reserve(DynamicArgs.size() + 1); 2037 ArgTypes.reserve(DynamicArgs.size() + 1); 2038 2039 // Handler functions take an i8* pointing to the (handler-specific) static 2040 // information block, followed by a sequence of intptr_t arguments 2041 // representing operand values. 2042 Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); 2043 ArgTypes.push_back(Int8PtrTy); 2044 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { 2045 Args.push_back(EmitCheckValue(DynamicArgs[i])); 2046 ArgTypes.push_back(IntPtrTy); 2047 } 2048 2049 llvm::FunctionType *FnType = 2050 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); 2051 llvm::AttrBuilder B; 2052 if (!Recoverable) { 2053 B.addAttribute(llvm::Attributes::NoReturn) 2054 .addAttribute(llvm::Attributes::NoUnwind); 2055 } 2056 B.addAttribute(llvm::Attributes::UWTable); 2057 llvm::Value *Fn = CGM.CreateRuntimeFunction(FnType, 2058 ("__ubsan_handle_" + CheckName).str(), 2059 llvm::Attributes::get(getLLVMContext(), 2060 B)); 2061 llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args); 2062 if (Recoverable) { 2063 Builder.CreateBr(Cont); 2064 } else { 2065 HandlerCall->setDoesNotReturn(); 2066 HandlerCall->setDoesNotThrow(); 2067 Builder.CreateUnreachable(); 2068 } 2069 2070 EmitBlock(Cont); 2071 } 2072 2073 void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) { 2074 llvm::BasicBlock *Cont = createBasicBlock("cont"); 2075 2076 // If we're optimizing, collapse all calls to trap down to just one per 2077 // function to save on code size. 2078 if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) { 2079 TrapBB = createBasicBlock("trap"); 2080 Builder.CreateCondBr(Checked, Cont, TrapBB); 2081 EmitBlock(TrapBB); 2082 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap); 2083 llvm::CallInst *TrapCall = Builder.CreateCall(F); 2084 TrapCall->setDoesNotReturn(); 2085 TrapCall->setDoesNotThrow(); 2086 Builder.CreateUnreachable(); 2087 } else { 2088 Builder.CreateCondBr(Checked, Cont, TrapBB); 2089 } 2090 2091 EmitBlock(Cont); 2092 } 2093 2094 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 2095 /// array to pointer, return the array subexpression. 
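/// For example, with "int A[10];" the base of "A[i]" decays via an implicit
/// ArrayToPointerDecay cast and A itself is returned; with "int *P;", "P[i]"
/// has no such decay and null is returned.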
2096 static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 2097 // If this isn't just an array->pointer decay, bail out. 2098 const CastExpr *CE = dyn_cast<CastExpr>(E); 2099 if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay) 2100 return 0; 2101 2102 // If this is a decay from variable width array, bail out. 2103 const Expr *SubExpr = CE->getSubExpr(); 2104 if (SubExpr->getType()->isVariableArrayType()) 2105 return 0; 2106 2107 return SubExpr; 2108 } 2109 2110 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { 2111 // The index must always be an integer, which is not an aggregate. Emit it. 2112 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 2113 QualType IdxTy = E->getIdx()->getType(); 2114 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 2115 2116 // If the base is a vector type, then we are forming a vector element lvalue 2117 // with this subscript. 2118 if (E->getBase()->getType()->isVectorType()) { 2119 // Emit the vector as an lvalue to get its address. 2120 LValue LHS = EmitLValue(E->getBase()); 2121 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 2122 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx"); 2123 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 2124 E->getBase()->getType(), LHS.getAlignment()); 2125 } 2126 2127 // Extend or truncate the index type to 32 or 64-bits. 2128 if (Idx->getType() != IntPtrTy) 2129 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 2130 2131 // We know that the pointer points to a type of the correct size, unless the 2132 // size is a VLA or Objective-C interface. 2133 llvm::Value *Address = 0; 2134 CharUnits ArrayAlignment; 2135 if (const VariableArrayType *vla = 2136 getContext().getAsVariableArrayType(E->getType())) { 2137 // The base must be a pointer, which is not an aggregate. Emit 2138 // it. It needs to be emitted first in case it's what captures 2139 // the VLA bounds. 2140 Address = EmitScalarExpr(E->getBase()); 2141 2142 // The element count here is the total number of non-VLA elements. 2143 llvm::Value *numElements = getVLASize(vla).first; 2144 2145 // Effectively, the multiply by the VLA size is part of the GEP. 2146 // GEP indexes are signed, and scaling an index isn't permitted to 2147 // signed-overflow, so we use the same semantics for our explicit 2148 // multiply. We suppress this if overflow is not undefined behavior. 2149 if (getLangOpts().isSignedOverflowDefined()) { 2150 Idx = Builder.CreateMul(Idx, numElements); 2151 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 2152 } else { 2153 Idx = Builder.CreateNSWMul(Idx, numElements); 2154 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); 2155 } 2156 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 2157 // Indexing over an interface, as in "NSString *P; P[4];" 2158 llvm::Value *InterfaceSize = 2159 llvm::ConstantInt::get(Idx->getType(), 2160 getContext().getTypeSizeInChars(OIT).getQuantity()); 2161 2162 Idx = Builder.CreateMul(Idx, InterfaceSize); 2163 2164 // The base must be a pointer, which is not an aggregate. Emit it. 
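// The GEP below is performed on an i8*, so the byte offset computed above
// applies directly; the result is then cast back to the original pointer
// type.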
2165 llvm::Value *Base = EmitScalarExpr(E->getBase());
2166 Address = EmitCastToVoidPtr(Base);
2167 Address = Builder.CreateGEP(Address, Idx, "arrayidx");
2168 Address = Builder.CreateBitCast(Address, Base->getType());
2169 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
2170 // If this is A[i] where A is an array, the frontend will have decayed the
2171 // base to be an ArrayToPointerDecay implicit cast. While correct, it is
2172 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
2173 // "gep x, i" here. Emit one "gep A, 0, i".
2174 assert(Array->getType()->isArrayType() &&
2175 "Array to pointer decay must have array source type!");
2176 LValue ArrayLV = EmitLValue(Array);
2177 llvm::Value *ArrayPtr = ArrayLV.getAddress();
2178 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
2179 llvm::Value *Args[] = { Zero, Idx };
2180
2181 // Propagate the alignment from the array itself to the result.
2182 ArrayAlignment = ArrayLV.getAlignment();
2183
2184 if (getLangOpts().isSignedOverflowDefined())
2185 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
2186 else
2187 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
2188 } else {
2189 // The base must be a pointer, which is not an aggregate. Emit it.
2190 llvm::Value *Base = EmitScalarExpr(E->getBase());
2191 if (getLangOpts().isSignedOverflowDefined())
2192 Address = Builder.CreateGEP(Base, Idx, "arrayidx");
2193 else
2194 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
2195 }
2196
2197 QualType T = E->getBase()->getType()->getPointeeType();
2198 assert(!T.isNull() &&
2199 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
2200
2201
2202 // Limit the alignment to that of the result type.
2203 LValue LV;
2204 if (!ArrayAlignment.isZero()) {
2205 CharUnits Align = getContext().getTypeAlignInChars(T);
2206 ArrayAlignment = std::min(Align, ArrayAlignment);
2207 LV = MakeAddrLValue(Address, T, ArrayAlignment);
2208 } else {
2209 LV = MakeNaturalAlignAddrLValue(Address, T);
2210 }
2211
2212 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
2213
2214 if (getLangOpts().ObjC1 &&
2215 getLangOpts().getGC() != LangOptions::NonGC) {
2216 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2217 setObjCGCLValueClass(getContext(), E, LV);
2218 }
2219 return LV;
2220 }
2221
2222 static
2223 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
2224 SmallVector<unsigned, 4> &Elts) {
2225 SmallVector<llvm::Constant*, 4> CElts;
2226 for (unsigned i = 0, e = Elts.size(); i != e; ++i)
2227 CElts.push_back(Builder.getInt32(Elts[i]));
2228
2229 return llvm::ConstantVector::get(CElts);
2230 }
2231
2232 LValue CodeGenFunction::
2233 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
2234 // Emit the base vector as an l-value.
2235 LValue Base;
2236
2237 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
2238 if (E->isArrow()) {
2239 // If it is a pointer to a vector, emit the address and form an lvalue with
2240 // it.
2241 llvm::Value *Ptr = EmitScalarExpr(E->getBase());
2242 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
2243 Base = MakeAddrLValue(Ptr, PT->getPointeeType());
2244 Base.getQuals().removeObjCGCAttr();
2245 } else if (E->getBase()->isGLValue()) {
2246 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
2247 // emit the base as an lvalue.
2248 assert(E->getBase()->getType()->isVectorType()); 2249 Base = EmitLValue(E->getBase()); 2250 } else { 2251 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. 2252 assert(E->getBase()->getType()->isVectorType() && 2253 "Result must be a vector"); 2254 llvm::Value *Vec = EmitScalarExpr(E->getBase()); 2255 2256 // Store the vector to memory (because LValue wants an address). 2257 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType()); 2258 Builder.CreateStore(Vec, VecMem); 2259 Base = MakeAddrLValue(VecMem, E->getBase()->getType()); 2260 } 2261 2262 QualType type = 2263 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); 2264 2265 // Encode the element access list into a vector of unsigned indices. 2266 SmallVector<unsigned, 4> Indices; 2267 E->getEncodedElementAccess(Indices); 2268 2269 if (Base.isSimple()) { 2270 llvm::Constant *CV = GenerateConstantVector(Builder, Indices); 2271 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type, 2272 Base.getAlignment()); 2273 } 2274 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 2275 2276 llvm::Constant *BaseElts = Base.getExtVectorElts(); 2277 SmallVector<llvm::Constant *, 4> CElts; 2278 2279 for (unsigned i = 0, e = Indices.size(); i != e; ++i) 2280 CElts.push_back(BaseElts->getAggregateElement(Indices[i])); 2281 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 2282 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type, 2283 Base.getAlignment()); 2284 } 2285 2286 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 2287 Expr *BaseExpr = E->getBase(); 2288 2289 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 2290 LValue BaseLV; 2291 if (E->isArrow()) { 2292 llvm::Value *Ptr = EmitScalarExpr(BaseExpr); 2293 QualType PtrTy = BaseExpr->getType()->getPointeeType(); 2294 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy); 2295 BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy); 2296 } else 2297 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); 2298 2299 NamedDecl *ND = E->getMemberDecl(); 2300 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 2301 LValue LV = EmitLValueForField(BaseLV, Field); 2302 setObjCGCLValueClass(getContext(), E, LV); 2303 return LV; 2304 } 2305 2306 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 2307 return EmitGlobalVarDeclLValue(*this, E, VD); 2308 2309 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 2310 return EmitFunctionDeclLValue(*this, E, FD); 2311 2312 llvm_unreachable("Unhandled member declaration!"); 2313 } 2314 2315 LValue CodeGenFunction::EmitLValueForField(LValue base, 2316 const FieldDecl *field) { 2317 if (field->isBitField()) { 2318 const CGRecordLayout &RL = 2319 CGM.getTypes().getCGRecordLayout(field->getParent()); 2320 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); 2321 QualType fieldType = 2322 field->getType().withCVRQualifiers(base.getVRQualifiers()); 2323 return LValue::MakeBitfield(base.getAddress(), Info, fieldType, 2324 base.getAlignment()); 2325 } 2326 2327 const RecordDecl *rec = field->getParent(); 2328 QualType type = field->getType(); 2329 CharUnits alignment = getContext().getDeclAlign(field); 2330 2331 // FIXME: It should be impossible to have an LValue without alignment for a 2332 // complete type. 
2333 if (!base.getAlignment().isZero()) 2334 alignment = std::min(alignment, base.getAlignment()); 2335 2336 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 2337 2338 llvm::Value *addr = base.getAddress(); 2339 unsigned cvr = base.getVRQualifiers(); 2340 if (rec->isUnion()) { 2341 // For unions, there is no pointer adjustment. 2342 assert(!type->isReferenceType() && "union has reference member"); 2343 } else { 2344 // For structs, we GEP to the field that the record layout suggests. 2345 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 2346 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 2347 2348 // If this is a reference field, load the reference right now. 2349 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 2350 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 2351 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 2352 load->setAlignment(alignment.getQuantity()); 2353 2354 if (CGM.shouldUseTBAA()) { 2355 llvm::MDNode *tbaa; 2356 if (mayAlias) 2357 tbaa = CGM.getTBAAInfo(getContext().CharTy); 2358 else 2359 tbaa = CGM.getTBAAInfo(type); 2360 CGM.DecorateInstruction(load, tbaa); 2361 } 2362 2363 addr = load; 2364 mayAlias = false; 2365 type = refType->getPointeeType(); 2366 if (type->isIncompleteType()) 2367 alignment = CharUnits(); 2368 else 2369 alignment = getContext().getTypeAlignInChars(type); 2370 cvr = 0; // qualifiers don't recursively apply to referencee 2371 } 2372 } 2373 2374 // Make sure that the address is pointing to the right type. This is critical 2375 // for both unions and structs. A union needs a bitcast, a struct element 2376 // will need a bitcast if the LLVM type laid out doesn't match the desired 2377 // type. 2378 addr = EmitBitCastOfLValueToProperType(*this, addr, 2379 CGM.getTypes().ConvertTypeForMem(type), 2380 field->getName()); 2381 2382 if (field->hasAttr<AnnotateAttr>()) 2383 addr = EmitFieldAnnotations(field, addr); 2384 2385 LValue LV = MakeAddrLValue(addr, type, alignment); 2386 LV.getQuals().addCVRQualifiers(cvr); 2387 2388 // __weak attribute on a field is ignored. 2389 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 2390 LV.getQuals().removeObjCGCAttr(); 2391 2392 // Fields of may_alias structs act like 'char' for TBAA purposes. 2393 // FIXME: this should get propagated down through anonymous structs 2394 // and unions. 2395 if (mayAlias && LV.getTBAAInfo()) 2396 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 2397 2398 return LV; 2399 } 2400 2401 LValue 2402 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, 2403 const FieldDecl *Field) { 2404 QualType FieldType = Field->getType(); 2405 2406 if (!FieldType->isReferenceType()) 2407 return EmitLValueForField(Base, Field); 2408 2409 const CGRecordLayout &RL = 2410 CGM.getTypes().getCGRecordLayout(Field->getParent()); 2411 unsigned idx = RL.getLLVMFieldNo(Field); 2412 llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx); 2413 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 2414 2415 // Make sure that the address is pointing to the right type. This is critical 2416 // for both unions and structs. A union needs a bitcast, a struct element 2417 // will need a bitcast if the LLVM type laid out doesn't match the desired 2418 // type. 
2419 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 2420 V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName()); 2421 2422 CharUnits Alignment = getContext().getDeclAlign(Field); 2423 2424 // FIXME: It should be impossible to have an LValue without alignment for a 2425 // complete type. 2426 if (!Base.getAlignment().isZero()) 2427 Alignment = std::min(Alignment, Base.getAlignment()); 2428 2429 return MakeAddrLValue(V, FieldType, Alignment); 2430 } 2431 2432 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 2433 if (E->isFileScope()) { 2434 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 2435 return MakeAddrLValue(GlobalPtr, E->getType()); 2436 } 2437 if (E->getType()->isVariablyModifiedType()) 2438 // make sure to emit the VLA size. 2439 EmitVariablyModifiedType(E->getType()); 2440 2441 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 2442 const Expr *InitExpr = E->getInitializer(); 2443 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 2444 2445 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 2446 /*Init*/ true); 2447 2448 return Result; 2449 } 2450 2451 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { 2452 if (!E->isGLValue()) 2453 // Initializing an aggregate temporary in C++11: T{...}. 2454 return EmitAggExprToLValue(E); 2455 2456 // An lvalue initializer list must be initializing a reference. 2457 assert(E->getNumInits() == 1 && "reference init with multiple values"); 2458 return EmitLValue(E->getInit(0)); 2459 } 2460 2461 LValue CodeGenFunction:: 2462 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 2463 if (!expr->isGLValue()) { 2464 // ?: here should be an aggregate. 2465 assert((hasAggregateLLVMType(expr->getType()) && 2466 !expr->getType()->isAnyComplexType()) && 2467 "Unexpected conditional operator!"); 2468 return EmitAggExprToLValue(expr); 2469 } 2470 2471 OpaqueValueMapping binding(*this, expr); 2472 2473 const Expr *condExpr = expr->getCond(); 2474 bool CondExprBool; 2475 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 2476 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 2477 if (!CondExprBool) std::swap(live, dead); 2478 2479 if (!ContainsLabel(dead)) 2480 return EmitLValue(live); 2481 } 2482 2483 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 2484 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 2485 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 2486 2487 ConditionalEvaluation eval(*this); 2488 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock); 2489 2490 // Any temporaries created here are conditional. 2491 EmitBlock(lhsBlock); 2492 eval.begin(*this); 2493 LValue lhs = EmitLValue(expr->getTrueExpr()); 2494 eval.end(*this); 2495 2496 if (!lhs.isSimple()) 2497 return EmitUnsupportedLValue(expr, "conditional operator"); 2498 2499 lhsBlock = Builder.GetInsertBlock(); 2500 Builder.CreateBr(contBlock); 2501 2502 // Any temporaries created here are conditional. 
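// As with the true branch above, the false branch gets its own block; the
// two result addresses are merged with a phi at the join block below.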
2503 EmitBlock(rhsBlock); 2504 eval.begin(*this); 2505 LValue rhs = EmitLValue(expr->getFalseExpr()); 2506 eval.end(*this); 2507 if (!rhs.isSimple()) 2508 return EmitUnsupportedLValue(expr, "conditional operator"); 2509 rhsBlock = Builder.GetInsertBlock(); 2510 2511 EmitBlock(contBlock); 2512 2513 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2, 2514 "cond-lvalue"); 2515 phi->addIncoming(lhs.getAddress(), lhsBlock); 2516 phi->addIncoming(rhs.getAddress(), rhsBlock); 2517 return MakeAddrLValue(phi, expr->getType()); 2518 } 2519 2520 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference 2521 /// type. If the cast is to a reference, we can have the usual lvalue result, 2522 /// otherwise if a cast is needed by the code generator in an lvalue context, 2523 /// then it must mean that we need the address of an aggregate in order to 2524 /// access one of its members. This can happen for all the reasons that casts 2525 /// are permitted with aggregate result, including noop aggregate casts, and 2526 /// cast from scalar to union. 2527 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 2528 switch (E->getCastKind()) { 2529 case CK_ToVoid: 2530 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 2531 2532 case CK_Dependent: 2533 llvm_unreachable("dependent cast kind in IR gen!"); 2534 2535 case CK_BuiltinFnToFnPtr: 2536 llvm_unreachable("builtin functions are handled elsewhere"); 2537 2538 // These two casts are currently treated as no-ops, although they could 2539 // potentially be real operations depending on the target's ABI. 2540 case CK_NonAtomicToAtomic: 2541 case CK_AtomicToNonAtomic: 2542 2543 case CK_NoOp: 2544 case CK_LValueToRValue: 2545 if (!E->getSubExpr()->Classify(getContext()).isPRValue() 2546 || E->getType()->isRecordType()) 2547 return EmitLValue(E->getSubExpr()); 2548 // Fall through to synthesize a temporary. 2549 2550 case CK_BitCast: 2551 case CK_ArrayToPointerDecay: 2552 case CK_FunctionToPointerDecay: 2553 case CK_NullToMemberPointer: 2554 case CK_NullToPointer: 2555 case CK_IntegralToPointer: 2556 case CK_PointerToIntegral: 2557 case CK_PointerToBoolean: 2558 case CK_VectorSplat: 2559 case CK_IntegralCast: 2560 case CK_IntegralToBoolean: 2561 case CK_IntegralToFloating: 2562 case CK_FloatingToIntegral: 2563 case CK_FloatingToBoolean: 2564 case CK_FloatingCast: 2565 case CK_FloatingRealToComplex: 2566 case CK_FloatingComplexToReal: 2567 case CK_FloatingComplexToBoolean: 2568 case CK_FloatingComplexCast: 2569 case CK_FloatingComplexToIntegralComplex: 2570 case CK_IntegralRealToComplex: 2571 case CK_IntegralComplexToReal: 2572 case CK_IntegralComplexToBoolean: 2573 case CK_IntegralComplexCast: 2574 case CK_IntegralComplexToFloatingComplex: 2575 case CK_DerivedToBaseMemberPointer: 2576 case CK_BaseToDerivedMemberPointer: 2577 case CK_MemberPointerToBoolean: 2578 case CK_ReinterpretMemberPointer: 2579 case CK_AnyPointerToBlockPointerCast: 2580 case CK_ARCProduceObject: 2581 case CK_ARCConsumeObject: 2582 case CK_ARCReclaimReturnedObject: 2583 case CK_ARCExtendBlockObject: 2584 case CK_CopyAndAutoreleaseBlockObject: { 2585 // These casts only produce lvalues when we're binding a reference to a 2586 // temporary realized from a (converted) pure rvalue. Emit the expression 2587 // as a value, copy it into a temporary, and return an lvalue referring to 2588 // that temporary. 
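// E.g. an IntegralCast appearing where an address is required: the
// converted value is spilled to a "ref.temp" alloca, and that address is
// the resulting lvalue.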
2589 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp"); 2590 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false); 2591 return MakeAddrLValue(V, E->getType()); 2592 } 2593 2594 case CK_Dynamic: { 2595 LValue LV = EmitLValue(E->getSubExpr()); 2596 llvm::Value *V = LV.getAddress(); 2597 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E); 2598 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 2599 } 2600 2601 case CK_ConstructorConversion: 2602 case CK_UserDefinedConversion: 2603 case CK_CPointerToObjCPointerCast: 2604 case CK_BlockPointerToObjCPointerCast: 2605 return EmitLValue(E->getSubExpr()); 2606 2607 case CK_UncheckedDerivedToBase: 2608 case CK_DerivedToBase: { 2609 const RecordType *DerivedClassTy = 2610 E->getSubExpr()->getType()->getAs<RecordType>(); 2611 CXXRecordDecl *DerivedClassDecl = 2612 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2613 2614 LValue LV = EmitLValue(E->getSubExpr()); 2615 llvm::Value *This = LV.getAddress(); 2616 2617 // Perform the derived-to-base conversion 2618 llvm::Value *Base = 2619 GetAddressOfBaseClass(This, DerivedClassDecl, 2620 E->path_begin(), E->path_end(), 2621 /*NullCheckValue=*/false); 2622 2623 return MakeAddrLValue(Base, E->getType()); 2624 } 2625 case CK_ToUnion: 2626 return EmitAggExprToLValue(E); 2627 case CK_BaseToDerived: { 2628 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 2629 CXXRecordDecl *DerivedClassDecl = 2630 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2631 2632 LValue LV = EmitLValue(E->getSubExpr()); 2633 2634 // Perform the base-to-derived conversion 2635 llvm::Value *Derived = 2636 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 2637 E->path_begin(), E->path_end(), 2638 /*NullCheckValue=*/false); 2639 2640 return MakeAddrLValue(Derived, E->getType()); 2641 } 2642 case CK_LValueBitCast: { 2643 // This must be a reinterpret_cast (or c-style equivalent). 
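// For example, "reinterpret_cast<Other&>(x) = v" bitcasts the address of x
// to Other* and uses it as the lvalue.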
2644 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 2645 2646 LValue LV = EmitLValue(E->getSubExpr()); 2647 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2648 ConvertType(CE->getTypeAsWritten())); 2649 return MakeAddrLValue(V, E->getType()); 2650 } 2651 case CK_ObjCObjectLValueCast: { 2652 LValue LV = EmitLValue(E->getSubExpr()); 2653 QualType ToType = getContext().getLValueReferenceType(E->getType()); 2654 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2655 ConvertType(ToType)); 2656 return MakeAddrLValue(V, E->getType()); 2657 } 2658 } 2659 2660 llvm_unreachable("Unhandled lvalue cast kind?"); 2661 } 2662 2663 LValue CodeGenFunction::EmitNullInitializationLValue( 2664 const CXXScalarValueInitExpr *E) { 2665 QualType Ty = E->getType(); 2666 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty); 2667 EmitNullInitialization(LV.getAddress(), Ty); 2668 return LV; 2669 } 2670 2671 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 2672 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 2673 return getOpaqueLValueMapping(e); 2674 } 2675 2676 LValue CodeGenFunction::EmitMaterializeTemporaryExpr( 2677 const MaterializeTemporaryExpr *E) { 2678 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); 2679 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2680 } 2681 2682 RValue CodeGenFunction::EmitRValueForField(LValue LV, 2683 const FieldDecl *FD) { 2684 QualType FT = FD->getType(); 2685 LValue FieldLV = EmitLValueForField(LV, FD); 2686 if (FT->isAnyComplexType()) 2687 return RValue::getComplex( 2688 LoadComplexFromAddr(FieldLV.getAddress(), 2689 FieldLV.isVolatileQualified())); 2690 else if (CodeGenFunction::hasAggregateLLVMType(FT)) 2691 return FieldLV.asAggregateRValue(); 2692 2693 return EmitLoadOfLValue(FieldLV); 2694 } 2695 2696 //===--------------------------------------------------------------------===// 2697 // Expression Emission 2698 //===--------------------------------------------------------------------===// 2699 2700 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 2701 ReturnValueSlot ReturnValue) { 2702 if (CGDebugInfo *DI = getDebugInfo()) 2703 DI->EmitLocation(Builder, E->getLocStart()); 2704 2705 // Builtins never have block type. 
2706 if (E->getCallee()->getType()->isBlockPointerType()) 2707 return EmitBlockCallExpr(E, ReturnValue); 2708 2709 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 2710 return EmitCXXMemberCallExpr(CE, ReturnValue); 2711 2712 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E)) 2713 return EmitCUDAKernelCallExpr(CE, ReturnValue); 2714 2715 const Decl *TargetDecl = E->getCalleeDecl(); 2716 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 2717 if (unsigned builtinID = FD->getBuiltinID()) 2718 return EmitBuiltinExpr(FD, builtinID, E); 2719 } 2720 2721 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 2722 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 2723 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 2724 2725 if (const CXXPseudoDestructorExpr *PseudoDtor 2726 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 2727 QualType DestroyedType = PseudoDtor->getDestroyedType(); 2728 if (getLangOpts().ObjCAutoRefCount && 2729 DestroyedType->isObjCLifetimeType() && 2730 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || 2731 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { 2732 // Automatic Reference Counting: 2733 // If the pseudo-expression names a retainable object with weak or 2734 // strong lifetime, the object shall be released. 2735 Expr *BaseExpr = PseudoDtor->getBase(); 2736 llvm::Value *BaseValue = NULL; 2737 Qualifiers BaseQuals; 2738 2739 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 2740 if (PseudoDtor->isArrow()) { 2741 BaseValue = EmitScalarExpr(BaseExpr); 2742 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); 2743 BaseQuals = PTy->getPointeeType().getQualifiers(); 2744 } else { 2745 LValue BaseLV = EmitLValue(BaseExpr); 2746 BaseValue = BaseLV.getAddress(); 2747 QualType BaseTy = BaseExpr->getType(); 2748 BaseQuals = BaseTy.getQualifiers(); 2749 } 2750 2751 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { 2752 case Qualifiers::OCL_None: 2753 case Qualifiers::OCL_ExplicitNone: 2754 case Qualifiers::OCL_Autoreleasing: 2755 break; 2756 2757 case Qualifiers::OCL_Strong: 2758 EmitARCRelease(Builder.CreateLoad(BaseValue, 2759 PseudoDtor->getDestroyedType().isVolatileQualified()), 2760 /*precise*/ true); 2761 break; 2762 2763 case Qualifiers::OCL_Weak: 2764 EmitARCDestroyWeak(BaseValue); 2765 break; 2766 } 2767 } else { 2768 // C++ [expr.pseudo]p1: 2769 // The result shall only be used as the operand for the function call 2770 // operator (), and the result of such a call has type void. The only 2771 // effect is the evaluation of the postfix-expression before the dot or 2772 // arrow. 2773 EmitScalarExpr(E->getCallee()); 2774 } 2775 2776 return RValue::get(0); 2777 } 2778 2779 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 2780 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue, 2781 E->arg_begin(), E->arg_end(), TargetDecl); 2782 } 2783 2784 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 2785 // Comma expressions just emit their LHS then their RHS as an l-value. 
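// For example, "(f(), x) = 1" evaluates f() purely for its side effects
// and then assigns through the lvalue of x.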
2786 if (E->getOpcode() == BO_Comma) { 2787 EmitIgnoredExpr(E->getLHS()); 2788 EnsureInsertPoint(); 2789 return EmitLValue(E->getRHS()); 2790 } 2791 2792 if (E->getOpcode() == BO_PtrMemD || 2793 E->getOpcode() == BO_PtrMemI) 2794 return EmitPointerToDataMemberBinaryExpr(E); 2795 2796 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 2797 2798 // Note that in all of these cases, __block variables need the RHS 2799 // evaluated first just in case the variable gets moved by the RHS. 2800 2801 if (!hasAggregateLLVMType(E->getType())) { 2802 switch (E->getLHS()->getType().getObjCLifetime()) { 2803 case Qualifiers::OCL_Strong: 2804 return EmitARCStoreStrong(E, /*ignored*/ false).first; 2805 2806 case Qualifiers::OCL_Autoreleasing: 2807 return EmitARCStoreAutoreleasing(E).first; 2808 2809 // No reason to do any of these differently. 2810 case Qualifiers::OCL_None: 2811 case Qualifiers::OCL_ExplicitNone: 2812 case Qualifiers::OCL_Weak: 2813 break; 2814 } 2815 2816 RValue RV = EmitAnyExpr(E->getRHS()); 2817 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); 2818 EmitStoreThroughLValue(RV, LV); 2819 return LV; 2820 } 2821 2822 if (E->getType()->isAnyComplexType()) 2823 return EmitComplexAssignmentLValue(E); 2824 2825 return EmitAggExprToLValue(E); 2826 } 2827 2828 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 2829 RValue RV = EmitCallExpr(E); 2830 2831 if (!RV.isScalar()) 2832 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2833 2834 assert(E->getCallReturnType()->isReferenceType() && 2835 "Can't have a scalar return unless the return type is a " 2836 "reference type!"); 2837 2838 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2839 } 2840 2841 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 2842 // FIXME: This shouldn't require another copy. 
2843 return EmitAggExprToLValue(E); 2844 } 2845 2846 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 2847 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 2848 && "binding l-value to type which needs a temporary"); 2849 AggValueSlot Slot = CreateAggTemp(E->getType()); 2850 EmitCXXConstructExpr(E, Slot); 2851 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2852 } 2853 2854 LValue 2855 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 2856 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 2857 } 2858 2859 llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { 2860 return CGM.GetAddrOfUuidDescriptor(E); 2861 } 2862 2863 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { 2864 return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType()); 2865 } 2866 2867 LValue 2868 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 2869 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2870 Slot.setExternallyDestructed(); 2871 EmitAggExpr(E->getSubExpr(), Slot); 2872 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr()); 2873 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2874 } 2875 2876 LValue 2877 CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) { 2878 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2879 EmitLambdaExpr(E, Slot); 2880 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2881 } 2882 2883 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 2884 RValue RV = EmitObjCMessageExpr(E); 2885 2886 if (!RV.isScalar()) 2887 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2888 2889 assert(E->getMethodDecl()->getResultType()->isReferenceType() && 2890 "Can't have a scalar return unless the return type is a " 2891 "reference type!"); 2892 2893 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2894 } 2895 2896 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 2897 llvm::Value *V = 2898 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); 2899 return MakeAddrLValue(V, E->getType()); 2900 } 2901 2902 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 2903 const ObjCIvarDecl *Ivar) { 2904 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 2905 } 2906 2907 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 2908 llvm::Value *BaseValue, 2909 const ObjCIvarDecl *Ivar, 2910 unsigned CVRQualifiers) { 2911 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 2912 Ivar, CVRQualifiers); 2913 } 2914 2915 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 2916 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 2917 llvm::Value *BaseValue = 0; 2918 const Expr *BaseExpr = E->getBase(); 2919 Qualifiers BaseQuals; 2920 QualType ObjectTy; 2921 if (E->isArrow()) { 2922 BaseValue = EmitScalarExpr(BaseExpr); 2923 ObjectTy = BaseExpr->getType()->getPointeeType(); 2924 BaseQuals = ObjectTy.getQualifiers(); 2925 } else { 2926 LValue BaseLV = EmitLValue(BaseExpr); 2927 // FIXME: this isn't right for bitfields. 
2928 BaseValue = BaseLV.getAddress(); 2929 ObjectTy = BaseExpr->getType(); 2930 BaseQuals = ObjectTy.getQualifiers(); 2931 } 2932 2933 LValue LV = 2934 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 2935 BaseQuals.getCVRQualifiers()); 2936 setObjCGCLValueClass(getContext(), E, LV); 2937 return LV; 2938 } 2939 2940 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2941 // Can only get l-value for message expression returning aggregate type 2942 RValue RV = EmitAnyExprToTemp(E); 2943 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2944 } 2945 2946 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2947 ReturnValueSlot ReturnValue, 2948 CallExpr::const_arg_iterator ArgBeg, 2949 CallExpr::const_arg_iterator ArgEnd, 2950 const Decl *TargetDecl) { 2951 // Get the actual function type. The callee type will always be a pointer to 2952 // function type or a block pointer type. 2953 assert(CalleeType->isFunctionPointerType() && 2954 "Call must have function pointer type!"); 2955 2956 CalleeType = getContext().getCanonicalType(CalleeType); 2957 2958 const FunctionType *FnType 2959 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2960 2961 CallArgList Args; 2962 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2963 2964 const CGFunctionInfo &FnInfo = 2965 CGM.getTypes().arrangeFreeFunctionCall(Args, FnType); 2966 2967 // C99 6.5.2.2p6: 2968 // If the expression that denotes the called function has a type 2969 // that does not include a prototype, [the default argument 2970 // promotions are performed]. If the number of arguments does not 2971 // equal the number of parameters, the behavior is undefined. If 2972 // the function is defined with a type that includes a prototype, 2973 // and either the prototype ends with an ellipsis (, ...) or the 2974 // types of the arguments after promotion are not compatible with 2975 // the types of the parameters, the behavior is undefined. If the 2976 // function is defined with a type that does not include a 2977 // prototype, and the types of the arguments after promotion are 2978 // not compatible with those of the parameters after promotion, 2979 // the behavior is undefined [except in some trivial cases]. 2980 // That is, in the general case, we should assume that a call 2981 // through an unprototyped function type works like a *non-variadic* 2982 // call. The way we make this work is to cast to the exact type 2983 // of the promoted arguments. 
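// Illustrative example: given "int f(); ... f(1, 2);", the call is emitted
// as if f were declared "int f(int, int)": the callee is bitcast below to
// the pointer-to-function type of the promoted argument list.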
2984 if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) { 2985 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo); 2986 CalleeTy = CalleeTy->getPointerTo(); 2987 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast"); 2988 } 2989 2990 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl); 2991 } 2992 2993 LValue CodeGenFunction:: 2994 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2995 llvm::Value *BaseV; 2996 if (E->getOpcode() == BO_PtrMemI) 2997 BaseV = EmitScalarExpr(E->getLHS()); 2998 else 2999 BaseV = EmitLValue(E->getLHS()).getAddress(); 3000 3001 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 3002 3003 const MemberPointerType *MPT 3004 = E->getRHS()->getType()->getAs<MemberPointerType>(); 3005 3006 llvm::Value *AddV = 3007 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT); 3008 3009 return MakeAddrLValue(AddV, MPT->getPointeeType()); 3010 } 3011 3012 static void 3013 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, 3014 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, 3015 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { 3016 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; 3017 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0; 3018 3019 switch (E->getOp()) { 3020 case AtomicExpr::AO__c11_atomic_init: 3021 llvm_unreachable("Already handled!"); 3022 3023 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 3024 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 3025 case AtomicExpr::AO__atomic_compare_exchange: 3026 case AtomicExpr::AO__atomic_compare_exchange_n: { 3027 // Note that cmpxchg only supports specifying one ordering and 3028 // doesn't support weak cmpxchg, at least at the moment. 
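// The sequence emitted below: load the expected value from Val1, issue the
// cmpxchg against Ptr, write the old memory value back through Val1, and
// store an i1 success flag (old value == expected value) to Dest.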
3029 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 3030 LoadVal1->setAlignment(Align); 3031 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2); 3032 LoadVal2->setAlignment(Align); 3033 llvm::AtomicCmpXchgInst *CXI = 3034 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order); 3035 CXI->setVolatile(E->isVolatile()); 3036 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1); 3037 StoreVal1->setAlignment(Align); 3038 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1); 3039 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); 3040 return; 3041 } 3042 3043 case AtomicExpr::AO__c11_atomic_load: 3044 case AtomicExpr::AO__atomic_load_n: 3045 case AtomicExpr::AO__atomic_load: { 3046 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); 3047 Load->setAtomic(Order); 3048 Load->setAlignment(Size); 3049 Load->setVolatile(E->isVolatile()); 3050 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); 3051 StoreDest->setAlignment(Align); 3052 return; 3053 } 3054 3055 case AtomicExpr::AO__c11_atomic_store: 3056 case AtomicExpr::AO__atomic_store: 3057 case AtomicExpr::AO__atomic_store_n: { 3058 assert(!Dest && "Store does not return a value"); 3059 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 3060 LoadVal1->setAlignment(Align); 3061 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); 3062 Store->setAtomic(Order); 3063 Store->setAlignment(Size); 3064 Store->setVolatile(E->isVolatile()); 3065 return; 3066 } 3067 3068 case AtomicExpr::AO__c11_atomic_exchange: 3069 case AtomicExpr::AO__atomic_exchange_n: 3070 case AtomicExpr::AO__atomic_exchange: 3071 Op = llvm::AtomicRMWInst::Xchg; 3072 break; 3073 3074 case AtomicExpr::AO__atomic_add_fetch: 3075 PostOp = llvm::Instruction::Add; 3076 // Fall through. 3077 case AtomicExpr::AO__c11_atomic_fetch_add: 3078 case AtomicExpr::AO__atomic_fetch_add: 3079 Op = llvm::AtomicRMWInst::Add; 3080 break; 3081 3082 case AtomicExpr::AO__atomic_sub_fetch: 3083 PostOp = llvm::Instruction::Sub; 3084 // Fall through. 3085 case AtomicExpr::AO__c11_atomic_fetch_sub: 3086 case AtomicExpr::AO__atomic_fetch_sub: 3087 Op = llvm::AtomicRMWInst::Sub; 3088 break; 3089 3090 case AtomicExpr::AO__atomic_and_fetch: 3091 PostOp = llvm::Instruction::And; 3092 // Fall through. 3093 case AtomicExpr::AO__c11_atomic_fetch_and: 3094 case AtomicExpr::AO__atomic_fetch_and: 3095 Op = llvm::AtomicRMWInst::And; 3096 break; 3097 3098 case AtomicExpr::AO__atomic_or_fetch: 3099 PostOp = llvm::Instruction::Or; 3100 // Fall through. 3101 case AtomicExpr::AO__c11_atomic_fetch_or: 3102 case AtomicExpr::AO__atomic_fetch_or: 3103 Op = llvm::AtomicRMWInst::Or; 3104 break; 3105 3106 case AtomicExpr::AO__atomic_xor_fetch: 3107 PostOp = llvm::Instruction::Xor; 3108 // Fall through. 3109 case AtomicExpr::AO__c11_atomic_fetch_xor: 3110 case AtomicExpr::AO__atomic_fetch_xor: 3111 Op = llvm::AtomicRMWInst::Xor; 3112 break; 3113 3114 case AtomicExpr::AO__atomic_nand_fetch: 3115 PostOp = llvm::Instruction::And; 3116 // Fall through. 3117 case AtomicExpr::AO__atomic_fetch_nand: 3118 Op = llvm::AtomicRMWInst::Nand; 3119 break; 3120 } 3121 3122 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 3123 LoadVal1->setAlignment(Align); 3124 llvm::AtomicRMWInst *RMWI = 3125 CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order); 3126 RMWI->setVolatile(E->isVolatile()); 3127 3128 // For __atomic_*_fetch operations, perform the operation again to 3129 // determine the value which was written. 
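// For example, "__atomic_add_fetch(p, n, o)" lowers to an atomicrmw add,
// which yields the old value; the new value is recomputed below as
// old + n. The nand variant additionally needs the final CreateNot, since
// nand is not(x & y).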
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidth =
      getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align || Size > MaxInlineWidth);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                          AtomicTy.getQualifiers(),
                                          AggValueSlot::IsNotDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
        getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    llvm::SmallVector<QualType, 5> Params;
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is
    // no optimization benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers.  It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // Order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should never get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.
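  // (Roughly, the blocks emitted below implement this pseudocode, omitting
  // the orderings that would be invalid for a pure load or store:
  //   switch (order) {
  //   default: <monotonic>; break;      // including memory_order_relaxed
  //   case 1: case 2: <acquire>; break; // consume, acquire
  //   case 3: <release>; break;
  //   case 4: <acq_rel>; break;
  //   case 5: <seq_cst>; break;
  //   })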

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
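    // (For example, an Objective-C property assignment such as 'obj.x = v'
    // is a PseudoObjectExpr whose semantic form binds 'obj' as an opaque
    // value and then expresses the assignment as a setter call in terms of
    // it; this is an illustrative sketch, not a full description of the
    // semantic form.)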
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}