//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/ConvertUTF.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
    cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
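/// A member pointer is not compared against zero directly; its null test is
/// delegated to the C++ ABI.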
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type.  The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  if (!hasAggregateLLVMType(E->getType()))
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  else if (E->getType()->isAnyComplexType())
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));

  if (!ignoreResult && aggSlot.isIgnored())
    aggSlot = CreateAggTemp(E->getType(), "agg-temp");
  EmitAggExpr(E, aggSlot);
  return aggSlot.asRValue();
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateLLVMType(E->getType()) &&
      !E->getType()->isAnyComplexType())
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  if (E->getType()->isAnyComplexType()) {
    EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
  } else if (hasAggregateLLVMType(E->getType())) {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
  } else {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
  }
}

static llvm::Value *
CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
                         const NamedDecl *InitializedDecl) {
  if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
    if (VD->hasGlobalStorage()) {
      SmallString<256> Name;
      llvm::raw_svector_ostream Out(Name);
      CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
      Out.flush();

      llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);

      // Create the reference temporary.
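      // For a reference with static storage duration, the temporary is a
      // zero-initialized global with internal linkage; the reference's
      // initialization code fills it in.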
      llvm::GlobalValue *RefTemp =
        new llvm::GlobalVariable(CGF.CGM.getModule(),
                                 RefTempTy, /*isConstant=*/false,
                                 llvm::GlobalValue::InternalLinkage,
                                 llvm::Constant::getNullValue(RefTempTy),
                                 Name.str());
      return RefTemp;
    }
  }

  return CGF.CreateMemTemp(Type, "ref.tmp");
}

static llvm::Value *
EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
                            llvm::Value *&ReferenceTemporary,
                            const CXXDestructorDecl *&ReferenceTemporaryDtor,
                            QualType &ObjCARCReferenceLifetimeType,
                            const NamedDecl *InitializedDecl) {
  const MaterializeTemporaryExpr *M = NULL;
  E = E->findMaterializedTemporary(M);
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  if (M && CGF.getLangOpts().ObjCAutoRefCount &&
      M->getType()->isObjCLifetimeType() &&
      (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
       M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
    ObjCARCReferenceLifetimeType = M->getType();

  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
    CGF.enterFullExpression(EWC);
    CodeGenFunction::RunCleanupsScope Scope(CGF);

    return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
                                       ReferenceTemporary,
                                       ReferenceTemporaryDtor,
                                       ObjCARCReferenceLifetimeType,
                                       InitializedDecl);
  }

  RValue RV;
  if (E->isGLValue()) {
    // Emit the expression as an lvalue.
    LValue LV = CGF.EmitLValue(E);

    if (LV.isSimple())
      return LV.getAddress();

    // We have to load the lvalue.
    RV = CGF.EmitLoadOfLValue(LV);
  } else {
    if (!ObjCARCReferenceLifetimeType.isNull()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF,
                                                  ObjCARCReferenceLifetimeType,
                                                    InitializedDecl);

      LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
                                             ObjCARCReferenceLifetimeType);

      CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
                         RefTempDst, false);

      bool ExtendsLifeOfTemporary = false;
      if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
        if (Var->extendsLifetimeOfTemporary())
          ExtendsLifeOfTemporary = true;
      } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
        ExtendsLifeOfTemporary = true;
      }

      if (!ExtendsLifeOfTemporary) {
        // Since the lifetime of this temporary isn't going to be extended,
        // we need to clean it up ourselves at the end of the full expression.
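        // The cleanup needed depends on the temporary's ARC ownership
        // qualifier.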
        switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
        case Qualifiers::OCL_None:
        case Qualifiers::OCL_ExplicitNone:
        case Qualifiers::OCL_Autoreleasing:
          break;

        case Qualifiers::OCL_Strong: {
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CleanupKind cleanupKind = CGF.getARCCleanupKind();
          CGF.pushDestroy(cleanupKind,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCStrongImprecise,
                          cleanupKind & EHCleanup);
          break;
        }

        case Qualifiers::OCL_Weak:
          assert(!ObjCARCReferenceLifetimeType->isArrayType());
          CGF.pushDestroy(NormalAndEHCleanup,
                          ReferenceTemporary,
                          ObjCARCReferenceLifetimeType,
                          CodeGenFunction::destroyARCWeak,
                          /*useEHCleanupForArray*/ true);
          break;
        }

        ObjCARCReferenceLifetimeType = QualType();
      }

      return ReferenceTemporary;
    }

    SmallVector<SubobjectAdjustment, 2> Adjustments;
    E = E->skipRValueSubobjectAdjustments(Adjustments);
    if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
      if (opaque->getType()->isRecordType())
        return CGF.EmitOpaqueValueLValue(opaque).getAddress();

    // Create a reference temporary if necessary.
    AggValueSlot AggSlot = AggValueSlot::ignored();
    if (CGF.hasAggregateLLVMType(E->getType()) &&
        !E->getType()->isAnyComplexType()) {
      ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                    InitializedDecl);
      CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
      AggValueSlot::IsDestructed_t isDestructed
        = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
      AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
                                      Qualifiers(), isDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                      AggValueSlot::IsNotAliased);
    }

    if (InitializedDecl) {
      // Get the destructor for the reference temporary.
      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
        CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (!ClassDecl->hasTrivialDestructor())
          ReferenceTemporaryDtor = ClassDecl->getDestructor();
      }
    }

    RV = CGF.EmitAnyExpr(E, AggSlot);

    // Check if we need to perform derived-to-base casts and/or field accesses,
    // to get from the temporary object we created (and, potentially, for which
    // we extended the lifetime) to the subobject we're binding the reference
    // to.
    if (!Adjustments.empty()) {
      llvm::Value *Object = RV.getAggregateAddr();
      for (unsigned I = Adjustments.size(); I != 0; --I) {
        SubobjectAdjustment &Adjustment = Adjustments[I-1];
        switch (Adjustment.Kind) {
        case SubobjectAdjustment::DerivedToBaseAdjustment:
          Object =
              CGF.GetAddressOfBaseClass(Object,
                                        Adjustment.DerivedToBase.DerivedClass,
                              Adjustment.DerivedToBase.BasePath->path_begin(),
                              Adjustment.DerivedToBase.BasePath->path_end(),
                                        /*NullCheckValue=*/false);
          break;

        case SubobjectAdjustment::FieldAdjustment: {
          LValue LV = CGF.MakeAddrLValue(Object, E->getType());
          LV = CGF.EmitLValueForField(LV, Adjustment.Field);
          if (LV.isSimple()) {
            Object = LV.getAddress();
            break;
          }

          // For non-simple lvalues, we actually have to create a copy of
          // the object we're binding to.
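          // (e.g. when the field is a bit-field, whose address cannot be
          // taken directly)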
          QualType T = Adjustment.Field->getType().getNonReferenceType()
                                                  .getUnqualifiedType();
          Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
          LValue TempLV = CGF.MakeAddrLValue(Object,
                                             Adjustment.Field->getType());
          CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
          break;
        }

        case SubobjectAdjustment::MemberPointerAdjustment: {
          llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
          Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
                        CGF, Object, Ptr, Adjustment.Ptr.MPT);
          break;
        }
        }
      }

      return Object;
    }
  }

  if (RV.isAggregate())
    return RV.getAggregateAddr();

  // Create a temporary variable that we can bind the reference to.
  ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
                                                InitializedDecl);

  unsigned Alignment =
    CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
  if (RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
                          /*Volatile=*/false, Alignment, E->getType());
  else
    CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
                           /*Volatile=*/false);
  return ReferenceTemporary;
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
                                            const NamedDecl *InitializedDecl) {
  llvm::Value *ReferenceTemporary = 0;
  const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
  QualType ObjCARCReferenceLifetimeType;
  llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
                                                   ReferenceTemporaryDtor,
                                                   ObjCARCReferenceLifetimeType,
                                                   InitializedDecl);
  if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }
  if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
    return RValue::get(Value);

  // Make sure to call the destructor for the reference temporary.
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
  if (VD && VD->hasGlobalStorage()) {
    if (ReferenceTemporaryDtor) {
      llvm::Constant *DtorFn =
        CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
      CGM.getCXXABI().registerGlobalDtor(*this, DtorFn,
                                    cast<llvm::Constant>(ReferenceTemporary));
    } else {
      assert(!ObjCARCReferenceLifetimeType.isNull());
      // Note: We intentionally do not register a global "destructor" to
      // release the object.
    }

    return RValue::get(Value);
  }

  if (ReferenceTemporaryDtor)
    PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
  else {
    switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
      llvm_unreachable(
                      "Not a reference temporary that needs to be deallocated");
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do.
      break;

    case Qualifiers::OCL_Strong: {
      bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
      CleanupKind cleanupKind = getARCCleanupKind();
      pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
                  precise ?
                    destroyARCStrongPrecise : destroyARCStrongImprecise,
                  cleanupKind & EHCleanup);
      break;
    }

    case Qualifiers::OCL_Weak: {
      // __weak objects always get EH cleanups; otherwise, exceptions
      // could cause really nasty crashes instead of mere leaks.
      pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
                  ObjCARCReferenceLifetimeType, destroyARCWeak, true);
      break;
    }
    }
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Address,
                                    QualType Ty, CharUnits Alignment) {
  if (!SanitizePerformTypeCheck)
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Address->getType()->getPointerAddressSpace())
    return;

  llvm::Value *Cond = 0;

  if (getLangOpts().SanitizeNull) {
    // The glvalue must not be an empty glvalue.
    Cond = Builder.CreateICmpNE(
        Address, llvm::Constant::getNullValue(Address->getType()));
  }

  if (getLangOpts().SanitizeObjectSize && !Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();

    // The glvalue must refer to a large enough storage region.
    // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
    llvm::Value *LargeEnough =
        Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
                              llvm::ConstantInt::get(IntPtrTy, Size));
    Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
  }

  uint64_t AlignVal = 0;

  if (getLangOpts().SanitizeAlignment) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal) {
      llvm::Value *Align =
          Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
                            llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
        Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      Cond = Cond ?
               Builder.CreateAnd(Cond, Aligned) : Aligned;
    }
  }

  if (Cond) {
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      llvm::ConstantInt::get(SizeTy, AlignVal),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    EmitCheck(Cond, "type_mismatch", StaticData, Address, CRK_Recoverable);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (getLangOpts().SanitizeVptr &&
      (TCK == TCK_MemberAccess || TCK == TCK_MemberCall) &&
      RD && RD->hasDefinition() && RD->isDynamicClass()) {
    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);
    llvm::hash_code TypeHash = hash_value(Out.str());

    // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
    llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
    llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
    llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
    llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
    llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

    llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
    Hash = Builder.CreateTrunc(Hash, IntPtrTy);

    // Look the hash up in our cache.
    const int CacheSize = 128;
    llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
    llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                   "__ubsan_vptr_type_cache");
    llvm::Value *Slot = Builder.CreateAnd(Hash,
                                          llvm::ConstantInt::get(IntPtrTy,
                                                                 CacheSize-1));
    llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
    llvm::Value *CacheVal =
      Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));

    // If the hash isn't in the cache, call a runtime handler to perform the
    // hard work of checking whether the vptr is for an object of the right
    // type. This will either fill in the cache and return, or produce a
    // diagnostic.
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    llvm::Value *DynamicData[] = { Address, Hash };
    EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
              "dynamic_type_cache_miss", StaticData, DynamicData,
              CRK_AlwaysRecoverable);
  }
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
                                            LV.isVolatileQualified());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ?
                                 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}


//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(0);

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CTy->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address.  Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  if (hasAggregateLLVMType(Ty)) {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
                  E->getType(), LV.getAlignment());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type.  If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass:
    if (!E->getType()->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXScalarValueInitExprClass:
    return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return
      EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const RecordType *RT = dyn_cast<RecordType>(type))
    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type?  This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value.  This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
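  // Function parameters never qualify; their values are not known until the
  // call.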
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), lvalue.getTBAAInfo());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  bool IsBool = hasBooleanRepresentation(Ty);
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min =
            llvm::APInt(Bitwidth, 0);
    }
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End,
                       CGM.getCodeGenOpts().StrictEnums))
    return 0;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               llvm::MDNode *TBAAInfo) {

  // For better performance, handle vector loads differently.
  if (Ty->isVectorType()) {
    llvm::Value *V;
    const llvm::Type *EltTy =
      cast<llvm::PointerType>(Addr->getType())->getElementType();

    const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);

    // Handle vectors of size 3, like size 4 for better performance.
    if (VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
                                                       4);
      llvm::PointerType *ptVec4Ty =
        llvm::PointerType::get(vec4Ty,
                               (cast<llvm::PointerType>(
                                 Addr->getType()))->getAddressSpace());
      llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
                                                "castToVec4");
      // Now load value.
      llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
      llvm::Constant *Mask[] = {
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 1),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 2)
      };

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      V = Builder.CreateShuffleVector(LoadVal,
                                      llvm::UndefValue::get(vec4Ty),
                                      MaskV, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Load, TBAAInfo);
  // If this is an atomic type, all normal reads must be atomic.
  if (Ty->isAtomicType())
    Load->setAtomic(llvm::SequentiallyConsistent);

  if ((getLangOpts().SanitizeBool && hasBooleanRepresentation(Ty)) ||
      (getLangOpts().SanitizeEnum && Ty->getAs<EnumType>())) {
    llvm::APInt Min, End;
    if (getRangeForType(*this, Ty, Min, End, true)) {
      --End;
      llvm::Value *Check;
      if (!Min)
        Check = Builder.CreateICmpULE(
          Load, llvm::ConstantInt::get(getLLVMContext(), End));
      else {
        llvm::Value *Upper = Builder.CreateICmpSLE(
          Load, llvm::ConstantInt::get(getLLVMContext(), End));
        llvm::Value *Lower = Builder.CreateICmpSGE(
          Load, llvm::ConstantInt::get(getLLVMContext(), Min));
        Check = Builder.CreateAnd(Upper, Lower);
      }
      // FIXME: Provide a SourceLocation.
      EmitCheck(Check, "load_invalid_value", EmitCheckTypeDescriptor(Ty),
                EmitCheckValue(Load), CRK_Recoverable);
    }
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
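  // In registers it is an i1; in memory it is widened to the type's full
  // size (typically an i8).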
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit) {

  // Handle vectors differently to get better performance.
  if (Ty->isVectorType()) {
    llvm::Type *SrcTy = Value->getType();
    llvm::VectorType *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 specially.
    if (VecTy->getNumElements() == 3) {
      llvm::LLVMContext &VMContext = getLLVMContext();

      // Our source is a vec3, do a shuffle vector to make it a vec4.
      SmallVector<llvm::Constant*, 4> Mask;
      Mask.push_back(llvm::ConstantInt::get(
                                            llvm::Type::getInt32Ty(VMContext),
                                            0));
      Mask.push_back(llvm::ConstantInt::get(
                                            llvm::Type::getInt32Ty(VMContext),
                                            1));
      Mask.push_back(llvm::ConstantInt::get(
                                            llvm::Type::getInt32Ty(VMContext),
                                            2));
      Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Value = Builder.CreateShuffleVector(Value,
                                          llvm::UndefValue::get(VecTy),
                                          MaskV, "extractVec");
      SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
    }
    llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      llvm::Type *MemTy =
        llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }

  Value = EmitToMemory(Value, Ty);

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo)
    CGM.DecorateInstruction(Store, TBAAInfo);
  if (!isInit && Ty->isAtomicType())
    Store->setAtomic(llvm::SequentiallyConsistent);
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit);
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
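    // Under GC, reading a __weak l-value must go through the runtime's weak
    // read barrier.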
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  llvm::Value *Ptr = LV.getBitFieldAddr();
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(),
                                        "bf.load");
  cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);

  if (Info.IsSigned) {
    assert((Info.Offset + Info.Size) <= Info.StorageSize);
    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Info.Offset + HighBits)
      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
  } else {
    if (Info.Offset)
      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
    if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                              Info.Size),
                              "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");

  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element.  Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}



/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // Assignment into a GC __weak object; go through the runtime's
    // weak write barrier.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // Assignment into a GC __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  llvm::Value *Ptr = Dst.getBitFieldAddr();

  // Get the source value, truncated to the width of the bit-field.
  llvm::Value *SrcVal = Src.getScalarVal();

  // Cast the source to the storage type and shift it into place.
  SrcVal = Builder.CreateIntCast(SrcVal,
                                 Ptr->getType()->getPointerElementType(),
                                 /*IsSigned=*/false);
  llvm::Value *MaskedVal = SrcVal;

  // See if there are other bits in the bitfield's storage we'll need to load
  // and mask together with the source before storing.
  if (Info.StorageSize != Info.Size) {
    assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
    llvm::Value *Val = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
                                          "bf.load");
    cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);

    // Mask the source value as needed.
    if (!hasBooleanRepresentation(Dst.getType()))
      SrcVal = Builder.CreateAnd(SrcVal,
                                 llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                            Info.Size),
                                 "bf.value");
    MaskedVal = SrcVal;
    if (Info.Offset)
      SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");

    // Mask out the original value.
    Val = Builder.CreateAnd(Val,
                            ~llvm::APInt::getBitsSet(Info.StorageSize,
                                                     Info.Offset,
                                                     Info.Offset + Info.Size),
                            "bf.clear");

    // Or together the unchanged values and the source value.
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
  } else {
    assert(Info.Offset == 0);
  }

  // Write the new value back out.
  llvm::StoreInst *Store = Builder.CreateStore(SrcVal, Ptr,
                                               Dst.isVolatileQualified());
  Store->setAlignment(Info.StorageAlignment);

  // Return the new value of the bit-field, if requested.
  if (Result) {
    llvm::Value *ResultVal = MaskedVal;

    // Sign extend the value if needed.
    if (Info.IsSigned) {
      assert(Info.Size <= Info.StorageSize);
      unsigned HighBits = Info.StorageSize - Info.Size;
      if (HighBits) {
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
      }
    }

    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
                                      "bf.result.cast");
    *Result = EmitFromMemory(ResultVal, Dst.getType());
  }
}

void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
                                                               LValue Dst) {
  // This access turns into a read/modify/write of the vector.  Load the input
  // value now.
  llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
                                            Dst.isVolatileQualified());
  Load->setAlignment(Dst.getAlignment().getQuantity());
  llvm::Value *Vec = Load;
  const llvm::Constant *Elts = Dst.getExtVectorElts();

  llvm::Value *SrcVal = Src.getScalarVal();

  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
       cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use a shuffle vector when the source and destination have the same
      // number of elements, restoring the vector mask since it is on the
      // side it will be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that?  This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
        Builder.CreateShuffleVector(SrcVal,
                                    llvm::UndefValue::get(SrcVal->getType()),
                                    ExtMaskV);
      // build identity
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // then modify the entries that get shuffled in from the source
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector), it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
                                               Dst.isVolatileQualified());
  Store->setAlignment(Dst.getAlignment().getQuantity());
}

// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
// generating the write-barrier API.  It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If the ivar is a structure pointer, assigning to a field of this
      // struct follows gcc's behavior and conservatively makes it a non-ivar
      // write-barrier.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->isThreadSpecified());
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself.
{id *Names;} Names[i] = 0; 1559 LV.setObjCIvar(false); 1560 else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) 1561 // Using array syntax to assigning to what global points to is not 1562 // same as assigning to the global itself. {id *G;} G[i] = 0; 1563 LV.setGlobalObjCRef(false); 1564 return; 1565 } 1566 1567 if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) { 1568 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true); 1569 // We don't know if member is an 'ivar', but this flag is looked at 1570 // only in the context of LV.isObjCIvar(). 1571 LV.setObjCArray(E->getType()->isArrayType()); 1572 return; 1573 } 1574 } 1575 1576 static llvm::Value * 1577 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, 1578 llvm::Value *V, llvm::Type *IRType, 1579 StringRef Name = StringRef()) { 1580 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1581 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name); 1582 } 1583 1584 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, 1585 const Expr *E, const VarDecl *VD) { 1586 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); 1587 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); 1588 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); 1589 CharUnits Alignment = CGF.getContext().getDeclAlign(VD); 1590 QualType T = E->getType(); 1591 LValue LV; 1592 if (VD->getType()->isReferenceType()) { 1593 llvm::LoadInst *LI = CGF.Builder.CreateLoad(V); 1594 LI->setAlignment(Alignment.getQuantity()); 1595 V = LI; 1596 LV = CGF.MakeNaturalAlignAddrLValue(V, T); 1597 } else { 1598 LV = CGF.MakeAddrLValue(V, E->getType(), Alignment); 1599 } 1600 setObjCGCLValueClass(CGF.getContext(), E, LV); 1601 return LV; 1602 } 1603 1604 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, 1605 const Expr *E, const FunctionDecl *FD) { 1606 llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD); 1607 if (!FD->hasPrototype()) { 1608 if (const FunctionProtoType *Proto = 1609 FD->getType()->getAs<FunctionProtoType>()) { 1610 // Ugly case: for a K&R-style definition, the type of the definition 1611 // isn't the same as the type of a use. Correct for this with a 1612 // bitcast. 1613 QualType NoProtoType = 1614 CGF.getContext().getFunctionNoProtoType(Proto->getResultType()); 1615 NoProtoType = CGF.getContext().getPointerType(NoProtoType); 1616 V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType)); 1617 } 1618 } 1619 CharUnits Alignment = CGF.getContext().getDeclAlign(FD); 1620 return CGF.MakeAddrLValue(V, E->getType(), Alignment); 1621 } 1622 1623 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 1624 const NamedDecl *ND = E->getDecl(); 1625 CharUnits Alignment = getContext().getDeclAlign(ND); 1626 QualType T = E->getType(); 1627 1628 // A DeclRefExpr for a reference initialized by a constant expression can 1629 // appear without being odr-used. Directly emit the constant initializer. 1630 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { 1631 const Expr *Init = VD->getAnyInitializer(VD); 1632 if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() && 1633 VD->isUsableInConstantExpressions(getContext()) && 1634 VD->checkInitIsICE()) { 1635 llvm::Constant *Val = 1636 CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this); 1637 assert(Val && "failed to emit reference constant expression"); 1638 // FIXME: Eventually we will want to emit vector element references. 
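      // Note (a sketch of what happens here): the reference's evaluated
      // constant initializer was emitted above as 'Val', so the lvalue below
      // refers to that emitted constant directly; no load of the reference
      // variable itself is needed.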
1639 return MakeAddrLValue(Val, T, Alignment); 1640 } 1641 } 1642 1643 // FIXME: We should be able to assert this for FunctionDecls as well! 1644 // FIXME: We should be able to assert this for all DeclRefExprs, not just 1645 // those with a valid source location. 1646 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || 1647 !E->getLocation().isValid()) && 1648 "Should not use decl without marking it used!"); 1649 1650 if (ND->hasAttr<WeakRefAttr>()) { 1651 const ValueDecl *VD = cast<ValueDecl>(ND); 1652 llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD); 1653 return MakeAddrLValue(Aliasee, T, Alignment); 1654 } 1655 1656 if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) { 1657 // Check if this is a global variable. 1658 if (VD->hasLinkage() || VD->isStaticDataMember()) 1659 return EmitGlobalVarDeclLValue(*this, E, VD); 1660 1661 bool isBlockVariable = VD->hasAttr<BlocksAttr>(); 1662 1663 bool NonGCable = VD->hasLocalStorage() && 1664 !VD->getType()->isReferenceType() && 1665 !isBlockVariable; 1666 1667 llvm::Value *V = LocalDeclMap.lookup(VD); 1668 if (!V && VD->isStaticLocal()) 1669 V = CGM.getStaticLocalDeclAddress(VD); 1670 1671 // Use special handling for lambdas. 1672 if (!V) { 1673 if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) { 1674 QualType LambdaTagType = getContext().getTagDeclType(FD->getParent()); 1675 LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, 1676 LambdaTagType); 1677 return EmitLValueForField(LambdaLV, FD); 1678 } 1679 1680 assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal()); 1681 return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable), 1682 T, Alignment); 1683 } 1684 1685 assert(V && "DeclRefExpr not entered in LocalDeclMap?"); 1686 1687 if (isBlockVariable) 1688 V = BuildBlockByrefAddress(V, VD); 1689 1690 LValue LV; 1691 if (VD->getType()->isReferenceType()) { 1692 llvm::LoadInst *LI = Builder.CreateLoad(V); 1693 LI->setAlignment(Alignment.getQuantity()); 1694 V = LI; 1695 LV = MakeNaturalAlignAddrLValue(V, T); 1696 } else { 1697 LV = MakeAddrLValue(V, T, Alignment); 1698 } 1699 1700 if (NonGCable) { 1701 LV.getQuals().removeObjCGCAttr(); 1702 LV.setNonGC(true); 1703 } 1704 setObjCGCLValueClass(getContext(), E, LV); 1705 return LV; 1706 } 1707 1708 if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND)) 1709 return EmitFunctionDeclLValue(*this, E, fn); 1710 1711 llvm_unreachable("Unhandled DeclRefExpr"); 1712 } 1713 1714 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 1715 // __extension__ doesn't affect lvalue-ness. 1716 if (E->getOpcode() == UO_Extension) 1717 return EmitLValue(E->getSubExpr()); 1718 1719 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 1720 switch (E->getOpcode()) { 1721 default: llvm_unreachable("Unknown unary operator lvalue!"); 1722 case UO_Deref: { 1723 QualType T = E->getSubExpr()->getType()->getPointeeType(); 1724 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 1725 1726 LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T); 1727 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); 1728 1729 // We should not generate __weak write barrier on indirect reference 1730 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 1731 // But, we continue to generate __strong write barrier on indirect write 1732 // into a pointer to object. 
1733 if (getLangOpts().ObjC1 && 1734 getLangOpts().getGC() != LangOptions::NonGC && 1735 LV.isObjCWeak()) 1736 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 1737 return LV; 1738 } 1739 case UO_Real: 1740 case UO_Imag: { 1741 LValue LV = EmitLValue(E->getSubExpr()); 1742 assert(LV.isSimple() && "real/imag on non-ordinary l-value"); 1743 llvm::Value *Addr = LV.getAddress(); 1744 1745 // __real is valid on scalars. This is a faster way of testing that. 1746 // __imag can only produce an rvalue on scalars. 1747 if (E->getOpcode() == UO_Real && 1748 !cast<llvm::PointerType>(Addr->getType()) 1749 ->getElementType()->isStructTy()) { 1750 assert(E->getSubExpr()->getType()->isArithmeticType()); 1751 return LV; 1752 } 1753 1754 assert(E->getSubExpr()->getType()->isAnyComplexType()); 1755 1756 unsigned Idx = E->getOpcode() == UO_Imag; 1757 return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(), 1758 Idx, "idx"), 1759 ExprTy); 1760 } 1761 case UO_PreInc: 1762 case UO_PreDec: { 1763 LValue LV = EmitLValue(E->getSubExpr()); 1764 bool isInc = E->getOpcode() == UO_PreInc; 1765 1766 if (E->getType()->isAnyComplexType()) 1767 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); 1768 else 1769 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); 1770 return LV; 1771 } 1772 } 1773 } 1774 1775 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 1776 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), 1777 E->getType()); 1778 } 1779 1780 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 1781 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), 1782 E->getType()); 1783 } 1784 1785 static llvm::Constant* 1786 GetAddrOfConstantWideString(StringRef Str, 1787 const char *GlobalName, 1788 ASTContext &Context, 1789 QualType Ty, SourceLocation Loc, 1790 CodeGenModule &CGM) { 1791 1792 StringLiteral *SL = StringLiteral::Create(Context, 1793 Str, 1794 StringLiteral::Wide, 1795 /*Pascal = */false, 1796 Ty, Loc); 1797 llvm::Constant *C = CGM.GetConstantArrayFromStringLiteral(SL); 1798 llvm::GlobalVariable *GV = 1799 new llvm::GlobalVariable(CGM.getModule(), C->getType(), 1800 !CGM.getLangOpts().WritableStrings, 1801 llvm::GlobalValue::PrivateLinkage, 1802 C, GlobalName); 1803 const unsigned WideAlignment = 1804 Context.getTypeAlignInChars(Ty).getQuantity(); 1805 GV->setAlignment(WideAlignment); 1806 return GV; 1807 } 1808 1809 static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source, 1810 SmallString<32>& Target) { 1811 Target.resize(CharByteWidth * (Source.size() + 1)); 1812 char *ResultPtr = &Target[0]; 1813 const UTF8 *ErrorPtr; 1814 bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr); 1815 (void)success; 1816 assert(success); 1817 Target.resize(ResultPtr - &Target[0]); 1818 } 1819 1820 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 1821 switch (E->getIdentType()) { 1822 default: 1823 return EmitUnsupportedLValue(E, "predefined expression"); 1824 1825 case PredefinedExpr::Func: 1826 case PredefinedExpr::Function: 1827 case PredefinedExpr::LFunction: 1828 case PredefinedExpr::PrettyFunction: { 1829 unsigned IdentType = E->getIdentType(); 1830 std::string GlobalVarName; 1831 1832 switch (IdentType) { 1833 default: llvm_unreachable("Invalid type"); 1834 case PredefinedExpr::Func: 1835 GlobalVarName = "__func__."; 1836 break; 1837 case PredefinedExpr::Function: 1838 GlobalVarName = "__FUNCTION__."; 1839 break; 1840 case PredefinedExpr::LFunction: 1841 GlobalVarName = 
"L__FUNCTION__."; 1842 break; 1843 case PredefinedExpr::PrettyFunction: 1844 GlobalVarName = "__PRETTY_FUNCTION__."; 1845 break; 1846 } 1847 1848 StringRef FnName = CurFn->getName(); 1849 if (FnName.startswith("\01")) 1850 FnName = FnName.substr(1); 1851 GlobalVarName += FnName; 1852 1853 const Decl *CurDecl = CurCodeDecl; 1854 if (CurDecl == 0) 1855 CurDecl = getContext().getTranslationUnitDecl(); 1856 1857 std::string FunctionName = 1858 (isa<BlockDecl>(CurDecl) 1859 ? FnName.str() 1860 : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType, 1861 CurDecl)); 1862 1863 const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual(); 1864 llvm::Constant *C; 1865 if (ElemType->isWideCharType()) { 1866 SmallString<32> RawChars; 1867 ConvertUTF8ToWideString( 1868 getContext().getTypeSizeInChars(ElemType).getQuantity(), 1869 FunctionName, RawChars); 1870 C = GetAddrOfConstantWideString(RawChars, 1871 GlobalVarName.c_str(), 1872 getContext(), 1873 E->getType(), 1874 E->getLocation(), 1875 CGM); 1876 } else { 1877 C = CGM.GetAddrOfConstantCString(FunctionName, 1878 GlobalVarName.c_str(), 1879 1); 1880 } 1881 return MakeAddrLValue(C, E->getType()); 1882 } 1883 } 1884 } 1885 1886 /// Emit a type description suitable for use by a runtime sanitizer library. The 1887 /// format of a type descriptor is 1888 /// 1889 /// \code 1890 /// { i16 TypeKind, i16 TypeInfo } 1891 /// \endcode 1892 /// 1893 /// followed by an array of i8 containing the type name. TypeKind is 0 for an 1894 /// integer, 1 for a floating point value, and -1 for anything else. 1895 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { 1896 // FIXME: Only emit each type's descriptor once. 1897 uint16_t TypeKind = -1; 1898 uint16_t TypeInfo = 0; 1899 1900 if (T->isIntegerType()) { 1901 TypeKind = 0; 1902 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 1903 (T->isSignedIntegerType() ? 1 : 0); 1904 } else if (T->isFloatingType()) { 1905 TypeKind = 1; 1906 TypeInfo = getContext().getTypeSize(T); 1907 } 1908 1909 // Format the type name as if for a diagnostic, including quotes and 1910 // optionally an 'aka'. 1911 SmallString<32> Buffer; 1912 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 1913 (intptr_t)T.getAsOpaquePtr(), 1914 0, 0, 0, 0, 0, 0, Buffer, 1915 ArrayRef<intptr_t>()); 1916 1917 llvm::Constant *Components[] = { 1918 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 1919 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 1920 }; 1921 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 1922 1923 llvm::GlobalVariable *GV = 1924 new llvm::GlobalVariable(CGM.getModule(), Descriptor->getType(), 1925 /*isConstant=*/true, 1926 llvm::GlobalVariable::PrivateLinkage, 1927 Descriptor); 1928 GV->setUnnamedAddr(true); 1929 return GV; 1930 } 1931 1932 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 1933 llvm::Type *TargetTy = IntPtrTy; 1934 1935 // Integers which fit in intptr_t are zero-extended and passed directly. 1936 if (V->getType()->isIntegerTy() && 1937 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 1938 return Builder.CreateZExt(V, TargetTy); 1939 1940 // Pointers are passed directly, everything else is passed by address. 
1941 if (!V->getType()->isPointerTy()) { 1942 llvm::Value *Ptr = Builder.CreateAlloca(V->getType()); 1943 Builder.CreateStore(V, Ptr); 1944 V = Ptr; 1945 } 1946 return Builder.CreatePtrToInt(V, TargetTy); 1947 } 1948 1949 /// \brief Emit a representation of a SourceLocation for passing to a handler 1950 /// in a sanitizer runtime library. The format for this data is: 1951 /// \code 1952 /// struct SourceLocation { 1953 /// const char *Filename; 1954 /// int32_t Line, Column; 1955 /// }; 1956 /// \endcode 1957 /// For an invalid SourceLocation, the Filename pointer is null. 1958 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { 1959 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); 1960 1961 llvm::Constant *Data[] = { 1962 // FIXME: Only emit each file name once. 1963 PLoc.isValid() ? cast<llvm::Constant>( 1964 Builder.CreateGlobalStringPtr(PLoc.getFilename())) 1965 : llvm::Constant::getNullValue(Int8PtrTy), 1966 Builder.getInt32(PLoc.getLine()), 1967 Builder.getInt32(PLoc.getColumn()) 1968 }; 1969 1970 return llvm::ConstantStruct::getAnon(Data); 1971 } 1972 1973 void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName, 1974 ArrayRef<llvm::Constant *> StaticArgs, 1975 ArrayRef<llvm::Value *> DynamicArgs, 1976 CheckRecoverableKind RecoverKind) { 1977 llvm::BasicBlock *Cont = createBasicBlock("cont"); 1978 1979 llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName); 1980 1981 llvm::Instruction *Branch = Builder.CreateCondBr(Checked, Cont, Handler); 1982 1983 // Give hint that we very much don't expect to execute the handler 1984 // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp 1985 llvm::MDBuilder MDHelper(getLLVMContext()); 1986 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); 1987 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); 1988 1989 EmitBlock(Handler); 1990 1991 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 1992 llvm::GlobalValue *InfoPtr = 1993 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, 1994 llvm::GlobalVariable::PrivateLinkage, Info); 1995 InfoPtr->setUnnamedAddr(true); 1996 1997 SmallVector<llvm::Value *, 4> Args; 1998 SmallVector<llvm::Type *, 4> ArgTypes; 1999 Args.reserve(DynamicArgs.size() + 1); 2000 ArgTypes.reserve(DynamicArgs.size() + 1); 2001 2002 // Handler functions take an i8* pointing to the (handler-specific) static 2003 // information block, followed by a sequence of intptr_t arguments 2004 // representing operand values. 2005 Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); 2006 ArgTypes.push_back(Int8PtrTy); 2007 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { 2008 Args.push_back(EmitCheckValue(DynamicArgs[i])); 2009 ArgTypes.push_back(IntPtrTy); 2010 } 2011 2012 bool Recover = (RecoverKind == CRK_AlwaysRecoverable) || 2013 ((RecoverKind == CRK_Recoverable) && 2014 CGM.getCodeGenOpts().SanitizeRecover); 2015 2016 llvm::FunctionType *FnType = 2017 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); 2018 llvm::AttrBuilder B; 2019 if (!Recover) { 2020 B.addAttribute(llvm::Attribute::NoReturn) 2021 .addAttribute(llvm::Attribute::NoUnwind); 2022 } 2023 B.addAttribute(llvm::Attribute::UWTable); 2024 2025 // Checks that have two variants use a suffix to differentiate them 2026 bool NeedsAbortSuffix = (RecoverKind != CRK_Unrecoverable) && 2027 !CGM.getCodeGenOpts().SanitizeRecover; 2028 std::string FunctionName = ("__ubsan_handle_" + CheckName + 2029 (NeedsAbortSuffix? 
"_abort" : "")).str(); 2030 llvm::Value *Fn = 2031 CGM.CreateRuntimeFunction(FnType, FunctionName, 2032 llvm::Attribute::get(getLLVMContext(), B)); 2033 llvm::CallInst *HandlerCall = Builder.CreateCall(Fn, Args); 2034 if (Recover) { 2035 Builder.CreateBr(Cont); 2036 } else { 2037 HandlerCall->setDoesNotReturn(); 2038 HandlerCall->setDoesNotThrow(); 2039 Builder.CreateUnreachable(); 2040 } 2041 2042 EmitBlock(Cont); 2043 } 2044 2045 void CodeGenFunction::EmitTrapvCheck(llvm::Value *Checked) { 2046 llvm::BasicBlock *Cont = createBasicBlock("cont"); 2047 2048 // If we're optimizing, collapse all calls to trap down to just one per 2049 // function to save on code size. 2050 if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) { 2051 TrapBB = createBasicBlock("trap"); 2052 Builder.CreateCondBr(Checked, Cont, TrapBB); 2053 EmitBlock(TrapBB); 2054 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap); 2055 llvm::CallInst *TrapCall = Builder.CreateCall(F); 2056 TrapCall->setDoesNotReturn(); 2057 TrapCall->setDoesNotThrow(); 2058 Builder.CreateUnreachable(); 2059 } else { 2060 Builder.CreateCondBr(Checked, Cont, TrapBB); 2061 } 2062 2063 EmitBlock(Cont); 2064 } 2065 2066 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 2067 /// array to pointer, return the array subexpression. 2068 static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 2069 // If this isn't just an array->pointer decay, bail out. 2070 const CastExpr *CE = dyn_cast<CastExpr>(E); 2071 if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay) 2072 return 0; 2073 2074 // If this is a decay from variable width array, bail out. 2075 const Expr *SubExpr = CE->getSubExpr(); 2076 if (SubExpr->getType()->isVariableArrayType()) 2077 return 0; 2078 2079 return SubExpr; 2080 } 2081 2082 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { 2083 // The index must always be an integer, which is not an aggregate. Emit it. 2084 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 2085 QualType IdxTy = E->getIdx()->getType(); 2086 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 2087 2088 // If the base is a vector type, then we are forming a vector element lvalue 2089 // with this subscript. 2090 if (E->getBase()->getType()->isVectorType()) { 2091 // Emit the vector as an lvalue to get its address. 2092 LValue LHS = EmitLValue(E->getBase()); 2093 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 2094 Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx"); 2095 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 2096 E->getBase()->getType(), LHS.getAlignment()); 2097 } 2098 2099 // Extend or truncate the index type to 32 or 64-bits. 2100 if (Idx->getType() != IntPtrTy) 2101 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 2102 2103 // We know that the pointer points to a type of the correct size, unless the 2104 // size is a VLA or Objective-C interface. 2105 llvm::Value *Address = 0; 2106 CharUnits ArrayAlignment; 2107 if (const VariableArrayType *vla = 2108 getContext().getAsVariableArrayType(E->getType())) { 2109 // The base must be a pointer, which is not an aggregate. Emit 2110 // it. It needs to be emitted first in case it's what captures 2111 // the VLA bounds. 2112 Address = EmitScalarExpr(E->getBase()); 2113 2114 // The element count here is the total number of non-VLA elements. 2115 llvm::Value *numElements = getVLASize(vla).first; 2116 2117 // Effectively, the multiply by the VLA size is part of the GEP. 
2118 // GEP indexes are signed, and scaling an index isn't permitted to 2119 // signed-overflow, so we use the same semantics for our explicit 2120 // multiply. We suppress this if overflow is not undefined behavior. 2121 if (getLangOpts().isSignedOverflowDefined()) { 2122 Idx = Builder.CreateMul(Idx, numElements); 2123 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 2124 } else { 2125 Idx = Builder.CreateNSWMul(Idx, numElements); 2126 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); 2127 } 2128 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 2129 // Indexing over an interface, as in "NSString *P; P[4];" 2130 llvm::Value *InterfaceSize = 2131 llvm::ConstantInt::get(Idx->getType(), 2132 getContext().getTypeSizeInChars(OIT).getQuantity()); 2133 2134 Idx = Builder.CreateMul(Idx, InterfaceSize); 2135 2136 // The base must be a pointer, which is not an aggregate. Emit it. 2137 llvm::Value *Base = EmitScalarExpr(E->getBase()); 2138 Address = EmitCastToVoidPtr(Base); 2139 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 2140 Address = Builder.CreateBitCast(Address, Base->getType()); 2141 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 2142 // If this is A[i] where A is an array, the frontend will have decayed the 2143 // base to be a ArrayToPointerDecay implicit cast. While correct, it is 2144 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 2145 // "gep x, i" here. Emit one "gep A, 0, i". 2146 assert(Array->getType()->isArrayType() && 2147 "Array to pointer decay must have array source type!"); 2148 LValue ArrayLV = EmitLValue(Array); 2149 llvm::Value *ArrayPtr = ArrayLV.getAddress(); 2150 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 2151 llvm::Value *Args[] = { Zero, Idx }; 2152 2153 // Propagate the alignment from the array itself to the result. 2154 ArrayAlignment = ArrayLV.getAlignment(); 2155 2156 if (getLangOpts().isSignedOverflowDefined()) 2157 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx"); 2158 else 2159 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx"); 2160 } else { 2161 // The base must be a pointer, which is not an aggregate. Emit it. 2162 llvm::Value *Base = EmitScalarExpr(E->getBase()); 2163 if (getLangOpts().isSignedOverflowDefined()) 2164 Address = Builder.CreateGEP(Base, Idx, "arrayidx"); 2165 else 2166 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 2167 } 2168 2169 QualType T = E->getBase()->getType()->getPointeeType(); 2170 assert(!T.isNull() && 2171 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); 2172 2173 2174 // Limit the alignment to that of the result type. 
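  // Illustrative sketch (an assumption): for 'int A[10]; ... A[i] = 0;' the
  // decayed array base is recognized above and a single GEP is emitted,
  //   getelementptr inbounds [10 x i32]* %A, i32 0, i64 %idxprom
  // and the resulting lvalue's alignment is then capped at the alignment of
  // 'int'.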
2175 LValue LV; 2176 if (!ArrayAlignment.isZero()) { 2177 CharUnits Align = getContext().getTypeAlignInChars(T); 2178 ArrayAlignment = std::min(Align, ArrayAlignment); 2179 LV = MakeAddrLValue(Address, T, ArrayAlignment); 2180 } else { 2181 LV = MakeNaturalAlignAddrLValue(Address, T); 2182 } 2183 2184 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace()); 2185 2186 if (getLangOpts().ObjC1 && 2187 getLangOpts().getGC() != LangOptions::NonGC) { 2188 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 2189 setObjCGCLValueClass(getContext(), E, LV); 2190 } 2191 return LV; 2192 } 2193 2194 static 2195 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder, 2196 SmallVector<unsigned, 4> &Elts) { 2197 SmallVector<llvm::Constant*, 4> CElts; 2198 for (unsigned i = 0, e = Elts.size(); i != e; ++i) 2199 CElts.push_back(Builder.getInt32(Elts[i])); 2200 2201 return llvm::ConstantVector::get(CElts); 2202 } 2203 2204 LValue CodeGenFunction:: 2205 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { 2206 // Emit the base vector as an l-value. 2207 LValue Base; 2208 2209 // ExtVectorElementExpr's base can either be a vector or pointer to vector. 2210 if (E->isArrow()) { 2211 // If it is a pointer to a vector, emit the address and form an lvalue with 2212 // it. 2213 llvm::Value *Ptr = EmitScalarExpr(E->getBase()); 2214 const PointerType *PT = E->getBase()->getType()->getAs<PointerType>(); 2215 Base = MakeAddrLValue(Ptr, PT->getPointeeType()); 2216 Base.getQuals().removeObjCGCAttr(); 2217 } else if (E->getBase()->isGLValue()) { 2218 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), 2219 // emit the base as an lvalue. 2220 assert(E->getBase()->getType()->isVectorType()); 2221 Base = EmitLValue(E->getBase()); 2222 } else { 2223 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. 2224 assert(E->getBase()->getType()->isVectorType() && 2225 "Result must be a vector"); 2226 llvm::Value *Vec = EmitScalarExpr(E->getBase()); 2227 2228 // Store the vector to memory (because LValue wants an address). 2229 llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType()); 2230 Builder.CreateStore(Vec, VecMem); 2231 Base = MakeAddrLValue(VecMem, E->getBase()->getType()); 2232 } 2233 2234 QualType type = 2235 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); 2236 2237 // Encode the element access list into a vector of unsigned indices. 2238 SmallVector<unsigned, 4> Indices; 2239 E->getEncodedElementAccess(Indices); 2240 2241 if (Base.isSimple()) { 2242 llvm::Constant *CV = GenerateConstantVector(Builder, Indices); 2243 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type, 2244 Base.getAlignment()); 2245 } 2246 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 2247 2248 llvm::Constant *BaseElts = Base.getExtVectorElts(); 2249 SmallVector<llvm::Constant *, 4> CElts; 2250 2251 for (unsigned i = 0, e = Indices.size(); i != e; ++i) 2252 CElts.push_back(BaseElts->getAggregateElement(Indices[i])); 2253 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 2254 return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type, 2255 Base.getAlignment()); 2256 } 2257 2258 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 2259 Expr *BaseExpr = E->getBase(); 2260 2261 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
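  // Illustrative sketch (not from the source): for
  //   struct S { int a, b; } s; s.b = 0;
  // the base lvalue is the address of 's', and the field lvalue formed below
  // uses a struct GEP, roughly:
  //   getelementptr inbounds %struct.S* %s, i32 0, i32 1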
2262 LValue BaseLV; 2263 if (E->isArrow()) { 2264 llvm::Value *Ptr = EmitScalarExpr(BaseExpr); 2265 QualType PtrTy = BaseExpr->getType()->getPointeeType(); 2266 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy); 2267 BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy); 2268 } else 2269 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); 2270 2271 NamedDecl *ND = E->getMemberDecl(); 2272 if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) { 2273 LValue LV = EmitLValueForField(BaseLV, Field); 2274 setObjCGCLValueClass(getContext(), E, LV); 2275 return LV; 2276 } 2277 2278 if (VarDecl *VD = dyn_cast<VarDecl>(ND)) 2279 return EmitGlobalVarDeclLValue(*this, E, VD); 2280 2281 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) 2282 return EmitFunctionDeclLValue(*this, E, FD); 2283 2284 llvm_unreachable("Unhandled member declaration!"); 2285 } 2286 2287 LValue CodeGenFunction::EmitLValueForField(LValue base, 2288 const FieldDecl *field) { 2289 if (field->isBitField()) { 2290 const CGRecordLayout &RL = 2291 CGM.getTypes().getCGRecordLayout(field->getParent()); 2292 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); 2293 llvm::Value *Addr = base.getAddress(); 2294 unsigned Idx = RL.getLLVMFieldNo(field); 2295 if (Idx != 0) 2296 // For structs, we GEP to the field that the record layout suggests. 2297 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); 2298 // Get the access type. 2299 llvm::Type *PtrTy = llvm::Type::getIntNPtrTy( 2300 getLLVMContext(), Info.StorageSize, 2301 CGM.getContext().getTargetAddressSpace(base.getType())); 2302 if (Addr->getType() != PtrTy) 2303 Addr = Builder.CreateBitCast(Addr, PtrTy); 2304 2305 QualType fieldType = 2306 field->getType().withCVRQualifiers(base.getVRQualifiers()); 2307 return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment()); 2308 } 2309 2310 const RecordDecl *rec = field->getParent(); 2311 QualType type = field->getType(); 2312 CharUnits alignment = getContext().getDeclAlign(field); 2313 2314 // FIXME: It should be impossible to have an LValue without alignment for a 2315 // complete type. 2316 if (!base.getAlignment().isZero()) 2317 alignment = std::min(alignment, base.getAlignment()); 2318 2319 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 2320 2321 llvm::Value *addr = base.getAddress(); 2322 unsigned cvr = base.getVRQualifiers(); 2323 if (rec->isUnion()) { 2324 // For unions, there is no pointer adjustment. 2325 assert(!type->isReferenceType() && "union has reference member"); 2326 } else { 2327 // For structs, we GEP to the field that the record layout suggests. 2328 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 2329 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 2330 2331 // If this is a reference field, load the reference right now. 
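    // Illustrative example (an assumption): for 'struct S { int &r; }',
    // forming the lvalue for 's.r' loads the stored address out of the field,
    // so the resulting lvalue designates the referenced int; alignment and
    // TBAA are then taken from the referenced type rather than from the field.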
2332 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 2333 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 2334 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 2335 load->setAlignment(alignment.getQuantity()); 2336 2337 if (CGM.shouldUseTBAA()) { 2338 llvm::MDNode *tbaa; 2339 if (mayAlias) 2340 tbaa = CGM.getTBAAInfo(getContext().CharTy); 2341 else 2342 tbaa = CGM.getTBAAInfo(type); 2343 CGM.DecorateInstruction(load, tbaa); 2344 } 2345 2346 addr = load; 2347 mayAlias = false; 2348 type = refType->getPointeeType(); 2349 if (type->isIncompleteType()) 2350 alignment = CharUnits(); 2351 else 2352 alignment = getContext().getTypeAlignInChars(type); 2353 cvr = 0; // qualifiers don't recursively apply to referencee 2354 } 2355 } 2356 2357 // Make sure that the address is pointing to the right type. This is critical 2358 // for both unions and structs. A union needs a bitcast, a struct element 2359 // will need a bitcast if the LLVM type laid out doesn't match the desired 2360 // type. 2361 addr = EmitBitCastOfLValueToProperType(*this, addr, 2362 CGM.getTypes().ConvertTypeForMem(type), 2363 field->getName()); 2364 2365 if (field->hasAttr<AnnotateAttr>()) 2366 addr = EmitFieldAnnotations(field, addr); 2367 2368 LValue LV = MakeAddrLValue(addr, type, alignment); 2369 LV.getQuals().addCVRQualifiers(cvr); 2370 2371 // __weak attribute on a field is ignored. 2372 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 2373 LV.getQuals().removeObjCGCAttr(); 2374 2375 // Fields of may_alias structs act like 'char' for TBAA purposes. 2376 // FIXME: this should get propagated down through anonymous structs 2377 // and unions. 2378 if (mayAlias && LV.getTBAAInfo()) 2379 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 2380 2381 return LV; 2382 } 2383 2384 LValue 2385 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, 2386 const FieldDecl *Field) { 2387 QualType FieldType = Field->getType(); 2388 2389 if (!FieldType->isReferenceType()) 2390 return EmitLValueForField(Base, Field); 2391 2392 const CGRecordLayout &RL = 2393 CGM.getTypes().getCGRecordLayout(Field->getParent()); 2394 unsigned idx = RL.getLLVMFieldNo(Field); 2395 llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx); 2396 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 2397 2398 // Make sure that the address is pointing to the right type. This is critical 2399 // for both unions and structs. A union needs a bitcast, a struct element 2400 // will need a bitcast if the LLVM type laid out doesn't match the desired 2401 // type. 2402 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 2403 V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName()); 2404 2405 CharUnits Alignment = getContext().getDeclAlign(Field); 2406 2407 // FIXME: It should be impossible to have an LValue without alignment for a 2408 // complete type. 2409 if (!Base.getAlignment().isZero()) 2410 Alignment = std::min(Alignment, Base.getAlignment()); 2411 2412 return MakeAddrLValue(V, FieldType, Alignment); 2413 } 2414 2415 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 2416 if (E->isFileScope()) { 2417 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 2418 return MakeAddrLValue(GlobalPtr, E->getType()); 2419 } 2420 if (E->getType()->isVariablyModifiedType()) 2421 // make sure to emit the VLA size. 
2422 EmitVariablyModifiedType(E->getType()); 2423 2424 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 2425 const Expr *InitExpr = E->getInitializer(); 2426 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 2427 2428 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 2429 /*Init*/ true); 2430 2431 return Result; 2432 } 2433 2434 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { 2435 if (!E->isGLValue()) 2436 // Initializing an aggregate temporary in C++11: T{...}. 2437 return EmitAggExprToLValue(E); 2438 2439 // An lvalue initializer list must be initializing a reference. 2440 assert(E->getNumInits() == 1 && "reference init with multiple values"); 2441 return EmitLValue(E->getInit(0)); 2442 } 2443 2444 LValue CodeGenFunction:: 2445 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 2446 if (!expr->isGLValue()) { 2447 // ?: here should be an aggregate. 2448 assert((hasAggregateLLVMType(expr->getType()) && 2449 !expr->getType()->isAnyComplexType()) && 2450 "Unexpected conditional operator!"); 2451 return EmitAggExprToLValue(expr); 2452 } 2453 2454 OpaqueValueMapping binding(*this, expr); 2455 2456 const Expr *condExpr = expr->getCond(); 2457 bool CondExprBool; 2458 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 2459 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 2460 if (!CondExprBool) std::swap(live, dead); 2461 2462 if (!ContainsLabel(dead)) 2463 return EmitLValue(live); 2464 } 2465 2466 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 2467 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 2468 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 2469 2470 ConditionalEvaluation eval(*this); 2471 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock); 2472 2473 // Any temporaries created here are conditional. 2474 EmitBlock(lhsBlock); 2475 eval.begin(*this); 2476 LValue lhs = EmitLValue(expr->getTrueExpr()); 2477 eval.end(*this); 2478 2479 if (!lhs.isSimple()) 2480 return EmitUnsupportedLValue(expr, "conditional operator"); 2481 2482 lhsBlock = Builder.GetInsertBlock(); 2483 Builder.CreateBr(contBlock); 2484 2485 // Any temporaries created here are conditional. 2486 EmitBlock(rhsBlock); 2487 eval.begin(*this); 2488 LValue rhs = EmitLValue(expr->getFalseExpr()); 2489 eval.end(*this); 2490 if (!rhs.isSimple()) 2491 return EmitUnsupportedLValue(expr, "conditional operator"); 2492 rhsBlock = Builder.GetInsertBlock(); 2493 2494 EmitBlock(contBlock); 2495 2496 llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2, 2497 "cond-lvalue"); 2498 phi->addIncoming(lhs.getAddress(), lhsBlock); 2499 phi->addIncoming(rhs.getAddress(), rhsBlock); 2500 return MakeAddrLValue(phi, expr->getType()); 2501 } 2502 2503 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference 2504 /// type. If the cast is to a reference, we can have the usual lvalue result, 2505 /// otherwise if a cast is needed by the code generator in an lvalue context, 2506 /// then it must mean that we need the address of an aggregate in order to 2507 /// access one of its members. This can happen for all the reasons that casts 2508 /// are permitted with aggregate result, including noop aggregate casts, and 2509 /// cast from scalar to union. 
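/// For example (illustrative only), a GNU cast-to-union such as '(union U)x'
/// used in an lvalue context is emitted as an aggregate temporary whose
/// address is then returned as the lvalue.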
2510 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 2511 switch (E->getCastKind()) { 2512 case CK_ToVoid: 2513 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 2514 2515 case CK_Dependent: 2516 llvm_unreachable("dependent cast kind in IR gen!"); 2517 2518 case CK_BuiltinFnToFnPtr: 2519 llvm_unreachable("builtin functions are handled elsewhere"); 2520 2521 // These two casts are currently treated as no-ops, although they could 2522 // potentially be real operations depending on the target's ABI. 2523 case CK_NonAtomicToAtomic: 2524 case CK_AtomicToNonAtomic: 2525 2526 case CK_NoOp: 2527 case CK_LValueToRValue: 2528 if (!E->getSubExpr()->Classify(getContext()).isPRValue() 2529 || E->getType()->isRecordType()) 2530 return EmitLValue(E->getSubExpr()); 2531 // Fall through to synthesize a temporary. 2532 2533 case CK_BitCast: 2534 case CK_ArrayToPointerDecay: 2535 case CK_FunctionToPointerDecay: 2536 case CK_NullToMemberPointer: 2537 case CK_NullToPointer: 2538 case CK_IntegralToPointer: 2539 case CK_PointerToIntegral: 2540 case CK_PointerToBoolean: 2541 case CK_VectorSplat: 2542 case CK_IntegralCast: 2543 case CK_IntegralToBoolean: 2544 case CK_IntegralToFloating: 2545 case CK_FloatingToIntegral: 2546 case CK_FloatingToBoolean: 2547 case CK_FloatingCast: 2548 case CK_FloatingRealToComplex: 2549 case CK_FloatingComplexToReal: 2550 case CK_FloatingComplexToBoolean: 2551 case CK_FloatingComplexCast: 2552 case CK_FloatingComplexToIntegralComplex: 2553 case CK_IntegralRealToComplex: 2554 case CK_IntegralComplexToReal: 2555 case CK_IntegralComplexToBoolean: 2556 case CK_IntegralComplexCast: 2557 case CK_IntegralComplexToFloatingComplex: 2558 case CK_DerivedToBaseMemberPointer: 2559 case CK_BaseToDerivedMemberPointer: 2560 case CK_MemberPointerToBoolean: 2561 case CK_ReinterpretMemberPointer: 2562 case CK_AnyPointerToBlockPointerCast: 2563 case CK_ARCProduceObject: 2564 case CK_ARCConsumeObject: 2565 case CK_ARCReclaimReturnedObject: 2566 case CK_ARCExtendBlockObject: 2567 case CK_CopyAndAutoreleaseBlockObject: { 2568 // These casts only produce lvalues when we're binding a reference to a 2569 // temporary realized from a (converted) pure rvalue. Emit the expression 2570 // as a value, copy it into a temporary, and return an lvalue referring to 2571 // that temporary. 
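    // Illustrative sketch (an assumption): when a reference ends up bound to
    // a converted rvalue such as '(int)f' for a float 'f', the conversion
    // yields a prvalue, so roughly:
    //   %ref.temp = alloca i32
    //   store i32 %conv, i32* %ref.temp
    // and the returned lvalue refers to %ref.temp.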
2572 llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp"); 2573 EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false); 2574 return MakeAddrLValue(V, E->getType()); 2575 } 2576 2577 case CK_Dynamic: { 2578 LValue LV = EmitLValue(E->getSubExpr()); 2579 llvm::Value *V = LV.getAddress(); 2580 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E); 2581 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 2582 } 2583 2584 case CK_ConstructorConversion: 2585 case CK_UserDefinedConversion: 2586 case CK_CPointerToObjCPointerCast: 2587 case CK_BlockPointerToObjCPointerCast: 2588 return EmitLValue(E->getSubExpr()); 2589 2590 case CK_UncheckedDerivedToBase: 2591 case CK_DerivedToBase: { 2592 const RecordType *DerivedClassTy = 2593 E->getSubExpr()->getType()->getAs<RecordType>(); 2594 CXXRecordDecl *DerivedClassDecl = 2595 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2596 2597 LValue LV = EmitLValue(E->getSubExpr()); 2598 llvm::Value *This = LV.getAddress(); 2599 2600 // Perform the derived-to-base conversion 2601 llvm::Value *Base = 2602 GetAddressOfBaseClass(This, DerivedClassDecl, 2603 E->path_begin(), E->path_end(), 2604 /*NullCheckValue=*/false); 2605 2606 return MakeAddrLValue(Base, E->getType()); 2607 } 2608 case CK_ToUnion: 2609 return EmitAggExprToLValue(E); 2610 case CK_BaseToDerived: { 2611 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 2612 CXXRecordDecl *DerivedClassDecl = 2613 cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2614 2615 LValue LV = EmitLValue(E->getSubExpr()); 2616 2617 // Perform the base-to-derived conversion 2618 llvm::Value *Derived = 2619 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 2620 E->path_begin(), E->path_end(), 2621 /*NullCheckValue=*/false); 2622 2623 return MakeAddrLValue(Derived, E->getType()); 2624 } 2625 case CK_LValueBitCast: { 2626 // This must be a reinterpret_cast (or c-style equivalent). 
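    // Illustrative example (an assumption): for
    //   float f; reinterpret_cast<int&>(f) = 0;
    // the address of 'f' is bitcast to i32* and the store goes through the
    // resulting lvalue of the written-to type.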
2627 const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E); 2628 2629 LValue LV = EmitLValue(E->getSubExpr()); 2630 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2631 ConvertType(CE->getTypeAsWritten())); 2632 return MakeAddrLValue(V, E->getType()); 2633 } 2634 case CK_ObjCObjectLValueCast: { 2635 LValue LV = EmitLValue(E->getSubExpr()); 2636 QualType ToType = getContext().getLValueReferenceType(E->getType()); 2637 llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), 2638 ConvertType(ToType)); 2639 return MakeAddrLValue(V, E->getType()); 2640 } 2641 } 2642 2643 llvm_unreachable("Unhandled lvalue cast kind?"); 2644 } 2645 2646 LValue CodeGenFunction::EmitNullInitializationLValue( 2647 const CXXScalarValueInitExpr *E) { 2648 QualType Ty = E->getType(); 2649 LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty); 2650 EmitNullInitialization(LV.getAddress(), Ty); 2651 return LV; 2652 } 2653 2654 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 2655 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 2656 return getOpaqueLValueMapping(e); 2657 } 2658 2659 LValue CodeGenFunction::EmitMaterializeTemporaryExpr( 2660 const MaterializeTemporaryExpr *E) { 2661 RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0); 2662 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2663 } 2664 2665 RValue CodeGenFunction::EmitRValueForField(LValue LV, 2666 const FieldDecl *FD) { 2667 QualType FT = FD->getType(); 2668 LValue FieldLV = EmitLValueForField(LV, FD); 2669 if (FT->isAnyComplexType()) 2670 return RValue::getComplex( 2671 LoadComplexFromAddr(FieldLV.getAddress(), 2672 FieldLV.isVolatileQualified())); 2673 else if (CodeGenFunction::hasAggregateLLVMType(FT)) 2674 return FieldLV.asAggregateRValue(); 2675 2676 return EmitLoadOfLValue(FieldLV); 2677 } 2678 2679 //===--------------------------------------------------------------------===// 2680 // Expression Emission 2681 //===--------------------------------------------------------------------===// 2682 2683 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 2684 ReturnValueSlot ReturnValue) { 2685 if (CGDebugInfo *DI = getDebugInfo()) 2686 DI->EmitLocation(Builder, E->getLocStart()); 2687 2688 // Builtins never have block type. 
2689 if (E->getCallee()->getType()->isBlockPointerType()) 2690 return EmitBlockCallExpr(E, ReturnValue); 2691 2692 if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) 2693 return EmitCXXMemberCallExpr(CE, ReturnValue); 2694 2695 if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E)) 2696 return EmitCUDAKernelCallExpr(CE, ReturnValue); 2697 2698 const Decl *TargetDecl = E->getCalleeDecl(); 2699 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { 2700 if (unsigned builtinID = FD->getBuiltinID()) 2701 return EmitBuiltinExpr(FD, builtinID, E); 2702 } 2703 2704 if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) 2705 if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) 2706 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); 2707 2708 if (const CXXPseudoDestructorExpr *PseudoDtor 2709 = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { 2710 QualType DestroyedType = PseudoDtor->getDestroyedType(); 2711 if (getLangOpts().ObjCAutoRefCount && 2712 DestroyedType->isObjCLifetimeType() && 2713 (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || 2714 DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { 2715 // Automatic Reference Counting: 2716 // If the pseudo-expression names a retainable object with weak or 2717 // strong lifetime, the object shall be released. 2718 Expr *BaseExpr = PseudoDtor->getBase(); 2719 llvm::Value *BaseValue = NULL; 2720 Qualifiers BaseQuals; 2721 2722 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 2723 if (PseudoDtor->isArrow()) { 2724 BaseValue = EmitScalarExpr(BaseExpr); 2725 const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); 2726 BaseQuals = PTy->getPointeeType().getQualifiers(); 2727 } else { 2728 LValue BaseLV = EmitLValue(BaseExpr); 2729 BaseValue = BaseLV.getAddress(); 2730 QualType BaseTy = BaseExpr->getType(); 2731 BaseQuals = BaseTy.getQualifiers(); 2732 } 2733 2734 switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { 2735 case Qualifiers::OCL_None: 2736 case Qualifiers::OCL_ExplicitNone: 2737 case Qualifiers::OCL_Autoreleasing: 2738 break; 2739 2740 case Qualifiers::OCL_Strong: 2741 EmitARCRelease(Builder.CreateLoad(BaseValue, 2742 PseudoDtor->getDestroyedType().isVolatileQualified()), 2743 /*precise*/ true); 2744 break; 2745 2746 case Qualifiers::OCL_Weak: 2747 EmitARCDestroyWeak(BaseValue); 2748 break; 2749 } 2750 } else { 2751 // C++ [expr.pseudo]p1: 2752 // The result shall only be used as the operand for the function call 2753 // operator (), and the result of such a call has type void. The only 2754 // effect is the evaluation of the postfix-expression before the dot or 2755 // arrow. 2756 EmitScalarExpr(E->getCallee()); 2757 } 2758 2759 return RValue::get(0); 2760 } 2761 2762 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 2763 return EmitCall(E->getCallee()->getType(), Callee, ReturnValue, 2764 E->arg_begin(), E->arg_end(), TargetDecl); 2765 } 2766 2767 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { 2768 // Comma expressions just emit their LHS then their RHS as an l-value. 
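  // For example (illustrative only): in '(f(), x) = 3', 'f()' is emitted for
  // its side effects and discarded, and the assignment target is the lvalue
  // produced for 'x'.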
2769 if (E->getOpcode() == BO_Comma) { 2770 EmitIgnoredExpr(E->getLHS()); 2771 EnsureInsertPoint(); 2772 return EmitLValue(E->getRHS()); 2773 } 2774 2775 if (E->getOpcode() == BO_PtrMemD || 2776 E->getOpcode() == BO_PtrMemI) 2777 return EmitPointerToDataMemberBinaryExpr(E); 2778 2779 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); 2780 2781 // Note that in all of these cases, __block variables need the RHS 2782 // evaluated first just in case the variable gets moved by the RHS. 2783 2784 if (!hasAggregateLLVMType(E->getType())) { 2785 switch (E->getLHS()->getType().getObjCLifetime()) { 2786 case Qualifiers::OCL_Strong: 2787 return EmitARCStoreStrong(E, /*ignored*/ false).first; 2788 2789 case Qualifiers::OCL_Autoreleasing: 2790 return EmitARCStoreAutoreleasing(E).first; 2791 2792 // No reason to do any of these differently. 2793 case Qualifiers::OCL_None: 2794 case Qualifiers::OCL_ExplicitNone: 2795 case Qualifiers::OCL_Weak: 2796 break; 2797 } 2798 2799 RValue RV = EmitAnyExpr(E->getRHS()); 2800 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); 2801 EmitStoreThroughLValue(RV, LV); 2802 return LV; 2803 } 2804 2805 if (E->getType()->isAnyComplexType()) 2806 return EmitComplexAssignmentLValue(E); 2807 2808 return EmitAggExprToLValue(E); 2809 } 2810 2811 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { 2812 RValue RV = EmitCallExpr(E); 2813 2814 if (!RV.isScalar()) 2815 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2816 2817 assert(E->getCallReturnType()->isReferenceType() && 2818 "Can't have a scalar return unless the return type is a " 2819 "reference type!"); 2820 2821 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2822 } 2823 2824 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { 2825 // FIXME: This shouldn't require another copy. 
2826 return EmitAggExprToLValue(E); 2827 } 2828 2829 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { 2830 assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() 2831 && "binding l-value to type which needs a temporary"); 2832 AggValueSlot Slot = CreateAggTemp(E->getType()); 2833 EmitCXXConstructExpr(E, Slot); 2834 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2835 } 2836 2837 LValue 2838 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { 2839 return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); 2840 } 2841 2842 llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { 2843 return CGM.GetAddrOfUuidDescriptor(E); 2844 } 2845 2846 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { 2847 return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType()); 2848 } 2849 2850 LValue 2851 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { 2852 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2853 Slot.setExternallyDestructed(); 2854 EmitAggExpr(E->getSubExpr(), Slot); 2855 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr()); 2856 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2857 } 2858 2859 LValue 2860 CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) { 2861 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); 2862 EmitLambdaExpr(E, Slot); 2863 return MakeAddrLValue(Slot.getAddr(), E->getType()); 2864 } 2865 2866 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { 2867 RValue RV = EmitObjCMessageExpr(E); 2868 2869 if (!RV.isScalar()) 2870 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2871 2872 assert(E->getMethodDecl()->getResultType()->isReferenceType() && 2873 "Can't have a scalar return unless the return type is a " 2874 "reference type!"); 2875 2876 return MakeAddrLValue(RV.getScalarVal(), E->getType()); 2877 } 2878 2879 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 2880 llvm::Value *V = 2881 CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true); 2882 return MakeAddrLValue(V, E->getType()); 2883 } 2884 2885 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 2886 const ObjCIvarDecl *Ivar) { 2887 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 2888 } 2889 2890 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 2891 llvm::Value *BaseValue, 2892 const ObjCIvarDecl *Ivar, 2893 unsigned CVRQualifiers) { 2894 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 2895 Ivar, CVRQualifiers); 2896 } 2897 2898 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 2899 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 2900 llvm::Value *BaseValue = 0; 2901 const Expr *BaseExpr = E->getBase(); 2902 Qualifiers BaseQuals; 2903 QualType ObjectTy; 2904 if (E->isArrow()) { 2905 BaseValue = EmitScalarExpr(BaseExpr); 2906 ObjectTy = BaseExpr->getType()->getPointeeType(); 2907 BaseQuals = ObjectTy.getQualifiers(); 2908 } else { 2909 LValue BaseLV = EmitLValue(BaseExpr); 2910 // FIXME: this isn't right for bitfields. 
2911 BaseValue = BaseLV.getAddress(); 2912 ObjectTy = BaseExpr->getType(); 2913 BaseQuals = ObjectTy.getQualifiers(); 2914 } 2915 2916 LValue LV = 2917 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 2918 BaseQuals.getCVRQualifiers()); 2919 setObjCGCLValueClass(getContext(), E, LV); 2920 return LV; 2921 } 2922 2923 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 2924 // Can only get l-value for message expression returning aggregate type 2925 RValue RV = EmitAnyExprToTemp(E); 2926 return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); 2927 } 2928 2929 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 2930 ReturnValueSlot ReturnValue, 2931 CallExpr::const_arg_iterator ArgBeg, 2932 CallExpr::const_arg_iterator ArgEnd, 2933 const Decl *TargetDecl) { 2934 // Get the actual function type. The callee type will always be a pointer to 2935 // function type or a block pointer type. 2936 assert(CalleeType->isFunctionPointerType() && 2937 "Call must have function pointer type!"); 2938 2939 CalleeType = getContext().getCanonicalType(CalleeType); 2940 2941 const FunctionType *FnType 2942 = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType()); 2943 2944 CallArgList Args; 2945 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd); 2946 2947 const CGFunctionInfo &FnInfo = 2948 CGM.getTypes().arrangeFreeFunctionCall(Args, FnType); 2949 2950 // C99 6.5.2.2p6: 2951 // If the expression that denotes the called function has a type 2952 // that does not include a prototype, [the default argument 2953 // promotions are performed]. If the number of arguments does not 2954 // equal the number of parameters, the behavior is undefined. If 2955 // the function is defined with a type that includes a prototype, 2956 // and either the prototype ends with an ellipsis (, ...) or the 2957 // types of the arguments after promotion are not compatible with 2958 // the types of the parameters, the behavior is undefined. If the 2959 // function is defined with a type that does not include a 2960 // prototype, and the types of the arguments after promotion are 2961 // not compatible with those of the parameters after promotion, 2962 // the behavior is undefined [except in some trivial cases]. 2963 // That is, in the general case, we should assume that a call 2964 // through an unprototyped function type works like a *non-variadic* 2965 // call. The way we make this work is to cast to the exact type 2966 // of the promoted arguments. 
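  // Illustrative sketch (an assumption): given
  //   void f();        /* no prototype */
  //   f(1, 2.0);
  // the promoted arguments are (int, double), so the callee is bitcast to
  // 'void (i32, double)*' ("callee.knr.cast") and called non-variadically
  // with exactly those argument types.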
2967 if (isa<FunctionNoProtoType>(FnType)) { 2968 llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo); 2969 CalleeTy = CalleeTy->getPointerTo(); 2970 Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast"); 2971 } 2972 2973 return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl); 2974 } 2975 2976 LValue CodeGenFunction:: 2977 EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) { 2978 llvm::Value *BaseV; 2979 if (E->getOpcode() == BO_PtrMemI) 2980 BaseV = EmitScalarExpr(E->getLHS()); 2981 else 2982 BaseV = EmitLValue(E->getLHS()).getAddress(); 2983 2984 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS()); 2985 2986 const MemberPointerType *MPT 2987 = E->getRHS()->getType()->getAs<MemberPointerType>(); 2988 2989 llvm::Value *AddV = 2990 CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT); 2991 2992 return MakeAddrLValue(AddV, MPT->getPointeeType()); 2993 } 2994 2995 static void 2996 EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, 2997 llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, 2998 uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { 2999 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; 3000 llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0; 3001 3002 switch (E->getOp()) { 3003 case AtomicExpr::AO__c11_atomic_init: 3004 llvm_unreachable("Already handled!"); 3005 3006 case AtomicExpr::AO__c11_atomic_compare_exchange_strong: 3007 case AtomicExpr::AO__c11_atomic_compare_exchange_weak: 3008 case AtomicExpr::AO__atomic_compare_exchange: 3009 case AtomicExpr::AO__atomic_compare_exchange_n: { 3010 // Note that cmpxchg only supports specifying one ordering and 3011 // doesn't support weak cmpxchg, at least at the moment. 3012 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 3013 LoadVal1->setAlignment(Align); 3014 llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2); 3015 LoadVal2->setAlignment(Align); 3016 llvm::AtomicCmpXchgInst *CXI = 3017 CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order); 3018 CXI->setVolatile(E->isVolatile()); 3019 llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1); 3020 StoreVal1->setAlignment(Align); 3021 llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1); 3022 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); 3023 return; 3024 } 3025 3026 case AtomicExpr::AO__c11_atomic_load: 3027 case AtomicExpr::AO__atomic_load_n: 3028 case AtomicExpr::AO__atomic_load: { 3029 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); 3030 Load->setAtomic(Order); 3031 Load->setAlignment(Size); 3032 Load->setVolatile(E->isVolatile()); 3033 llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); 3034 StoreDest->setAlignment(Align); 3035 return; 3036 } 3037 3038 case AtomicExpr::AO__c11_atomic_store: 3039 case AtomicExpr::AO__atomic_store: 3040 case AtomicExpr::AO__atomic_store_n: { 3041 assert(!Dest && "Store does not return a value"); 3042 llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); 3043 LoadVal1->setAlignment(Align); 3044 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); 3045 Store->setAtomic(Order); 3046 Store->setAlignment(Size); 3047 Store->setVolatile(E->isVolatile()); 3048 return; 3049 } 3050 3051 case AtomicExpr::AO__c11_atomic_exchange: 3052 case AtomicExpr::AO__atomic_exchange_n: 3053 case AtomicExpr::AO__atomic_exchange: 3054 Op = llvm::AtomicRMWInst::Xchg; 3055 break; 3056 3057 case AtomicExpr::AO__atomic_add_fetch: 3058 PostOp = llvm::Instruction::Add; 3059 // Fall 
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}
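
// Illustrative note on the PostOp recomputation above: 'atomicrmw' yields the
// value that was previously in memory, so for the value-returning GNU builtins
// the operation is re-applied to recover the value that was stored.  Roughly,
// for a hypothetical int object 'x',
//
//   __atomic_add_fetch(&x, 1, __ATOMIC_SEQ_CST)
//
// is emitted as something like
//
//   %old = atomicrmw add i32* %x, i32 1 seq_cst
//   %new = add i32 %old, 1
//
// __atomic_nand_fetch additionally negates the recomputed value, since LLVM's
// 'atomicrmw nand' stores ~(old & operand).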
// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
                                  llvm::Value *Dest) {
  if (Ty->isAnyComplexType())
    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
  if (CGF.hasAggregateLLVMType(Ty))
    return RValue::getAggregate(Dest);
  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
}

RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getContext().getTargetInfo().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    if (!hasAggregateLLVMType(E->getVal1()->getType())) {
      QualType PointeeType
        = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
      EmitScalarInit(EmitScalarExpr(E->getVal1()),
                     LValue::MakeAddr(Ptr, PointeeType, alignChars,
                                      getContext()));
    } else if (E->getType()->isAnyComplexType()) {
      EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
    } else {
      AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
                                          AtomicTy.getQualifiers(),
                                          AggValueSlot::IsNotDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased);
      EmitAggExpr(E->getVal1(), Slot);
    }
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());
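
  // The switch below only evaluates the remaining operands into Val1/Val2/Dest
  // (spilling by-value arguments to temporaries where the lowering wants a
  // pointer); the atomic operation itself is emitted further down.  As a rough
  // illustration, __atomic_compare_exchange_n passes its 'desired' value
  // directly, so it is written to a ".atomictmp" alloca via EmitValToTemp,
  // whereas __atomic_compare_exchange already supplies a pointer to it.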
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {

    SmallVector<QualType, 5> Params;
    CallArgList Args;
    // Size is always the first parameter
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    // Atomic address is always the second parameter
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    const char* LibCallName;
    QualType RetTy = getContext().VoidTy;
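
    // Rough illustration of the generic library interface built below: for an
    // atomic object that is too large (or insufficiently aligned) for inline
    // lowering, a load such as __c11_atomic_load(&obj, order) ends up as a
    // call along the lines of
    //
    //   __atomic_load(sizeof(obj), &obj, &dest_temp, order);
    //
    // and the result is then reloaded from the ".atomicdst" temporary.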
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
               getContext().VoidPtrTy);
      break;
#if 0
    // These are only defined for 1-16 byte integers.  It is not clear what
    // their semantics would be on anything else...
    case AtomicExpr::Add:  LibCallName = "__atomic_fetch_add_generic"; break;
    case AtomicExpr::Sub:  LibCallName = "__atomic_fetch_sub_generic"; break;
    case AtomicExpr::And:  LibCallName = "__atomic_fetch_and_generic"; break;
    case AtomicExpr::Or:   LibCallName = "__atomic_fetch_or_generic"; break;
    case AtomicExpr::Xor:  LibCallName = "__atomic_fetch_xor_generic"; break;
#endif
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }
    // order is always the last parameter
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (E->isCmpXChg())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), Dest);
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case 0:  // memory_order_relaxed
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case 1:  // memory_order_consume
    case 2:  // memory_order_acquire
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case 3:  // memory_order_release
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case 4:  // memory_order_acq_rel
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case 5:  // memory_order_seq_cst
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return ConvertTempToRValue(*this, E->getType(), OrigDest);
  }

  // Long case, when Order isn't obviously constant.
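  // For illustration, a call whose ordering is only known at run time, e.g.
  //
  //   void f(_Atomic(int) *p, int mo) { (void)__c11_atomic_load(p, mo); }
  //
  // cannot pick a single LLVM ordering here, so the code below switches on the
  // ordering value and emits one copy of the operation per legal ordering.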

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return ConvertTempToRValue(*this, E->getType(), OrigDest);
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
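    // (For illustration: in the semantic form of something like an Objective-C
    // property access 'obj.prop += 1', the receiver is captured as an
    // OpaqueValueExpr so the getter and setter message sends can share it
    // without re-evaluating 'obj'.)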
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
          !ov->getType()->isAnyComplexType()) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}