//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  if (!Builder.isNamePreserving())
    return new llvm::AllocaInst(Ty, nullptr, "", AllocaInsertPt);
  return new llvm::AllocaInst(Ty, nullptr, Name, AllocaInsertPt);
}

void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
                                     llvm::Value *Init) {
  auto *Store = new llvm::StoreInst(Init, Var);
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
}

llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
                                                const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
                                                 const Twine &Name) {
  llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
  // FIXME: Should we prefer the preferred type alignment here?
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  Alloc->setAlignment(Align.getQuantity());
  return Alloc;
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
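/// Member pointers are handled specially through the C++ ABI, since a null
/// member pointer is not necessarily an all-zero bit pattern.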
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       llvm::Value *Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E,
                              MakeNaturalAlignAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, llvm::Value *ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (CGF.getLangOpts().ObjCAutoRefCount &&
      M->getType()->isObjCLifetimeType()) {
    QualType ObjCARCReferenceLifetimeType = M->getType();
    switch (Qualifiers::ObjCLifetime Lifetime =
                ObjCARCReferenceLifetimeType.getObjCLifetime()) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          ObjCARCReferenceLifetimeType, *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          ObjCARCReferenceLifetimeType,
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
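  // How the destructor runs depends on the temporary's storage duration:
  // static and thread-local temporaries are registered with the ABI's global
  // destructor mechanism, while full-expression and automatic temporaries go
  // through the normal cleanup stack.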
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::Constant *CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          cast<llvm::Constant>(ReferenceTemporary), E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
                                               StructorType::Complete);
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static llvm::Value *
createReferenceTemporary(CodeGenFunction &CGF,
                         const MaterializeTemporaryExpr *M, const Expr *Inner) {
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic:
    return CGF.CreateMemTemp(Inner->getType(), "ref.tmp");

  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->GetTemporaryExpr();

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC.
  if (getLangOpts().ObjCAutoRefCount &&
      M->getType()->isObjCLifetimeType() &&
      M->getType().getObjCLifetime() != Qualifiers::OCL_None &&
      M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
    llvm::Value *Object = createReferenceTemporary(*this, M, E);
    LValue RefTempDst = MakeAddrLValue(Object, M->getType());

    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
      // We should not have emitted the initializer for this temporary as a
      // constant.
      assert(!Var->hasInitializer());
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
      EmitAggExpr(E, AggValueSlot::forAddr(Object, Alignment,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  llvm::Value *Object = createReferenceTemporary(*this, M, E);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
    // If the temporary is a global and has a constant initializer, we may
    // have already initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (unsigned I = Adjustments.size(); I != 0; --I) {
    SubobjectAdjustment &Adjustment = Adjustments[I-1];
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType());
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = CGM.getCXXABI().EmitMemberDataPointerAddress(
          *this, E, Object, Ptr, Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType());
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getAddress();

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Address, QualType Ty,
                                    CharUnits Alignment, bool SkipNullCheck) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Address->getType()->getPointerAddressSpace())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerKind>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
                           TCK == TCK_UpcastToVirtualBase;
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !SkipNullCheck) {
    // The glvalue must not be an empty glvalue.
    llvm::Value *IsNonNull = Builder.CreateICmpNE(
        Address, llvm::Constant::getNullValue(Address->getType()));

    if (AllowNullPointers) {
      // When performing pointer casts, it's OK if the value is null.
      // Skip the remaining checks in that case.
      Done = createBasicBlock("null");
      llvm::BasicBlock *Rest = createBasicBlock("not.null");
      Builder.CreateCondBr(IsNonNull, Rest, Done);
      EmitBlock(Rest);
    } else {
      Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();

    // The glvalue must refer to a large enough storage region.
    // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    // FIXME: Get object address space
    llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
    llvm::Value *LargeEnough =
        Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
                              llvm::ConstantInt::get(IntPtrTy, Size));
    Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
  }

  uint64_t AlignVal = 0;

  if (SanOpts.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal) {
      llvm::Value *Align =
          Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy),
                            llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      llvm::ConstantInt::get(SizeTy, AlignVal),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    EmitCheck(Checks, "type_mismatch", StaticData, Address);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (SanOpts.has(SanitizerKind::Vptr) &&
      (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
       TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
       TCK == TCK_UpcastToVirtualBase) &&
      RD && RD->hasDefinition() && RD->isDynamicClass()) {
    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Blacklist based on the mangled type.
    if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
            Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy);
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
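      // The cache is a fixed-size, direct-mapped table of previously
      // validated hashes; the slot is selected by the low bits of the hash,
      // and a matching entry means this check already succeeded once.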
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal =
          Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices));

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Address, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                "dynamic_type_cache_miss", StaticData, DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
static bool isFlexibleArrayMemberExpr(const Expr *E) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  }

  return false;
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return nullptr if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).first;
    }
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  QualType IndexedType;
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds",
            StaticData, Index);
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}
//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address. Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(),
                  E->getType(), LV.getAlignment());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXDefaultInitExprClass: {
    CXXDefaultInitExprScope Scope(*this);
    return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
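  // (ConstantEmission::forReference hands back the referent's address; the
  // caller is then responsible for forming an l-value from it and loading
  // when the value is actually used.)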
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getAlignment().getQuantity(),
                          lvalue.getType(), Loc, lvalue.getTBAAInfo(),
                          lvalue.getTBAABaseType(), lvalue.getTBAAOffset());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  bool IsBool = hasBooleanRepresentation(Ty);
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End,
                       CGM.getCodeGenOpts().StrictEnums))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
                                               unsigned Alignment, QualType Ty,
                                               SourceLocation Loc,
                                               llvm::MDNode *TBAAInfo,
                                               QualType TBAABaseType,
                                               uint64_t TBAAOffset) {
  // For better performance, handle vector loads differently.
  if (Ty->isVectorType()) {
    llvm::Value *V;
    const llvm::Type *EltTy =
        cast<llvm::PointerType>(Addr->getType())->getElementType();

    const auto *VTy = cast<llvm::VectorType>(EltTy);

    // Handle vectors of size 3 like size 4 for better performance.
    if (VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
                                                       4);
      llvm::PointerType *ptVec4Ty =
          llvm::PointerType::get(vec4Ty,
                                 (cast<llvm::PointerType>(
                                     Addr->getType()))->getAddressSpace());
      llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty,
                                                "castToVec4");
      // Now load value.
      llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
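      // The <0, 1, 2> mask keeps the three payload lanes of the vec4 load
      // and drops the fourth (padding) lane.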
      llvm::Constant *Mask[] = {
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 1),
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 2)
      };

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      V = Builder.CreateShuffleVector(LoadVal,
                                      llvm::UndefValue::get(vec4Ty),
                                      MaskV, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) {
    LValue lvalue = LValue::MakeAddr(Addr, Ty,
                                     CharUnits::fromQuantity(Alignment),
                                     getContext(), TBAAInfo);
    return EmitAtomicLoad(lvalue, Loc).getScalarVal();
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr);
  if (Volatile)
    Load->setVolatile(true);
  if (Alignment)
    Load->setAlignment(Alignment);
  if (TBAAInfo) {
    llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
                                                      TBAAOffset);
    if (TBAAPath)
      CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
  }

  bool NeedsBoolCheck =
      SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty);
  bool NeedsEnumCheck =
      SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
  if (NeedsBoolCheck || NeedsEnumCheck) {
    SanitizerScope SanScope(this);
    llvm::APInt Min, End;
    if (getRangeForType(*this, Ty, Min, End, true)) {
      --End;
      llvm::Value *Check;
      if (!Min)
        Check = Builder.CreateICmpULE(
            Load, llvm::ConstantInt::get(getLLVMContext(), End));
      else {
        llvm::Value *Upper = Builder.CreateICmpSLE(
            Load, llvm::ConstantInt::get(getLLVMContext(), End));
        llvm::Value *Lower = Builder.CreateICmpSGE(
            Load, llvm::ConstantInt::get(getLLVMContext(), Min));
        Check = Builder.CreateAnd(Upper, Lower);
      }
      llvm::Constant *StaticArgs[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty)
      };
      SanitizerKind Kind =
          NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
      EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs,
                EmitCheckValue(Load));
    }
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
                                        bool Volatile, unsigned Alignment,
                                        QualType Ty, llvm::MDNode *TBAAInfo,
                                        bool isInit, QualType TBAABaseType,
                                        uint64_t TBAAOffset) {

  // Handle vectors differently to get better performance.
  if (Ty->isVectorType()) {
    llvm::Type *SrcTy = Value->getType();
    auto *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 specially.
    if (VecTy->getNumElements() == 3) {
      llvm::LLVMContext &VMContext = getLLVMContext();

      // Our source is a vec3, do a shuffle vector to make it a vec4.
      SmallVector<llvm::Constant*, 4> Mask;
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            0));
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            1));
      Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                            2));
      Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Value = Builder.CreateShuffleVector(Value,
                                          llvm::UndefValue::get(VecTy),
                                          MaskV, "extractVec");
      SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
    }
    auto *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      llvm::Type *MemTy =
          llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }

  Value = EmitToMemory(Value, Ty);

  if (Ty->isAtomicType() ||
      (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
    EmitAtomicStore(RValue::get(Value),
                    LValue::MakeAddr(Addr, Ty,
                                     CharUnits::fromQuantity(Alignment),
                                     getContext(), TBAAInfo),
                    isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo) {
    llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
                                                      TBAAOffset);
    if (TBAAPath)
      CGM.DecorateInstruction(Store, TBAAPath, false/*ConvertTypeToTag*/);
  }
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
                    lvalue.getTBAAOffset());
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  llvm::Value *Ptr = LV.getBitFieldAddr();
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(),
                                        "bf.load");
  cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);

  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Info.Offset + HighBits)
      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
  } else {
    if (Info.Offset)
      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
    if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                              Info.Size),
                              "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");

  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be extracting
  // a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// @brief Generates lvalue for partial ext_vector access.
llvm::Value *CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  llvm::Value *VectorAddress = LV.getExtVectorAddr();
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  QualType EQT = ExprVT->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
  llvm::Type *VectorElementPtrToTy = VectorElementTy->getPointerTo();

  llvm::Value *CastToPointerElement =
      Builder.CreateBitCast(VectorAddress,
                            VectorElementPtrToTy, "conv.ptr.element");

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  llvm::Value *VectorBasePtrPlusIx =
      Builder.CreateInBoundsGEP(CastToPointerElement,
                                llvm::ConstantInt::get(SizeTy, ix), "add.ptr");

  return VectorBasePtrPlusIx;
}

/// @brief Loads of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}


/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // store into a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // store into a __strong object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    if (Dst.isObjCIvar()) {
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
      llvm::Type *ResultType = ConvertType(getContext().LongTy);
      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
      llvm::Value *dst = RHS;
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
      llvm::Value *LHS =
          Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
                                              BytesBetween);
    } else if (Dst.isGlobalObjCRef()) {
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
                                                Dst.isThreadLocalRef());
    }
    else
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
    return;
  }

  assert(Src.isScalar() && "Can't emit an agg store with this method");
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}

void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                                     llvm::Value **Result) {
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
  llvm::Value *Ptr = Dst.getBitFieldAddr();

  // Get the source value, truncated to the width of the bit-field.
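  // The rest of the storage unit may hold neighboring bit-fields, so below we
  // load the unit, mask out this field's bits, and merge in the new value
  // before storing back.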
1544   llvm::Value *SrcVal = Src.getScalarVal();
1545 
1546   // Cast the source to the storage type and shift it into place.
1547   SrcVal = Builder.CreateIntCast(SrcVal,
1548                                  Ptr->getType()->getPointerElementType(),
1549                                  /*IsSigned=*/false);
1550   llvm::Value *MaskedVal = SrcVal;
1551 
1552   // See if there are other bits in the bitfield's storage we'll need to load
1553   // and mask together with source before storing.
1554   if (Info.StorageSize != Info.Size) {
1555     assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
1556     llvm::Value *Val = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
1557                                           "bf.load");
1558     cast<llvm::LoadInst>(Val)->setAlignment(Info.StorageAlignment);
1559 
1560     // Mask the source value as needed.
1561     if (!hasBooleanRepresentation(Dst.getType()))
1562       SrcVal = Builder.CreateAnd(SrcVal,
1563                                  llvm::APInt::getLowBitsSet(Info.StorageSize,
1564                                                             Info.Size),
1565                                  "bf.value");
1566     MaskedVal = SrcVal;
1567     if (Info.Offset)
1568       SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
1569 
1570     // Mask out the original value.
1571     Val = Builder.CreateAnd(Val,
1572                             ~llvm::APInt::getBitsSet(Info.StorageSize,
1573                                                      Info.Offset,
1574                                                      Info.Offset + Info.Size),
1575                             "bf.clear");
1576 
1577     // Or together the unchanged values and the source value.
1578     SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
1579   } else {
1580     assert(Info.Offset == 0);
1581   }
1582 
1583   // Write the new value back out.
1584   llvm::StoreInst *Store = Builder.CreateStore(SrcVal, Ptr,
1585                                                Dst.isVolatileQualified());
1586   Store->setAlignment(Info.StorageAlignment);
1587 
1588   // Return the new value of the bit-field, if requested.
1589   if (Result) {
1590     llvm::Value *ResultVal = MaskedVal;
1591 
1592     // Sign extend the value if needed.
1593     if (Info.IsSigned) {
1594       assert(Info.Size <= Info.StorageSize);
1595       unsigned HighBits = Info.StorageSize - Info.Size;
1596       if (HighBits) {
1597         ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
1598         ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
1599       }
1600     }
1601 
1602     ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
1603                                       "bf.result.cast");
1604     *Result = EmitFromMemory(ResultVal, Dst.getType());
1605   }
1606 }
1607 
1608 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1609                                                                LValue Dst) {
1610   // This access turns into a read/modify/write of the vector. Load the input
1611   // value now.
1612   llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
1613                                             Dst.isVolatileQualified());
1614   Load->setAlignment(Dst.getAlignment().getQuantity());
1615   llvm::Value *Vec = Load;
1616   const llvm::Constant *Elts = Dst.getExtVectorElts();
1617 
1618   llvm::Value *SrcVal = Src.getScalarVal();
1619 
1620   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1621     unsigned NumSrcElts = VTy->getNumElements();
1622     unsigned NumDstElts =
1623         cast<llvm::VectorType>(Vec->getType())->getNumElements();
1624     if (NumDstElts == NumSrcElts) {
1625       // Use a shuffle vector when the source and destination have the same
1626       // number of elements, inverting the access mask since it is given on
1627       // the source side but the shuffle is applied on the store side.
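      // E.g. (a sketch): for a 4-element ext_vector 'v', the assignment
      //   v.yzwx = w;
      // has access list {1,2,3,0}, which inverts to the store-side mask
      // <3,0,1,2>, so the emitted IR is roughly
      //   %res = shufflevector <4 x float> %w, <4 x float> undef,
      //                        <4 x i32> <i32 3, i32 0, i32 1, i32 2>
      //   store <4 x float> %res, <4 x float>* %v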
1628       SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1629       for (unsigned i = 0; i != NumSrcElts; ++i)
1630         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1631 
1632       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1633       Vec = Builder.CreateShuffleVector(SrcVal,
1634                                         llvm::UndefValue::get(Vec->getType()),
1635                                         MaskV);
1636     } else if (NumDstElts > NumSrcElts) {
1637       // Extend the source vector to the same length, then shuffle it
1638       // into the destination.
1639       // FIXME: since we're shuffling with undef, can we just use the indices
1640       // into that? This could be simpler.
1641       SmallVector<llvm::Constant*, 4> ExtMask;
1642       for (unsigned i = 0; i != NumSrcElts; ++i)
1643         ExtMask.push_back(Builder.getInt32(i));
1644       ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1645       llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1646       llvm::Value *ExtSrcVal =
1647           Builder.CreateShuffleVector(SrcVal,
1648                                       llvm::UndefValue::get(SrcVal->getType()),
1649                                       ExtMaskV);
1650       // Build an identity mask.
1651       SmallVector<llvm::Constant*, 4> Mask;
1652       for (unsigned i = 0; i != NumDstElts; ++i)
1653         Mask.push_back(Builder.getInt32(i));
1654 
1655       // When the vector size is odd and .odd or .hi is used, the last element
1656       // of the Elts constant array will be one past the size of the vector.
1657       // Ignore the last element here, if it is greater than the mask size.
1658       if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
1659         NumSrcElts--;
1660 
1661       // Modify the mask entries for the elements that get shuffled in.
1662       for (unsigned i = 0; i != NumSrcElts; ++i)
1663         Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1664       llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1665       Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1666     } else {
1667       // We should never shorten the vector.
1668       llvm_unreachable("unexpected shorten vector length");
1669     }
1670   } else {
1671     // If the Src is a scalar (not a vector) it must be updating one element.
1672     unsigned InIdx = getAccessedFieldNo(0, Elts);
1673     llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
1674     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1675   }
1676 
1677   llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
1678                                                Dst.isVolatileQualified());
1679   Store->setAlignment(Dst.getAlignment().getQuantity());
1680 }
1681 
1682 /// @brief Stores of global named registers are always calls to intrinsics.
1683 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
1684   assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
1685          "Bad type for register variable");
1686   llvm::MDNode *RegName = cast<llvm::MDNode>(
1687       cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
1688   assert(RegName && "Register LValue is not metadata");
1689 
1690   // We accept integer and pointer types only.
1691   llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
1692   llvm::Type *Ty = OrigTy;
1693   if (OrigTy->isPointerTy())
1694     Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
1695   llvm::Type *Types[] = { Ty };
1696 
1697   llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
1698   llvm::Value *Value = Src.getScalarVal();
1699   if (OrigTy->isPointerTy())
1700     Value = Builder.CreatePtrToInt(Value, Ty);
1701   Builder.CreateCall2(F, llvm::MetadataAsValue::get(Ty->getContext(), RegName),
1702                       Value);
1703 }
1704 
1705 // setObjCGCLValueClass - sets class of the lvalue for the purpose of
1706 // generating the write-barrier API.
// It is currently a global, ivar,
1707 // or neither.
1708 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1709                                  LValue &LV,
1710                                  bool IsMemberAccess=false) {
1711   if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1712     return;
1713 
1714   if (isa<ObjCIvarRefExpr>(E)) {
1715     QualType ExpTy = E->getType();
1716     if (IsMemberAccess && ExpTy->isPointerType()) {
1717       // If the ivar is a structure pointer, assigning to a field of this
1718       // struct follows gcc's behavior and conservatively makes it a non-ivar
1719       // write-barrier.
1720       ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1721       if (ExpTy->isRecordType()) {
1722         LV.setObjCIvar(false);
1723         return;
1724       }
1725     }
1726     LV.setObjCIvar(true);
1727     auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
1728     LV.setBaseIvarExp(Exp->getBase());
1729     LV.setObjCArray(E->getType()->isArrayType());
1730     return;
1731   }
1732 
1733   if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
1734     if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1735       if (VD->hasGlobalStorage()) {
1736         LV.setGlobalObjCRef(true);
1737         LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
1738       }
1739     }
1740     LV.setObjCArray(E->getType()->isArrayType());
1741     return;
1742   }
1743 
1744   if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
1745     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1746     return;
1747   }
1748 
1749   if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
1750     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1751     if (LV.isObjCIvar()) {
1752       // If cast is to a structure pointer, follow gcc's behavior and make it
1753       // a non-ivar write-barrier.
1754       QualType ExpTy = E->getType();
1755       if (ExpTy->isPointerType())
1756         ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1757       if (ExpTy->isRecordType())
1758         LV.setObjCIvar(false);
1759     }
1760     return;
1761   }
1762 
1763   if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1764     setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1765     return;
1766   }
1767 
1768   if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1769     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1770     return;
1771   }
1772 
1773   if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
1774     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1775     return;
1776   }
1777 
1778   if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1779     setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1780     return;
1781   }
1782 
1783   if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1784     setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1785     if (LV.isObjCIvar() && !LV.isObjCArray())
1786       // Using array syntax to assign to what an ivar points to is not the
1787       // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1788       LV.setObjCIvar(false);
1789     else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
1790       // Using array syntax to assign to what a global points to is not the
1791       // same as assigning to the global itself. {id *G;} G[i] = 0;
1792       LV.setGlobalObjCRef(false);
1793     return;
1794   }
1795 
1796   if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
1797     setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
1798     // We don't know if member is an 'ivar', but this flag is looked at
1799     // only in the context of LV.isObjCIvar().
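    // For illustration (a hedged sketch): under '-fobjc-gc', an assignment
    // through 'obj->ivar' is classified as an ivar store and lowered via
    // objc_assign_ivar, while a store to a global 'id' goes through
    // objc_assign_global; the recursion above strips parens and casts to
    // find the underlying declaration before choosing the barrier.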
1800 LV.setObjCArray(E->getType()->isArrayType()); 1801 return; 1802 } 1803 } 1804 1805 static llvm::Value * 1806 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, 1807 llvm::Value *V, llvm::Type *IRType, 1808 StringRef Name = StringRef()) { 1809 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1810 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name); 1811 } 1812 1813 static LValue EmitThreadPrivateVarDeclLValue( 1814 CodeGenFunction &CGF, const VarDecl *VD, QualType T, llvm::Value *V, 1815 llvm::Type *RealVarTy, CharUnits Alignment, SourceLocation Loc) { 1816 V = CGF.CGM.getOpenMPRuntime().getOMPAddrOfThreadPrivate(CGF, VD, V, Loc); 1817 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); 1818 return CGF.MakeAddrLValue(V, T, Alignment); 1819 } 1820 1821 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, 1822 const Expr *E, const VarDecl *VD) { 1823 QualType T = E->getType(); 1824 1825 // If it's thread_local, emit a call to its wrapper function instead. 1826 if (VD->getTLSKind() == VarDecl::TLS_Dynamic && 1827 CGF.CGM.getCXXABI().usesThreadWrapperFunction()) 1828 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T); 1829 1830 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); 1831 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); 1832 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); 1833 CharUnits Alignment = CGF.getContext().getDeclAlign(VD); 1834 LValue LV; 1835 // Emit reference to the private copy of the variable if it is an OpenMP 1836 // threadprivate variable. 1837 if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) 1838 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, V, RealVarTy, Alignment, 1839 E->getExprLoc()); 1840 if (VD->getType()->isReferenceType()) { 1841 llvm::LoadInst *LI = CGF.Builder.CreateLoad(V); 1842 LI->setAlignment(Alignment.getQuantity()); 1843 V = LI; 1844 LV = CGF.MakeNaturalAlignAddrLValue(V, T); 1845 } else { 1846 LV = CGF.MakeAddrLValue(V, T, Alignment); 1847 } 1848 setObjCGCLValueClass(CGF.getContext(), E, LV); 1849 return LV; 1850 } 1851 1852 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, 1853 const Expr *E, const FunctionDecl *FD) { 1854 llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD); 1855 if (!FD->hasPrototype()) { 1856 if (const FunctionProtoType *Proto = 1857 FD->getType()->getAs<FunctionProtoType>()) { 1858 // Ugly case: for a K&R-style definition, the type of the definition 1859 // isn't the same as the type of a use. Correct for this with a 1860 // bitcast. 1861 QualType NoProtoType = 1862 CGF.getContext().getFunctionNoProtoType(Proto->getReturnType()); 1863 NoProtoType = CGF.getContext().getPointerType(NoProtoType); 1864 V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType)); 1865 } 1866 } 1867 CharUnits Alignment = CGF.getContext().getDeclAlign(FD); 1868 return CGF.MakeAddrLValue(V, E->getType(), Alignment); 1869 } 1870 1871 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, 1872 llvm::Value *ThisValue) { 1873 QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); 1874 LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); 1875 return CGF.EmitLValueForField(LV, FD); 1876 } 1877 1878 /// Named Registers are named metadata pointing to the register name 1879 /// which will be read from/written to as an argument to the intrinsic 1880 /// @llvm.read/write_register. 
1881 /// So far, only the name is being passed down, but other options such as 1882 /// register type, allocation type or even optimization options could be 1883 /// passed down via the metadata node. 1884 static LValue EmitGlobalNamedRegister(const VarDecl *VD, 1885 CodeGenModule &CGM, 1886 CharUnits Alignment) { 1887 SmallString<64> Name("llvm.named.register."); 1888 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); 1889 assert(Asm->getLabel().size() < 64-Name.size() && 1890 "Register name too big"); 1891 Name.append(Asm->getLabel()); 1892 llvm::NamedMDNode *M = 1893 CGM.getModule().getOrInsertNamedMetadata(Name); 1894 if (M->getNumOperands() == 0) { 1895 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), 1896 Asm->getLabel()); 1897 llvm::Metadata *Ops[] = {Str}; 1898 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); 1899 } 1900 return LValue::MakeGlobalReg( 1901 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)), 1902 VD->getType(), Alignment); 1903 } 1904 1905 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 1906 const NamedDecl *ND = E->getDecl(); 1907 CharUnits Alignment = getContext().getDeclAlign(ND); 1908 QualType T = E->getType(); 1909 1910 if (const auto *VD = dyn_cast<VarDecl>(ND)) { 1911 // Global Named registers access via intrinsics only 1912 if (VD->getStorageClass() == SC_Register && 1913 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) 1914 return EmitGlobalNamedRegister(VD, CGM, Alignment); 1915 1916 // A DeclRefExpr for a reference initialized by a constant expression can 1917 // appear without being odr-used. Directly emit the constant initializer. 1918 const Expr *Init = VD->getAnyInitializer(VD); 1919 if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() && 1920 VD->isUsableInConstantExpressions(getContext()) && 1921 VD->checkInitIsICE()) { 1922 llvm::Constant *Val = 1923 CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this); 1924 assert(Val && "failed to emit reference constant expression"); 1925 // FIXME: Eventually we will want to emit vector element references. 1926 return MakeAddrLValue(Val, T, Alignment); 1927 } 1928 1929 // Check for captured variables. 1930 if (E->refersToEnclosingVariableOrCapture()) { 1931 if (auto *FD = LambdaCaptureFields.lookup(VD)) 1932 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); 1933 else if (CapturedStmtInfo) { 1934 if (auto *V = LocalDeclMap.lookup(VD)) 1935 return MakeAddrLValue(V, T, Alignment); 1936 else 1937 return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), 1938 CapturedStmtInfo->getContextValue()); 1939 } 1940 assert(isa<BlockDecl>(CurCodeDecl)); 1941 return MakeAddrLValue(GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>()), 1942 T, Alignment); 1943 } 1944 } 1945 1946 // FIXME: We should be able to assert this for FunctionDecls as well! 1947 // FIXME: We should be able to assert this for all DeclRefExprs, not just 1948 // those with a valid source location. 1949 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || 1950 !E->getLocation().isValid()) && 1951 "Should not use decl without marking it used!"); 1952 1953 if (ND->hasAttr<WeakRefAttr>()) { 1954 const auto *VD = cast<ValueDecl>(ND); 1955 llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD); 1956 return MakeAddrLValue(Aliasee, T, Alignment); 1957 } 1958 1959 if (const auto *VD = dyn_cast<VarDecl>(ND)) { 1960 // Check if this is a global variable. 
1961 if (VD->hasLinkage() || VD->isStaticDataMember()) 1962 return EmitGlobalVarDeclLValue(*this, E, VD); 1963 1964 bool isBlockVariable = VD->hasAttr<BlocksAttr>(); 1965 1966 llvm::Value *V = LocalDeclMap.lookup(VD); 1967 if (!V && VD->isStaticLocal()) 1968 V = CGM.getOrCreateStaticVarDecl( 1969 *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)); 1970 1971 // Check if variable is threadprivate. 1972 if (V && getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) 1973 return EmitThreadPrivateVarDeclLValue( 1974 *this, VD, T, V, getTypes().ConvertTypeForMem(VD->getType()), 1975 Alignment, E->getExprLoc()); 1976 1977 assert(V && "DeclRefExpr not entered in LocalDeclMap?"); 1978 1979 if (isBlockVariable) 1980 V = BuildBlockByrefAddress(V, VD); 1981 1982 LValue LV; 1983 if (VD->getType()->isReferenceType()) { 1984 llvm::LoadInst *LI = Builder.CreateLoad(V); 1985 LI->setAlignment(Alignment.getQuantity()); 1986 V = LI; 1987 LV = MakeNaturalAlignAddrLValue(V, T); 1988 } else { 1989 LV = MakeAddrLValue(V, T, Alignment); 1990 } 1991 1992 bool isLocalStorage = VD->hasLocalStorage(); 1993 1994 bool NonGCable = isLocalStorage && 1995 !VD->getType()->isReferenceType() && 1996 !isBlockVariable; 1997 if (NonGCable) { 1998 LV.getQuals().removeObjCGCAttr(); 1999 LV.setNonGC(true); 2000 } 2001 2002 bool isImpreciseLifetime = 2003 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); 2004 if (isImpreciseLifetime) 2005 LV.setARCPreciseLifetime(ARCImpreciseLifetime); 2006 setObjCGCLValueClass(getContext(), E, LV); 2007 return LV; 2008 } 2009 2010 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2011 return EmitFunctionDeclLValue(*this, E, FD); 2012 2013 llvm_unreachable("Unhandled DeclRefExpr"); 2014 } 2015 2016 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 2017 // __extension__ doesn't affect lvalue-ness. 2018 if (E->getOpcode() == UO_Extension) 2019 return EmitLValue(E->getSubExpr()); 2020 2021 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 2022 switch (E->getOpcode()) { 2023 default: llvm_unreachable("Unknown unary operator lvalue!"); 2024 case UO_Deref: { 2025 QualType T = E->getSubExpr()->getType()->getPointeeType(); 2026 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 2027 2028 LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T); 2029 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); 2030 2031 // We should not generate __weak write barrier on indirect reference 2032 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 2033 // But, we continue to generate __strong write barrier on indirect write 2034 // into a pointer to object. 2035 if (getLangOpts().ObjC1 && 2036 getLangOpts().getGC() != LangOptions::NonGC && 2037 LV.isObjCWeak()) 2038 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 2039 return LV; 2040 } 2041 case UO_Real: 2042 case UO_Imag: { 2043 LValue LV = EmitLValue(E->getSubExpr()); 2044 assert(LV.isSimple() && "real/imag on non-ordinary l-value"); 2045 llvm::Value *Addr = LV.getAddress(); 2046 2047 // __real is valid on scalars. This is a faster way of testing that. 2048 // __imag can only produce an rvalue on scalars. 
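    // E.g. (illustrative): for '_Complex double z;', '__real z' and '__imag z'
    // are lvalues addressing the first and second members of the underlying
    // { double, double } pair, emitted below as a struct GEP at index 0 or 1.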
2049 if (E->getOpcode() == UO_Real && 2050 !cast<llvm::PointerType>(Addr->getType()) 2051 ->getElementType()->isStructTy()) { 2052 assert(E->getSubExpr()->getType()->isArithmeticType()); 2053 return LV; 2054 } 2055 2056 assert(E->getSubExpr()->getType()->isAnyComplexType()); 2057 2058 unsigned Idx = E->getOpcode() == UO_Imag; 2059 return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(), 2060 Idx, "idx"), 2061 ExprTy); 2062 } 2063 case UO_PreInc: 2064 case UO_PreDec: { 2065 LValue LV = EmitLValue(E->getSubExpr()); 2066 bool isInc = E->getOpcode() == UO_PreInc; 2067 2068 if (E->getType()->isAnyComplexType()) 2069 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); 2070 else 2071 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); 2072 return LV; 2073 } 2074 } 2075 } 2076 2077 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 2078 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), 2079 E->getType()); 2080 } 2081 2082 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 2083 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), 2084 E->getType()); 2085 } 2086 2087 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 2088 auto SL = E->getFunctionName(); 2089 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); 2090 StringRef FnName = CurFn->getName(); 2091 if (FnName.startswith("\01")) 2092 FnName = FnName.substr(1); 2093 StringRef NameItems[] = { 2094 PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName}; 2095 std::string GVName = llvm::join(NameItems, NameItems + 2, "."); 2096 if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) { 2097 auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str(), 1); 2098 return MakeAddrLValue(C, E->getType()); 2099 } 2100 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); 2101 return MakeAddrLValue(C, E->getType()); 2102 } 2103 2104 /// Emit a type description suitable for use by a runtime sanitizer library. The 2105 /// format of a type descriptor is 2106 /// 2107 /// \code 2108 /// { i16 TypeKind, i16 TypeInfo } 2109 /// \endcode 2110 /// 2111 /// followed by an array of i8 containing the type name. TypeKind is 0 for an 2112 /// integer, 1 for a floating point value, and -1 for anything else. 2113 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { 2114 // Only emit each type's descriptor once. 2115 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) 2116 return C; 2117 2118 uint16_t TypeKind = -1; 2119 uint16_t TypeInfo = 0; 2120 2121 if (T->isIntegerType()) { 2122 TypeKind = 0; 2123 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 2124 (T->isSignedIntegerType() ? 1 : 0); 2125 } else if (T->isFloatingType()) { 2126 TypeKind = 1; 2127 TypeInfo = getContext().getTypeSize(T); 2128 } 2129 2130 // Format the type name as if for a diagnostic, including quotes and 2131 // optionally an 'aka'. 
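  // A concrete example (a sketch; the exact layout depends on the target):
  // for a signed 32-bit 'int', TypeKind is 0 and TypeInfo is
  // (log2(32) << 1) | 1 == 11, so the emitted descriptor is roughly
  //   { i16 0, i16 11, [6 x i8] c"'int'\00" }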
2132 SmallString<32> Buffer; 2133 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 2134 (intptr_t)T.getAsOpaquePtr(), 2135 StringRef(), StringRef(), None, Buffer, 2136 None); 2137 2138 llvm::Constant *Components[] = { 2139 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 2140 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 2141 }; 2142 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 2143 2144 auto *GV = new llvm::GlobalVariable( 2145 CGM.getModule(), Descriptor->getType(), 2146 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); 2147 GV->setUnnamedAddr(true); 2148 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); 2149 2150 // Remember the descriptor for this type. 2151 CGM.setTypeDescriptorInMap(T, GV); 2152 2153 return GV; 2154 } 2155 2156 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 2157 llvm::Type *TargetTy = IntPtrTy; 2158 2159 // Floating-point types which fit into intptr_t are bitcast to integers 2160 // and then passed directly (after zero-extension, if necessary). 2161 if (V->getType()->isFloatingPointTy()) { 2162 unsigned Bits = V->getType()->getPrimitiveSizeInBits(); 2163 if (Bits <= TargetTy->getIntegerBitWidth()) 2164 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), 2165 Bits)); 2166 } 2167 2168 // Integers which fit in intptr_t are zero-extended and passed directly. 2169 if (V->getType()->isIntegerTy() && 2170 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 2171 return Builder.CreateZExt(V, TargetTy); 2172 2173 // Pointers are passed directly, everything else is passed by address. 2174 if (!V->getType()->isPointerTy()) { 2175 llvm::Value *Ptr = CreateTempAlloca(V->getType()); 2176 Builder.CreateStore(V, Ptr); 2177 V = Ptr; 2178 } 2179 return Builder.CreatePtrToInt(V, TargetTy); 2180 } 2181 2182 /// \brief Emit a representation of a SourceLocation for passing to a handler 2183 /// in a sanitizer runtime library. The format for this data is: 2184 /// \code 2185 /// struct SourceLocation { 2186 /// const char *Filename; 2187 /// int32_t Line, Column; 2188 /// }; 2189 /// \endcode 2190 /// For an invalid SourceLocation, the Filename pointer is null. 2191 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { 2192 llvm::Constant *Filename; 2193 int Line, Column; 2194 2195 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); 2196 if (PLoc.isValid()) { 2197 auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src"); 2198 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(FilenameGV); 2199 Filename = FilenameGV; 2200 Line = PLoc.getLine(); 2201 Column = PLoc.getColumn(); 2202 } else { 2203 Filename = llvm::Constant::getNullValue(Int8PtrTy); 2204 Line = Column = 0; 2205 } 2206 2207 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), 2208 Builder.getInt32(Column)}; 2209 2210 return llvm::ConstantStruct::getAnon(Data); 2211 } 2212 2213 namespace { 2214 /// \brief Specify under what conditions this check can be recovered 2215 enum class CheckRecoverableKind { 2216 /// Always terminate program execution if this check fails. 2217 Unrecoverable, 2218 /// Check supports recovering, runtime has both fatal (noreturn) and 2219 /// non-fatal handlers for this check. 2220 Recoverable, 2221 /// Runtime conditionally aborts, always need to support recovery. 
2222   AlwaysRecoverable
2223 };
2224 }
2225 
2226 static CheckRecoverableKind getRecoverableKind(SanitizerKind Kind) {
2227   switch (Kind) {
2228   case SanitizerKind::Vptr:
2229     return CheckRecoverableKind::AlwaysRecoverable;
2230   case SanitizerKind::Return:
2231   case SanitizerKind::Unreachable:
2232     return CheckRecoverableKind::Unrecoverable;
2233   default:
2234     return CheckRecoverableKind::Recoverable;
2235   }
2236 }
2237 
2238 static void emitCheckHandlerCall(CodeGenFunction &CGF,
2239                                  llvm::FunctionType *FnType,
2240                                  ArrayRef<llvm::Value *> FnArgs,
2241                                  StringRef CheckName,
2242                                  CheckRecoverableKind RecoverKind, bool IsFatal,
2243                                  llvm::BasicBlock *ContBB) {
2244   assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
2245   bool NeedsAbortSuffix =
2246       IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
2247   std::string FnName = ("__ubsan_handle_" + CheckName +
2248                         (NeedsAbortSuffix ? "_abort" : "")).str();
2249   bool MayReturn =
2250       !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
2251 
2252   llvm::AttrBuilder B;
2253   if (!MayReturn) {
2254     B.addAttribute(llvm::Attribute::NoReturn)
2255         .addAttribute(llvm::Attribute::NoUnwind);
2256   }
2257   B.addAttribute(llvm::Attribute::UWTable);
2258 
2259   llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
2260       FnType, FnName,
2261       llvm::AttributeSet::get(CGF.getLLVMContext(),
2262                               llvm::AttributeSet::FunctionIndex, B));
2263   llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
2264   if (!MayReturn) {
2265     HandlerCall->setDoesNotReturn();
2266     CGF.Builder.CreateUnreachable();
2267   } else {
2268     CGF.Builder.CreateBr(ContBB);
2269   }
2270 }
2271 
2272 void CodeGenFunction::EmitCheck(
2273     ArrayRef<std::pair<llvm::Value *, SanitizerKind>> Checked,
2274     StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
2275     ArrayRef<llvm::Value *> DynamicArgs) {
2276   assert(IsSanitizerScope);
2277   assert(Checked.size() > 0);
2278 
2279   llvm::Value *FatalCond = nullptr;
2280   llvm::Value *RecoverableCond = nullptr;
2281   for (int i = 0, n = Checked.size(); i < n; ++i) {
2282     llvm::Value *Check = Checked[i].first;
2283     llvm::Value *&Cond =
2284         CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
2285             ? RecoverableCond
2286             : FatalCond;
2287     Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
2288   }
2289 
2290   llvm::Value *JointCond;
2291   if (FatalCond && RecoverableCond)
2292     JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
2293   else
2294     JointCond = FatalCond ? FatalCond : RecoverableCond;
2295   assert(JointCond);
2296 
2297   CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
2298   assert(SanOpts.has(Checked[0].second));
2299 #ifndef NDEBUG
2300   for (int i = 1, n = Checked.size(); i < n; ++i) {
2301     assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
2302            "All recoverable kinds in a single check must be same!");
2303     assert(SanOpts.has(Checked[i].second));
2304   }
2305 #endif
2306 
2307   if (CGM.getCodeGenOpts().SanitizeUndefinedTrapOnError) {
2308     assert(RecoverKind != CheckRecoverableKind::AlwaysRecoverable &&
2309            "Runtime call required for AlwaysRecoverable kind!");
2310     // Assume that -fsanitize-undefined-trap-on-error overrides
2311     // -fsanitize-recover= options, as we can only print a meaningful error
2312     // message and recover if we have runtime support.
2313     return EmitTrapCheck(JointCond);
2314   }
2315 
2316   llvm::BasicBlock *Cont = createBasicBlock("cont");
2317   llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
2318   llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
2319   // Give a hint that we very much don't expect to execute the handler.
2320   // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
2321   llvm::MDBuilder MDHelper(getLLVMContext());
2322   llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
2323   Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
2324   EmitBlock(Handlers);
2325 
2326   // Emit handler arguments and create handler function type.
2327   llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
2328   auto *InfoPtr =
2329       new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
2330                                llvm::GlobalVariable::PrivateLinkage, Info);
2331   InfoPtr->setUnnamedAddr(true);
2332   CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
2333 
2334   SmallVector<llvm::Value *, 4> Args;
2335   SmallVector<llvm::Type *, 4> ArgTypes;
2336   Args.reserve(DynamicArgs.size() + 1);
2337   ArgTypes.reserve(DynamicArgs.size() + 1);
2338 
2339   // Handler functions take an i8* pointing to the (handler-specific) static
2340   // information block, followed by a sequence of intptr_t arguments
2341   // representing operand values.
2342   Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
2343   ArgTypes.push_back(Int8PtrTy);
2344   for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
2345     Args.push_back(EmitCheckValue(DynamicArgs[i]));
2346     ArgTypes.push_back(IntPtrTy);
2347   }
2348 
2349   llvm::FunctionType *FnType =
2350       llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
2351 
2352   if (!FatalCond || !RecoverableCond) {
2353     // Simple case: we need to generate a single handler call, either fatal
2354     // or non-fatal.
2355     emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind,
2356                          (FatalCond != nullptr), Cont);
2357   } else {
2358     // Emit two handler calls: one for the set of unrecoverable checks,
2359     // another for the recoverable ones.
2360     llvm::BasicBlock *NonFatalHandlerBB =
2361         createBasicBlock("non_fatal." + CheckName);
2362     llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
2363     Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
2364     EmitBlock(FatalHandlerBB);
2365     emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true,
2366                          NonFatalHandlerBB);
2367     EmitBlock(NonFatalHandlerBB);
2368     emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false,
2369                          Cont);
2370   }
2371 
2372   EmitBlock(Cont);
2373 }
2374 
2375 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
2376   llvm::BasicBlock *Cont = createBasicBlock("cont");
2377 
2378   // If we're optimizing, collapse all calls to trap down to just one per
2379   // function to save on code size.
2380   if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
2381     TrapBB = createBasicBlock("trap");
2382     Builder.CreateCondBr(Checked, Cont, TrapBB);
2383     EmitBlock(TrapBB);
2384     llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
2385     llvm::CallInst *TrapCall = Builder.CreateCall(F);
2386     TrapCall->setDoesNotReturn();
2387     TrapCall->setDoesNotThrow();
2388     Builder.CreateUnreachable();
2389   } else {
2390     Builder.CreateCondBr(Checked, Cont, TrapBB);
2391   }
2392 
2393   EmitBlock(Cont);
2394 }
2395 
2396 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
2397 /// array to pointer, return the array subexpression.
2398 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
2399   // If this isn't just an array->pointer decay, bail out.
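  // E.g. (illustrative): for 'int A[10]; ... A[i]', the base of the subscript
  // is an ArrayToPointerDecay cast of A; returning the array subexpression
  // lets EmitArraySubscriptExpr fold the decay and the index into a single
  // "gep A, 0, i" instead of a separate decay gep followed by "gep, i".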
2400 const auto *CE = dyn_cast<CastExpr>(E); 2401 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) 2402 return nullptr; 2403 2404 // If this is a decay from variable width array, bail out. 2405 const Expr *SubExpr = CE->getSubExpr(); 2406 if (SubExpr->getType()->isVariableArrayType()) 2407 return nullptr; 2408 2409 return SubExpr; 2410 } 2411 2412 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, 2413 bool Accessed) { 2414 // The index must always be an integer, which is not an aggregate. Emit it. 2415 llvm::Value *Idx = EmitScalarExpr(E->getIdx()); 2416 QualType IdxTy = E->getIdx()->getType(); 2417 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 2418 2419 if (SanOpts.has(SanitizerKind::ArrayBounds)) 2420 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); 2421 2422 // If the base is a vector type, then we are forming a vector element lvalue 2423 // with this subscript. 2424 if (E->getBase()->getType()->isVectorType() && 2425 !isa<ExtVectorElementExpr>(E->getBase())) { 2426 // Emit the vector as an lvalue to get its address. 2427 LValue LHS = EmitLValue(E->getBase()); 2428 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 2429 return LValue::MakeVectorElt(LHS.getAddress(), Idx, 2430 E->getBase()->getType(), LHS.getAlignment()); 2431 } 2432 2433 // Extend or truncate the index type to 32 or 64-bits. 2434 if (Idx->getType() != IntPtrTy) 2435 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 2436 2437 // We know that the pointer points to a type of the correct size, unless the 2438 // size is a VLA or Objective-C interface. 2439 llvm::Value *Address = nullptr; 2440 CharUnits ArrayAlignment; 2441 if (isa<ExtVectorElementExpr>(E->getBase())) { 2442 LValue LV = EmitLValue(E->getBase()); 2443 Address = EmitExtVectorElementLValue(LV); 2444 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); 2445 const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); 2446 QualType EQT = ExprVT->getElementType(); 2447 return MakeAddrLValue(Address, EQT, 2448 getContext().getTypeAlignInChars(EQT)); 2449 } 2450 else if (const VariableArrayType *vla = 2451 getContext().getAsVariableArrayType(E->getType())) { 2452 // The base must be a pointer, which is not an aggregate. Emit 2453 // it. It needs to be emitted first in case it's what captures 2454 // the VLA bounds. 2455 Address = EmitScalarExpr(E->getBase()); 2456 2457 // The element count here is the total number of non-VLA elements. 2458 llvm::Value *numElements = getVLASize(vla).first; 2459 2460 // Effectively, the multiply by the VLA size is part of the GEP. 2461 // GEP indexes are signed, and scaling an index isn't permitted to 2462 // signed-overflow, so we use the same semantics for our explicit 2463 // multiply. We suppress this if overflow is not undefined behavior. 2464 if (getLangOpts().isSignedOverflowDefined()) { 2465 Idx = Builder.CreateMul(Idx, numElements); 2466 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 2467 } else { 2468 Idx = Builder.CreateNSWMul(Idx, numElements); 2469 Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); 2470 } 2471 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 2472 // Indexing over an interface, as in "NSString *P; P[4];" 2473 llvm::Value *InterfaceSize = 2474 llvm::ConstantInt::get(Idx->getType(), 2475 getContext().getTypeSizeInChars(OIT).getQuantity()); 2476 2477 Idx = Builder.CreateMul(Idx, InterfaceSize); 2478 2479 // The base must be a pointer, which is not an aggregate. 
Emit it. 2480 llvm::Value *Base = EmitScalarExpr(E->getBase()); 2481 Address = EmitCastToVoidPtr(Base); 2482 Address = Builder.CreateGEP(Address, Idx, "arrayidx"); 2483 Address = Builder.CreateBitCast(Address, Base->getType()); 2484 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) { 2485 // If this is A[i] where A is an array, the frontend will have decayed the 2486 // base to be a ArrayToPointerDecay implicit cast. While correct, it is 2487 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a 2488 // "gep x, i" here. Emit one "gep A, 0, i". 2489 assert(Array->getType()->isArrayType() && 2490 "Array to pointer decay must have array source type!"); 2491 LValue ArrayLV; 2492 // For simple multidimensional array indexing, set the 'accessed' flag for 2493 // better bounds-checking of the base expression. 2494 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) 2495 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); 2496 else 2497 ArrayLV = EmitLValue(Array); 2498 llvm::Value *ArrayPtr = ArrayLV.getAddress(); 2499 llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); 2500 llvm::Value *Args[] = { Zero, Idx }; 2501 2502 // Propagate the alignment from the array itself to the result. 2503 ArrayAlignment = ArrayLV.getAlignment(); 2504 2505 if (getLangOpts().isSignedOverflowDefined()) 2506 Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx"); 2507 else 2508 Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx"); 2509 } else { 2510 // The base must be a pointer, which is not an aggregate. Emit it. 2511 llvm::Value *Base = EmitScalarExpr(E->getBase()); 2512 if (getLangOpts().isSignedOverflowDefined()) 2513 Address = Builder.CreateGEP(Base, Idx, "arrayidx"); 2514 else 2515 Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); 2516 } 2517 2518 QualType T = E->getBase()->getType()->getPointeeType(); 2519 assert(!T.isNull() && 2520 "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); 2521 2522 2523 // Limit the alignment to that of the result type. 2524 LValue LV; 2525 if (!ArrayAlignment.isZero()) { 2526 CharUnits Align = getContext().getTypeAlignInChars(T); 2527 ArrayAlignment = std::min(Align, ArrayAlignment); 2528 LV = MakeAddrLValue(Address, T, ArrayAlignment); 2529 } else { 2530 LV = MakeNaturalAlignAddrLValue(Address, T); 2531 } 2532 2533 LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace()); 2534 2535 if (getLangOpts().ObjC1 && 2536 getLangOpts().getGC() != LangOptions::NonGC) { 2537 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 2538 setObjCGCLValueClass(getContext(), E, LV); 2539 } 2540 return LV; 2541 } 2542 2543 static 2544 llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder, 2545 SmallVectorImpl<unsigned> &Elts) { 2546 SmallVector<llvm::Constant*, 4> CElts; 2547 for (unsigned i = 0, e = Elts.size(); i != e; ++i) 2548 CElts.push_back(Builder.getInt32(Elts[i])); 2549 2550 return llvm::ConstantVector::get(CElts); 2551 } 2552 2553 LValue CodeGenFunction:: 2554 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { 2555 // Emit the base vector as an l-value. 2556 LValue Base; 2557 2558 // ExtVectorElementExpr's base can either be a vector or pointer to vector. 2559 if (E->isArrow()) { 2560 // If it is a pointer to a vector, emit the address and form an lvalue with 2561 // it. 
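    // E.g. (illustrative): given 'float4 *P', 'P->xy' emits P as a scalar
    // pointer and wraps it in an lvalue for the pointee vector, so the
    // swizzle can be encoded against that address below.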
2562     llvm::Value *Ptr = EmitScalarExpr(E->getBase());
2563     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
2564     Base = MakeAddrLValue(Ptr, PT->getPointeeType());
2565     Base.getQuals().removeObjCGCAttr();
2566   } else if (E->getBase()->isGLValue()) {
2567     // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
2568     // emit the base as an lvalue.
2569     assert(E->getBase()->getType()->isVectorType());
2570     Base = EmitLValue(E->getBase());
2571   } else {
2572     // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
2573     assert(E->getBase()->getType()->isVectorType() &&
2574            "Result must be a vector");
2575     llvm::Value *Vec = EmitScalarExpr(E->getBase());
2576 
2577     // Store the vector to memory (because LValue wants an address).
2578     llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
2579     Builder.CreateStore(Vec, VecMem);
2580     Base = MakeAddrLValue(VecMem, E->getBase()->getType());
2581   }
2582 
2583   QualType type =
2584       E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
2585 
2586   // Encode the element access list into a vector of unsigned indices.
2587   SmallVector<unsigned, 4> Indices;
2588   E->getEncodedElementAccess(Indices);
2589 
2590   if (Base.isSimple()) {
2591     llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
2592     return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
2593                                     Base.getAlignment());
2594   }
2595   assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
2596 
2597   llvm::Constant *BaseElts = Base.getExtVectorElts();
2598   SmallVector<llvm::Constant *, 4> CElts;
2599 
2600   for (unsigned i = 0, e = Indices.size(); i != e; ++i)
2601     CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
2602   llvm::Constant *CV = llvm::ConstantVector::get(CElts);
2603   return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
2604                                   Base.getAlignment());
2605 }
2606 
2607 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
2608   Expr *BaseExpr = E->getBase();
2609 
2610   // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
2611   LValue BaseLV;
2612   if (E->isArrow()) {
2613     llvm::Value *Ptr = EmitScalarExpr(BaseExpr);
2614     QualType PtrTy = BaseExpr->getType()->getPointeeType();
2615     EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy);
2616     BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy);
2617   } else
2618     BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
2619 
2620   NamedDecl *ND = E->getMemberDecl();
2621   if (auto *Field = dyn_cast<FieldDecl>(ND)) {
2622     LValue LV = EmitLValueForField(BaseLV, Field);
2623     setObjCGCLValueClass(getContext(), E, LV);
2624     return LV;
2625   }
2626 
2627   if (auto *VD = dyn_cast<VarDecl>(ND))
2628     return EmitGlobalVarDeclLValue(*this, E, VD);
2629 
2630   if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2631     return EmitFunctionDeclLValue(*this, E, FD);
2632 
2633   llvm_unreachable("Unhandled member declaration!");
2634 }
2635 
2636 /// Given that we are currently emitting a lambda, emit an l-value for
2637 /// one of its members.
2638 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { 2639 assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda()); 2640 assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent()); 2641 QualType LambdaTagType = 2642 getContext().getTagDeclType(Field->getParent()); 2643 LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType); 2644 return EmitLValueForField(LambdaLV, Field); 2645 } 2646 2647 LValue CodeGenFunction::EmitLValueForField(LValue base, 2648 const FieldDecl *field) { 2649 if (field->isBitField()) { 2650 const CGRecordLayout &RL = 2651 CGM.getTypes().getCGRecordLayout(field->getParent()); 2652 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); 2653 llvm::Value *Addr = base.getAddress(); 2654 unsigned Idx = RL.getLLVMFieldNo(field); 2655 if (Idx != 0) 2656 // For structs, we GEP to the field that the record layout suggests. 2657 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName()); 2658 // Get the access type. 2659 llvm::Type *PtrTy = llvm::Type::getIntNPtrTy( 2660 getLLVMContext(), Info.StorageSize, 2661 CGM.getContext().getTargetAddressSpace(base.getType())); 2662 if (Addr->getType() != PtrTy) 2663 Addr = Builder.CreateBitCast(Addr, PtrTy); 2664 2665 QualType fieldType = 2666 field->getType().withCVRQualifiers(base.getVRQualifiers()); 2667 return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment()); 2668 } 2669 2670 const RecordDecl *rec = field->getParent(); 2671 QualType type = field->getType(); 2672 CharUnits alignment = getContext().getDeclAlign(field); 2673 2674 // FIXME: It should be impossible to have an LValue without alignment for a 2675 // complete type. 2676 if (!base.getAlignment().isZero()) 2677 alignment = std::min(alignment, base.getAlignment()); 2678 2679 bool mayAlias = rec->hasAttr<MayAliasAttr>(); 2680 2681 llvm::Value *addr = base.getAddress(); 2682 unsigned cvr = base.getVRQualifiers(); 2683 bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA; 2684 if (rec->isUnion()) { 2685 // For unions, there is no pointer adjustment. 2686 assert(!type->isReferenceType() && "union has reference member"); 2687 // TODO: handle path-aware TBAA for union. 2688 TBAAPath = false; 2689 } else { 2690 // For structs, we GEP to the field that the record layout suggests. 2691 unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); 2692 addr = Builder.CreateStructGEP(addr, idx, field->getName()); 2693 2694 // If this is a reference field, load the reference right now. 2695 if (const ReferenceType *refType = type->getAs<ReferenceType>()) { 2696 llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); 2697 if (cvr & Qualifiers::Volatile) load->setVolatile(true); 2698 load->setAlignment(alignment.getQuantity()); 2699 2700 // Loading the reference will disable path-aware TBAA. 2701 TBAAPath = false; 2702 if (CGM.shouldUseTBAA()) { 2703 llvm::MDNode *tbaa; 2704 if (mayAlias) 2705 tbaa = CGM.getTBAAInfo(getContext().CharTy); 2706 else 2707 tbaa = CGM.getTBAAInfo(type); 2708 if (tbaa) 2709 CGM.DecorateInstruction(load, tbaa); 2710 } 2711 2712 addr = load; 2713 mayAlias = false; 2714 type = refType->getPointeeType(); 2715 if (type->isIncompleteType()) 2716 alignment = CharUnits(); 2717 else 2718 alignment = getContext().getTypeAlignInChars(type); 2719 cvr = 0; // qualifiers don't recursively apply to referencee 2720 } 2721 } 2722 2723 // Make sure that the address is pointing to the right type. This is critical 2724 // for both unions and structs. 
A union needs a bitcast, a struct element 2725 // will need a bitcast if the LLVM type laid out doesn't match the desired 2726 // type. 2727 addr = EmitBitCastOfLValueToProperType(*this, addr, 2728 CGM.getTypes().ConvertTypeForMem(type), 2729 field->getName()); 2730 2731 if (field->hasAttr<AnnotateAttr>()) 2732 addr = EmitFieldAnnotations(field, addr); 2733 2734 LValue LV = MakeAddrLValue(addr, type, alignment); 2735 LV.getQuals().addCVRQualifiers(cvr); 2736 if (TBAAPath) { 2737 const ASTRecordLayout &Layout = 2738 getContext().getASTRecordLayout(field->getParent()); 2739 // Set the base type to be the base type of the base LValue and 2740 // update offset to be relative to the base type. 2741 LV.setTBAABaseType(mayAlias ? getContext().CharTy : base.getTBAABaseType()); 2742 LV.setTBAAOffset(mayAlias ? 0 : base.getTBAAOffset() + 2743 Layout.getFieldOffset(field->getFieldIndex()) / 2744 getContext().getCharWidth()); 2745 } 2746 2747 // __weak attribute on a field is ignored. 2748 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) 2749 LV.getQuals().removeObjCGCAttr(); 2750 2751 // Fields of may_alias structs act like 'char' for TBAA purposes. 2752 // FIXME: this should get propagated down through anonymous structs 2753 // and unions. 2754 if (mayAlias && LV.getTBAAInfo()) 2755 LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); 2756 2757 return LV; 2758 } 2759 2760 LValue 2761 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, 2762 const FieldDecl *Field) { 2763 QualType FieldType = Field->getType(); 2764 2765 if (!FieldType->isReferenceType()) 2766 return EmitLValueForField(Base, Field); 2767 2768 const CGRecordLayout &RL = 2769 CGM.getTypes().getCGRecordLayout(Field->getParent()); 2770 unsigned idx = RL.getLLVMFieldNo(Field); 2771 llvm::Value *V = Builder.CreateStructGEP(Base.getAddress(), idx); 2772 assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); 2773 2774 // Make sure that the address is pointing to the right type. This is critical 2775 // for both unions and structs. A union needs a bitcast, a struct element 2776 // will need a bitcast if the LLVM type laid out doesn't match the desired 2777 // type. 2778 llvm::Type *llvmType = ConvertTypeForMem(FieldType); 2779 V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName()); 2780 2781 CharUnits Alignment = getContext().getDeclAlign(Field); 2782 2783 // FIXME: It should be impossible to have an LValue without alignment for a 2784 // complete type. 2785 if (!Base.getAlignment().isZero()) 2786 Alignment = std::min(Alignment, Base.getAlignment()); 2787 2788 return MakeAddrLValue(V, FieldType, Alignment); 2789 } 2790 2791 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 2792 if (E->isFileScope()) { 2793 llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 2794 return MakeAddrLValue(GlobalPtr, E->getType()); 2795 } 2796 if (E->getType()->isVariablyModifiedType()) 2797 // make sure to emit the VLA size. 2798 EmitVariablyModifiedType(E->getType()); 2799 2800 llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 2801 const Expr *InitExpr = E->getInitializer(); 2802 LValue Result = MakeAddrLValue(DeclPtr, E->getType()); 2803 2804 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 2805 /*Init*/ true); 2806 2807 return Result; 2808 } 2809 2810 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { 2811 if (!E->isGLValue()) 2812 // Initializing an aggregate temporary in C++11: T{...}. 
2813 return EmitAggExprToLValue(E); 2814 2815 // An lvalue initializer list must be initializing a reference. 2816 assert(E->getNumInits() == 1 && "reference init with multiple values"); 2817 return EmitLValue(E->getInit(0)); 2818 } 2819 2820 /// Emit the operand of a glvalue conditional operator. This is either a glvalue 2821 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no 2822 /// LValue is returned and the current block has been terminated. 2823 static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, 2824 const Expr *Operand) { 2825 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) { 2826 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false); 2827 return None; 2828 } 2829 2830 return CGF.EmitLValue(Operand); 2831 } 2832 2833 LValue CodeGenFunction:: 2834 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 2835 if (!expr->isGLValue()) { 2836 // ?: here should be an aggregate. 2837 assert(hasAggregateEvaluationKind(expr->getType()) && 2838 "Unexpected conditional operator!"); 2839 return EmitAggExprToLValue(expr); 2840 } 2841 2842 OpaqueValueMapping binding(*this, expr); 2843 RegionCounter Cnt = getPGORegionCounter(expr); 2844 2845 const Expr *condExpr = expr->getCond(); 2846 bool CondExprBool; 2847 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 2848 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 2849 if (!CondExprBool) std::swap(live, dead); 2850 2851 if (!ContainsLabel(dead)) { 2852 // If the true case is live, we need to track its region. 2853 if (CondExprBool) 2854 Cnt.beginRegion(Builder); 2855 return EmitLValue(live); 2856 } 2857 } 2858 2859 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 2860 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 2861 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 2862 2863 ConditionalEvaluation eval(*this); 2864 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, Cnt.getCount()); 2865 2866 // Any temporaries created here are conditional. 2867 EmitBlock(lhsBlock); 2868 Cnt.beginRegion(Builder); 2869 eval.begin(*this); 2870 Optional<LValue> lhs = 2871 EmitLValueOrThrowExpression(*this, expr->getTrueExpr()); 2872 eval.end(*this); 2873 2874 if (lhs && !lhs->isSimple()) 2875 return EmitUnsupportedLValue(expr, "conditional operator"); 2876 2877 lhsBlock = Builder.GetInsertBlock(); 2878 if (lhs) 2879 Builder.CreateBr(contBlock); 2880 2881 // Any temporaries created here are conditional. 2882 EmitBlock(rhsBlock); 2883 eval.begin(*this); 2884 Optional<LValue> rhs = 2885 EmitLValueOrThrowExpression(*this, expr->getFalseExpr()); 2886 eval.end(*this); 2887 if (rhs && !rhs->isSimple()) 2888 return EmitUnsupportedLValue(expr, "conditional operator"); 2889 rhsBlock = Builder.GetInsertBlock(); 2890 2891 EmitBlock(contBlock); 2892 2893 if (lhs && rhs) { 2894 llvm::PHINode *phi = Builder.CreatePHI(lhs->getAddress()->getType(), 2895 2, "cond-lvalue"); 2896 phi->addIncoming(lhs->getAddress(), lhsBlock); 2897 phi->addIncoming(rhs->getAddress(), rhsBlock); 2898 return MakeAddrLValue(phi, expr->getType()); 2899 } else { 2900 assert((lhs || rhs) && 2901 "both operands of glvalue conditional are throw-expressions?"); 2902 return lhs ? *lhs : *rhs; 2903 } 2904 } 2905 2906 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference 2907 /// type. 
If the cast is to a reference, we can have the usual lvalue result, 2908 /// otherwise if a cast is needed by the code generator in an lvalue context, 2909 /// then it must mean that we need the address of an aggregate in order to 2910 /// access one of its members. This can happen for all the reasons that casts 2911 /// are permitted with aggregate result, including noop aggregate casts, and 2912 /// cast from scalar to union. 2913 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 2914 switch (E->getCastKind()) { 2915 case CK_ToVoid: 2916 case CK_BitCast: 2917 case CK_ArrayToPointerDecay: 2918 case CK_FunctionToPointerDecay: 2919 case CK_NullToMemberPointer: 2920 case CK_NullToPointer: 2921 case CK_IntegralToPointer: 2922 case CK_PointerToIntegral: 2923 case CK_PointerToBoolean: 2924 case CK_VectorSplat: 2925 case CK_IntegralCast: 2926 case CK_IntegralToBoolean: 2927 case CK_IntegralToFloating: 2928 case CK_FloatingToIntegral: 2929 case CK_FloatingToBoolean: 2930 case CK_FloatingCast: 2931 case CK_FloatingRealToComplex: 2932 case CK_FloatingComplexToReal: 2933 case CK_FloatingComplexToBoolean: 2934 case CK_FloatingComplexCast: 2935 case CK_FloatingComplexToIntegralComplex: 2936 case CK_IntegralRealToComplex: 2937 case CK_IntegralComplexToReal: 2938 case CK_IntegralComplexToBoolean: 2939 case CK_IntegralComplexCast: 2940 case CK_IntegralComplexToFloatingComplex: 2941 case CK_DerivedToBaseMemberPointer: 2942 case CK_BaseToDerivedMemberPointer: 2943 case CK_MemberPointerToBoolean: 2944 case CK_ReinterpretMemberPointer: 2945 case CK_AnyPointerToBlockPointerCast: 2946 case CK_ARCProduceObject: 2947 case CK_ARCConsumeObject: 2948 case CK_ARCReclaimReturnedObject: 2949 case CK_ARCExtendBlockObject: 2950 case CK_CopyAndAutoreleaseBlockObject: 2951 case CK_AddressSpaceConversion: 2952 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 2953 2954 case CK_Dependent: 2955 llvm_unreachable("dependent cast kind in IR gen!"); 2956 2957 case CK_BuiltinFnToFnPtr: 2958 llvm_unreachable("builtin functions are handled elsewhere"); 2959 2960 // These are never l-values; just use the aggregate emission code. 
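  // (Illustrative: an lvalue use of an atomic<->non-atomic conversion
  // materializes the converted value into a temporary and yields the
  // temporary's address via EmitAggExprToLValue.)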
2961 case CK_NonAtomicToAtomic: 2962 case CK_AtomicToNonAtomic: 2963 return EmitAggExprToLValue(E); 2964 2965 case CK_Dynamic: { 2966 LValue LV = EmitLValue(E->getSubExpr()); 2967 llvm::Value *V = LV.getAddress(); 2968 const auto *DCE = cast<CXXDynamicCastExpr>(E); 2969 return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 2970 } 2971 2972 case CK_ConstructorConversion: 2973 case CK_UserDefinedConversion: 2974 case CK_CPointerToObjCPointerCast: 2975 case CK_BlockPointerToObjCPointerCast: 2976 case CK_NoOp: 2977 case CK_LValueToRValue: 2978 return EmitLValue(E->getSubExpr()); 2979 2980 case CK_UncheckedDerivedToBase: 2981 case CK_DerivedToBase: { 2982 const RecordType *DerivedClassTy = 2983 E->getSubExpr()->getType()->getAs<RecordType>(); 2984 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 2985 2986 LValue LV = EmitLValue(E->getSubExpr()); 2987 llvm::Value *This = LV.getAddress(); 2988 2989 // Perform the derived-to-base conversion 2990 llvm::Value *Base = GetAddressOfBaseClass( 2991 This, DerivedClassDecl, E->path_begin(), E->path_end(), 2992 /*NullCheckValue=*/false, E->getExprLoc()); 2993 2994 return MakeAddrLValue(Base, E->getType()); 2995 } 2996 case CK_ToUnion: 2997 return EmitAggExprToLValue(E); 2998 case CK_BaseToDerived: { 2999 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 3000 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 3001 3002 LValue LV = EmitLValue(E->getSubExpr()); 3003 3004 // Perform the base-to-derived conversion 3005 llvm::Value *Derived = 3006 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 3007 E->path_begin(), E->path_end(), 3008 /*NullCheckValue=*/false); 3009 3010 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is 3011 // performed and the object is not of the derived type. 3012 if (sanitizePerformTypeCheck()) 3013 EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), 3014 Derived, E->getType()); 3015 3016 return MakeAddrLValue(Derived, E->getType()); 3017 } 3018 case CK_LValueBitCast: { 3019 // This must be a reinterpret_cast (or c-style equivalent). 
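    // E.g. (illustrative): 'reinterpret_cast<Other &>(Obj)' emits the address
    // of 'Obj' and bitcasts it to 'Other *'; no data is loaded or converted.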
    const auto *CE = cast<ExplicitCastExpr>(E);

    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(CE->getTypeAsWritten()));
    return MakeAddrLValue(V, E->getType());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    QualType ToType = getContext().getLValueReferenceType(E->getType());
    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
                                           ConvertType(ToType));
    return MakeAddrLValue(V, E->getType());
  }
  case CK_ZeroToOCLEvent:
    llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOpaqueLValueMapping(e);
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue();
  case TEK_Scalar:
    return EmitLoadOfLValue(FieldLV, Loc);
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  const Decl *TargetDecl = E->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (unsigned builtinID = FD->getBuiltinID())
      return EmitBuiltinExpr(FD, builtinID, E, ReturnValue);
  }

  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (const auto *PseudoDtor =
          dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    QualType DestroyedType = PseudoDtor->getDestroyedType();
    if (getLangOpts().ObjCAutoRefCount &&
        DestroyedType->isObjCLifetimeType() &&
        (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
         DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
      // Automatic Reference Counting:
      //   If the pseudo-expression names a retainable object with weak or
      //   strong lifetime, the object shall be released.
      Expr *BaseExpr = PseudoDtor->getBase();
      llvm::Value *BaseValue = nullptr;
      Qualifiers BaseQuals;

      // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a
      // scalar.
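      // Hypothetical shapes of the two forms (T standing in for some ObjC
      // lifetime type; illustration only):
      //
      //   s.~T();   // dot: `s` is emitted as an l-value; its address is used
      //   p->~T();  // arrow: `p` is emitted as a scalar pointer value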
      if (PseudoDtor->isArrow()) {
        BaseValue = EmitScalarExpr(BaseExpr);
        const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
        BaseQuals = PTy->getPointeeType().getQualifiers();
      } else {
        LValue BaseLV = EmitLValue(BaseExpr);
        BaseValue = BaseLV.getAddress();
        QualType BaseTy = BaseExpr->getType();
        BaseQuals = BaseTy.getQualifiers();
      }

      switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        break;

      case Qualifiers::OCL_Strong:
        EmitARCRelease(Builder.CreateLoad(BaseValue,
                          PseudoDtor->getDestroyedType().isVolatileQualified()),
                       ARCPreciseLifetime);
        break;

      case Qualifiers::OCL_Weak:
        EmitARCDestroyWeak(BaseValue);
        break;
      }
    } else {
      // C++ [expr.pseudo]p1:
      //   The result shall only be used as the operand for the function call
      //   operator (), and the result of such a call has type void. The only
      //   effect is the evaluation of the postfix-expression before the dot
      //   or arrow.
      EmitScalarExpr(E->getCallee());
    }

    return RValue::get(nullptr);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
                  TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getCallReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
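  // For example (hypothetical): an expression like `va_arg(ap, struct S).f`
  // needs the va_arg result as an l-value; it is currently evaluated into a
  // fresh temporary and the temporary's address is returned, hence the FIXME
  // about the extra copy.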
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return Builder.CreateBitCast(CGM.GetAddrOfUuidDescriptor(E),
                               ConvertType(E->getType())->getPointerTo());
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType());
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  EmitLambdaExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddr(), E->getType());
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddr(), E->getType());

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  llvm::Value *V =
    CGM.getObjCRuntime().GetSelector(*this, E->getSelector(), true);
  return MakeAddrLValue(V, E->getType());
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    // FIXME: this isn't right for bitfields.
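    // (If the base were a bit-field l-value, it would not have a simple byte
    // address, so the getAddress() call below would be wrong in that case.)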
    BaseValue = BaseLV.getAddress();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get l-value for statement expression returning aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
                                 const CallExpr *E, ReturnValueSlot ReturnValue,
                                 const Decl *TargetDecl, llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const auto *FnType =
      cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(QualType(FnType, 0), /*ForEH=*/true);
      llvm::Type *PrefixStructTyElems[] = {
        PrefixSig->getType(),
        FTRTTIConst->getType()
      };
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);

      llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
          Callee, llvm::PointerType::getUnqual(PrefixStructTy));
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(CalleePrefixStruct, 0, 0);
      llvm::Value *CalleeSig = Builder.CreateLoad(CalleeSigPtr);
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeRTTIPtr =
          Builder.CreateConstGEP2_32(CalleePrefixStruct, 0, 1);
      llvm::Value *CalleeRTTI = Builder.CreateLoad(CalleeRTTIPtr);
      llvm::Value *CalleeRTTIMatch =
          Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(E->getLocStart()),
        EmitCheckTypeDescriptor(CalleeType)
      };
      EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
                "function_type_mismatch", StaticData, Callee);

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
             CGM.getContext().VoidPtrTy);
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arg_begin(),
               E->arg_end(), E->getDirectCallee(), /*ParamsToSkip*/ 0);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*isChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed].
  //   If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV = CGM.getCXXABI().EmitMemberDataPointerAddress(
      *this, E, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(llvm::Value *addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeNaturalAlignAddrLValue(addr, type);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
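  // (Roughly: a PseudoObjectExpr carries a list of semantic expressions
  // linked together by OpaqueValueExprs. For a hypothetical ObjC property
  // compound assignment such as `obj.prop += 1`, the semantics include a
  // getter send, the addition, and a setter send, with one expression
  // designated as the result.)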
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}
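// A brief usage note (informal, not from the original file): callers use
// EmitPseudoObjectRValue() when the pseudo-object is consumed as a value and
// EmitPseudoObjectLValue() when it must be bound as an l-value; each simply
// selects the corresponding half of emitPseudoObjectExpr()'s LValueOrRValue
// result.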