//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name) {
  auto Alloca = CreateTempAlloca(Ty, Name);
  Alloca->setAlignment(Align.getQuantity());
  return Address(Alloca, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name) {
  return new llvm::AllocaInst(Ty, nullptr, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
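///
/// For example, an AST type carrying __attribute__((aligned(32))) may lower
/// to an LLVM type whose default ABI alignment is far smaller; use
/// CreateMemTemp below when the AST-level alignment matters.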
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
  assert(isa<llvm::AllocaInst>(Var.getPointer()));
  auto *Store = new llvm::StoreInst(Init, Var.getPointer());
  Store->setAlignment(Var.getAlignment().getQuantity());
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}

Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name) {
  return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
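/// In other words, aggregate expressions always get a fresh "agg.tmp" slot
/// here, so the returned RValue's address is safe to inspect.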
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit)));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::Constant *CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
                                               StructorType::Complete);
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static Address
createReferenceTemporary(CodeGenFunction &CGF,
                         const MaterializeTemporaryExpr *M, const Expr *Inner) {
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (llvm::Constant *Init = CGF.CGM.EmitConstantExpr(Inner, Ty, &CGF)) {
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp");
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getQuantity());
        // FIXME: Should we put the new global into a COMDAT?
        return Address(GV, alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp");
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->GetTemporaryExpr();

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      Object = Address(llvm::ConstantExpr::getBitCast(Var,
                           ConvertTypeForMem(E->getType())
                             ->getPointerTo(Object.getAddressSpace())),
                       Object.getAlignment());
      // We should not have emitted the initializer for this temporary as a
      // constant.
      assert(!Var->hasInitializer());
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Object = createReferenceTemporary(*this, M, E);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
    Object = Address(llvm::ConstantExpr::getBitCast(
        Var, ConvertTypeForMem(E->getType())->getPointerTo()),
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
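  //
  // For example (hypothetical source), binding `const Base &b = Derived();`
  // needs a derived-to-base adjustment, while `const int &i = getPair().first;`
  // needs a field adjustment into the materialized temporary.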
  for (unsigned I = Adjustments.size(); I != 0; --I) {
    SubobjectAdjustment &Adjustment = Adjustments[I-1];
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(),
                                 AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer();

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) |
         SanOpts.has(SanitizerKind::Alignment) |
         SanOpts.has(SanitizerKind::ObjectSize) |
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment, bool SkipNullCheck) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
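  // (Non-zero address spaces arise, e.g., for OpenCL or CUDA memory regions,
  // where a zero bit-pattern is not necessarily an invalid address.)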
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
                           TCK == TCK_UpcastToVirtualBase;
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !SkipNullCheck) {
    // The glvalue must not be an empty glvalue.
    llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);

    if (AllowNullPointers) {
      // When performing pointer casts, it's OK if the value is null.
      // Skip the remaining checks in that case.
      Done = createBasicBlock("null");
      llvm::BasicBlock *Rest = createBasicBlock("not.null");
      Builder.CreateCondBr(IsNonNull, Rest, Done);
      EmitBlock(Rest);
    } else {
      Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();

    // The glvalue must refer to a large enough storage region.
    // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    // FIXME: Get object address space
    llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
    llvm::Value *LargeEnough =
        Builder.CreateICmpUGE(Builder.CreateCall(F, {CastAddr, Min}),
                              llvm::ConstantInt::get(IntPtrTy, Size));
    Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
  }

  uint64_t AlignVal = 0;

  if (SanOpts.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal) {
      llvm::Value *Align =
          Builder.CreateAnd(Builder.CreatePtrToInt(Ptr, IntPtrTy),
                            llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc),
      EmitCheckTypeDescriptor(Ty),
      llvm::ConstantInt::get(SizeTy, AlignVal),
      llvm::ConstantInt::get(Int8Ty, TCK)
    };
    EmitCheck(Checks, "type_mismatch", StaticData, Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  if (SanOpts.has(SanitizerKind::Vptr) &&
      (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
       TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
       TCK == TCK_UpcastToVirtualBase) &&
      RD && RD->hasDefinition() && RD->isDynamicClass()) {
    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic!
    //        Move to a fingerprinting mechanism once LLVM provides one. For
    //        the time being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Blacklist based on the mangled type.
    if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
            Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal =
          Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
                                    getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                "dynamic_type_cache_miss", StaticData, DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
static bool isFlexibleArrayMemberExpr(const Expr *E) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  }

  return false;
}

/// If Base is known to point to the start of an array, return the length of
/// that array.
/// Return null if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).first;
    }
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  QualType IndexedType;
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds",
            StaticData, Index);
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
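  // For example (hypothetical source), a cast like `(int (*)[n])p` has a
  // variably modified type, so the size expression `n` must be evaluated
  // and bound here for later use.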
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  AlignmentSource *Source) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        AlignmentSource InnerSource;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), &InnerSource);
        if (Source) *Source = InnerSource;

        // If this is an explicit bitcast, and the source l-value is
        // opaque, honor the alignment of the casted-to type.
        if (isa<ExplicitCastExpr>(CE) &&
            InnerSource != AlignmentSource::Decl) {
          Addr = Address(Addr.getPointer(),
                         getNaturalPointeeTypeAlignment(E->getType(), Source));
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getLocStart());
        }

        return Builder.CreateBitCast(Addr, ConvertType(E->getType()));
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), Source);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), Source);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (Source) *Source = LV.getAlignmentSource();
      return LV.getAddress();
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), Source);
  return Address(EmitScalarExpr(E), Align);
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address. Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
                        E->getType());
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
                  E->getType(), LV.getAlignment());
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
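///
/// For example, an ordinary member access like `s.i` yields a simple address,
/// while an access to a bit-field member yields a bitfield reference.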
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitLambdaLValue(cast<LambdaExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    return EmitLValue(cleanups->getSubExpr());
  }

  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXDefaultInitExprClass: {
    CXXDefaultInitExprScope Scope(*this);
    return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return
        EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type?  This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules).  For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value.
/// This is actually more than an optimization: we can't produce an
/// l-value for variables that we never actually captured in a block or
/// lambda, which means const int variables or constexpr literals or
/// similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, C);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, C);
  }

  // If we emitted a reference constant, we need to dereference that.
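  // (forReference hands back the address constant; the caller is responsible
  // for loading through it.)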
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getAlignmentSource(),
                          lvalue.getTBAAInfo(),
                          lvalue.getTBAABaseType(), lvalue.getTBAAOffset(),
                          lvalue.isNontemporal());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  bool IsBool = hasBooleanRepresentation(Ty);
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End,
                       CGM.getCodeGenOpts().StrictEnums))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty,
                                               SourceLocation Loc,
                                               AlignmentSource AlignSource,
                                               llvm::MDNode *TBAAInfo,
                                               QualType TBAABaseType,
                                               uint64_t TBAAOffset,
                                               bool isNontemporal) {
  // For better performance, handle vector loads differently.
  if (Ty->isVectorType()) {
    const llvm::Type *EltTy = Addr.getElementType();

    const auto *VTy = cast<llvm::VectorType>(EltTy);

    // Handle vectors of size 3 like size 4 for better performance.
    if (VTy->getNumElements() == 3) {

      // Bitcast to vec4 type.
      llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
                                                       4);
      Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
      // Now load value.
      llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");

      // Shuffle vector to get vec3.
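      // The shuffle mask <0,1,2> below keeps only the first three lanes of
      // the vec4 load, dropping the padding lane.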
      V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
                                      {0, 1, 2}, "extractVec");
      return EmitFromMemory(V, Ty);
    }
  }

  // Atomic operations have to be done on integral types.
  if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) {
    LValue lvalue =
        LValue::MakeAddr(Addr, Ty, getContext(), AlignSource, TBAAInfo);
    return EmitAtomicLoad(lvalue, Loc).getScalarVal();
  }

  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node = llvm::MDNode::get(
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }
  if (TBAAInfo) {
    llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
                                                      TBAAOffset);
    if (TBAAPath)
      CGM.DecorateInstructionWithTBAA(Load, TBAAPath,
                                      false /*ConvertTypeToTag*/);
  }

  bool NeedsBoolCheck =
      SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty);
  bool NeedsEnumCheck =
      SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
  if (NeedsBoolCheck || NeedsEnumCheck) {
    SanitizerScope SanScope(this);
    llvm::APInt Min, End;
    if (getRangeForType(*this, Ty, Min, End, true)) {
      --End;
      llvm::Value *Check;
      if (!Min)
        Check = Builder.CreateICmpULE(
            Load, llvm::ConstantInt::get(getLLVMContext(), End));
      else {
        llvm::Value *Upper = Builder.CreateICmpSLE(
            Load, llvm::ConstantInt::get(getLLVMContext(), End));
        llvm::Value *Lower = Builder.CreateICmpSGE(
            Load, llvm::ConstantInt::get(getLLVMContext(), Min));
        Check = Builder.CreateAnd(Upper, Lower);
      }
      llvm::Constant *StaticArgs[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty)
      };
      SanitizerMask Kind =
          NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
      EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs,
                EmitCheckValue(Load));
    }
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);

  return EmitFromMemory(Load, Ty);
}

llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
  if (hasBooleanRepresentation(Ty)) {
    // This should really always be an i1, but sometimes it's already
    // an i8, and it's awkward to track those cases down.
    if (Value->getType()->isIntegerTy(1))
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
  }

  return Value;
}

llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
  // Bool has a different representation in memory than in registers.
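  // (Typically i8 in memory but i1 in registers, so values coming from
  // memory are truncated back to i1 below.)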
  if (hasBooleanRepresentation(Ty)) {
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
           "wrong value rep of bool");
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
  }

  return Value;
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
                                        bool Volatile, QualType Ty,
                                        AlignmentSource AlignSource,
                                        llvm::MDNode *TBAAInfo,
                                        bool isInit, QualType TBAABaseType,
                                        uint64_t TBAAOffset,
                                        bool isNontemporal) {

  // Handle vectors differently to get better performance.
  if (Ty->isVectorType()) {
    llvm::Type *SrcTy = Value->getType();
    auto *VecTy = cast<llvm::VectorType>(SrcTy);
    // Handle vec3 special.
    if (VecTy->getNumElements() == 3) {
      // Our source is a vec3, do a shuffle vector to make it a vec4.
      llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
                                Builder.getInt32(2),
                                llvm::UndefValue::get(Builder.getInt32Ty())};
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Value = Builder.CreateShuffleVector(Value,
                                          llvm::UndefValue::get(VecTy),
                                          MaskV, "extractVec");
      SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
    }
    if (Addr.getElementType() != SrcTy) {
      Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
    }
  }

  Value = EmitToMemory(Value, Ty);

  if (Ty->isAtomicType() ||
      (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
    EmitAtomicStore(RValue::get(Value),
                    LValue::MakeAddr(Addr, Ty, getContext(),
                                     AlignSource, TBAAInfo),
                    isInit);
    return;
  }

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (isNontemporal) {
    llvm::MDNode *Node =
        llvm::MDNode::get(Store->getContext(),
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
    Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
  }
  if (TBAAInfo) {
    llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
                                                      TBAAOffset);
    if (TBAAPath)
      CGM.DecorateInstructionWithTBAA(Store, TBAAPath,
                                      false /*ConvertTypeToTag*/);
  }
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getType(), lvalue.getAlignmentSource(),
                    lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
                    lvalue.getTBAAOffset(), lvalue.isNontemporal());
}

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    Address AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    // In MRC mode, we do a load+autorelease.
    if (!getLangOpts().ObjCAutoRefCount) {
      return RValue::get(EmitARCLoadWeak(LV.getAddress()));
    }

    // In ARC mode, we load retained and then consume the value.
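    // (The weak load returns at +1; EmitObjCConsumeObject arranges for the
    // balancing release at the end of the full-expression.)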
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
                                              LV.isVolatileQualified());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  Address Ptr = LV.getBitFieldAddress();
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(),
                                        "bf.load");

  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Info.Offset + HighBits)
      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
  } else {
    if (Info.Offset)
      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
    if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                              Info.Size),
                              "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");

  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
                                        LV.isVolatileQualified());

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// @brief Generates lvalue for partial ext_vector access.
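///
/// For example (hypothetical source), given `float4 v;` an access like
/// `v.yz` yields the address of element 1, the first element named by the
/// accessor.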
1521 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
1522 Address VectorAddress = LV.getExtVectorAddress();
1523 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1524 QualType EQT = ExprVT->getElementType();
1525 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
1526
1527 Address CastToPointerElement =
1528 Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
1529 "conv.ptr.element");
1530
1531 const llvm::Constant *Elts = LV.getExtVectorElts();
1532 unsigned ix = getAccessedFieldNo(0, Elts);
1533
1534 Address VectorBasePtrPlusIx =
1535 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
1536 getContext().getTypeSizeInChars(EQT),
1537 "vector.elt");
1538
1539 return VectorBasePtrPlusIx;
1540 }
1541
1542 /// @brief Loads of global named registers are always calls to intrinsics.
1543 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
1544 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
1545 "Bad type for register variable");
1546 llvm::MDNode *RegName = cast<llvm::MDNode>(
1547 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
1548
1549 // We accept integer and pointer types only
1550 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
1551 llvm::Type *Ty = OrigTy;
1552 if (OrigTy->isPointerTy())
1553 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
1554 llvm::Type *Types[] = { Ty };
1555
1556 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
1557 llvm::Value *Call = Builder.CreateCall(
1558 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
1559 if (OrigTy->isPointerTy())
1560 Call = Builder.CreateIntToPtr(Call, OrigTy);
1561 return RValue::get(Call);
1562 }
1563
1564
1565 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
1566 /// lvalue, where both are guaranteed to have the same type, and that type
1567 /// is 'Ty'.
1568 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
1569 bool isInit) {
1570 if (!Dst.isSimple()) {
1571 if (Dst.isVectorElt()) {
1572 // Read/modify/write the vector, inserting the new element.
1573 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
1574 Dst.isVolatileQualified());
1575 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
1576 Dst.getVectorIdx(), "vecins");
1577 Builder.CreateStore(Vec, Dst.getVectorAddress(),
1578 Dst.isVolatileQualified());
1579 return;
1580 }
1581
1582 // If this is an update of extended vector elements, insert them as
1583 // appropriate.
1584 if (Dst.isExtVectorElt())
1585 return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
1586
1587 if (Dst.isGlobalReg())
1588 return EmitStoreThroughGlobalRegLValue(Src, Dst);
1589
1590 assert(Dst.isBitField() && "Unknown LValue type");
1591 return EmitStoreThroughBitfieldLValue(Src, Dst);
1592 }
1593
1594 // There's special magic for assigning into an ARC-qualified l-value.
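// For illustration only (assumed source, not emitted verbatim):
//   __weak id w;   w = x;   // handled by EmitARCStoreWeak below
//   __strong id s; s = x;   // handled by EmitARCStoreStrong below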
1595 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
1596 switch (Lifetime) {
1597 case Qualifiers::OCL_None:
1598 llvm_unreachable("present but none");
1599
1600 case Qualifiers::OCL_ExplicitNone:
1601 // nothing special
1602 break;
1603
1604 case Qualifiers::OCL_Strong:
1605 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
1606 return;
1607
1608 case Qualifiers::OCL_Weak:
1609 EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
1610 return;
1611
1612 case Qualifiers::OCL_Autoreleasing:
1613 Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
1614 Src.getScalarVal()));
1615 // fall into the normal path
1616 break;
1617 }
1618 }
1619
1620 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
1621 // Store into a __weak object.
1622 Address LvalueDst = Dst.getAddress();
1623 llvm::Value *src = Src.getScalarVal();
1624 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
1625 return;
1626 }
1627
1628 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
1629 // Store into a __strong object.
1630 Address LvalueDst = Dst.getAddress();
1631 llvm::Value *src = Src.getScalarVal();
1632 if (Dst.isObjCIvar()) {
1633 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
1634 llvm::Type *ResultType = IntPtrTy;
1635 Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
1636 llvm::Value *RHS = dst.getPointer();
1637 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1638 llvm::Value *LHS =
1639 Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
1640 "sub.ptr.lhs.cast");
1641 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1642 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1643 BytesBetween);
1644 } else if (Dst.isGlobalObjCRef()) {
1645 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1646 Dst.isThreadLocalRef());
1647 }
1648 else
1649 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
1650 return;
1651 }
1652
1653 assert(Src.isScalar() && "Can't emit an agg store with this method");
1654 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
1655 }
1656
1657 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
1658 llvm::Value **Result) {
1659 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
1660 llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
1661 Address Ptr = Dst.getBitFieldAddress();
1662
1663 // Get the source value, truncated to the width of the bit-field.
1664 llvm::Value *SrcVal = Src.getScalarVal();
1665
1666 // Cast the source to the storage type and shift it into place.
1667 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
1668 /*IsSigned=*/false);
1669 llvm::Value *MaskedVal = SrcVal;
1670
1671 // See if there are other bits in the bitfield's storage we'll need to load
1672 // and mask together with the source before storing.
1673 if (Info.StorageSize != Info.Size) {
1674 assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
1675 llvm::Value *Val =
1676 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
1677
1678 // Mask the source value as needed.
1679 if (!hasBooleanRepresentation(Dst.getType()))
1680 SrcVal = Builder.CreateAnd(SrcVal,
1681 llvm::APInt::getLowBitsSet(Info.StorageSize,
1682 Info.Size),
1683 "bf.value");
1684 MaskedVal = SrcVal;
1685 if (Info.Offset)
1686 SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
1687
1688 // Mask out the original value.
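// Illustrative example with assumed values: for StorageSize=16, Offset=3,
// Size=5, the mask below is ~getBitsSet(16, 3, 8) == 0xFF07, clearing
// bits [3,8) of the loaded storage so the shifted source can be OR'd in.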
1689 Val = Builder.CreateAnd(Val,
1690 ~llvm::APInt::getBitsSet(Info.StorageSize,
1691 Info.Offset,
1692 Info.Offset + Info.Size),
1693 "bf.clear");
1694
1695 // Or together the unchanged values and the source value.
1696 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
1697 } else {
1698 assert(Info.Offset == 0);
1699 }
1700
1701 // Write the new value back out.
1702 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
1703
1704 // Return the new value of the bit-field, if requested.
1705 if (Result) {
1706 llvm::Value *ResultVal = MaskedVal;
1707
1708 // Sign extend the value if needed.
1709 if (Info.IsSigned) {
1710 assert(Info.Size <= Info.StorageSize);
1711 unsigned HighBits = Info.StorageSize - Info.Size;
1712 if (HighBits) {
1713 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
1714 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
1715 }
1716 }
1717
1718 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
1719 "bf.result.cast");
1720 *Result = EmitFromMemory(ResultVal, Dst.getType());
1721 }
1722 }
1723
1724 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
1725 LValue Dst) {
1726 // This access turns into a read/modify/write of the vector. Load the input
1727 // value now.
1728 llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
1729 Dst.isVolatileQualified());
1730 const llvm::Constant *Elts = Dst.getExtVectorElts();
1731
1732 llvm::Value *SrcVal = Src.getScalarVal();
1733
1734 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
1735 unsigned NumSrcElts = VTy->getNumElements();
1736 unsigned NumDstElts =
1737 cast<llvm::VectorType>(Vec->getType())->getNumElements();
1738 if (NumDstElts == NumSrcElts) {
1739 // Use a shuffle vector if the source and destination have the same
1740 // number of elements, building the mask so each source element lands
1741 // in the position where it will be stored.
1742 SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
1743 for (unsigned i = 0; i != NumSrcElts; ++i)
1744 Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
1745
1746 llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1747 Vec = Builder.CreateShuffleVector(SrcVal,
1748 llvm::UndefValue::get(Vec->getType()),
1749 MaskV);
1750 } else if (NumDstElts > NumSrcElts) {
1751 // Extend the source vector to the destination's length, then shuffle
1752 // it into the destination.
1753 // FIXME: since we're shuffling with undef, can we just use the indices
1754 // into that? This could be simpler.
1755 SmallVector<llvm::Constant*, 4> ExtMask;
1756 for (unsigned i = 0; i != NumSrcElts; ++i)
1757 ExtMask.push_back(Builder.getInt32(i));
1758 ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
1759 llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
1760 llvm::Value *ExtSrcVal =
1761 Builder.CreateShuffleVector(SrcVal,
1762 llvm::UndefValue::get(SrcVal->getType()),
1763 ExtMaskV);
1764 // Build an identity mask.
1765 SmallVector<llvm::Constant*, 4> Mask;
1766 for (unsigned i = 0; i != NumDstElts; ++i)
1767 Mask.push_back(Builder.getInt32(i));
1768
1769 // When the vector size is odd and .odd or .hi is used, the last element
1770 // of the Elts constant array will be one past the size of the vector.
1771 // Ignore the last element here, if it is greater than the mask size.
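// For example (illustrative): writing to 'V.hi' of a 3-element ext vector
// accesses elements {2, 3}; index 3 is one past the end of the vector, and
// the check below drops that trailing out-of-range index.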
1772 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
1773 NumSrcElts--;
1774
1775 // Modify the mask so the shuffled-in elements come from the source.
1776 for (unsigned i = 0; i != NumSrcElts; ++i)
1777 Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
1778 llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1779 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
1780 } else {
1781 // We should never shorten the vector.
1782 llvm_unreachable("unexpected shorten vector length");
1783 }
1784 } else {
1785 // If the Src is a scalar (not a vector) it must be updating one element.
1786 unsigned InIdx = getAccessedFieldNo(0, Elts);
1787 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
1788 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
1789 }
1790
1791 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
1792 Dst.isVolatileQualified());
1793 }
1794
1795 /// @brief Stores of global named registers are always calls to intrinsics.
1796 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
1797 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
1798 "Bad type for register variable");
1799 llvm::MDNode *RegName = cast<llvm::MDNode>(
1800 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
1801 assert(RegName && "Register LValue is not metadata");
1802
1803 // We accept integer and pointer types only
1804 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
1805 llvm::Type *Ty = OrigTy;
1806 if (OrigTy->isPointerTy())
1807 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
1808 llvm::Type *Types[] = { Ty };
1809
1810 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
1811 llvm::Value *Value = Src.getScalarVal();
1812 if (OrigTy->isPointerTy())
1813 Value = Builder.CreatePtrToInt(Value, Ty);
1814 Builder.CreateCall(
1815 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
1816 }
1817
1818 // setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
1819 // generating the write-barrier API. It is currently a global, an ivar,
1820 // or neither.
1821 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
1822 LValue &LV,
1823 bool IsMemberAccess=false) {
1824 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
1825 return;
1826
1827 if (isa<ObjCIvarRefExpr>(E)) {
1828 QualType ExpTy = E->getType();
1829 if (IsMemberAccess && ExpTy->isPointerType()) {
1830 // If the ivar is a structure pointer, assigning to a field of the
1831 // struct follows gcc's behavior and conservatively makes it a
1832 // non-ivar write-barrier.
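// e.g. (assumed source) with '@interface I { struct S *P; }', a store
// such as 'i->P->f = 0' is treated as a plain store, not an ivar store.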
1833 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1834 if (ExpTy->isRecordType()) {
1835 LV.setObjCIvar(false);
1836 return;
1837 }
1838 }
1839 LV.setObjCIvar(true);
1840 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
1841 LV.setBaseIvarExp(Exp->getBase());
1842 LV.setObjCArray(E->getType()->isArrayType());
1843 return;
1844 }
1845
1846 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
1847 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
1848 if (VD->hasGlobalStorage()) {
1849 LV.setGlobalObjCRef(true);
1850 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
1851 }
1852 }
1853 LV.setObjCArray(E->getType()->isArrayType());
1854 return;
1855 }
1856
1857 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
1858 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1859 return;
1860 }
1861
1862 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
1863 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1864 if (LV.isObjCIvar()) {
1865 // If cast is to a structure pointer, follow gcc's behavior and make it
1866 // a non-ivar write-barrier.
1867 QualType ExpTy = E->getType();
1868 if (ExpTy->isPointerType())
1869 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
1870 if (ExpTy->isRecordType())
1871 LV.setObjCIvar(false);
1872 }
1873 return;
1874 }
1875
1876 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
1877 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
1878 return;
1879 }
1880
1881 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
1882 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1883 return;
1884 }
1885
1886 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
1887 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1888 return;
1889 }
1890
1891 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
1892 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
1893 return;
1894 }
1895
1896 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
1897 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
1898 if (LV.isObjCIvar() && !LV.isObjCArray())
1899 // Using array syntax to assign to what an ivar points to is not the
1900 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
1901 LV.setObjCIvar(false);
1902 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
1903 // Using array syntax to assign to what a global points to is not the
1904 // same as assigning to the global itself. {id *G;} G[i] = 0;
1905 LV.setGlobalObjCRef(false);
1906 return;
1907 }
1908
1909 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
1910 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
1911 // We don't know if the member is an 'ivar', but this flag is looked at
1912 // only in the context of LV.isObjCIvar().
1913 LV.setObjCArray(E->getType()->isArrayType()); 1914 return; 1915 } 1916 } 1917 1918 static llvm::Value * 1919 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, 1920 llvm::Value *V, llvm::Type *IRType, 1921 StringRef Name = StringRef()) { 1922 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 1923 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name); 1924 } 1925 1926 static LValue EmitThreadPrivateVarDeclLValue( 1927 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, 1928 llvm::Type *RealVarTy, SourceLocation Loc) { 1929 Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc); 1930 Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy); 1931 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); 1932 } 1933 1934 Address CodeGenFunction::EmitLoadOfReference(Address Addr, 1935 const ReferenceType *RefTy, 1936 AlignmentSource *Source) { 1937 llvm::Value *Ptr = Builder.CreateLoad(Addr); 1938 return Address(Ptr, getNaturalTypeAlignment(RefTy->getPointeeType(), 1939 Source, /*forPointee*/ true)); 1940 1941 } 1942 1943 LValue CodeGenFunction::EmitLoadOfReferenceLValue(Address RefAddr, 1944 const ReferenceType *RefTy) { 1945 AlignmentSource Source; 1946 Address Addr = EmitLoadOfReference(RefAddr, RefTy, &Source); 1947 return MakeAddrLValue(Addr, RefTy->getPointeeType(), Source); 1948 } 1949 1950 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, 1951 const PointerType *PtrTy, 1952 AlignmentSource *Source) { 1953 llvm::Value *Addr = Builder.CreateLoad(Ptr); 1954 return Address(Addr, getNaturalTypeAlignment(PtrTy->getPointeeType(), Source, 1955 /*forPointeeType=*/true)); 1956 } 1957 1958 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, 1959 const PointerType *PtrTy) { 1960 AlignmentSource Source; 1961 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &Source); 1962 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), Source); 1963 } 1964 1965 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, 1966 const Expr *E, const VarDecl *VD) { 1967 QualType T = E->getType(); 1968 1969 // If it's thread_local, emit a call to its wrapper function instead. 1970 if (VD->getTLSKind() == VarDecl::TLS_Dynamic && 1971 CGF.CGM.getCXXABI().usesThreadWrapperFunction()) 1972 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T); 1973 1974 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); 1975 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); 1976 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); 1977 CharUnits Alignment = CGF.getContext().getDeclAlign(VD); 1978 Address Addr(V, Alignment); 1979 LValue LV; 1980 // Emit reference to the private copy of the variable if it is an OpenMP 1981 // threadprivate variable. 
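// e.g. (illustrative): for 'int X; #pragma omp threadprivate(X)', a use of
// 'X' is redirected below through the OpenMP runtime's per-thread copy
// rather than the global itself.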
1982 if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) 1983 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, 1984 E->getExprLoc()); 1985 if (auto RefTy = VD->getType()->getAs<ReferenceType>()) { 1986 LV = CGF.EmitLoadOfReferenceLValue(Addr, RefTy); 1987 } else { 1988 LV = CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); 1989 } 1990 setObjCGCLValueClass(CGF.getContext(), E, LV); 1991 return LV; 1992 } 1993 1994 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, 1995 const Expr *E, const FunctionDecl *FD) { 1996 llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD); 1997 if (!FD->hasPrototype()) { 1998 if (const FunctionProtoType *Proto = 1999 FD->getType()->getAs<FunctionProtoType>()) { 2000 // Ugly case: for a K&R-style definition, the type of the definition 2001 // isn't the same as the type of a use. Correct for this with a 2002 // bitcast. 2003 QualType NoProtoType = 2004 CGF.getContext().getFunctionNoProtoType(Proto->getReturnType()); 2005 NoProtoType = CGF.getContext().getPointerType(NoProtoType); 2006 V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType)); 2007 } 2008 } 2009 CharUnits Alignment = CGF.getContext().getDeclAlign(FD); 2010 return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl); 2011 } 2012 2013 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, 2014 llvm::Value *ThisValue) { 2015 QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); 2016 LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); 2017 return CGF.EmitLValueForField(LV, FD); 2018 } 2019 2020 /// Named Registers are named metadata pointing to the register name 2021 /// which will be read from/written to as an argument to the intrinsic 2022 /// @llvm.read/write_register. 2023 /// So far, only the name is being passed down, but other options such as 2024 /// register type, allocation type or even optimization options could be 2025 /// passed down via the metadata node. 2026 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { 2027 SmallString<64> Name("llvm.named.register."); 2028 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); 2029 assert(Asm->getLabel().size() < 64-Name.size() && 2030 "Register name too big"); 2031 Name.append(Asm->getLabel()); 2032 llvm::NamedMDNode *M = 2033 CGM.getModule().getOrInsertNamedMetadata(Name); 2034 if (M->getNumOperands() == 0) { 2035 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), 2036 Asm->getLabel()); 2037 llvm::Metadata *Ops[] = {Str}; 2038 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); 2039 } 2040 2041 CharUnits Alignment = CGM.getContext().getDeclAlign(VD); 2042 2043 llvm::Value *Ptr = 2044 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)); 2045 return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType()); 2046 } 2047 2048 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 2049 const NamedDecl *ND = E->getDecl(); 2050 QualType T = E->getType(); 2051 2052 if (const auto *VD = dyn_cast<VarDecl>(ND)) { 2053 // Global Named registers access via intrinsics only 2054 if (VD->getStorageClass() == SC_Register && 2055 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) 2056 return EmitGlobalNamedRegister(VD, CGM); 2057 2058 // A DeclRefExpr for a reference initialized by a constant expression can 2059 // appear without being odr-used. Directly emit the constant initializer. 
2060 const Expr *Init = VD->getAnyInitializer(VD);
2061 if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
2062 VD->isUsableInConstantExpressions(getContext()) &&
2063 VD->checkInitIsICE() &&
2064 // Do not emit if it is a private OpenMP variable.
2065 !(E->refersToEnclosingVariableOrCapture() && CapturedStmtInfo &&
2066 LocalDeclMap.count(VD))) {
2067 llvm::Constant *Val =
2068 CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this);
2069 assert(Val && "failed to emit reference constant expression");
2070 // FIXME: Eventually we will want to emit vector element references.
2071
2072 // Should we be using the alignment of the constant pointer we emitted?
2073 CharUnits Alignment = getNaturalTypeAlignment(E->getType(), nullptr,
2074 /*pointee*/ true);
2075
2076 return MakeAddrLValue(Address(Val, Alignment), T, AlignmentSource::Decl);
2077 }
2078
2079 // Check for captured variables.
2080 if (E->refersToEnclosingVariableOrCapture()) {
2081 if (auto *FD = LambdaCaptureFields.lookup(VD))
2082 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
2083 else if (CapturedStmtInfo) {
2084 auto it = LocalDeclMap.find(VD);
2085 if (it != LocalDeclMap.end()) {
2086 if (auto RefTy = VD->getType()->getAs<ReferenceType>()) {
2087 return EmitLoadOfReferenceLValue(it->second, RefTy);
2088 }
2089 return MakeAddrLValue(it->second, T);
2090 }
2091 LValue CapLVal =
2092 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
2093 CapturedStmtInfo->getContextValue());
2094 return MakeAddrLValue(
2095 Address(CapLVal.getPointer(), getContext().getDeclAlign(VD)),
2096 CapLVal.getType(), AlignmentSource::Decl);
2097 }
2098
2099 assert(isa<BlockDecl>(CurCodeDecl));
2100 Address addr = GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>());
2101 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
2102 }
2103 }
2104
2105 // FIXME: We should be able to assert this for FunctionDecls as well!
2106 // FIXME: We should be able to assert this for all DeclRefExprs, not just
2107 // those with a valid source location.
2108 assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
2109 !E->getLocation().isValid()) &&
2110 "Should not use decl without marking it used!");
2111
2112 if (ND->hasAttr<WeakRefAttr>()) {
2113 const auto *VD = cast<ValueDecl>(ND);
2114 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
2115 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
2116 }
2117
2118 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2119 // Check if this is a global variable.
2120 if (VD->hasLinkage() || VD->isStaticDataMember())
2121 return EmitGlobalVarDeclLValue(*this, E, VD);
2122
2123 Address addr = Address::invalid();
2124
2125 // The variable should generally be present in the local decl map.
2126 auto iter = LocalDeclMap.find(VD);
2127 if (iter != LocalDeclMap.end()) {
2128 addr = iter->second;
2129
2130 // Otherwise, it might be a static local we haven't emitted yet for
2131 // some reason; most likely, because it's in an outer function.
2132 } else if (VD->isStaticLocal()) {
2133 addr = Address(CGM.getOrCreateStaticVarDecl(
2134 *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)),
2135 getContext().getDeclAlign(VD));
2136
2137 // No other cases for now.
2138 } else {
2139 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
2140 }
2141
2142
2143 // Check for OpenMP threadprivate variables.
2144 if (getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) { 2145 return EmitThreadPrivateVarDeclLValue( 2146 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()), 2147 E->getExprLoc()); 2148 } 2149 2150 // Drill into block byref variables. 2151 bool isBlockByref = VD->hasAttr<BlocksAttr>(); 2152 if (isBlockByref) { 2153 addr = emitBlockByrefAddress(addr, VD); 2154 } 2155 2156 // Drill into reference types. 2157 LValue LV; 2158 if (auto RefTy = VD->getType()->getAs<ReferenceType>()) { 2159 LV = EmitLoadOfReferenceLValue(addr, RefTy); 2160 } else { 2161 LV = MakeAddrLValue(addr, T, AlignmentSource::Decl); 2162 } 2163 2164 bool isLocalStorage = VD->hasLocalStorage(); 2165 2166 bool NonGCable = isLocalStorage && 2167 !VD->getType()->isReferenceType() && 2168 !isBlockByref; 2169 if (NonGCable) { 2170 LV.getQuals().removeObjCGCAttr(); 2171 LV.setNonGC(true); 2172 } 2173 2174 bool isImpreciseLifetime = 2175 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); 2176 if (isImpreciseLifetime) 2177 LV.setARCPreciseLifetime(ARCImpreciseLifetime); 2178 setObjCGCLValueClass(getContext(), E, LV); 2179 return LV; 2180 } 2181 2182 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2183 return EmitFunctionDeclLValue(*this, E, FD); 2184 2185 llvm_unreachable("Unhandled DeclRefExpr"); 2186 } 2187 2188 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 2189 // __extension__ doesn't affect lvalue-ness. 2190 if (E->getOpcode() == UO_Extension) 2191 return EmitLValue(E->getSubExpr()); 2192 2193 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 2194 switch (E->getOpcode()) { 2195 default: llvm_unreachable("Unknown unary operator lvalue!"); 2196 case UO_Deref: { 2197 QualType T = E->getSubExpr()->getType()->getPointeeType(); 2198 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 2199 2200 AlignmentSource AlignSource; 2201 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &AlignSource); 2202 LValue LV = MakeAddrLValue(Addr, T, AlignSource); 2203 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); 2204 2205 // We should not generate __weak write barrier on indirect reference 2206 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 2207 // But, we continue to generate __strong write barrier on indirect write 2208 // into a pointer to object. 2209 if (getLangOpts().ObjC1 && 2210 getLangOpts().getGC() != LangOptions::NonGC && 2211 LV.isObjCWeak()) 2212 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 2213 return LV; 2214 } 2215 case UO_Real: 2216 case UO_Imag: { 2217 LValue LV = EmitLValue(E->getSubExpr()); 2218 assert(LV.isSimple() && "real/imag on non-ordinary l-value"); 2219 2220 // __real is valid on scalars. This is a faster way of testing that. 2221 // __imag can only produce an rvalue on scalars. 2222 if (E->getOpcode() == UO_Real && 2223 !LV.getAddress().getElementType()->isStructTy()) { 2224 assert(E->getSubExpr()->getType()->isArithmeticType()); 2225 return LV; 2226 } 2227 2228 assert(E->getSubExpr()->getType()->isAnyComplexType()); 2229 2230 Address Component = 2231 (E->getOpcode() == UO_Real 2232 ? 
emitAddrOfRealComponent(LV.getAddress(), LV.getType()) 2233 : emitAddrOfImagComponent(LV.getAddress(), LV.getType())); 2234 return MakeAddrLValue(Component, ExprTy, LV.getAlignmentSource()); 2235 } 2236 case UO_PreInc: 2237 case UO_PreDec: { 2238 LValue LV = EmitLValue(E->getSubExpr()); 2239 bool isInc = E->getOpcode() == UO_PreInc; 2240 2241 if (E->getType()->isAnyComplexType()) 2242 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); 2243 else 2244 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); 2245 return LV; 2246 } 2247 } 2248 } 2249 2250 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 2251 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), 2252 E->getType(), AlignmentSource::Decl); 2253 } 2254 2255 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 2256 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), 2257 E->getType(), AlignmentSource::Decl); 2258 } 2259 2260 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 2261 auto SL = E->getFunctionName(); 2262 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); 2263 StringRef FnName = CurFn->getName(); 2264 if (FnName.startswith("\01")) 2265 FnName = FnName.substr(1); 2266 StringRef NameItems[] = { 2267 PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName}; 2268 std::string GVName = llvm::join(NameItems, NameItems + 2, "."); 2269 if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) { 2270 auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str()); 2271 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2272 } 2273 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); 2274 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2275 } 2276 2277 /// Emit a type description suitable for use by a runtime sanitizer library. The 2278 /// format of a type descriptor is 2279 /// 2280 /// \code 2281 /// { i16 TypeKind, i16 TypeInfo } 2282 /// \endcode 2283 /// 2284 /// followed by an array of i8 containing the type name. TypeKind is 0 for an 2285 /// integer, 1 for a floating point value, and -1 for anything else. 2286 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { 2287 // Only emit each type's descriptor once. 2288 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) 2289 return C; 2290 2291 uint16_t TypeKind = -1; 2292 uint16_t TypeInfo = 0; 2293 2294 if (T->isIntegerType()) { 2295 TypeKind = 0; 2296 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 2297 (T->isSignedIntegerType() ? 1 : 0); 2298 } else if (T->isFloatingType()) { 2299 TypeKind = 1; 2300 TypeInfo = getContext().getTypeSize(T); 2301 } 2302 2303 // Format the type name as if for a diagnostic, including quotes and 2304 // optionally an 'aka'. 2305 SmallString<32> Buffer; 2306 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 2307 (intptr_t)T.getAsOpaquePtr(), 2308 StringRef(), StringRef(), None, Buffer, 2309 None); 2310 2311 llvm::Constant *Components[] = { 2312 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 2313 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 2314 }; 2315 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 2316 2317 auto *GV = new llvm::GlobalVariable( 2318 CGM.getModule(), Descriptor->getType(), 2319 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); 2320 GV->setUnnamedAddr(true); 2321 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); 2322 2323 // Remember the descriptor for this type. 
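// As a rough illustration (assuming a 32-bit 'int'), the descriptor global
// resembles { i16 0, i16 11, [6 x i8] c"'int'\00" }: TypeKind 0 for an
// integer, and TypeInfo (log2(32) << 1) | 1 == 11 for a signed type.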
2324 CGM.setTypeDescriptorInMap(T, GV); 2325 2326 return GV; 2327 } 2328 2329 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 2330 llvm::Type *TargetTy = IntPtrTy; 2331 2332 // Floating-point types which fit into intptr_t are bitcast to integers 2333 // and then passed directly (after zero-extension, if necessary). 2334 if (V->getType()->isFloatingPointTy()) { 2335 unsigned Bits = V->getType()->getPrimitiveSizeInBits(); 2336 if (Bits <= TargetTy->getIntegerBitWidth()) 2337 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), 2338 Bits)); 2339 } 2340 2341 // Integers which fit in intptr_t are zero-extended and passed directly. 2342 if (V->getType()->isIntegerTy() && 2343 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 2344 return Builder.CreateZExt(V, TargetTy); 2345 2346 // Pointers are passed directly, everything else is passed by address. 2347 if (!V->getType()->isPointerTy()) { 2348 Address Ptr = CreateDefaultAlignTempAlloca(V->getType()); 2349 Builder.CreateStore(V, Ptr); 2350 V = Ptr.getPointer(); 2351 } 2352 return Builder.CreatePtrToInt(V, TargetTy); 2353 } 2354 2355 /// \brief Emit a representation of a SourceLocation for passing to a handler 2356 /// in a sanitizer runtime library. The format for this data is: 2357 /// \code 2358 /// struct SourceLocation { 2359 /// const char *Filename; 2360 /// int32_t Line, Column; 2361 /// }; 2362 /// \endcode 2363 /// For an invalid SourceLocation, the Filename pointer is null. 2364 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { 2365 llvm::Constant *Filename; 2366 int Line, Column; 2367 2368 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); 2369 if (PLoc.isValid()) { 2370 auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src"); 2371 CGM.getSanitizerMetadata()->disableSanitizerForGlobal( 2372 cast<llvm::GlobalVariable>(FilenameGV.getPointer())); 2373 Filename = FilenameGV.getPointer(); 2374 Line = PLoc.getLine(); 2375 Column = PLoc.getColumn(); 2376 } else { 2377 Filename = llvm::Constant::getNullValue(Int8PtrTy); 2378 Line = Column = 0; 2379 } 2380 2381 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), 2382 Builder.getInt32(Column)}; 2383 2384 return llvm::ConstantStruct::getAnon(Data); 2385 } 2386 2387 namespace { 2388 /// \brief Specify under what conditions this check can be recovered 2389 enum class CheckRecoverableKind { 2390 /// Always terminate program execution if this check fails. 2391 Unrecoverable, 2392 /// Check supports recovering, runtime has both fatal (noreturn) and 2393 /// non-fatal handlers for this check. 2394 Recoverable, 2395 /// Runtime conditionally aborts, always need to support recovery. 
2396 AlwaysRecoverable 2397 }; 2398 } 2399 2400 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { 2401 assert(llvm::countPopulation(Kind) == 1); 2402 switch (Kind) { 2403 case SanitizerKind::Vptr: 2404 return CheckRecoverableKind::AlwaysRecoverable; 2405 case SanitizerKind::Return: 2406 case SanitizerKind::Unreachable: 2407 return CheckRecoverableKind::Unrecoverable; 2408 default: 2409 return CheckRecoverableKind::Recoverable; 2410 } 2411 } 2412 2413 static void emitCheckHandlerCall(CodeGenFunction &CGF, 2414 llvm::FunctionType *FnType, 2415 ArrayRef<llvm::Value *> FnArgs, 2416 StringRef CheckName, 2417 CheckRecoverableKind RecoverKind, bool IsFatal, 2418 llvm::BasicBlock *ContBB) { 2419 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); 2420 bool NeedsAbortSuffix = 2421 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; 2422 std::string FnName = ("__ubsan_handle_" + CheckName + 2423 (NeedsAbortSuffix ? "_abort" : "")).str(); 2424 bool MayReturn = 2425 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; 2426 2427 llvm::AttrBuilder B; 2428 if (!MayReturn) { 2429 B.addAttribute(llvm::Attribute::NoReturn) 2430 .addAttribute(llvm::Attribute::NoUnwind); 2431 } 2432 B.addAttribute(llvm::Attribute::UWTable); 2433 2434 llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction( 2435 FnType, FnName, 2436 llvm::AttributeSet::get(CGF.getLLVMContext(), 2437 llvm::AttributeSet::FunctionIndex, B)); 2438 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs); 2439 if (!MayReturn) { 2440 HandlerCall->setDoesNotReturn(); 2441 CGF.Builder.CreateUnreachable(); 2442 } else { 2443 CGF.Builder.CreateBr(ContBB); 2444 } 2445 } 2446 2447 void CodeGenFunction::EmitCheck( 2448 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, 2449 StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs, 2450 ArrayRef<llvm::Value *> DynamicArgs) { 2451 assert(IsSanitizerScope); 2452 assert(Checked.size() > 0); 2453 2454 llvm::Value *FatalCond = nullptr; 2455 llvm::Value *RecoverableCond = nullptr; 2456 llvm::Value *TrapCond = nullptr; 2457 for (int i = 0, n = Checked.size(); i < n; ++i) { 2458 llvm::Value *Check = Checked[i].first; 2459 // -fsanitize-trap= overrides -fsanitize-recover=. 2460 llvm::Value *&Cond = 2461 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) 2462 ? TrapCond 2463 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) 2464 ? RecoverableCond 2465 : FatalCond; 2466 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check; 2467 } 2468 2469 if (TrapCond) 2470 EmitTrapCheck(TrapCond); 2471 if (!FatalCond && !RecoverableCond) 2472 return; 2473 2474 llvm::Value *JointCond; 2475 if (FatalCond && RecoverableCond) 2476 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond); 2477 else 2478 JointCond = FatalCond ? FatalCond : RecoverableCond; 2479 assert(JointCond); 2480 2481 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second); 2482 assert(SanOpts.has(Checked[0].second)); 2483 #ifndef NDEBUG 2484 for (int i = 1, n = Checked.size(); i < n; ++i) { 2485 assert(RecoverKind == getRecoverableKind(Checked[i].second) && 2486 "All recoverable kinds in a single check must be same!"); 2487 assert(SanOpts.has(Checked[i].second)); 2488 } 2489 #endif 2490 2491 llvm::BasicBlock *Cont = createBasicBlock("cont"); 2492 llvm::BasicBlock *Handlers = createBasicBlock("handler." 
+ CheckName);
2493 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
2494 // Give a hint that we very much don't expect to execute the handler.
2495 // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
2496 llvm::MDBuilder MDHelper(getLLVMContext());
2497 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
2498 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
2499 EmitBlock(Handlers);
2500
2501 // Handler functions take an i8* pointing to the (handler-specific) static
2502 // information block, followed by a sequence of intptr_t arguments
2503 // representing operand values.
2504 SmallVector<llvm::Value *, 4> Args;
2505 SmallVector<llvm::Type *, 4> ArgTypes;
2506 Args.reserve(DynamicArgs.size() + 1);
2507 ArgTypes.reserve(DynamicArgs.size() + 1);
2508
2509 // Emit handler arguments and create handler function type.
2510 if (!StaticArgs.empty()) {
2511 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
2512 auto *InfoPtr =
2513 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
2514 llvm::GlobalVariable::PrivateLinkage, Info);
2515 InfoPtr->setUnnamedAddr(true);
2516 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
2517 Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
2518 ArgTypes.push_back(Int8PtrTy);
2519 }
2520
2521 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
2522 Args.push_back(EmitCheckValue(DynamicArgs[i]));
2523 ArgTypes.push_back(IntPtrTy);
2524 }
2525
2526 llvm::FunctionType *FnType =
2527 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
2528
2529 if (!FatalCond || !RecoverableCond) {
2530 // Simple case: we need to generate a single handler call, either
2531 // fatal or non-fatal.
2532 emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind,
2533 (FatalCond != nullptr), Cont);
2534 } else {
2535 // Emit two handler calls: the first for the set of unrecoverable
2536 // checks, the second for the recoverable ones.
2537 llvm::BasicBlock *NonFatalHandlerBB =
2538 createBasicBlock("non_fatal." + CheckName);
2539 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal."
+ CheckName); 2540 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB); 2541 EmitBlock(FatalHandlerBB); 2542 emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true, 2543 NonFatalHandlerBB); 2544 EmitBlock(NonFatalHandlerBB); 2545 emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false, 2546 Cont); 2547 } 2548 2549 EmitBlock(Cont); 2550 } 2551 2552 void CodeGenFunction::EmitCfiSlowPathCheck( 2553 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, 2554 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) { 2555 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont"); 2556 2557 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath"); 2558 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB); 2559 2560 llvm::MDBuilder MDHelper(getLLVMContext()); 2561 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); 2562 BI->setMetadata(llvm::LLVMContext::MD_prof, Node); 2563 2564 EmitBlock(CheckBB); 2565 2566 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind); 2567 2568 llvm::CallInst *CheckCall; 2569 if (WithDiag) { 2570 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 2571 auto *InfoPtr = 2572 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, 2573 llvm::GlobalVariable::PrivateLinkage, Info); 2574 InfoPtr->setUnnamedAddr(true); 2575 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); 2576 2577 llvm::Constant *SlowPathDiagFn = CGM.getModule().getOrInsertFunction( 2578 "__cfi_slowpath_diag", 2579 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, 2580 false)); 2581 CheckCall = Builder.CreateCall( 2582 SlowPathDiagFn, 2583 {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)}); 2584 } else { 2585 llvm::Constant *SlowPathFn = CGM.getModule().getOrInsertFunction( 2586 "__cfi_slowpath", 2587 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false)); 2588 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr}); 2589 } 2590 2591 CheckCall->setDoesNotThrow(); 2592 2593 EmitBlock(Cont); 2594 } 2595 2596 // This function is basically a switch over the CFI failure kind, which is 2597 // extracted from CFICheckFailData (1st function argument). Each case is either 2598 // llvm.trap or a call to one of the two runtime handlers, based on 2599 // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid 2600 // failure kind) traps, but this should really never happen. CFICheckFailData 2601 // can be nullptr if the calling module has -fsanitize-trap behavior for this 2602 // check kind; in this case __cfi_check_fail traps as well. 
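// The data layout matching the llvm::StructTypes built below is roughly
// (illustrative, field names assumed):
//   struct CFICheckFailData {
//     uint8_t CheckKind;  // one of the CFITCK_* values
//     struct { char *Filename; int32_t Line, Column; } Loc;
//     void *Type;         // type descriptor
//   };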
2603 void CodeGenFunction::EmitCfiCheckFail() { 2604 SanitizerScope SanScope(this); 2605 FunctionArgList Args; 2606 ImplicitParamDecl ArgData(getContext(), nullptr, SourceLocation(), nullptr, 2607 getContext().VoidPtrTy); 2608 ImplicitParamDecl ArgAddr(getContext(), nullptr, SourceLocation(), nullptr, 2609 getContext().VoidPtrTy); 2610 Args.push_back(&ArgData); 2611 Args.push_back(&ArgAddr); 2612 2613 const CGFunctionInfo &FI = 2614 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args); 2615 2616 llvm::Function *F = llvm::Function::Create( 2617 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false), 2618 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule()); 2619 F->setVisibility(llvm::GlobalValue::HiddenVisibility); 2620 2621 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args, 2622 SourceLocation()); 2623 2624 llvm::Value *Data = 2625 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false, 2626 CGM.getContext().VoidPtrTy, ArgData.getLocation()); 2627 llvm::Value *Addr = 2628 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false, 2629 CGM.getContext().VoidPtrTy, ArgAddr.getLocation()); 2630 2631 // Data == nullptr means the calling module has trap behaviour for this check. 2632 llvm::Value *DataIsNotNullPtr = 2633 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy)); 2634 EmitTrapCheck(DataIsNotNullPtr); 2635 2636 llvm::StructType *SourceLocationTy = 2637 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty, nullptr); 2638 llvm::StructType *CfiCheckFailDataTy = 2639 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy, nullptr); 2640 2641 llvm::Value *V = Builder.CreateConstGEP2_32( 2642 CfiCheckFailDataTy, 2643 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0, 2644 0); 2645 Address CheckKindAddr(V, getIntAlign()); 2646 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr); 2647 2648 llvm::Value *AllVtables = llvm::MetadataAsValue::get( 2649 CGM.getLLVMContext(), 2650 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); 2651 llvm::Value *ValidVtable = Builder.CreateZExt( 2652 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test), 2653 {Addr, AllVtables}), 2654 IntPtrTy); 2655 2656 const std::pair<int, SanitizerMask> CheckKinds[] = { 2657 {CFITCK_VCall, SanitizerKind::CFIVCall}, 2658 {CFITCK_NVCall, SanitizerKind::CFINVCall}, 2659 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast}, 2660 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast}, 2661 {CFITCK_ICall, SanitizerKind::CFIICall}}; 2662 2663 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks; 2664 for (auto CheckKindMaskPair : CheckKinds) { 2665 int Kind = CheckKindMaskPair.first; 2666 SanitizerMask Mask = CheckKindMaskPair.second; 2667 llvm::Value *Cond = 2668 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind)); 2669 if (CGM.getLangOpts().Sanitize.has(Mask)) 2670 EmitCheck(std::make_pair(Cond, Mask), "cfi_check_fail", {}, 2671 {Data, Addr, ValidVtable}); 2672 else 2673 EmitTrapCheck(Cond); 2674 } 2675 2676 FinishFunction(); 2677 // The only reference to this function will be created during LTO link. 2678 // Make sure it survives until then. 2679 CGM.addUsedGlobal(F); 2680 } 2681 2682 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) { 2683 llvm::BasicBlock *Cont = createBasicBlock("cont"); 2684 2685 // If we're optimizing, collapse all calls to trap down to just one per 2686 // function to save on code size. 
2687 if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) { 2688 TrapBB = createBasicBlock("trap"); 2689 Builder.CreateCondBr(Checked, Cont, TrapBB); 2690 EmitBlock(TrapBB); 2691 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap); 2692 TrapCall->setDoesNotReturn(); 2693 TrapCall->setDoesNotThrow(); 2694 Builder.CreateUnreachable(); 2695 } else { 2696 Builder.CreateCondBr(Checked, Cont, TrapBB); 2697 } 2698 2699 EmitBlock(Cont); 2700 } 2701 2702 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) { 2703 llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID)); 2704 2705 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) 2706 TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex, 2707 "trap-func-name", 2708 CGM.getCodeGenOpts().TrapFuncName); 2709 2710 return TrapCall; 2711 } 2712 2713 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E, 2714 AlignmentSource *AlignSource) { 2715 assert(E->getType()->isArrayType() && 2716 "Array to pointer decay must have array source type!"); 2717 2718 // Expressions of array type can't be bitfields or vector elements. 2719 LValue LV = EmitLValue(E); 2720 Address Addr = LV.getAddress(); 2721 if (AlignSource) *AlignSource = LV.getAlignmentSource(); 2722 2723 // If the array type was an incomplete type, we need to make sure 2724 // the decay ends up being the right type. 2725 llvm::Type *NewTy = ConvertType(E->getType()); 2726 Addr = Builder.CreateElementBitCast(Addr, NewTy); 2727 2728 // Note that VLA pointers are always decayed, so we don't need to do 2729 // anything here. 2730 if (!E->getType()->isVariableArrayType()) { 2731 assert(isa<llvm::ArrayType>(Addr.getElementType()) && 2732 "Expected pointer to array"); 2733 Addr = Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(), "arraydecay"); 2734 } 2735 2736 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType(); 2737 return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType)); 2738 } 2739 2740 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an 2741 /// array to pointer, return the array subexpression. 2742 static const Expr *isSimpleArrayDecayOperand(const Expr *E) { 2743 // If this isn't just an array->pointer decay, bail out. 2744 const auto *CE = dyn_cast<CastExpr>(E); 2745 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) 2746 return nullptr; 2747 2748 // If this is a decay from variable width array, bail out. 2749 const Expr *SubExpr = CE->getSubExpr(); 2750 if (SubExpr->getType()->isVariableArrayType()) 2751 return nullptr; 2752 2753 return SubExpr; 2754 } 2755 2756 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF, 2757 llvm::Value *ptr, 2758 ArrayRef<llvm::Value*> indices, 2759 bool inbounds, 2760 const llvm::Twine &name = "arrayidx") { 2761 if (inbounds) { 2762 return CGF.Builder.CreateInBoundsGEP(ptr, indices, name); 2763 } else { 2764 return CGF.Builder.CreateGEP(ptr, indices, name); 2765 } 2766 } 2767 2768 static CharUnits getArrayElementAlign(CharUnits arrayAlign, 2769 llvm::Value *idx, 2770 CharUnits eltSize) { 2771 // If we have a constant index, we can use the exact offset of the 2772 // element we're accessing. 2773 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) { 2774 CharUnits offset = constantIdx->getZExtValue() * eltSize; 2775 return arrayAlign.alignmentAtOffset(offset); 2776 2777 // Otherwise, use the worst-case alignment for any element. 
2778 } else {
2779 return arrayAlign.alignmentOfArrayElement(eltSize);
2780 }
2781 }
2782
2783 static QualType getFixedSizeElementType(const ASTContext &ctx,
2784 const VariableArrayType *vla) {
2785 QualType eltType;
2786 do {
2787 eltType = vla->getElementType();
2788 } while ((vla = ctx.getAsVariableArrayType(eltType)));
2789 return eltType;
2790 }
2791
2792 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
2793 ArrayRef<llvm::Value*> indices,
2794 QualType eltType, bool inbounds,
2795 const llvm::Twine &name = "arrayidx") {
2796 // All the indices except the last must be zero.
2797 #ifndef NDEBUG
2798 for (auto idx : indices.drop_back())
2799 assert(isa<llvm::ConstantInt>(idx) &&
2800 cast<llvm::ConstantInt>(idx)->isZero());
2801 #endif
2802
2803 // Determine the element size of the statically-sized base. This is
2804 // the thing that the indices are expressed in terms of.
2805 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
2806 eltType = getFixedSizeElementType(CGF.getContext(), vla);
2807 }
2808
2809 // We can use that to compute the best alignment of the element.
2810 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
2811 CharUnits eltAlign =
2812 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
2813
2814 llvm::Value *eltPtr =
2815 emitArraySubscriptGEP(CGF, addr.getPointer(), indices, inbounds, name);
2816 return Address(eltPtr, eltAlign);
2817 }
2818
2819 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
2820 bool Accessed) {
2821 // The index must always be an integer, which is not an aggregate. Emit it.
2822 llvm::Value *Idx = EmitScalarExpr(E->getIdx());
2823 QualType IdxTy = E->getIdx()->getType();
2824 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
2825
2826 if (SanOpts.has(SanitizerKind::ArrayBounds))
2827 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
2828
2829 // If the base is a vector type, then we are forming a vector element lvalue
2830 // with this subscript.
2831 if (E->getBase()->getType()->isVectorType() &&
2832 !isa<ExtVectorElementExpr>(E->getBase())) {
2833 // Emit the vector as an lvalue to get its address.
2834 LValue LHS = EmitLValue(E->getBase());
2835 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
2836 return LValue::MakeVectorElt(LHS.getAddress(), Idx,
2837 E->getBase()->getType(),
2838 LHS.getAlignmentSource());
2839 }
2840
2841 // All the other cases basically behave like simple offsetting.
2842
2843 // Extend or truncate the index type to 32 or 64 bits.
2844 if (Idx->getType() != IntPtrTy)
2845 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
2846
2847 // Handle the extvector case we ignored above.
2848 if (isa<ExtVectorElementExpr>(E->getBase())) {
2849 LValue LV = EmitLValue(E->getBase());
2850 Address Addr = EmitExtVectorElementLValue(LV);
2851
2852 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
2853 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true);
2854 return MakeAddrLValue(Addr, EltType, LV.getAlignmentSource());
2855 }
2856
2857 AlignmentSource AlignSource;
2858 Address Addr = Address::invalid();
2859 if (const VariableArrayType *vla =
2860 getContext().getAsVariableArrayType(E->getType())) {
2861 // The base must be a pointer, which is not an aggregate. Emit
2862 // it. It needs to be emitted first in case it's what captures
2863 // the VLA bounds.
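// e.g. (illustrative): for 'int A[n][m];', 'A[i]' below scales 'i' by the
// number of 'int' elements in a row, effectively computing A + i*m.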
2864 Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
2865
2866 // The element count here is the total number of non-VLA elements.
2867 llvm::Value *numElements = getVLASize(vla).first;
2868
2869 // Effectively, the multiply by the VLA size is part of the GEP.
2870 // GEP indexes are signed, and scaling an index isn't permitted to
2871 // signed-overflow, so we use the same semantics for our explicit
2872 // multiply. We suppress this if overflow is not undefined behavior.
2873 if (getLangOpts().isSignedOverflowDefined()) {
2874 Idx = Builder.CreateMul(Idx, numElements);
2875 } else {
2876 Idx = Builder.CreateNSWMul(Idx, numElements);
2877 }
2878
2879 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
2880 !getLangOpts().isSignedOverflowDefined());
2881
2882 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
2883 // Indexing over an interface, as in "NSString *P; P[4];"
2884 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
2885 llvm::Value *InterfaceSizeVal =
2886 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
2887
2888 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
2889
2890 // Emit the base pointer.
2891 Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
2892
2893 // We don't necessarily build correct LLVM struct types for ObjC
2894 // interfaces, so we can't rely on GEP to do this scaling
2895 // correctly, so we need to cast to i8*. FIXME: is this actually
2896 // true? A lot of other things in the fragile ABI would break...
2897 llvm::Type *OrigBaseTy = Addr.getType();
2898 Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
2899
2900 // Do the GEP.
2901 CharUnits EltAlign =
2902 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
2903 llvm::Value *EltPtr =
2904 emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false);
2905 Addr = Address(EltPtr, EltAlign);
2906
2907 // Cast back.
2908 Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
2909 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
2910 // If this is A[i] where A is an array, the frontend will have decayed the
2911 // base to be an ArrayToPointerDecay implicit cast. While correct, it is
2912 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
2913 // "gep x, i" here. Emit one "gep A, 0, i".
2914 assert(Array->getType()->isArrayType() &&
2915 "Array to pointer decay must have array source type!");
2916 LValue ArrayLV;
2917 // For simple multidimensional array indexing, set the 'accessed' flag for
2918 // better bounds-checking of the base expression.
2919 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
2920 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
2921 else
2922 ArrayLV = EmitLValue(Array);
2923
2924 // Propagate the alignment from the array itself to the result.
2925 Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
2926 {CGM.getSize(CharUnits::Zero()), Idx},
2927 E->getType(),
2928 !getLangOpts().isSignedOverflowDefined());
2929 AlignSource = ArrayLV.getAlignmentSource();
2930 } else {
2931 // The base must be a pointer; emit it with an estimate of its alignment.
2932 Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
2933 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
2934 !getLangOpts().isSignedOverflowDefined());
2935 }
2936
2937 LValue LV = MakeAddrLValue(Addr, E->getType(), AlignSource);
2938
2939 // TODO: Preserve/extend path TBAA metadata?
2940
2941 if (getLangOpts().ObjC1 &&
2942 getLangOpts().getGC() != LangOptions::NonGC) {
2943 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2944 setObjCGCLValueClass(getContext(), E, LV);
2945 }
2946 return LV;
2947 }
2948
2949 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
2950 AlignmentSource &AlignSource,
2951 QualType BaseTy, QualType ElTy,
2952 bool IsLowerBound) {
2953 LValue BaseLVal;
2954 if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
2955 BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
2956 if (BaseTy->isArrayType()) {
2957 Address Addr = BaseLVal.getAddress();
2958 AlignSource = BaseLVal.getAlignmentSource();
2959
2960 // If the array type was an incomplete type, we need to make sure
2961 // the decay ends up being the right type.
2962 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
2963 Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
2964
2965 // Note that VLA pointers are always decayed, so we don't need to do
2966 // anything here.
2967 if (!BaseTy->isVariableArrayType()) {
2968 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
2969 "Expected pointer to array");
2970 Addr = CGF.Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(),
2971 "arraydecay");
2972 }
2973
2974 return CGF.Builder.CreateElementBitCast(Addr,
2975 CGF.ConvertTypeForMem(ElTy));
2976 }
2977 CharUnits Align = CGF.getNaturalTypeAlignment(ElTy, &AlignSource);
2978 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()), Align);
2979 }
2980 return CGF.EmitPointerWithAlignment(Base, &AlignSource);
2981 }
2982
2983 LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
2984 bool IsLowerBound) {
2985 QualType BaseTy;
2986 if (auto *ASE =
2987 dyn_cast<OMPArraySectionExpr>(E->getBase()->IgnoreParenImpCasts()))
2988 BaseTy = OMPArraySectionExpr::getBaseOriginalType(ASE);
2989 else
2990 BaseTy = E->getBase()->getType();
2991 QualType ResultExprTy;
2992 if (auto *AT = getContext().getAsArrayType(BaseTy))
2993 ResultExprTy = AT->getElementType();
2994 else
2995 ResultExprTy = BaseTy->getPointeeType();
2996 llvm::Value *Idx = nullptr;
2997 if (IsLowerBound || E->getColonLoc().isInvalid()) {
2998 // Requesting the lower bound, or the upper bound when no length was
2999 // provided and there is no ':' implying a default length, so length = 1.
3000 // Idx = LowerBound ?: 0;
3001 if (auto *LowerBound = E->getLowerBound()) {
3002 Idx = Builder.CreateIntCast(
3003 EmitScalarExpr(LowerBound), IntPtrTy,
3004 LowerBound->getType()->hasSignedIntegerRepresentation());
3005 } else
3006 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
3007 } else {
3008 // Try to emit the length or lower bound as a constant. If that is
3009 // possible, 1 is subtracted from the constant length or lower bound.
3010 // Otherwise, emit LLVM IR computing (LB + Len) - 1.
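// Worked example (illustrative): for the section 'A[1:5]', the upper index
// is 1 + 5 - 1 == 5; with constant operands the -1 is folded below into
// ConstLength or ConstLowerBound instead of emitting a subtraction.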
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (Length->isIntegerConstantExpr(ConstLength, C)) {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound && LowerBound->isIntegerConstantExpr(ConstLowerBound, C)) {
        ConstLowerBound = ConstLowerBound.zextOrTrunc(PointerWidthInBits);
        LowerBound = nullptr;
      }
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (Length->isIntegerConstantExpr(ConstLength, C))
          Length = nullptr;
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  AlignmentSource AlignSource;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate.  Emit
    // it.  It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), AlignSource, BaseTy,
                                VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).first;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
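    // Illustrative sketch (assumed example, not from the original source):
    // for 'double (*p)[n]; p[i:len]' the index is scaled by the n inner
    // elements, i.e. 'i * n', emitted as 'mul nsw' unless -fwrapv makes
    // signed overflow defined.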
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here.  Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined());
    AlignSource = ArrayLV.getAlignmentSource();
  } else {
    Address Base = emitOMPArraySectionBase(*this, E->getBase(), AlignSource,
                                           BaseTy, ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, AlignSource);
}

LValue CodeGenFunction::
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    AlignmentSource AlignSource;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), AlignSource);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
                          AlignmentSource::Decl);
  }

  QualType type =
    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
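  // For example (illustrative, not from the original source): for an
  // ext_vector 'v' the swizzle 'v.yx' encodes as the index list {1, 0}.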
  SmallVector<uint32_t, 4> Indices;
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getAlignmentSource());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  llvm::Constant *BaseElts = Base.getExtVectorElts();
  SmallVector<llvm::Constant *, 4> CElts;

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getAlignmentSource());
}

LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
  Expr *BaseExpr = E->getBase();

  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    AlignmentSource AlignSource;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &AlignSource);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy);
    BaseLV = MakeAddrLValue(Addr, PtrTy, AlignSource);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    setObjCGCLValueClass(getContext(), E, LV);
    return LV;
  }

  if (auto *VD = dyn_cast<VarDecl>(ND))
    return EmitGlobalVarDeclLValue(*this, E, VD);

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}

/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
  QualType LambdaTagType =
    getContext().getTagDeclType(Field->getParent());
  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
  return EmitLValueForField(LambdaLV, Field);
}

/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
                                      const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();

  unsigned idx =
    CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  CharUnits offset;
  // Adjust the alignment down to the given offset.
  // As a special case, if the LLVM field index is 0, we know that this
  // is zero.
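  // (Enforced by the assert below: a field that the CGRecordLayout maps to
  // LLVM field index 0 must also sit at byte offset 0 of the record.)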
  assert((idx != 0 || CGF.getContext().getASTRecordLayout(rec)
                         .getFieldOffset(field->getFieldIndex()) == 0) &&
         "LLVM field at index zero had non-zero offset?");
  if (idx != 0) {
    auto &recLayout = CGF.getContext().getASTRecordLayout(rec);
    auto offsetInBits = recLayout.getFieldOffset(field->getFieldIndex());
    offset = CGF.getContext().toCharUnitsFromBits(offsetInBits);
  }

  return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
}

LValue CodeGenFunction::EmitLValueForField(LValue base,
                                           const FieldDecl *field) {
  AlignmentSource fieldAlignSource =
    getFieldAlignmentSource(base.getAlignmentSource());

  if (field->isBitField()) {
    const CGRecordLayout &RL =
      CGM.getTypes().getCGRecordLayout(field->getParent());
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    Address Addr = base.getAddress();
    unsigned Idx = RL.getLLVMFieldNo(field);
    if (Idx != 0)
      // For structs, we GEP to the field that the record layout suggests.
      Addr = Builder.CreateStructGEP(Addr, Idx, Info.StorageOffset,
                                     field->getName());
    // Get the access type.
    llvm::Type *FieldIntTy =
      llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
    if (Addr.getElementType() != FieldIntTy)
      Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);

    QualType fieldType =
      field->getType().withCVRQualifiers(base.getVRQualifiers());
    return LValue::MakeBitfield(Addr, Info, fieldType, fieldAlignSource);
  }

  const RecordDecl *rec = field->getParent();
  QualType type = field->getType();

  bool mayAlias = rec->hasAttr<MayAliasAttr>();

  Address addr = base.getAddress();
  unsigned cvr = base.getVRQualifiers();
  bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA;
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    assert(!type->isReferenceType() && "union has reference member");
    // TODO: handle path-aware TBAA for union.
    TBAAPath = false;
  } else {
    // For structs, we GEP to the field that the record layout suggests.
    addr = emitAddrOfFieldStorage(*this, addr, field);

    // If this is a reference field, load the reference right now.
    if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
      llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
      if (cvr & Qualifiers::Volatile) load->setVolatile(true);

      // Loading the reference will disable path-aware TBAA.
      TBAAPath = false;
      if (CGM.shouldUseTBAA()) {
        llvm::MDNode *tbaa;
        if (mayAlias)
          tbaa = CGM.getTBAAInfo(getContext().CharTy);
        else
          tbaa = CGM.getTBAAInfo(type);
        if (tbaa)
          CGM.DecorateInstructionWithTBAA(load, tbaa);
      }

      mayAlias = false;
      type = refType->getPointeeType();

      CharUnits alignment =
        getNaturalTypeAlignment(type, &fieldAlignSource, /*pointee*/ true);
      addr = Address(load, alignment);

      // Qualifiers on the struct don't apply to the referencee, and
      // we'll pick up CVR from the actual type later, so reset these
      // additional qualifiers now.
      cvr = 0;
    }
  }

  // Make sure that the address is pointing to the right type.  This is
  // critical for both unions and structs.  A union needs a bitcast, and a
  // struct element needs a bitcast if the laid-out LLVM type doesn't match
  // the desired type.
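  //
  // Illustrative case (not from the original source): a 'union { int i;
  // float f; }' is typically lowered to an LLVM struct wrapping a single
  // storage member, so forming an l-value for the other member requires
  // retyping the pointer's element type here.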
  addr = Builder.CreateElementBitCast(addr,
                                      CGM.getTypes().ConvertTypeForMem(type),
                                      field->getName());

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, type, fieldAlignSource);
  LV.getQuals().addCVRQualifiers(cvr);
  if (TBAAPath) {
    const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(field->getParent());
    // Set the base type to be the base type of the base LValue and
    // update offset to be relative to the base type.
    LV.setTBAABaseType(mayAlias ? getContext().CharTy : base.getTBAABaseType());
    LV.setTBAAOffset(mayAlias ? 0 : base.getTBAAOffset() +
                     Layout.getFieldOffset(field->getFieldIndex()) /
                     getContext().getCharWidth());
  }

  // __weak attribute on a field is ignored.
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
    LV.getQuals().removeObjCGCAttr();

  // Fields of may_alias structs act like 'char' for TBAA purposes.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  if (mayAlias && LV.getTBAAInfo())
    LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));

  return LV;
}

LValue
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
                                                  const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = Builder.CreateElementBitCast(V, llvmType, Field->getName());

  // TODO: access-path TBAA?
  auto FieldAlignSource = getFieldAlignmentSource(Base.getAlignmentSource());
  return MakeAddrLValue(V, FieldType, FieldAlignSource);
}

LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
  if (E->isFileScope()) {
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  if (E->getType()->isVariablyModifiedType())
    // make sure to emit the VLA size.
    EmitVariablyModifiedType(E->getType());

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  return Result;
}

LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->getNumInits() == 1 && "reference init with multiple values");
  return EmitLValue(E->getInit(0));
}

/// Emit the operand of a glvalue conditional operator.  This is either a
/// glvalue or a (possibly-parenthesized) throw-expression.  If this is a
/// throw, no LValue is returned and the current block has been terminated.
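///
/// Illustrative case (not from the original source): in
///   int &r = ok ? x : throw std::runtime_error("no");
/// the throw arm yields no LValue; the caller below then uses the surviving
/// arm's l-value directly instead of merging the two with a phi.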
static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
                                                    const Expr *Operand) {
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
    return None;
  }

  return CGF.EmitLValue(Operand);
}

LValue CodeGenFunction::
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);

  const Expr *condExpr = expr->getCond();
  bool CondExprBool;
  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
    if (!CondExprBool) std::swap(live, dead);

    if (!ContainsLabel(dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
        incrementProfileCounter(expr);
      return EmitLValue(live);
    }
  }

  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");

  ConditionalEvaluation eval(*this);
  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));

  // Any temporaries created here are conditional.
  EmitBlock(lhsBlock);
  incrementProfileCounter(expr);
  eval.begin(*this);
  Optional<LValue> lhs =
      EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
  eval.end(*this);

  if (lhs && !lhs->isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");

  lhsBlock = Builder.GetInsertBlock();
  if (lhs)
    Builder.CreateBr(contBlock);

  // Any temporaries created here are conditional.
  EmitBlock(rhsBlock);
  eval.begin(*this);
  Optional<LValue> rhs =
      EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
  eval.end(*this);
  if (rhs && !rhs->isSimple())
    return EmitUnsupportedLValue(expr, "conditional operator");
  rhsBlock = Builder.GetInsertBlock();

  EmitBlock(contBlock);

  if (lhs && rhs) {
    llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(),
                                           2, "cond-lvalue");
    phi->addIncoming(lhs->getPointer(), lhsBlock);
    phi->addIncoming(rhs->getPointer(), rhsBlock);
    Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
    AlignmentSource alignSource =
      std::max(lhs->getAlignmentSource(), rhs->getAlignmentSource());
    return MakeAddrLValue(result, expr->getType(), alignSource);
  } else {
    assert((lhs || rhs) &&
           "both operands of glvalue conditional are throw-expressions?");
    return lhs ? *lhs : *rhs;
  }
}

/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type.  If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members.  This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
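///
/// Illustrative case (not from the original source): the GNU cast-to-union
/// extension '((union U)i).field' needs the address of the aggregate
/// produced by the scalar-to-union cast in order to access 'field'.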
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_ToVoid:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_AddressSpaceConversion:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress();
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
    return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
  }

  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_NoOp:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const RecordType *DerivedClassTy =
        E->getSubExpr()->getType()->getAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress();

    // Perform the derived-to-base conversion.
    Address Base = GetAddressOfBaseClass(
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    return MakeAddrLValue(Base, E->getType(), LV.getAlignmentSource());
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion.
    Address Derived =
        GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
                                 E->path_begin(), E->path_end(),
                                 /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (sanitizePerformTypeCheck())
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
                    Derived.getPointer(), E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
      EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
                                /*MayBeNull=*/false,
                                CFITCK_DerivedCast, E->getLocStart());

    return MakeAddrLValue(Derived, E->getType(), LV.getAlignmentSource());
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or C-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = Builder.CreateBitCast(LV.getAddress(),
                                      ConvertType(CE->getTypeAsWritten()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
      EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
                                /*MayBeNull=*/false,
                                CFITCK_UnrelatedCast, E->getLocStart());

    return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = Builder.CreateElementBitCast(LV.getAddress(),
                                             ConvertType(E->getType()));
    return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
  }
  case CK_ZeroToOCLEvent:
    llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}

LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
  return getOpaqueLValueMapping(e);
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue();
  case TEK_Scalar:
    return EmitLoadOfLValue(FieldLV, Loc);
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
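  // (So dispatching on a block-typed callee first cannot accidentally
  // bypass the builtin handling below.)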
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  const Decl *TargetDecl = E->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    if (unsigned builtinID = FD->getBuiltinID())
      return EmitBuiltinExpr(FD, builtinID, E, ReturnValue);
  }

  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  if (const auto *PseudoDtor =
          dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
    QualType DestroyedType = PseudoDtor->getDestroyedType();
    if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
      // Automatic Reference Counting:
      //   If the pseudo-expression names a retainable object with weak or
      //   strong lifetime, the object shall be released.
      Expr *BaseExpr = PseudoDtor->getBase();
      Address BaseValue = Address::invalid();
      Qualifiers BaseQuals;

      // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
      if (PseudoDtor->isArrow()) {
        BaseValue = EmitPointerWithAlignment(BaseExpr);
        const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
        BaseQuals = PTy->getPointeeType().getQualifiers();
      } else {
        LValue BaseLV = EmitLValue(BaseExpr);
        BaseValue = BaseLV.getAddress();
        QualType BaseTy = BaseExpr->getType();
        BaseQuals = BaseTy.getQualifiers();
      }

      switch (DestroyedType.getObjCLifetime()) {
      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        break;

      case Qualifiers::OCL_Strong:
        EmitARCRelease(Builder.CreateLoad(BaseValue,
                          PseudoDtor->getDestroyedType().isVolatileQualified()),
                       ARCPreciseLifetime);
        break;

      case Qualifiers::OCL_Weak:
        EmitARCDestroyWeak(BaseValue);
        break;
      }
    } else {
      // C++ [expr.pseudo]p1:
      //   The result shall only be used as the operand for the function call
      //   operator (), and the result of such a call has type void. The only
      //   effect is the evaluation of the postfix-expression before the dot or
      //   arrow.
      EmitScalarExpr(E->getCallee());
    }

    return RValue::get(nullptr);
  }

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
                  TargetDecl);
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.
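  //
  // Illustrative case (assumed example, not from the original source):
  //   __block id x;
  //   x = f(^{ use(x); });
  // If f copies the block, x is moved to the heap; emitting the RHS first
  // ensures the LHS l-value computed afterwards points at x's new home.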

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
                                      ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  EmitLambdaExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(),
                        AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
3875 "reference type!"); 3876 3877 return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType()); 3878 } 3879 3880 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { 3881 Address V = 3882 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector()); 3883 return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl); 3884 } 3885 3886 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, 3887 const ObjCIvarDecl *Ivar) { 3888 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); 3889 } 3890 3891 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, 3892 llvm::Value *BaseValue, 3893 const ObjCIvarDecl *Ivar, 3894 unsigned CVRQualifiers) { 3895 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, 3896 Ivar, CVRQualifiers); 3897 } 3898 3899 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { 3900 // FIXME: A lot of the code below could be shared with EmitMemberExpr. 3901 llvm::Value *BaseValue = nullptr; 3902 const Expr *BaseExpr = E->getBase(); 3903 Qualifiers BaseQuals; 3904 QualType ObjectTy; 3905 if (E->isArrow()) { 3906 BaseValue = EmitScalarExpr(BaseExpr); 3907 ObjectTy = BaseExpr->getType()->getPointeeType(); 3908 BaseQuals = ObjectTy.getQualifiers(); 3909 } else { 3910 LValue BaseLV = EmitLValue(BaseExpr); 3911 BaseValue = BaseLV.getPointer(); 3912 ObjectTy = BaseExpr->getType(); 3913 BaseQuals = ObjectTy.getQualifiers(); 3914 } 3915 3916 LValue LV = 3917 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), 3918 BaseQuals.getCVRQualifiers()); 3919 setObjCGCLValueClass(getContext(), E, LV); 3920 return LV; 3921 } 3922 3923 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { 3924 // Can only get l-value for message expression returning aggregate type 3925 RValue RV = EmitAnyExprToTemp(E); 3926 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(), 3927 AlignmentSource::Decl); 3928 } 3929 3930 RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, 3931 const CallExpr *E, ReturnValueSlot ReturnValue, 3932 CGCalleeInfo CalleeInfo, llvm::Value *Chain) { 3933 // Get the actual function type. The callee type will always be a pointer to 3934 // function type or a block pointer type. 3935 assert(CalleeType->isFunctionPointerType() && 3936 "Call must have function pointer type!"); 3937 3938 // Preserve the non-canonical function type because things like exception 3939 // specifications disappear in the canonical type. That information is useful 3940 // to drive the generation of more accurate code for this call later on. 3941 const FunctionProtoType *NonCanonicalFTP = CalleeType->getAs<PointerType>() 3942 ->getPointeeType() 3943 ->getAs<FunctionProtoType>(); 3944 3945 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 3946 3947 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 3948 // We can only guarantee that a function is called from the correct 3949 // context/function based on the appropriate target attributes, 3950 // so only check in the case where we have both always_inline and target 3951 // since otherwise we could be making a conditional call after a check for 3952 // the proper cpu features (and it won't cause code generation issues due to 3953 // function based code generation). 
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(E, FD);

  CalleeType = getContext().getCanonicalType(CalleeType);

  const auto *FnType =
      cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(QualType(FnType, 0), /*ForEH=*/true);
      llvm::Type *PrefixStructTyElems[] = {
        PrefixSig->getType(),
        FTRTTIConst->getType()
      };
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);

      llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
          Callee, llvm::PointerType::getUnqual(PrefixStructTy));
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeRTTIPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
      llvm::Value *CalleeRTTI =
          Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
      llvm::Value *CalleeRTTIMatch =
          Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(E->getLocStart()),
        EmitCheckTypeDescriptor(CalleeType)
      };
      EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
                "function_type_mismatch", StaticData, Callee);

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  // If we are checking indirect calls and this call is indirect, check that
  // the function pointer is a member of the bit set for the function type.
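  // Sketch of the guard this emits (illustrative, names assumed): for a call
  // through 'void (*fp)(int)' it looks roughly like
  //   %ok = call i1 @llvm.bitset.test(i8* %fp, metadata !"_ZTSFviE")
  // branching to the failure handler (or cross-DSO slow path) when the
  // pointer is not in the bit set recorded for that function type.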
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
    llvm::Value *BitSetName = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CastedCallee = Builder.CreateBitCast(Callee, Int8PtrTy);
    llvm::Value *BitSetTest =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test),
                           {CastedCallee, BitSetName});

    auto TypeId = CGM.CreateCfiIdForTypeMetadata(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getLocStart()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && TypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, BitSetTest, TypeId,
                           CastedCallee, StaticData);
    } else {
      EmitCheck(std::make_pair(BitSetTest, SanitizerKind::CFIICall),
                "cfi_check_fail", StaticData,
                {CastedCallee, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
             CGM.getContext().VoidPtrTy);
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
               E->getDirectCallee(), /*ParamsToSkip*/ 0);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*isChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
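  //
  // Worked example (illustrative, not from the original source): given
  //   int f();  f(1, 2.0);
  // the promoted argument types are (int, double), so the callee is bitcast
  // to 'i32 (i32, double)*' and called non-variadically with those exact
  // types.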
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args,
                  CGCalleeInfo(NonCanonicalFTP, TargetDecl));
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress();
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  AlignmentSource AlignSource;
  Address MemberAddr =
    EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT,
                                    &AlignSource);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), AlignSource);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);

        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}