//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <string>

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
                                                     CharUnits Align,
                                                     const Twine &Name,
                                                     llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getQuantity());
  return Address(Alloca, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name,
                                          llvm::Value *ArraySize,
                                          Address *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(AllocaInsertPt);
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return Address(V, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
  assert(isa<llvm::AllocaInst>(Var.getPointer()));
  auto *Store = new llvm::StoreInst(Init, Var.getPointer());
  Store->setAlignment(Var.getAlignment().getQuantity());
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}

Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                       Address *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name, Address *Alloca) {
  return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                          /*ArraySize=*/nullptr, Alloca);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
                                                  const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                  const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
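  //
  // For example (illustrative): for `const std::string &r = makeString();`
  // the lifetime-extended temporary is destroyed when `r` goes out of scope
  // (SD_Automatic); an unextended temporary is destroyed at the end of the
  // enclosing full-expression (SD_FullExpression); and a temporary bound to a
  // static-storage reference is destroyed at program exit (SD_Static).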
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static Address createReferenceTemporary(CodeGenFunction &CGF,
                                        const MaterializeTemporaryExpr *M,
                                        const Expr *Inner,
                                        Address *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record, try to promote it into
    // a constant global under the same rules under which a normal constant
    // would have been promoted. This is easier on the optimizer and generally
    // emits fewer instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) {
          auto AS = AddrSpace.getValue();
          auto *GV = new llvm::GlobalVariable(
              CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
              llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
              llvm::GlobalValue::NotThreadLocal,
              CGF.getContext().getTargetAddressSpace(AS));
          CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
          GV->setAlignment(alignment.getQuantity());
          llvm::Constant *C = GV;
          if (AS != LangAS::Default)
            C = TCG.performAddrSpaceCast(
                CGF.CGM, GV, AS, LangAS::Default,
                GV->getValueType()->getPointerTo(
                    CGF.getContext().getTargetAddressSpace(LangAS::Default)));
          // FIXME: Should we put the new global into a COMDAT?
          return Address(C, alignment);
        }
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->GetTemporaryExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC.
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      Object = Address(llvm::ConstantExpr::getBitCast(Var,
                           ConvertTypeForMem(E->getType())
                               ->getPointerTo(Object.getAddressSpace())),
                       Object.getAlignment());

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Alloca = Address::invalid();
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    Object = Address(llvm::ConstantExpr::getBitCast(
                         cast<llvm::Constant>(Object.getPointer()),
                         ConvertTypeForMem(E->getType())->getPointerTo()),
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this in ASan's use-after-scope
      // mode so that it gets the more precise lifetime marks. If the type has
      // a non-trivial destructor, we'll have a cleanup block for it anyway,
      // so this typically doesn't help; skip it in that case.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (unsigned I = Adjustments.size(); I != 0; --I) {
    SubobjectAdjustment &Adjustment = Adjustments[I-1];
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer();

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
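///
/// For reference (an illustrative restatement of the IR built below), the
/// computation is equivalent to:
/// \code
///   uint64_t kMul = 0x9ddfea08eb382d69ULL;
///   uint64_t a = (low ^ high) * kMul;  a ^= (a >> 47);
///   uint64_t b = (high ^ a) * kMul;    b ^= (b >> 47);
///   return b * kMul;
/// \endcode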
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) |
         SanOpts.has(SanitizerKind::Alignment) |
         SanOpts.has(SanitizerKind::ObjectSize) |
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca =
      dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCastsNoFollowAliases());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = getContext().getTypeSizeInChars(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  uint64_t AlignVal = 0;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
    if (AlignVal > 1 &&
        (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    // Make sure we're not losing information. Alignment needs to be a power of
    // 2.
    assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Blacklist based on the mangled type.
    if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
            SanitizerKind::Vptr, Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal =
          Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
                                    getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
          EmitCheckSourceLocation(Loc),
          EmitCheckTypeDescriptor(Ty),
          CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
          llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
static bool isFlexibleArrayMemberExpr(const Expr *E) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
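  //
  // For example (illustrative):
  //   struct S { int Count; char Data[];  };  // C99 flexible array member
  //   struct T { int Count; char Data[1]; };  // pre-C99 idiom, also accepted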
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
    return IRE->getDecl()->getNextIvar() == nullptr;
  }

  return false;
}

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return null if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  QualType IndexedType;
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  LValueBaseInfo *BaseInfo,
                                                  TBAAAccessInfo *TBAAInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
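  //
  // Note (illustrative): for an expression like `&s.f`, the unary-& case below
  // can recover the alignment of the field `f` from its declaration, which may
  // be tighter than the ABI alignment of the pointee type that the fallback at
  // the end of this function would use.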
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
                                                &InnerBaseInfo,
                                                &InnerTBAAInfo);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(),
                                                           &TargetTypeBaseInfo,
                                                           &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
                                                 TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr = Address(Addr.getPointer(), Align);
          }
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
        }
        return CE->getCastKind() != CK_AddressSpaceConversion
                   ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
                   : Builder.CreateAddrSpaceCast(Addr,
                                                 ConvertType(E->getType()));
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo,
                                                   TBAAInfo);
  return Address(EmitScalarExpr(E), Align);
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have
  // an identifiable address. Just because the contents of the value are
  // undefined doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
                        E->getType());
}

bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
                  E->getType(), LV.getAlignment(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass:
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr());
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
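      //
      // For example (illustrative): in `({ ...; goto out; ...; T tmp; tmp; })`
      // control can leave the statement expression early, so the cleanups are
      // forced now rather than left to the enclosing scope.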
      llvm::Value *V = LV.getPointer();
      Scope.ForceCleanup({&V});
      return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
                              getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr());
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
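  //
  // For example (illustrative): `const int` qualifies, `volatile const int`
  // does not, and neither does a const class type with a mutable member or a
  // non-trivial copy constructor or destructor.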
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
1473 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(), 1474 result.Val, resultType); 1475 1476 // Make sure we emit a debug reference to the global variable. 1477 // This should probably fire even for 1478 if (isa<VarDecl>(value)) { 1479 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value))) 1480 EmitDeclRefExprDbgValue(refExpr, result.Val); 1481 } else { 1482 assert(isa<EnumConstantDecl>(value)); 1483 EmitDeclRefExprDbgValue(refExpr, result.Val); 1484 } 1485 1486 // If we emitted a reference constant, we need to dereference that. 1487 if (resultIsReference) 1488 return ConstantEmission::forReference(C); 1489 1490 return ConstantEmission::forValue(C); 1491 } 1492 1493 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, 1494 const MemberExpr *ME) { 1495 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) { 1496 // Try to emit static variable member expressions as DREs. 1497 return DeclRefExpr::Create( 1498 CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD, 1499 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(), 1500 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse()); 1501 } 1502 return nullptr; 1503 } 1504 1505 CodeGenFunction::ConstantEmission 1506 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) { 1507 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME)) 1508 return tryEmitAsConstant(DRE); 1509 return ConstantEmission(); 1510 } 1511 1512 llvm::Value *CodeGenFunction::emitScalarConstant( 1513 const CodeGenFunction::ConstantEmission &Constant, Expr *E) { 1514 assert(Constant && "not a constant"); 1515 if (Constant.isReference()) 1516 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E), 1517 E->getExprLoc()) 1518 .getScalarVal(); 1519 return Constant.getValue(); 1520 } 1521 1522 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, 1523 SourceLocation Loc) { 1524 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), 1525 lvalue.getType(), Loc, lvalue.getBaseInfo(), 1526 lvalue.getTBAAInfo(), lvalue.isNontemporal()); 1527 } 1528 1529 static bool hasBooleanRepresentation(QualType Ty) { 1530 if (Ty->isBooleanType()) 1531 return true; 1532 1533 if (const EnumType *ET = Ty->getAs<EnumType>()) 1534 return ET->getDecl()->getIntegerType()->isBooleanType(); 1535 1536 if (const AtomicType *AT = Ty->getAs<AtomicType>()) 1537 return hasBooleanRepresentation(AT->getValueType()); 1538 1539 return false; 1540 } 1541 1542 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, 1543 llvm::APInt &Min, llvm::APInt &End, 1544 bool StrictEnums, bool IsBool) { 1545 const EnumType *ET = Ty->getAs<EnumType>(); 1546 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums && 1547 ET && !ET->getDecl()->isFixed(); 1548 if (!IsBool && !IsRegularCPlusPlusEnum) 1549 return false; 1550 1551 if (IsBool) { 1552 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0); 1553 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2); 1554 } else { 1555 const EnumDecl *ED = ET->getDecl(); 1556 llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType()); 1557 unsigned Bitwidth = LTy->getScalarSizeInBits(); 1558 unsigned NumNegativeBits = ED->getNumNegativeBits(); 1559 unsigned NumPositiveBits = ED->getNumPositiveBits(); 1560 1561 if (NumNegativeBits) { 1562 unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1); 1563 assert(NumBits <= Bitwidth); 1564 End = llvm::APInt(Bitwidth, 1) << (NumBits - 1); 1565 Min = -End; 1566 } else { 1567 
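// All enumerators are non-negative here, so the valid range is
// [0, 2^NumPositiveBits).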
assert(NumPositiveBits <= Bitwidth); 1568 End = llvm::APInt(Bitwidth, 1) << NumPositiveBits; 1569 Min = llvm::APInt(Bitwidth, 0); 1570 } 1571 } 1572 return true; 1573 } 1574 1575 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { 1576 llvm::APInt Min, End; 1577 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums, 1578 hasBooleanRepresentation(Ty))) 1579 return nullptr; 1580 1581 llvm::MDBuilder MDHelper(getLLVMContext()); 1582 return MDHelper.createRange(Min, End); 1583 } 1584 1585 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, 1586 SourceLocation Loc) { 1587 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool); 1588 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum); 1589 if (!HasBoolCheck && !HasEnumCheck) 1590 return false; 1591 1592 bool IsBool = hasBooleanRepresentation(Ty) || 1593 NSAPI(CGM.getContext()).isObjCBOOLType(Ty); 1594 bool NeedsBoolCheck = HasBoolCheck && IsBool; 1595 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>(); 1596 if (!NeedsBoolCheck && !NeedsEnumCheck) 1597 return false; 1598 1599 // Single-bit booleans don't need to be checked. Special-case this to avoid 1600 // a bit width mismatch when handling bitfield values. This is handled by 1601 // EmitFromMemory for the non-bitfield case. 1602 if (IsBool && 1603 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1) 1604 return false; 1605 1606 llvm::APInt Min, End; 1607 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) 1608 return true; 1609 1610 auto &Ctx = getLLVMContext(); 1611 SanitizerScope SanScope(this); 1612 llvm::Value *Check; 1613 --End; 1614 if (!Min) { 1615 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End)); 1616 } else { 1617 llvm::Value *Upper = 1618 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End)); 1619 llvm::Value *Lower = 1620 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min)); 1621 Check = Builder.CreateAnd(Upper, Lower); 1622 } 1623 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc), 1624 EmitCheckTypeDescriptor(Ty)}; 1625 SanitizerMask Kind = 1626 NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool; 1627 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue, 1628 StaticArgs, EmitCheckValue(Value)); 1629 return true; 1630 } 1631 1632 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile, 1633 QualType Ty, 1634 SourceLocation Loc, 1635 LValueBaseInfo BaseInfo, 1636 TBAAAccessInfo TBAAInfo, 1637 bool isNontemporal) { 1638 if (!CGM.getCodeGenOpts().PreserveVec3Type) { 1639 // For better performance, handle vector loads differently. 1640 if (Ty->isVectorType()) { 1641 const llvm::Type *EltTy = Addr.getElementType(); 1642 1643 const auto *VTy = cast<llvm::VectorType>(EltTy); 1644 1645 // Handle vectors of size 3 like size 4 for better performance. 1646 if (VTy->getNumElements() == 3) { 1647 1648 // Bitcast to vec4 type. 1649 llvm::VectorType *vec4Ty = 1650 llvm::VectorType::get(VTy->getElementType(), 4); 1651 Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4"); 1652 // Now load value. 1653 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4"); 1654 1655 // Shuffle vector to get vec3. 1656 V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty), 1657 {0, 1, 2}, "extractVec"); 1658 return EmitFromMemory(V, Ty); 1659 } 1660 } 1661 } 1662 1663 // Atomic operations have to be done on integral types. 
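// Wrap the address in an LValue so that atomic types (and lvalues suitable
// for inline atomics) can be routed through EmitAtomicLoad below.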
1664 LValue AtomicLValue = 1665 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); 1666 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) { 1667 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal(); 1668 } 1669 1670 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile); 1671 if (isNontemporal) { 1672 llvm::MDNode *Node = llvm::MDNode::get( 1673 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1))); 1674 Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); 1675 } 1676 1677 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo); 1678 1679 if (EmitScalarRangeCheck(Load, Ty, Loc)) { 1680 // In order to prevent the optimizer from throwing away the check, don't 1681 // attach range metadata to the load. 1682 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) 1683 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) 1684 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo); 1685 1686 return EmitFromMemory(Load, Ty); 1687 } 1688 1689 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { 1690 // Bool has a different representation in memory than in registers. 1691 if (hasBooleanRepresentation(Ty)) { 1692 // This should really always be an i1, but sometimes it's already 1693 // an i8, and it's awkward to track those cases down. 1694 if (Value->getType()->isIntegerTy(1)) 1695 return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool"); 1696 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && 1697 "wrong value rep of bool"); 1698 } 1699 1700 return Value; 1701 } 1702 1703 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { 1704 // Bool has a different representation in memory than in registers. 1705 if (hasBooleanRepresentation(Ty)) { 1706 assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && 1707 "wrong value rep of bool"); 1708 return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool"); 1709 } 1710 1711 return Value; 1712 } 1713 1714 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr, 1715 bool Volatile, QualType Ty, 1716 LValueBaseInfo BaseInfo, 1717 TBAAAccessInfo TBAAInfo, 1718 bool isInit, bool isNontemporal) { 1719 if (!CGM.getCodeGenOpts().PreserveVec3Type) { 1720 // Handle vectors differently to get better performance. 1721 if (Ty->isVectorType()) { 1722 llvm::Type *SrcTy = Value->getType(); 1723 auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy); 1724 // Handle vec3 special. 1725 if (VecTy && VecTy->getNumElements() == 3) { 1726 // Our source is a vec3, do a shuffle vector to make it a vec4. 
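// The mask {0, 1, 2, undef} keeps the three source lanes and leaves the
// new fourth lane undefined.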
1727 llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1), 1728 Builder.getInt32(2), 1729 llvm::UndefValue::get(Builder.getInt32Ty())}; 1730 llvm::Value *MaskV = llvm::ConstantVector::get(Mask); 1731 Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy), 1732 MaskV, "extractVec"); 1733 SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4); 1734 } 1735 if (Addr.getElementType() != SrcTy) { 1736 Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp"); 1737 } 1738 } 1739 } 1740 1741 Value = EmitToMemory(Value, Ty); 1742 1743 LValue AtomicLValue = 1744 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo); 1745 if (Ty->isAtomicType() || 1746 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) { 1747 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit); 1748 return; 1749 } 1750 1751 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile); 1752 if (isNontemporal) { 1753 llvm::MDNode *Node = 1754 llvm::MDNode::get(Store->getContext(), 1755 llvm::ConstantAsMetadata::get(Builder.getInt32(1))); 1756 Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node); 1757 } 1758 1759 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo); 1760 } 1761 1762 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue, 1763 bool isInit) { 1764 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(), 1765 lvalue.getType(), lvalue.getBaseInfo(), 1766 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal()); 1767 } 1768 1769 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this 1770 /// method emits the address of the lvalue, then loads the result as an rvalue, 1771 /// returning the rvalue. 1772 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) { 1773 if (LV.isObjCWeak()) { 1774 // load of a __weak object. 1775 Address AddrWeakObj = LV.getAddress(); 1776 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this, 1777 AddrWeakObj)); 1778 } 1779 if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) { 1780 // In MRC mode, we do a load+autorelease. 1781 if (!getLangOpts().ObjCAutoRefCount) { 1782 return RValue::get(EmitARCLoadWeak(LV.getAddress())); 1783 } 1784 1785 // In ARC mode, we load retained and then consume the value. 1786 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress()); 1787 Object = EmitObjCConsumeObject(LV.getType(), Object); 1788 return RValue::get(Object); 1789 } 1790 1791 if (LV.isSimple()) { 1792 assert(!LV.getType()->isFunctionType()); 1793 1794 // Everything needs a load. 1795 return RValue::get(EmitLoadOfScalar(LV, Loc)); 1796 } 1797 1798 if (LV.isVectorElt()) { 1799 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(), 1800 LV.isVolatileQualified()); 1801 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(), 1802 "vecext")); 1803 } 1804 1805 // If this is a reference to a subset of the elements of a vector, either 1806 // shuffle the input or extract/insert them as appropriate. 1807 if (LV.isExtVectorElt()) 1808 return EmitLoadOfExtVectorElementLValue(LV); 1809 1810 // Global Register variables always invoke intrinsics 1811 if (LV.isGlobalReg()) 1812 return EmitLoadOfGlobalRegLValue(LV); 1813 1814 assert(LV.isBitField() && "Unknown LValue type!"); 1815 return EmitLoadOfBitfieldLValue(LV, Loc); 1816 } 1817 1818 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, 1819 SourceLocation Loc) { 1820 const CGBitFieldInfo &Info = LV.getBitFieldInfo(); 1821 1822 // Get the output type. 
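// The sequence below loads the whole storage unit and then isolates the
// field: signed bit-fields use a shl+ashr pair (which also sign-extends),
// unsigned ones use lshr+and to clear the unrelated bits.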
1823 llvm::Type *ResLTy = ConvertType(LV.getType()); 1824 1825 Address Ptr = LV.getBitFieldAddress(); 1826 llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load"); 1827 1828 if (Info.IsSigned) { 1829 assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize); 1830 unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size; 1831 if (HighBits) 1832 Val = Builder.CreateShl(Val, HighBits, "bf.shl"); 1833 if (Info.Offset + HighBits) 1834 Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr"); 1835 } else { 1836 if (Info.Offset) 1837 Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr"); 1838 if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize) 1839 Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize, 1840 Info.Size), 1841 "bf.clear"); 1842 } 1843 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast"); 1844 EmitScalarRangeCheck(Val, LV.getType(), Loc); 1845 return RValue::get(Val); 1846 } 1847 1848 // If this is a reference to a subset of the elements of a vector, create an 1849 // appropriate shufflevector. 1850 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) { 1851 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(), 1852 LV.isVolatileQualified()); 1853 1854 const llvm::Constant *Elts = LV.getExtVectorElts(); 1855 1856 // If the result of the expression is a non-vector type, we must be extracting 1857 // a single element. Just codegen as an extractelement. 1858 const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); 1859 if (!ExprVT) { 1860 unsigned InIdx = getAccessedFieldNo(0, Elts); 1861 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx); 1862 return RValue::get(Builder.CreateExtractElement(Vec, Elt)); 1863 } 1864 1865 // Always use shuffle vector to try to retain the original program structure. 1866 unsigned NumResultElts = ExprVT->getNumElements(); 1867 1868 SmallVector<llvm::Constant*, 4> Mask; 1869 for (unsigned i = 0; i != NumResultElts; ++i) 1870 Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts))); 1871 1872 llvm::Value *MaskV = llvm::ConstantVector::get(Mask); 1873 Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()), 1874 MaskV); 1875 return RValue::get(Vec); 1876 } 1877 1878 /// Generates an lvalue for a partial ext_vector access. 1879 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) { 1880 Address VectorAddress = LV.getExtVectorAddress(); 1881 const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); 1882 QualType EQT = ExprVT->getElementType(); 1883 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT); 1884 1885 Address CastToPointerElement = 1886 Builder.CreateElementBitCast(VectorAddress, VectorElementTy, 1887 "conv.ptr.element"); 1888 1889 const llvm::Constant *Elts = LV.getExtVectorElts(); 1890 unsigned ix = getAccessedFieldNo(0, Elts); 1891 1892 Address VectorBasePtrPlusIx = 1893 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix, 1894 "vector.elt"); 1895 1896 return VectorBasePtrPlusIx; 1897 } 1898 1899 /// Loads of global named registers are always calls to intrinsics.
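/// The register is identified by the metadata node attached to the LValue;
/// pointer-typed registers are read at the corresponding integer width and
/// converted back with inttoptr.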
1900 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) { 1901 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) && 1902 "Bad type for register variable"); 1903 llvm::MDNode *RegName = cast<llvm::MDNode>( 1904 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata()); 1905 1906 // We accept integer and pointer types only 1907 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType()); 1908 llvm::Type *Ty = OrigTy; 1909 if (OrigTy->isPointerTy()) 1910 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); 1911 llvm::Type *Types[] = { Ty }; 1912 1913 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); 1914 llvm::Value *Call = Builder.CreateCall( 1915 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName)); 1916 if (OrigTy->isPointerTy()) 1917 Call = Builder.CreateIntToPtr(Call, OrigTy); 1918 return RValue::get(Call); 1919 } 1920 1921 1922 /// EmitStoreThroughLValue - Store the specified rvalue into the specified 1923 /// lvalue, where both are guaranteed to have the same type, and that type 1924 /// is 'Ty'. 1925 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, 1926 bool isInit) { 1927 if (!Dst.isSimple()) { 1928 if (Dst.isVectorElt()) { 1929 // Read/modify/write the vector, inserting the new element. 1930 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(), 1931 Dst.isVolatileQualified()); 1932 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), 1933 Dst.getVectorIdx(), "vecins"); 1934 Builder.CreateStore(Vec, Dst.getVectorAddress(), 1935 Dst.isVolatileQualified()); 1936 return; 1937 } 1938 1939 // If this is an update of extended vector elements, insert them as 1940 // appropriate. 1941 if (Dst.isExtVectorElt()) 1942 return EmitStoreThroughExtVectorComponentLValue(Src, Dst); 1943 1944 if (Dst.isGlobalReg()) 1945 return EmitStoreThroughGlobalRegLValue(Src, Dst); 1946 1947 assert(Dst.isBitField() && "Unknown LValue type"); 1948 return EmitStoreThroughBitfieldLValue(Src, Dst); 1949 } 1950 1951 // There's special magic for assigning into an ARC-qualified l-value. 1952 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) { 1953 switch (Lifetime) { 1954 case Qualifiers::OCL_None: 1955 llvm_unreachable("present but none"); 1956 1957 case Qualifiers::OCL_ExplicitNone: 1958 // nothing special 1959 break; 1960 1961 case Qualifiers::OCL_Strong: 1962 if (isInit) { 1963 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal())); 1964 break; 1965 } 1966 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true); 1967 return; 1968 1969 case Qualifiers::OCL_Weak: 1970 if (isInit) 1971 // Initialize and then skip the primitive store. 1972 EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal()); 1973 else 1974 EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true); 1975 return; 1976 1977 case Qualifiers::OCL_Autoreleasing: 1978 Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(), 1979 Src.getScalarVal())); 1980 // fall into the normal path 1981 break; 1982 } 1983 } 1984 1985 if (Dst.isObjCWeak() && !Dst.isNonGC()) { 1986 // Assignment into a __weak object. 1987 Address LvalueDst = Dst.getAddress(); 1988 llvm::Value *src = Src.getScalarVal(); 1989 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); 1990 return; 1991 } 1992 1993 if (Dst.isObjCStrong() && !Dst.isNonGC()) { 1994 // Assignment into a __strong object.
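// For the ivar case below, the runtime entry point also wants the byte
// offset of the ivar within the object, computed as the difference of the
// two addresses after ptrtoint.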
1995 Address LvalueDst = Dst.getAddress(); 1996 llvm::Value *src = Src.getScalarVal(); 1997 if (Dst.isObjCIvar()) { 1998 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); 1999 llvm::Type *ResultType = IntPtrTy; 2000 Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp()); 2001 llvm::Value *RHS = dst.getPointer(); 2002 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); 2003 llvm::Value *LHS = 2004 Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType, 2005 "sub.ptr.lhs.cast"); 2006 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); 2007 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, 2008 BytesBetween); 2009 } else if (Dst.isGlobalObjCRef()) { 2010 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst, 2011 Dst.isThreadLocalRef()); 2012 } 2013 else 2014 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); 2015 return; 2016 } 2017 2018 assert(Src.isScalar() && "Can't emit an agg store with this method"); 2019 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit); 2020 } 2021 2022 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, 2023 llvm::Value **Result) { 2024 const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); 2025 llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType()); 2026 Address Ptr = Dst.getBitFieldAddress(); 2027 2028 // Get the source value, truncated to the width of the bit-field. 2029 llvm::Value *SrcVal = Src.getScalarVal(); 2030 2031 // Cast the source to the storage type and shift it into place. 2032 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(), 2033 /*IsSigned=*/false); 2034 llvm::Value *MaskedVal = SrcVal; 2035 2036 // See if there are other bits in the bitfield's storage we'll need to load 2037 // and mask together with source before storing. 2038 if (Info.StorageSize != Info.Size) { 2039 assert(Info.StorageSize > Info.Size && "Invalid bitfield size."); 2040 llvm::Value *Val = 2041 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load"); 2042 2043 // Mask the source value as needed. 2044 if (!hasBooleanRepresentation(Dst.getType())) 2045 SrcVal = Builder.CreateAnd(SrcVal, 2046 llvm::APInt::getLowBitsSet(Info.StorageSize, 2047 Info.Size), 2048 "bf.value"); 2049 MaskedVal = SrcVal; 2050 if (Info.Offset) 2051 SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl"); 2052 2053 // Mask out the original value. 2054 Val = Builder.CreateAnd(Val, 2055 ~llvm::APInt::getBitsSet(Info.StorageSize, 2056 Info.Offset, 2057 Info.Offset + Info.Size), 2058 "bf.clear"); 2059 2060 // Or together the unchanged values and the source value. 2061 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set"); 2062 } else { 2063 assert(Info.Offset == 0); 2064 } 2065 2066 // Write the new value back out. 2067 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified()); 2068 2069 // Return the new value of the bit-field, if requested. 2070 if (Result) { 2071 llvm::Value *ResultVal = MaskedVal; 2072 2073 // Sign extend the value if needed. 
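// As in EmitLoadOfBitfieldLValue, a shl+ashr pair replicates the sign bit
// of a signed bit-field into the high bits.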
2074 if (Info.IsSigned) { 2075 assert(Info.Size <= Info.StorageSize); 2076 unsigned HighBits = Info.StorageSize - Info.Size; 2077 if (HighBits) { 2078 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl"); 2079 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr"); 2080 } 2081 } 2082 2083 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned, 2084 "bf.result.cast"); 2085 *Result = EmitFromMemory(ResultVal, Dst.getType()); 2086 } 2087 } 2088 2089 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, 2090 LValue Dst) { 2091 // This access turns into a read/modify/write of the vector. Load the input 2092 // value now. 2093 llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(), 2094 Dst.isVolatileQualified()); 2095 const llvm::Constant *Elts = Dst.getExtVectorElts(); 2096 2097 llvm::Value *SrcVal = Src.getScalarVal(); 2098 2099 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) { 2100 unsigned NumSrcElts = VTy->getNumElements(); 2101 unsigned NumDstElts = Vec->getType()->getVectorNumElements(); 2102 if (NumDstElts == NumSrcElts) { 2103 // Use a shufflevector if the src and destination have the same number 2104 // of elements, and restore the vector mask since it is on the side it 2105 // will be stored. 2106 SmallVector<llvm::Constant*, 4> Mask(NumDstElts); 2107 for (unsigned i = 0; i != NumSrcElts; ++i) 2108 Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i); 2109 2110 llvm::Value *MaskV = llvm::ConstantVector::get(Mask); 2111 Vec = Builder.CreateShuffleVector(SrcVal, 2112 llvm::UndefValue::get(Vec->getType()), 2113 MaskV); 2114 } else if (NumDstElts > NumSrcElts) { 2115 // Extend the source vector to the same length and then shuffle it 2116 // into the destination. 2117 // FIXME: since we're shuffling with undef, can we just use the indices 2118 // into that? This could be simpler. 2119 SmallVector<llvm::Constant*, 4> ExtMask; 2120 for (unsigned i = 0; i != NumSrcElts; ++i) 2121 ExtMask.push_back(Builder.getInt32(i)); 2122 ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty)); 2123 llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask); 2124 llvm::Value *ExtSrcVal = 2125 Builder.CreateShuffleVector(SrcVal, 2126 llvm::UndefValue::get(SrcVal->getType()), 2127 ExtMaskV); 2128 // Build the identity mask. 2129 SmallVector<llvm::Constant*, 4> Mask; 2130 for (unsigned i = 0; i != NumDstElts; ++i) 2131 Mask.push_back(Builder.getInt32(i)); 2132 2133 // When the vector size is odd and .odd or .hi is used, the last element 2134 // of the Elts constant array will be one past the size of the vector. 2135 // Ignore the last element here, if it is greater than the mask size. 2136 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size()) 2137 NumSrcElts--; 2138 2139 // Modify the mask to pick the elements shuffled in from the source. 2140 for (unsigned i = 0; i != NumSrcElts; ++i) 2141 Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts); 2142 llvm::Value *MaskV = llvm::ConstantVector::get(Mask); 2143 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV); 2144 } else { 2145 // We should never shorten the vector. 2146 llvm_unreachable("unexpected shorten vector length"); 2147 } 2148 } else { 2149 // If the Src is a scalar (not a vector) it must be updating one element.
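// e.g. 'V.y = s' becomes an insertelement at the single accessed lane.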
2150 unsigned InIdx = getAccessedFieldNo(0, Elts); 2151 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx); 2152 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt); 2153 } 2154 2155 Builder.CreateStore(Vec, Dst.getExtVectorAddress(), 2156 Dst.isVolatileQualified()); 2157 } 2158 2159 /// Stores of global named registers are always calls to intrinsics. 2160 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) { 2161 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) && 2162 "Bad type for register variable"); 2163 llvm::MDNode *RegName = cast<llvm::MDNode>( 2164 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata()); 2165 assert(RegName && "Register LValue is not metadata"); 2166 2167 // We accept integer and pointer types only 2168 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType()); 2169 llvm::Type *Ty = OrigTy; 2170 if (OrigTy->isPointerTy()) 2171 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy); 2172 llvm::Type *Types[] = { Ty }; 2173 2174 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); 2175 llvm::Value *Value = Src.getScalarVal(); 2176 if (OrigTy->isPointerTy()) 2177 Value = Builder.CreatePtrToInt(Value, Ty); 2178 Builder.CreateCall( 2179 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value}); 2180 } 2181 2182 // setObjCGCLValueClass - sets the class of the lvalue for the purpose of 2183 // generating the write-barrier API. It is currently a global, ivar, 2184 // or neither. 2185 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, 2186 LValue &LV, 2187 bool IsMemberAccess=false) { 2188 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC) 2189 return; 2190 2191 if (isa<ObjCIvarRefExpr>(E)) { 2192 QualType ExpTy = E->getType(); 2193 if (IsMemberAccess && ExpTy->isPointerType()) { 2194 // If the ivar is a structure pointer, assigning to a field of this 2195 // struct follows gcc's behavior and conservatively makes it a non-ivar 2196 // write-barrier. 2197 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType(); 2198 if (ExpTy->isRecordType()) { 2199 LV.setObjCIvar(false); 2200 return; 2201 } 2202 } 2203 LV.setObjCIvar(true); 2204 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E)); 2205 LV.setBaseIvarExp(Exp->getBase()); 2206 LV.setObjCArray(E->getType()->isArrayType()); 2207 return; 2208 } 2209 2210 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) { 2211 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) { 2212 if (VD->hasGlobalStorage()) { 2213 LV.setGlobalObjCRef(true); 2214 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None); 2215 } 2216 } 2217 LV.setObjCArray(E->getType()->isArrayType()); 2218 return; 2219 } 2220 2221 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) { 2222 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 2223 return; 2224 } 2225 2226 if (const auto *Exp = dyn_cast<ParenExpr>(E)) { 2227 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 2228 if (LV.isObjCIvar()) { 2229 // If the cast is to a structure pointer, follow gcc's behavior and make 2230 // it a non-ivar write-barrier.
2231 QualType ExpTy = E->getType(); 2232 if (ExpTy->isPointerType()) 2233 ExpTy = ExpTy->getAs<PointerType>()->getPointeeType(); 2234 if (ExpTy->isRecordType()) 2235 LV.setObjCIvar(false); 2236 } 2237 return; 2238 } 2239 2240 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) { 2241 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV); 2242 return; 2243 } 2244 2245 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) { 2246 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 2247 return; 2248 } 2249 2250 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) { 2251 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 2252 return; 2253 } 2254 2255 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) { 2256 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess); 2257 return; 2258 } 2259 2260 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) { 2261 setObjCGCLValueClass(Ctx, Exp->getBase(), LV); 2262 if (LV.isObjCIvar() && !LV.isObjCArray()) 2263 // Using array syntax to assign to what an ivar points to is not the 2264 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0; 2265 LV.setObjCIvar(false); 2266 else if (LV.isGlobalObjCRef() && !LV.isObjCArray()) 2267 // Using array syntax to assign to what a global points to is not the 2268 // same as assigning to the global itself. {id *G;} G[i] = 0; 2269 LV.setGlobalObjCRef(false); 2270 return; 2271 } 2272 2273 if (const auto *Exp = dyn_cast<MemberExpr>(E)) { 2274 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true); 2275 // We don't know if the member is an 'ivar', but this flag is looked at 2276 // only in the context of LV.isObjCIvar(). 2277 LV.setObjCArray(E->getType()->isArrayType()); 2278 return; 2279 } 2280 } 2281 2282 static llvm::Value * 2283 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF, 2284 llvm::Value *V, llvm::Type *IRType, 2285 StringRef Name = StringRef()) { 2286 unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace(); 2287 return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name); 2288 } 2289 2290 static LValue EmitThreadPrivateVarDeclLValue( 2291 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, 2292 llvm::Type *RealVarTy, SourceLocation Loc) { 2293 Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc); 2294 Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy); 2295 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); 2296 } 2297 2298 static Address emitDeclTargetLinkVarDeclLValue(CodeGenFunction &CGF, 2299 const VarDecl *VD, QualType T) { 2300 llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res = 2301 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD); 2302 if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_To) 2303 return Address::invalid(); 2304 assert(*Res == OMPDeclareTargetDeclAttr::MT_Link && "Expected link clause"); 2305 QualType PtrTy = CGF.getContext().getPointerType(VD->getType()); 2306 Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD); 2307 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>()); 2308 } 2309 2310 Address 2311 CodeGenFunction::EmitLoadOfReference(LValue RefLVal, 2312 LValueBaseInfo *PointeeBaseInfo, 2313 TBAAAccessInfo *PointeeTBAAInfo) { 2314 llvm::LoadInst *Load = Builder.CreateLoad(RefLVal.getAddress(), 2315 RefLVal.isVolatile()); 2316 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo()); 2317 2318 CharUnits Align = getNaturalTypeAlignment(RefLVal.getType()->getPointeeType(), 2319 PointeeBaseInfo, PointeeTBAAInfo, 2320 /*
forPointeeType= */ true); 2321 return Address(Load, Align); 2322 } 2323 2324 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) { 2325 LValueBaseInfo PointeeBaseInfo; 2326 TBAAAccessInfo PointeeTBAAInfo; 2327 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo, 2328 &PointeeTBAAInfo); 2329 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(), 2330 PointeeBaseInfo, PointeeTBAAInfo); 2331 } 2332 2333 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr, 2334 const PointerType *PtrTy, 2335 LValueBaseInfo *BaseInfo, 2336 TBAAAccessInfo *TBAAInfo) { 2337 llvm::Value *Addr = Builder.CreateLoad(Ptr); 2338 return Address(Addr, getNaturalTypeAlignment(PtrTy->getPointeeType(), 2339 BaseInfo, TBAAInfo, 2340 /*forPointeeType=*/true)); 2341 } 2342 2343 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr, 2344 const PointerType *PtrTy) { 2345 LValueBaseInfo BaseInfo; 2346 TBAAAccessInfo TBAAInfo; 2347 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo); 2348 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo); 2349 } 2350 2351 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, 2352 const Expr *E, const VarDecl *VD) { 2353 QualType T = E->getType(); 2354 2355 // If it's thread_local, emit a call to its wrapper function instead. 2356 if (VD->getTLSKind() == VarDecl::TLS_Dynamic && 2357 CGF.CGM.getCXXABI().usesThreadWrapperFunction()) 2358 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T); 2359 // Check if the variable is marked as declare target with link clause in 2360 // device codegen. 2361 if (CGF.getLangOpts().OpenMPIsDevice) { 2362 Address Addr = emitDeclTargetLinkVarDeclLValue(CGF, VD, T); 2363 if (Addr.isValid()) 2364 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); 2365 } 2366 2367 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD); 2368 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType()); 2369 V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy); 2370 CharUnits Alignment = CGF.getContext().getDeclAlign(VD); 2371 Address Addr(V, Alignment); 2372 // Emit reference to the private copy of the variable if it is an OpenMP 2373 // threadprivate variable. 2374 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd && 2375 VD->hasAttr<OMPThreadPrivateDeclAttr>()) { 2376 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy, 2377 E->getExprLoc()); 2378 } 2379 LValue LV = VD->getType()->isReferenceType() ? 2380 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(), 2381 AlignmentSource::Decl) : 2382 CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl); 2383 setObjCGCLValueClass(CGF.getContext(), E, LV); 2384 return LV; 2385 } 2386 2387 static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM, 2388 const FunctionDecl *FD) { 2389 if (FD->hasAttr<WeakRefAttr>()) { 2390 ConstantAddress aliasee = CGM.GetWeakRefReference(FD); 2391 return aliasee.getPointer(); 2392 } 2393 2394 llvm::Constant *V = CGM.GetAddrOfFunction(FD); 2395 if (!FD->hasPrototype()) { 2396 if (const FunctionProtoType *Proto = 2397 FD->getType()->getAs<FunctionProtoType>()) { 2398 // Ugly case: for a K&R-style definition, the type of the definition 2399 // isn't the same as the type of a use. Correct for this with a 2400 // bitcast. 
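// Build the unprototyped function type "T ()" and bitcast the definition's
// address to a pointer to it.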
2401 QualType NoProtoType = 2402 CGM.getContext().getFunctionNoProtoType(Proto->getReturnType()); 2403 NoProtoType = CGM.getContext().getPointerType(NoProtoType); 2404 V = llvm::ConstantExpr::getBitCast(V, 2405 CGM.getTypes().ConvertType(NoProtoType)); 2406 } 2407 } 2408 return V; 2409 } 2410 2411 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, 2412 const Expr *E, const FunctionDecl *FD) { 2413 llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD); 2414 CharUnits Alignment = CGF.getContext().getDeclAlign(FD); 2415 return CGF.MakeAddrLValue(V, E->getType(), Alignment, 2416 AlignmentSource::Decl); 2417 } 2418 2419 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, 2420 llvm::Value *ThisValue) { 2421 QualType TagType = CGF.getContext().getTagDeclType(FD->getParent()); 2422 LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType); 2423 return CGF.EmitLValueForField(LV, FD); 2424 } 2425 2426 /// Named Registers are named metadata pointing to the register name 2427 /// which will be read from/written to as an argument to the intrinsic 2428 /// @llvm.read/write_register. 2429 /// So far, only the name is being passed down, but other options such as 2430 /// register type, allocation type or even optimization options could be 2431 /// passed down via the metadata node. 2432 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) { 2433 SmallString<64> Name("llvm.named.register."); 2434 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); 2435 assert(Asm->getLabel().size() < 64-Name.size() && 2436 "Register name too big"); 2437 Name.append(Asm->getLabel()); 2438 llvm::NamedMDNode *M = 2439 CGM.getModule().getOrInsertNamedMetadata(Name); 2440 if (M->getNumOperands() == 0) { 2441 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), 2442 Asm->getLabel()); 2443 llvm::Metadata *Ops[] = {Str}; 2444 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); 2445 } 2446 2447 CharUnits Alignment = CGM.getContext().getDeclAlign(VD); 2448 2449 llvm::Value *Ptr = 2450 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)); 2451 return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType()); 2452 } 2453 2454 /// Determine whether we can emit a reference to \p VD from the current 2455 /// context, despite not necessarily having seen an odr-use of the variable in 2456 /// this context. 2457 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, 2458 const DeclRefExpr *E, 2459 const VarDecl *VD, 2460 bool IsConstant) { 2461 // For a variable declared in an enclosing scope, do not emit a spurious 2462 // reference even if we have a capture, as that will emit an unwarranted 2463 // reference to our capture state, and will likely generate worse code than 2464 // emitting a local copy. 2465 if (E->refersToEnclosingVariableOrCapture()) 2466 return false; 2467 2468 // For a local declaration declared in this function, we can always reference 2469 // it even if we don't have an odr-use. 2470 if (VD->hasLocalStorage()) { 2471 return VD->getDeclContext() == 2472 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl); 2473 } 2474 2475 // For a global declaration, we can emit a reference to it if we know 2476 // for sure that we are able to emit a definition of it. 2477 VD = VD->getDefinition(CGF.getContext()); 2478 if (!VD) 2479 return false; 2480 2481 // Don't emit a spurious reference if it might be to a variable that only 2482 // exists on a different device / target. 2483 // FIXME: This is unnecessarily broad. 
Check whether this would actually be a 2484 // cross-target reference. 2485 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA || 2486 CGF.getLangOpts().OpenCL) { 2487 return false; 2488 } 2489 2490 // We can emit a spurious reference only if the linkage implies that we'll 2491 // be emitting a non-interposable symbol that will be retained until link 2492 // time. 2493 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) { 2494 case llvm::GlobalValue::ExternalLinkage: 2495 case llvm::GlobalValue::LinkOnceODRLinkage: 2496 case llvm::GlobalValue::WeakODRLinkage: 2497 case llvm::GlobalValue::InternalLinkage: 2498 case llvm::GlobalValue::PrivateLinkage: 2499 return true; 2500 default: 2501 return false; 2502 } 2503 } 2504 2505 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { 2506 const NamedDecl *ND = E->getDecl(); 2507 QualType T = E->getType(); 2508 2509 assert(E->isNonOdrUse() != NOUR_Unevaluated && 2510 "should not emit an unevaluated operand"); 2511 2512 if (const auto *VD = dyn_cast<VarDecl>(ND)) { 2513 // Global Named registers access via intrinsics only 2514 if (VD->getStorageClass() == SC_Register && 2515 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) 2516 return EmitGlobalNamedRegister(VD, CGM); 2517 2518 // If this DeclRefExpr does not constitute an odr-use of the variable, 2519 // we're not permitted to emit a reference to it in general, and it might 2520 // not be captured if capture would be necessary for a use. Emit the 2521 // constant value directly instead. 2522 if (E->isNonOdrUse() == NOUR_Constant && 2523 (VD->getType()->isReferenceType() || 2524 !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) { 2525 VD->getAnyInitializer(VD); 2526 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract( 2527 E->getLocation(), *VD->evaluateValue(), VD->getType()); 2528 assert(Val && "failed to emit constant expression"); 2529 2530 Address Addr = Address::invalid(); 2531 if (!VD->getType()->isReferenceType()) { 2532 // Spill the constant value to a global. 2533 Addr = CGM.createUnnamedGlobalFrom(*VD, Val, 2534 getContext().getDeclAlign(VD)); 2535 } else { 2536 // Should we be using the alignment of the constant pointer we emitted? 2537 CharUnits Alignment = 2538 getNaturalTypeAlignment(E->getType(), 2539 /* BaseInfo= */ nullptr, 2540 /* TBAAInfo= */ nullptr, 2541 /* forPointeeType= */ true); 2542 Addr = Address(Val, Alignment); 2543 } 2544 return MakeAddrLValue(Addr, T, AlignmentSource::Decl); 2545 } 2546 2547 // FIXME: Handle other kinds of non-odr-use DeclRefExprs. 2548 2549 // Check for captured variables. 
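// Lambda captures are looked up in LambdaCaptureFields; captured-statement
// regions go through CapturedStmtInfo; block captures are handled last.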
2550 if (E->refersToEnclosingVariableOrCapture()) { 2551 VD = VD->getCanonicalDecl(); 2552 if (auto *FD = LambdaCaptureFields.lookup(VD)) 2553 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); 2554 else if (CapturedStmtInfo) { 2555 auto I = LocalDeclMap.find(VD); 2556 if (I != LocalDeclMap.end()) { 2557 if (VD->getType()->isReferenceType()) 2558 return EmitLoadOfReferenceLValue(I->second, VD->getType(), 2559 AlignmentSource::Decl); 2560 return MakeAddrLValue(I->second, T); 2561 } 2562 LValue CapLVal = 2563 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), 2564 CapturedStmtInfo->getContextValue()); 2565 return MakeAddrLValue( 2566 Address(CapLVal.getPointer(), getContext().getDeclAlign(VD)), 2567 CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl), 2568 CapLVal.getTBAAInfo()); 2569 } 2570 2571 assert(isa<BlockDecl>(CurCodeDecl)); 2572 Address addr = GetAddrOfBlockDecl(VD); 2573 return MakeAddrLValue(addr, T, AlignmentSource::Decl); 2574 } 2575 } 2576 2577 // FIXME: We should be able to assert this for FunctionDecls as well! 2578 // FIXME: We should be able to assert this for all DeclRefExprs, not just 2579 // those with a valid source location. 2580 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() || 2581 !E->getLocation().isValid()) && 2582 "Should not use decl without marking it used!"); 2583 2584 if (ND->hasAttr<WeakRefAttr>()) { 2585 const auto *VD = cast<ValueDecl>(ND); 2586 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD); 2587 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl); 2588 } 2589 2590 if (const auto *VD = dyn_cast<VarDecl>(ND)) { 2591 // Check if this is a global variable. 2592 if (VD->hasLinkage() || VD->isStaticDataMember()) 2593 return EmitGlobalVarDeclLValue(*this, E, VD); 2594 2595 Address addr = Address::invalid(); 2596 2597 // The variable should generally be present in the local decl map. 2598 auto iter = LocalDeclMap.find(VD); 2599 if (iter != LocalDeclMap.end()) { 2600 addr = iter->second; 2601 2602 // Otherwise, it might be static local we haven't emitted yet for 2603 // some reason; most likely, because it's in an outer function. 2604 } else if (VD->isStaticLocal()) { 2605 addr = Address(CGM.getOrCreateStaticVarDecl( 2606 *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)), 2607 getContext().getDeclAlign(VD)); 2608 2609 // No other cases for now. 2610 } else { 2611 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?"); 2612 } 2613 2614 2615 // Check for OpenMP threadprivate variables. 2616 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd && 2617 VD->hasAttr<OMPThreadPrivateDeclAttr>()) { 2618 return EmitThreadPrivateVarDeclLValue( 2619 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()), 2620 E->getExprLoc()); 2621 } 2622 2623 // Drill into block byref variables. 2624 bool isBlockByref = VD->isEscapingByref(); 2625 if (isBlockByref) { 2626 addr = emitBlockByrefAddress(addr, VD); 2627 } 2628 2629 // Drill into reference types. 2630 LValue LV = VD->getType()->isReferenceType() ? 
2631 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) : 2632 MakeAddrLValue(addr, T, AlignmentSource::Decl); 2633 2634 bool isLocalStorage = VD->hasLocalStorage(); 2635 2636 bool NonGCable = isLocalStorage && 2637 !VD->getType()->isReferenceType() && 2638 !isBlockByref; 2639 if (NonGCable) { 2640 LV.getQuals().removeObjCGCAttr(); 2641 LV.setNonGC(true); 2642 } 2643 2644 bool isImpreciseLifetime = 2645 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); 2646 if (isImpreciseLifetime) 2647 LV.setARCPreciseLifetime(ARCImpreciseLifetime); 2648 setObjCGCLValueClass(getContext(), E, LV); 2649 return LV; 2650 } 2651 2652 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 2653 return EmitFunctionDeclLValue(*this, E, FD); 2654 2655 // FIXME: While we're emitting a binding from an enclosing scope, all other 2656 // DeclRefExprs we see should be implicitly treated as if they also refer to 2657 // an enclosing scope. 2658 if (const auto *BD = dyn_cast<BindingDecl>(ND)) 2659 return EmitLValue(BD->getBinding()); 2660 2661 llvm_unreachable("Unhandled DeclRefExpr"); 2662 } 2663 2664 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { 2665 // __extension__ doesn't affect lvalue-ness. 2666 if (E->getOpcode() == UO_Extension) 2667 return EmitLValue(E->getSubExpr()); 2668 2669 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); 2670 switch (E->getOpcode()) { 2671 default: llvm_unreachable("Unknown unary operator lvalue!"); 2672 case UO_Deref: { 2673 QualType T = E->getSubExpr()->getType()->getPointeeType(); 2674 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); 2675 2676 LValueBaseInfo BaseInfo; 2677 TBAAAccessInfo TBAAInfo; 2678 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo, 2679 &TBAAInfo); 2680 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo); 2681 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); 2682 2683 // We should not generate __weak write barrier on indirect reference 2684 // of a pointer to object; as in void foo (__weak id *param); *param = 0; 2685 // But, we continue to generate __strong write barrier on indirect write 2686 // into a pointer to object. 2687 if (getLangOpts().ObjC && 2688 getLangOpts().getGC() != LangOptions::NonGC && 2689 LV.isObjCWeak()) 2690 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 2691 return LV; 2692 } 2693 case UO_Real: 2694 case UO_Imag: { 2695 LValue LV = EmitLValue(E->getSubExpr()); 2696 assert(LV.isSimple() && "real/imag on non-ordinary l-value"); 2697 2698 // __real is valid on scalars. This is a faster way of testing that. 2699 // __imag can only produce an rvalue on scalars. 2700 if (E->getOpcode() == UO_Real && 2701 !LV.getAddress().getElementType()->isStructTy()) { 2702 assert(E->getSubExpr()->getType()->isArithmeticType()); 2703 return LV; 2704 } 2705 2706 QualType T = ExprTy->castAs<ComplexType>()->getElementType(); 2707 2708 Address Component = 2709 (E->getOpcode() == UO_Real 2710 ? 
emitAddrOfRealComponent(LV.getAddress(), LV.getType()) 2711 : emitAddrOfImagComponent(LV.getAddress(), LV.getType())); 2712 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(), 2713 CGM.getTBAAInfoForSubobject(LV, T)); 2714 ElemLV.getQuals().addQualifiers(LV.getQuals()); 2715 return ElemLV; 2716 } 2717 case UO_PreInc: 2718 case UO_PreDec: { 2719 LValue LV = EmitLValue(E->getSubExpr()); 2720 bool isInc = E->getOpcode() == UO_PreInc; 2721 2722 if (E->getType()->isAnyComplexType()) 2723 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); 2724 else 2725 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); 2726 return LV; 2727 } 2728 } 2729 } 2730 2731 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { 2732 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), 2733 E->getType(), AlignmentSource::Decl); 2734 } 2735 2736 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { 2737 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), 2738 E->getType(), AlignmentSource::Decl); 2739 } 2740 2741 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { 2742 auto SL = E->getFunctionName(); 2743 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); 2744 StringRef FnName = CurFn->getName(); 2745 if (FnName.startswith("\01")) 2746 FnName = FnName.substr(1); 2747 StringRef NameItems[] = { 2748 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName}; 2749 std::string GVName = llvm::join(NameItems, NameItems + 2, "."); 2750 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) { 2751 std::string Name = SL->getString(); 2752 if (!Name.empty()) { 2753 unsigned Discriminator = 2754 CGM.getCXXABI().getMangleContext().getBlockId(BD, true); 2755 if (Discriminator) 2756 Name += "_" + Twine(Discriminator + 1).str(); 2757 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str()); 2758 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2759 } else { 2760 auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str()); 2761 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2762 } 2763 } 2764 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); 2765 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl); 2766 } 2767 2768 /// Emit a type description suitable for use by a runtime sanitizer library. The 2769 /// format of a type descriptor is 2770 /// 2771 /// \code 2772 /// { i16 TypeKind, i16 TypeInfo } 2773 /// \endcode 2774 /// 2775 /// followed by an array of i8 containing the type name. TypeKind is 0 for an 2776 /// integer, 1 for a floating point value, and -1 for anything else. 2777 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { 2778 // Only emit each type's descriptor once. 2779 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) 2780 return C; 2781 2782 uint16_t TypeKind = -1; 2783 uint16_t TypeInfo = 0; 2784 2785 if (T->isIntegerType()) { 2786 TypeKind = 0; 2787 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | 2788 (T->isSignedIntegerType() ? 1 : 0); 2789 } else if (T->isFloatingType()) { 2790 TypeKind = 1; 2791 TypeInfo = getContext().getTypeSize(T); 2792 } 2793 2794 // Format the type name as if for a diagnostic, including quotes and 2795 // optionally an 'aka'. 
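// e.g. 'Size' (aka 'unsigned long'); the exact text comes from the
// diagnostic engine, so this is only illustrative.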
2796 SmallString<32> Buffer; 2797 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, 2798 (intptr_t)T.getAsOpaquePtr(), 2799 StringRef(), StringRef(), None, Buffer, 2800 None); 2801 2802 llvm::Constant *Components[] = { 2803 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), 2804 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) 2805 }; 2806 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); 2807 2808 auto *GV = new llvm::GlobalVariable( 2809 CGM.getModule(), Descriptor->getType(), 2810 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); 2811 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 2812 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); 2813 2814 // Remember the descriptor for this type. 2815 CGM.setTypeDescriptorInMap(T, GV); 2816 2817 return GV; 2818 } 2819 2820 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { 2821 llvm::Type *TargetTy = IntPtrTy; 2822 2823 if (V->getType() == TargetTy) 2824 return V; 2825 2826 // Floating-point types which fit into intptr_t are bitcast to integers 2827 // and then passed directly (after zero-extension, if necessary). 2828 if (V->getType()->isFloatingPointTy()) { 2829 unsigned Bits = V->getType()->getPrimitiveSizeInBits(); 2830 if (Bits <= TargetTy->getIntegerBitWidth()) 2831 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), 2832 Bits)); 2833 } 2834 2835 // Integers which fit in intptr_t are zero-extended and passed directly. 2836 if (V->getType()->isIntegerTy() && 2837 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) 2838 return Builder.CreateZExt(V, TargetTy); 2839 2840 // Pointers are passed directly, everything else is passed by address. 2841 if (!V->getType()->isPointerTy()) { 2842 Address Ptr = CreateDefaultAlignTempAlloca(V->getType()); 2843 Builder.CreateStore(V, Ptr); 2844 V = Ptr.getPointer(); 2845 } 2846 return Builder.CreatePtrToInt(V, TargetTy); 2847 } 2848 2849 /// Emit a representation of a SourceLocation for passing to a handler 2850 /// in a sanitizer runtime library. The format for this data is: 2851 /// \code 2852 /// struct SourceLocation { 2853 /// const char *Filename; 2854 /// int32_t Line, Column; 2855 /// }; 2856 /// \endcode 2857 /// For an invalid SourceLocation, the Filename pointer is null. 
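/// Path components may be stripped from the filename below, as controlled by
/// CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip.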
2858 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { 2859 llvm::Constant *Filename; 2860 int Line, Column; 2861 2862 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); 2863 if (PLoc.isValid()) { 2864 StringRef FilenameString = PLoc.getFilename(); 2865 2866 int PathComponentsToStrip = 2867 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip; 2868 if (PathComponentsToStrip < 0) { 2869 assert(PathComponentsToStrip != INT_MIN); 2870 int PathComponentsToKeep = -PathComponentsToStrip; 2871 auto I = llvm::sys::path::rbegin(FilenameString); 2872 auto E = llvm::sys::path::rend(FilenameString); 2873 while (I != E && --PathComponentsToKeep) 2874 ++I; 2875 2876 FilenameString = FilenameString.substr(I - E); 2877 } else if (PathComponentsToStrip > 0) { 2878 auto I = llvm::sys::path::begin(FilenameString); 2879 auto E = llvm::sys::path::end(FilenameString); 2880 while (I != E && PathComponentsToStrip--) 2881 ++I; 2882 2883 if (I != E) 2884 FilenameString = 2885 FilenameString.substr(I - llvm::sys::path::begin(FilenameString)); 2886 else 2887 FilenameString = llvm::sys::path::filename(FilenameString); 2888 } 2889 2890 auto FilenameGV = CGM.GetAddrOfConstantCString(FilenameString, ".src"); 2891 CGM.getSanitizerMetadata()->disableSanitizerForGlobal( 2892 cast<llvm::GlobalVariable>(FilenameGV.getPointer())); 2893 Filename = FilenameGV.getPointer(); 2894 Line = PLoc.getLine(); 2895 Column = PLoc.getColumn(); 2896 } else { 2897 Filename = llvm::Constant::getNullValue(Int8PtrTy); 2898 Line = Column = 0; 2899 } 2900 2901 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), 2902 Builder.getInt32(Column)}; 2903 2904 return llvm::ConstantStruct::getAnon(Data); 2905 } 2906 2907 namespace { 2908 /// Specify under what conditions this check can be recovered 2909 enum class CheckRecoverableKind { 2910 /// Always terminate program execution if this check fails. 2911 Unrecoverable, 2912 /// Check supports recovering, runtime has both fatal (noreturn) and 2913 /// non-fatal handlers for this check. 2914 Recoverable, 2915 /// Runtime conditionally aborts, always need to support recovery. 2916 AlwaysRecoverable 2917 }; 2918 } 2919 2920 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { 2921 assert(Kind.countPopulation() == 1); 2922 if (Kind == SanitizerKind::Vptr) 2923 return CheckRecoverableKind::AlwaysRecoverable; 2924 else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable) 2925 return CheckRecoverableKind::Unrecoverable; 2926 else 2927 return CheckRecoverableKind::Recoverable; 2928 } 2929 2930 namespace { 2931 struct SanitizerHandlerInfo { 2932 char const *const Name; 2933 unsigned Version; 2934 }; 2935 } 2936 2937 const SanitizerHandlerInfo SanitizerHandlers[] = { 2938 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version}, 2939 LIST_SANITIZER_CHECKS 2940 #undef SANITIZER_CHECK 2941 }; 2942 2943 static void emitCheckHandlerCall(CodeGenFunction &CGF, 2944 llvm::FunctionType *FnType, 2945 ArrayRef<llvm::Value *> FnArgs, 2946 SanitizerHandler CheckHandler, 2947 CheckRecoverableKind RecoverKind, bool IsFatal, 2948 llvm::BasicBlock *ContBB) { 2949 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); 2950 Optional<ApplyDebugLocation> DL; 2951 if (!CGF.Builder.getCurrentDebugLocation()) { 2952 // Ensure that the call has at least an artificial debug location. 
2953 DL.emplace(CGF, SourceLocation()); 2954 } 2955 bool NeedsAbortSuffix = 2956 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; 2957 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime; 2958 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler]; 2959 const StringRef CheckName = CheckInfo.Name; 2960 std::string FnName = "__ubsan_handle_" + CheckName.str(); 2961 if (CheckInfo.Version && !MinimalRuntime) 2962 FnName += "_v" + llvm::utostr(CheckInfo.Version); 2963 if (MinimalRuntime) 2964 FnName += "_minimal"; 2965 if (NeedsAbortSuffix) 2966 FnName += "_abort"; 2967 bool MayReturn = 2968 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; 2969 2970 llvm::AttrBuilder B; 2971 if (!MayReturn) { 2972 B.addAttribute(llvm::Attribute::NoReturn) 2973 .addAttribute(llvm::Attribute::NoUnwind); 2974 } 2975 B.addAttribute(llvm::Attribute::UWTable); 2976 2977 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction( 2978 FnType, FnName, 2979 llvm::AttributeList::get(CGF.getLLVMContext(), 2980 llvm::AttributeList::FunctionIndex, B), 2981 /*Local=*/true); 2982 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs); 2983 if (!MayReturn) { 2984 HandlerCall->setDoesNotReturn(); 2985 CGF.Builder.CreateUnreachable(); 2986 } else { 2987 CGF.Builder.CreateBr(ContBB); 2988 } 2989 } 2990 2991 void CodeGenFunction::EmitCheck( 2992 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, 2993 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs, 2994 ArrayRef<llvm::Value *> DynamicArgs) { 2995 assert(IsSanitizerScope); 2996 assert(Checked.size() > 0); 2997 assert(CheckHandler >= 0 && 2998 size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers)); 2999 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name; 3000 3001 llvm::Value *FatalCond = nullptr; 3002 llvm::Value *RecoverableCond = nullptr; 3003 llvm::Value *TrapCond = nullptr; 3004 for (int i = 0, n = Checked.size(); i < n; ++i) { 3005 llvm::Value *Check = Checked[i].first; 3006 // -fsanitize-trap= overrides -fsanitize-recover=. 3007 llvm::Value *&Cond = 3008 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) 3009 ? TrapCond 3010 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) 3011 ? RecoverableCond 3012 : FatalCond; 3013 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check; 3014 } 3015 3016 if (TrapCond) 3017 EmitTrapCheck(TrapCond); 3018 if (!FatalCond && !RecoverableCond) 3019 return; 3020 3021 llvm::Value *JointCond; 3022 if (FatalCond && RecoverableCond) 3023 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond); 3024 else 3025 JointCond = FatalCond ? FatalCond : RecoverableCond; 3026 assert(JointCond); 3027 3028 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second); 3029 assert(SanOpts.has(Checked[0].second)); 3030 #ifndef NDEBUG 3031 for (int i = 1, n = Checked.size(); i < n; ++i) { 3032 assert(RecoverKind == getRecoverableKind(Checked[i].second) && 3033 "All recoverable kinds in a single check must be same!"); 3034 assert(SanOpts.has(Checked[i].second)); 3035 } 3036 #endif 3037 3038 llvm::BasicBlock *Cont = createBasicBlock("cont"); 3039 llvm::BasicBlock *Handlers = createBasicBlock("handler." 
+ CheckName); 3040 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers); 3041 // Give hint that we very much don't expect to execute the handler 3042 // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp 3043 llvm::MDBuilder MDHelper(getLLVMContext()); 3044 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); 3045 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); 3046 EmitBlock(Handlers); 3047 3048 // Handler functions take an i8* pointing to the (handler-specific) static 3049 // information block, followed by a sequence of intptr_t arguments 3050 // representing operand values. 3051 SmallVector<llvm::Value *, 4> Args; 3052 SmallVector<llvm::Type *, 4> ArgTypes; 3053 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { 3054 Args.reserve(DynamicArgs.size() + 1); 3055 ArgTypes.reserve(DynamicArgs.size() + 1); 3056 3057 // Emit handler arguments and create handler function type. 3058 if (!StaticArgs.empty()) { 3059 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 3060 auto *InfoPtr = 3061 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, 3062 llvm::GlobalVariable::PrivateLinkage, Info); 3063 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3064 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); 3065 Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); 3066 ArgTypes.push_back(Int8PtrTy); 3067 } 3068 3069 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { 3070 Args.push_back(EmitCheckValue(DynamicArgs[i])); 3071 ArgTypes.push_back(IntPtrTy); 3072 } 3073 } 3074 3075 llvm::FunctionType *FnType = 3076 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); 3077 3078 if (!FatalCond || !RecoverableCond) { 3079 // Simple case: we need to generate a single handler call, either 3080 // fatal, or non-fatal. 3081 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, 3082 (FatalCond != nullptr), Cont); 3083 } else { 3084 // Emit two handler calls: first one for set of unrecoverable checks, 3085 // another one for recoverable. 3086 llvm::BasicBlock *NonFatalHandlerBB = 3087 createBasicBlock("non_fatal." + CheckName); 3088 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." 
+ CheckName); 3089 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB); 3090 EmitBlock(FatalHandlerBB); 3091 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true, 3092 NonFatalHandlerBB); 3093 EmitBlock(NonFatalHandlerBB); 3094 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false, 3095 Cont); 3096 } 3097 3098 EmitBlock(Cont); 3099 } 3100 3101 void CodeGenFunction::EmitCfiSlowPathCheck( 3102 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId, 3103 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) { 3104 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont"); 3105 3106 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath"); 3107 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB); 3108 3109 llvm::MDBuilder MDHelper(getLLVMContext()); 3110 llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); 3111 BI->setMetadata(llvm::LLVMContext::MD_prof, Node); 3112 3113 EmitBlock(CheckBB); 3114 3115 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind); 3116 3117 llvm::CallInst *CheckCall; 3118 llvm::FunctionCallee SlowPathFn; 3119 if (WithDiag) { 3120 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); 3121 auto *InfoPtr = 3122 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, 3123 llvm::GlobalVariable::PrivateLinkage, Info); 3124 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 3125 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); 3126 3127 SlowPathFn = CGM.getModule().getOrInsertFunction( 3128 "__cfi_slowpath_diag", 3129 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, 3130 false)); 3131 CheckCall = Builder.CreateCall( 3132 SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)}); 3133 } else { 3134 SlowPathFn = CGM.getModule().getOrInsertFunction( 3135 "__cfi_slowpath", 3136 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false)); 3137 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr}); 3138 } 3139 3140 CGM.setDSOLocal( 3141 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts())); 3142 CheckCall->setDoesNotThrow(); 3143 3144 EmitBlock(Cont); 3145 } 3146 3147 // Emit a stub for __cfi_check function so that the linker knows about this 3148 // symbol in LTO mode. 3149 void CodeGenFunction::EmitCfiCheckStub() { 3150 llvm::Module *M = &CGM.getModule(); 3151 auto &Ctx = M->getContext(); 3152 llvm::Function *F = llvm::Function::Create( 3153 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false), 3154 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M); 3155 CGM.setDSOLocal(F); 3156 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F); 3157 // FIXME: consider emitting an intrinsic call like 3158 // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2) 3159 // which can be lowered in CrossDSOCFI pass to the actual contents of 3160 // __cfi_check. This would allow inlining of __cfi_check calls. 3161 llvm::CallInst::Create( 3162 llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB); 3163 llvm::ReturnInst::Create(Ctx, nullptr, BB); 3164 } 3165 3166 // This function is basically a switch over the CFI failure kind, which is 3167 // extracted from CFICheckFailData (1st function argument). Each case is either 3168 // llvm.trap or a call to one of the two runtime handlers, based on 3169 // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid 3170 // failure kind) traps, but this should really never happen. 
CFICheckFailData 3171 // can be nullptr if the calling module has -fsanitize-trap behavior for this 3172 // check kind; in this case __cfi_check_fail traps as well. 3173 void CodeGenFunction::EmitCfiCheckFail() { 3174 SanitizerScope SanScope(this); 3175 FunctionArgList Args; 3176 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy, 3177 ImplicitParamDecl::Other); 3178 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy, 3179 ImplicitParamDecl::Other); 3180 Args.push_back(&ArgData); 3181 Args.push_back(&ArgAddr); 3182 3183 const CGFunctionInfo &FI = 3184 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args); 3185 3186 llvm::Function *F = llvm::Function::Create( 3187 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false), 3188 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule()); 3189 F->setVisibility(llvm::GlobalValue::HiddenVisibility); 3190 3191 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args, 3192 SourceLocation()); 3193 3194 // This function should not be affected by blacklist. This function does 3195 // not have a source location, but "src:*" would still apply. Revert any 3196 // changes to SanOpts made in StartFunction. 3197 SanOpts = CGM.getLangOpts().Sanitize; 3198 3199 llvm::Value *Data = 3200 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false, 3201 CGM.getContext().VoidPtrTy, ArgData.getLocation()); 3202 llvm::Value *Addr = 3203 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false, 3204 CGM.getContext().VoidPtrTy, ArgAddr.getLocation()); 3205 3206 // Data == nullptr means the calling module has trap behaviour for this check. 3207 llvm::Value *DataIsNotNullPtr = 3208 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy)); 3209 EmitTrapCheck(DataIsNotNullPtr); 3210 3211 llvm::StructType *SourceLocationTy = 3212 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty); 3213 llvm::StructType *CfiCheckFailDataTy = 3214 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy); 3215 3216 llvm::Value *V = Builder.CreateConstGEP2_32( 3217 CfiCheckFailDataTy, 3218 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0, 3219 0); 3220 Address CheckKindAddr(V, getIntAlign()); 3221 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr); 3222 3223 llvm::Value *AllVtables = llvm::MetadataAsValue::get( 3224 CGM.getLLVMContext(), 3225 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables")); 3226 llvm::Value *ValidVtable = Builder.CreateZExt( 3227 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), 3228 {Addr, AllVtables}), 3229 IntPtrTy); 3230 3231 const std::pair<int, SanitizerMask> CheckKinds[] = { 3232 {CFITCK_VCall, SanitizerKind::CFIVCall}, 3233 {CFITCK_NVCall, SanitizerKind::CFINVCall}, 3234 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast}, 3235 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast}, 3236 {CFITCK_ICall, SanitizerKind::CFIICall}}; 3237 3238 SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks; 3239 for (auto CheckKindMaskPair : CheckKinds) { 3240 int Kind = CheckKindMaskPair.first; 3241 SanitizerMask Mask = CheckKindMaskPair.second; 3242 llvm::Value *Cond = 3243 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind)); 3244 if (CGM.getLangOpts().Sanitize.has(Mask)) 3245 EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {}, 3246 {Data, Addr, ValidVtable}); 3247 else 3248 EmitTrapCheck(Cond); 3249 } 3250 3251 FinishFunction(); 3252 // The only reference to this function 
will be created during LTO link.
3253   // Make sure it survives until then.
3254   CGM.addUsedGlobal(F);
3255 }
3256
3257 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3258   if (SanOpts.has(SanitizerKind::Unreachable)) {
3259     SanitizerScope SanScope(this);
3260     EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3261                              SanitizerKind::Unreachable),
3262               SanitizerHandler::BuiltinUnreachable,
3263               EmitCheckSourceLocation(Loc), None);
3264   }
3265   Builder.CreateUnreachable();
3266 }
3267
3268 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
3269   llvm::BasicBlock *Cont = createBasicBlock("cont");
3270
3271   // If we're optimizing, collapse all calls to trap down to just one per
3272   // function to save on code size.
3273   if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
3274     TrapBB = createBasicBlock("trap");
3275     Builder.CreateCondBr(Checked, Cont, TrapBB);
3276     EmitBlock(TrapBB);
3277     llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3278     TrapCall->setDoesNotReturn();
3279     TrapCall->setDoesNotThrow();
3280     Builder.CreateUnreachable();
3281   } else {
3282     Builder.CreateCondBr(Checked, Cont, TrapBB);
3283   }
3284
3285   EmitBlock(Cont);
3286 }
3287
3288 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3289   llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3290
3291   if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3292     auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3293                                   CGM.getCodeGenOpts().TrapFuncName);
3294     TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
3295   }
3296
3297   return TrapCall;
3298 }
3299
3300 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3301                                                  LValueBaseInfo *BaseInfo,
3302                                                  TBAAAccessInfo *TBAAInfo) {
3303   assert(E->getType()->isArrayType() &&
3304          "Array to pointer decay must have array source type!");
3305
3306   // Expressions of array type can't be bitfields or vector elements.
3307   LValue LV = EmitLValue(E);
3308   Address Addr = LV.getAddress();
3309
3310   // If the array type was an incomplete type, we need to make sure
3311   // the decay ends up being the right type.
3312   llvm::Type *NewTy = ConvertType(E->getType());
3313   Addr = Builder.CreateElementBitCast(Addr, NewTy);
3314
3315   // Note that VLA pointers are always decayed, so we don't need to do
3316   // anything here.
3317   if (!E->getType()->isVariableArrayType()) {
3318     assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3319            "Expected pointer to array");
3320     Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3321   }
3322
3323   // The result of this decay conversion points to an array element within the
3324   // base lvalue. However, since TBAA currently does not support representing
3325   // accesses to elements of member arrays, we conservatively represent accesses
3326   // to the pointee object as if it had no base lvalue specified.
3327   // TODO: Support TBAA for member arrays.
3328   QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3329   if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3330   if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3331
3332   return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
3333 }
3334
3335 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3336 /// array to pointer, return the array subexpression.
3337 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3338   // If this isn't just an array->pointer decay, bail out.
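  // For example, given "int A[10]; ... A[i] ...", the base of the subscript
  // is an ImplicitCastExpr<ArrayToPointerDecay> around a DeclRefExpr for A;
  // we return that subexpression so the caller can emit a single GEP.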
3339   const auto *CE = dyn_cast<CastExpr>(E);
3340   if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3341     return nullptr;
3342
3343   // If this is a decay from a variable-length array, bail out.
3344   const Expr *SubExpr = CE->getSubExpr();
3345   if (SubExpr->getType()->isVariableArrayType())
3346     return nullptr;
3347
3348   return SubExpr;
3349 }
3350
3351 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3352                                           llvm::Value *ptr,
3353                                           ArrayRef<llvm::Value*> indices,
3354                                           bool inbounds,
3355                                           bool signedIndices,
3356                                           SourceLocation loc,
3357                                     const llvm::Twine &name = "arrayidx") {
3358   if (inbounds) {
3359     return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
3360                                       CodeGenFunction::NotSubtraction, loc,
3361                                       name);
3362   } else {
3363     return CGF.Builder.CreateGEP(ptr, indices, name);
3364   }
3365 }
3366
3367 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3368                                       llvm::Value *idx,
3369                                       CharUnits eltSize) {
3370   // If we have a constant index, we can use the exact offset of the
3371   // element we're accessing.
3372   if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3373     CharUnits offset = constantIdx->getZExtValue() * eltSize;
3374     return arrayAlign.alignmentAtOffset(offset);
3375
3376   // Otherwise, use the worst-case alignment for any element.
3377   } else {
3378     return arrayAlign.alignmentOfArrayElement(eltSize);
3379   }
3380 }
3381
3382 static QualType getFixedSizeElementType(const ASTContext &ctx,
3383                                         const VariableArrayType *vla) {
3384   QualType eltType;
3385   do {
3386     eltType = vla->getElementType();
3387   } while ((vla = ctx.getAsVariableArrayType(eltType)));
3388   return eltType;
3389 }
3390
3391 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3392                                      ArrayRef<llvm::Value *> indices,
3393                                      QualType eltType, bool inbounds,
3394                                      bool signedIndices, SourceLocation loc,
3395                                      const llvm::Twine &name = "arrayidx") {
3396   // All the indices except the last must be zero.
3397 #ifndef NDEBUG
3398   for (auto idx : indices.drop_back())
3399     assert(isa<llvm::ConstantInt>(idx) &&
3400            cast<llvm::ConstantInt>(idx)->isZero());
3401 #endif
3402
3403   // Determine the element size of the statically-sized base. This is
3404   // the thing that the indices are expressed in terms of.
3405   if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
3406     eltType = getFixedSizeElementType(CGF.getContext(), vla);
3407   }
3408
3409   // We can use that to compute the best alignment of the element.
3410   CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
3411   CharUnits eltAlign =
3412     getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
3413
3414   llvm::Value *eltPtr = emitArraySubscriptGEP(
3415       CGF, addr.getPointer(), indices, inbounds, signedIndices, loc, name);
3416   return Address(eltPtr, eltAlign);
3417 }
3418
3419 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3420                                                bool Accessed) {
3421   // The index must always be an integer, which is not an aggregate. Emit it
3422   // in lexical order (this complexity is, sadly, required by C++17).
3423   llvm::Value *IdxPre =
3424       (E->getLHS() == E->getIdx()) ?
EmitScalarExpr(E->getIdx()) : nullptr; 3425 bool SignedIndices = false; 3426 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * { 3427 auto *Idx = IdxPre; 3428 if (E->getLHS() != E->getIdx()) { 3429 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS"); 3430 Idx = EmitScalarExpr(E->getIdx()); 3431 } 3432 3433 QualType IdxTy = E->getIdx()->getType(); 3434 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); 3435 SignedIndices |= IdxSigned; 3436 3437 if (SanOpts.has(SanitizerKind::ArrayBounds)) 3438 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); 3439 3440 // Extend or truncate the index type to 32 or 64-bits. 3441 if (Promote && Idx->getType() != IntPtrTy) 3442 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); 3443 3444 return Idx; 3445 }; 3446 IdxPre = nullptr; 3447 3448 // If the base is a vector type, then we are forming a vector element lvalue 3449 // with this subscript. 3450 if (E->getBase()->getType()->isVectorType() && 3451 !isa<ExtVectorElementExpr>(E->getBase())) { 3452 // Emit the vector as an lvalue to get its address. 3453 LValue LHS = EmitLValue(E->getBase()); 3454 auto *Idx = EmitIdxAfterBase(/*Promote*/false); 3455 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); 3456 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(), 3457 LHS.getBaseInfo(), TBAAAccessInfo()); 3458 } 3459 3460 // All the other cases basically behave like simple offsetting. 3461 3462 // Handle the extvector case we ignored above. 3463 if (isa<ExtVectorElementExpr>(E->getBase())) { 3464 LValue LV = EmitLValue(E->getBase()); 3465 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3466 Address Addr = EmitExtVectorElementLValue(LV); 3467 3468 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType(); 3469 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true, 3470 SignedIndices, E->getExprLoc()); 3471 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(), 3472 CGM.getTBAAInfoForSubobject(LV, EltType)); 3473 } 3474 3475 LValueBaseInfo EltBaseInfo; 3476 TBAAAccessInfo EltTBAAInfo; 3477 Address Addr = Address::invalid(); 3478 if (const VariableArrayType *vla = 3479 getContext().getAsVariableArrayType(E->getType())) { 3480 // The base must be a pointer, which is not an aggregate. Emit 3481 // it. It needs to be emitted first in case it's what captures 3482 // the VLA bounds. 3483 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); 3484 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3485 3486 // The element count here is the total number of non-VLA elements. 3487 llvm::Value *numElements = getVLASize(vla).NumElts; 3488 3489 // Effectively, the multiply by the VLA size is part of the GEP. 3490 // GEP indexes are signed, and scaling an index isn't permitted to 3491 // signed-overflow, so we use the same semantics for our explicit 3492 // multiply. We suppress this if overflow is not undefined behavior. 3493 if (getLangOpts().isSignedOverflowDefined()) { 3494 Idx = Builder.CreateMul(Idx, numElements); 3495 } else { 3496 Idx = Builder.CreateNSWMul(Idx, numElements); 3497 } 3498 3499 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(), 3500 !getLangOpts().isSignedOverflowDefined(), 3501 SignedIndices, E->getExprLoc()); 3502 3503 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ 3504 // Indexing over an interface, as in "NSString *P; P[4];" 3505 3506 // Emit the base pointer. 
3507     Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3508     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3509
3510     CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
3511     llvm::Value *InterfaceSizeVal =
3512         llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
3513
3514     llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
3515
3516     // We don't necessarily build correct LLVM struct types for ObjC
3517     // interfaces, so we can't rely on GEP to do this scaling
3518     // correctly; instead we cast to i8*. FIXME: is this actually
3519     // true? A lot of other things in the fragile ABI would break...
3520     llvm::Type *OrigBaseTy = Addr.getType();
3521     Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
3522
3523     // Do the GEP.
3524     CharUnits EltAlign =
3525       getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
3526     llvm::Value *EltPtr =
3527         emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false,
3528                               SignedIndices, E->getExprLoc());
3529     Addr = Address(EltPtr, EltAlign);
3530
3531     // Cast back.
3532     Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
3533   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3534     // If this is A[i] where A is an array, the frontend will have decayed the
3535     // base to be an ArrayToPointerDecay implicit cast. While correct, it is
3536     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3537     // "gep x, i" here. Emit one "gep A, 0, i".
3538     assert(Array->getType()->isArrayType() &&
3539            "Array to pointer decay must have array source type!");
3540     LValue ArrayLV;
3541     // For simple multidimensional array indexing, set the 'accessed' flag for
3542     // better bounds-checking of the base expression.
3543     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3544       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3545     else
3546       ArrayLV = EmitLValue(Array);
3547     auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3548
3549     // Propagate the alignment from the array itself to the result.
3550     Addr = emitArraySubscriptGEP(
3551         *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
3552         E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
3553         E->getExprLoc());
3554     EltBaseInfo = ArrayLV.getBaseInfo();
3555     EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
3556   } else {
3557     // The base must be a pointer; emit it with an estimate of its alignment.
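    // For example, "int *p; ... p[i]" lands here: the subscript GEPs directly
    // off the loaded pointer value rather than off an array lvalue.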
3558 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo); 3559 auto *Idx = EmitIdxAfterBase(/*Promote*/true); 3560 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(), 3561 !getLangOpts().isSignedOverflowDefined(), 3562 SignedIndices, E->getExprLoc()); 3563 } 3564 3565 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo); 3566 3567 if (getLangOpts().ObjC && 3568 getLangOpts().getGC() != LangOptions::NonGC) { 3569 LV.setNonGC(!E->isOBJCGCCandidate(getContext())); 3570 setObjCGCLValueClass(getContext(), E, LV); 3571 } 3572 return LV; 3573 } 3574 3575 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, 3576 LValueBaseInfo &BaseInfo, 3577 TBAAAccessInfo &TBAAInfo, 3578 QualType BaseTy, QualType ElTy, 3579 bool IsLowerBound) { 3580 LValue BaseLVal; 3581 if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) { 3582 BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound); 3583 if (BaseTy->isArrayType()) { 3584 Address Addr = BaseLVal.getAddress(); 3585 BaseInfo = BaseLVal.getBaseInfo(); 3586 3587 // If the array type was an incomplete type, we need to make sure 3588 // the decay ends up being the right type. 3589 llvm::Type *NewTy = CGF.ConvertType(BaseTy); 3590 Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy); 3591 3592 // Note that VLA pointers are always decayed, so we don't need to do 3593 // anything here. 3594 if (!BaseTy->isVariableArrayType()) { 3595 assert(isa<llvm::ArrayType>(Addr.getElementType()) && 3596 "Expected pointer to array"); 3597 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay"); 3598 } 3599 3600 return CGF.Builder.CreateElementBitCast(Addr, 3601 CGF.ConvertTypeForMem(ElTy)); 3602 } 3603 LValueBaseInfo TypeBaseInfo; 3604 TBAAAccessInfo TypeTBAAInfo; 3605 CharUnits Align = CGF.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, 3606 &TypeTBAAInfo); 3607 BaseInfo.mergeForCast(TypeBaseInfo); 3608 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo); 3609 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()), Align); 3610 } 3611 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo); 3612 } 3613 3614 LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E, 3615 bool IsLowerBound) { 3616 QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase()); 3617 QualType ResultExprTy; 3618 if (auto *AT = getContext().getAsArrayType(BaseTy)) 3619 ResultExprTy = AT->getElementType(); 3620 else 3621 ResultExprTy = BaseTy->getPointeeType(); 3622 llvm::Value *Idx = nullptr; 3623 if (IsLowerBound || E->getColonLoc().isInvalid()) { 3624 // Requesting lower bound or upper bound, but without provided length and 3625 // without ':' symbol for the default length -> length = 1. 3626 // Idx = LowerBound ?: 0; 3627 if (auto *LowerBound = E->getLowerBound()) { 3628 Idx = Builder.CreateIntCast( 3629 EmitScalarExpr(LowerBound), IntPtrTy, 3630 LowerBound->getType()->hasSignedIntegerRepresentation()); 3631 } else 3632 Idx = llvm::ConstantInt::getNullValue(IntPtrTy); 3633 } else { 3634 // Try to emit length or lower bound as constant. If this is possible, 1 3635 // is subtracted from constant length or lower bound. Otherwise, emit LLVM 3636 // IR (LB + Len) - 1. 
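    // Worked example: for a section "a[2:4]", LB = 2 and Len = 4, so the
    // upper-bound index is 2 + 4 - 1 = 5; when both operands fold to
    // constants, the sum below is emitted as a single constant.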
3637 auto &C = CGM.getContext(); 3638 auto *Length = E->getLength(); 3639 llvm::APSInt ConstLength; 3640 if (Length) { 3641 // Idx = LowerBound + Length - 1; 3642 if (Length->isIntegerConstantExpr(ConstLength, C)) { 3643 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits); 3644 Length = nullptr; 3645 } 3646 auto *LowerBound = E->getLowerBound(); 3647 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false); 3648 if (LowerBound && LowerBound->isIntegerConstantExpr(ConstLowerBound, C)) { 3649 ConstLowerBound = ConstLowerBound.zextOrTrunc(PointerWidthInBits); 3650 LowerBound = nullptr; 3651 } 3652 if (!Length) 3653 --ConstLength; 3654 else if (!LowerBound) 3655 --ConstLowerBound; 3656 3657 if (Length || LowerBound) { 3658 auto *LowerBoundVal = 3659 LowerBound 3660 ? Builder.CreateIntCast( 3661 EmitScalarExpr(LowerBound), IntPtrTy, 3662 LowerBound->getType()->hasSignedIntegerRepresentation()) 3663 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound); 3664 auto *LengthVal = 3665 Length 3666 ? Builder.CreateIntCast( 3667 EmitScalarExpr(Length), IntPtrTy, 3668 Length->getType()->hasSignedIntegerRepresentation()) 3669 : llvm::ConstantInt::get(IntPtrTy, ConstLength); 3670 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len", 3671 /*HasNUW=*/false, 3672 !getLangOpts().isSignedOverflowDefined()); 3673 if (Length && LowerBound) { 3674 Idx = Builder.CreateSub( 3675 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1", 3676 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); 3677 } 3678 } else 3679 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound); 3680 } else { 3681 // Idx = ArraySize - 1; 3682 QualType ArrayTy = BaseTy->isPointerType() 3683 ? E->getBase()->IgnoreParenImpCasts()->getType() 3684 : BaseTy; 3685 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) { 3686 Length = VAT->getSizeExpr(); 3687 if (Length->isIntegerConstantExpr(ConstLength, C)) 3688 Length = nullptr; 3689 } else { 3690 auto *CAT = C.getAsConstantArrayType(ArrayTy); 3691 ConstLength = CAT->getSize(); 3692 } 3693 if (Length) { 3694 auto *LengthVal = Builder.CreateIntCast( 3695 EmitScalarExpr(Length), IntPtrTy, 3696 Length->getType()->hasSignedIntegerRepresentation()); 3697 Idx = Builder.CreateSub( 3698 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1", 3699 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined()); 3700 } else { 3701 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits); 3702 --ConstLength; 3703 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength); 3704 } 3705 } 3706 } 3707 assert(Idx); 3708 3709 Address EltPtr = Address::invalid(); 3710 LValueBaseInfo BaseInfo; 3711 TBAAAccessInfo TBAAInfo; 3712 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) { 3713 // The base must be a pointer, which is not an aggregate. Emit 3714 // it. It needs to be emitted first in case it's what captures 3715 // the VLA bounds. 3716 Address Base = 3717 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, 3718 BaseTy, VLA->getElementType(), IsLowerBound); 3719 // The element count here is the total number of non-VLA elements. 3720 llvm::Value *NumElements = getVLASize(VLA).NumElts; 3721 3722 // Effectively, the multiply by the VLA size is part of the GEP. 3723 // GEP indexes are signed, and scaling an index isn't permitted to 3724 // signed-overflow, so we use the same semantics for our explicit 3725 // multiply. We suppress this if overflow is not undefined behavior. 
3726     if (getLangOpts().isSignedOverflowDefined())
3727       Idx = Builder.CreateMul(Idx, NumElements);
3728     else
3729       Idx = Builder.CreateNSWMul(Idx, NumElements);
3730     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
3731                                    !getLangOpts().isSignedOverflowDefined(),
3732                                    /*SignedIndices=*/false, E->getExprLoc());
3733   } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3734     // If this is A[i] where A is an array, the frontend will have decayed the
3735     // base to be an ArrayToPointerDecay implicit cast. While correct, it is
3736     // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3737     // "gep x, i" here. Emit one "gep A, 0, i".
3738     assert(Array->getType()->isArrayType() &&
3739            "Array to pointer decay must have array source type!");
3740     LValue ArrayLV;
3741     // For simple multidimensional array indexing, set the 'accessed' flag for
3742     // better bounds-checking of the base expression.
3743     if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3744       ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3745     else
3746       ArrayLV = EmitLValue(Array);
3747
3748     // Propagate the alignment from the array itself to the result.
3749     EltPtr = emitArraySubscriptGEP(
3750         *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
3751         ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
3752         /*SignedIndices=*/false, E->getExprLoc());
3753     BaseInfo = ArrayLV.getBaseInfo();
3754     TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
3755   } else {
3756     Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
3757                                            TBAAInfo, BaseTy, ResultExprTy,
3758                                            IsLowerBound);
3759     EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
3760                                    !getLangOpts().isSignedOverflowDefined(),
3761                                    /*SignedIndices=*/false, E->getExprLoc());
3762   }
3763
3764   return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
3765 }
3766
3767 LValue CodeGenFunction::
3768 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
3769   // Emit the base vector as an l-value.
3770   LValue Base;
3771
3772   // ExtVectorElementExpr's base can either be a vector or a pointer to a
3773   // vector.
3773   if (E->isArrow()) {
3774     // If it is a pointer to a vector, emit the address and form an lvalue with
3775     // it.
3776     LValueBaseInfo BaseInfo;
3777     TBAAAccessInfo TBAAInfo;
3778     Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
3779     const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
3780     Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
3781     Base.getQuals().removeObjCGCAttr();
3782   } else if (E->getBase()->isGLValue()) {
3783     // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
3784     // emit the base as an lvalue.
3785     assert(E->getBase()->getType()->isVectorType());
3786     Base = EmitLValue(E->getBase());
3787   } else {
3788     // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
3789     assert(E->getBase()->getType()->isVectorType() &&
3790            "Result must be a vector");
3791     llvm::Value *Vec = EmitScalarExpr(E->getBase());
3792
3793     // Store the vector to memory (because LValue wants an address).
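    // For example, for "(V+V).x" the vector sum exists only as an SSA value,
    // so we spill it to a temporary in order to have an address to index.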
3794 Address VecMem = CreateMemTemp(E->getBase()->getType()); 3795 Builder.CreateStore(Vec, VecMem); 3796 Base = MakeAddrLValue(VecMem, E->getBase()->getType(), 3797 AlignmentSource::Decl); 3798 } 3799 3800 QualType type = 3801 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); 3802 3803 // Encode the element access list into a vector of unsigned indices. 3804 SmallVector<uint32_t, 4> Indices; 3805 E->getEncodedElementAccess(Indices); 3806 3807 if (Base.isSimple()) { 3808 llvm::Constant *CV = 3809 llvm::ConstantDataVector::get(getLLVMContext(), Indices); 3810 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type, 3811 Base.getBaseInfo(), TBAAAccessInfo()); 3812 } 3813 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); 3814 3815 llvm::Constant *BaseElts = Base.getExtVectorElts(); 3816 SmallVector<llvm::Constant *, 4> CElts; 3817 3818 for (unsigned i = 0, e = Indices.size(); i != e; ++i) 3819 CElts.push_back(BaseElts->getAggregateElement(Indices[i])); 3820 llvm::Constant *CV = llvm::ConstantVector::get(CElts); 3821 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type, 3822 Base.getBaseInfo(), TBAAAccessInfo()); 3823 } 3824 3825 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { 3826 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) { 3827 EmitIgnoredExpr(E->getBase()); 3828 return EmitDeclRefLValue(DRE); 3829 } 3830 3831 Expr *BaseExpr = E->getBase(); 3832 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 3833 LValue BaseLV; 3834 if (E->isArrow()) { 3835 LValueBaseInfo BaseInfo; 3836 TBAAAccessInfo TBAAInfo; 3837 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo); 3838 QualType PtrTy = BaseExpr->getType()->getPointeeType(); 3839 SanitizerSet SkippedChecks; 3840 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr); 3841 if (IsBaseCXXThis) 3842 SkippedChecks.set(SanitizerKind::Alignment, true); 3843 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr)) 3844 SkippedChecks.set(SanitizerKind::Null, true); 3845 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy, 3846 /*Alignment=*/CharUnits::Zero(), SkippedChecks); 3847 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo); 3848 } else 3849 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); 3850 3851 NamedDecl *ND = E->getMemberDecl(); 3852 if (auto *Field = dyn_cast<FieldDecl>(ND)) { 3853 LValue LV = EmitLValueForField(BaseLV, Field); 3854 setObjCGCLValueClass(getContext(), E, LV); 3855 return LV; 3856 } 3857 3858 if (const auto *FD = dyn_cast<FunctionDecl>(ND)) 3859 return EmitFunctionDeclLValue(*this, E, FD); 3860 3861 llvm_unreachable("Unhandled member declaration!"); 3862 } 3863 3864 /// Given that we are currently emitting a lambda, emit an l-value for 3865 /// one of its members. 3866 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { 3867 assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda()); 3868 assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent()); 3869 QualType LambdaTagType = 3870 getContext().getTagDeclType(Field->getParent()); 3871 LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType); 3872 return EmitLValueForField(LambdaLV, Field); 3873 } 3874 3875 /// Drill down to the storage of a field without walking into 3876 /// reference types. 3877 /// 3878 /// The resulting address doesn't necessarily have the right type. 
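/// For example, for a field declared "int &r", this returns the address of
/// the slot that stores the reference (not of the int it refers to); callers
/// such as EmitLValueForField load through that slot separately.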
3879 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
3880                                       const FieldDecl *field) {
3881   const RecordDecl *rec = field->getParent();
3882
3883   unsigned idx =
3884     CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
3885
3886   return CGF.Builder.CreateStructGEP(base, idx, field->getName());
3887 }
3888
3889 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
3890   const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
3891   if (!RD)
3892     return false;
3893
3894   if (RD->isDynamicClass())
3895     return true;
3896
3897   for (const auto &Base : RD->bases())
3898     if (hasAnyVptr(Base.getType(), Context))
3899       return true;
3900
3901   for (const FieldDecl *Field : RD->fields())
3902     if (hasAnyVptr(Field->getType(), Context))
3903       return true;
3904
3905   return false;
3906 }
3907
3908 LValue CodeGenFunction::EmitLValueForField(LValue base,
3909                                            const FieldDecl *field) {
3910   LValueBaseInfo BaseInfo = base.getBaseInfo();
3911
3912   if (field->isBitField()) {
3913     const CGRecordLayout &RL =
3914       CGM.getTypes().getCGRecordLayout(field->getParent());
3915     const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
3916     Address Addr = base.getAddress();
3917     unsigned Idx = RL.getLLVMFieldNo(field);
3918     if (Idx != 0)
3919       // For structs, we GEP to the field that the record layout suggests.
3920       Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
3921     // Get the access type.
3922     llvm::Type *FieldIntTy =
3923       llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
3924     if (Addr.getElementType() != FieldIntTy)
3925       Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
3926
3927     QualType fieldType =
3928       field->getType().withCVRQualifiers(base.getVRQualifiers());
3929     // TODO: Support TBAA for bit fields.
3930     LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
3931     return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
3932                                 TBAAAccessInfo());
3933   }
3934
3935   // Fields of may-alias structures are may-alias themselves.
3936   // FIXME: this should get propagated down through anonymous structs
3937   // and unions.
3938   QualType FieldType = field->getType();
3939   const RecordDecl *rec = field->getParent();
3940   AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
3941   LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
3942   TBAAAccessInfo FieldTBAAInfo;
3943   if (base.getTBAAInfo().isMayAlias() ||
3944           rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
3945     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
3946   } else if (rec->isUnion()) {
3947     // TODO: Support TBAA for unions.
3948     FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
3949   } else {
3950     // If no base type has been assigned for the base access, then try to
3951     // generate one for this base lvalue.
3952     FieldTBAAInfo = base.getTBAAInfo();
3953     if (!FieldTBAAInfo.BaseType) {
3954       FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
3955       assert(!FieldTBAAInfo.Offset &&
3956              "Nonzero offset for an access with no base type!");
3957     }
3958
3959     // Adjust offset to be relative to the base type.
3960     const ASTRecordLayout &Layout =
3961         getContext().getASTRecordLayout(field->getParent());
3962     unsigned CharWidth = getContext().getCharWidth();
3963     if (FieldTBAAInfo.BaseType)
3964       FieldTBAAInfo.Offset +=
3965           Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
3966
3967     // Update the final access type and size.
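    // Illustrative result: for "struct S { int a; float b; }; ... s.b ...",
    // the access is described with base type S, access type float, and
    // offset 4 (assuming a 4-byte int and no padding).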
3968     FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
3969     FieldTBAAInfo.Size =
3970         getContext().getTypeSizeInChars(FieldType).getQuantity();
3971   }
3972
3973   Address addr = base.getAddress();
3974   if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
3975     if (CGM.getCodeGenOpts().StrictVTablePointers &&
3976         ClassDef->isDynamicClass()) {
3977       // Getting to any field of a dynamic object requires stripping dynamic
3978       // information provided by invariant.group. This is because accessing
3979       // fields may leak the real address of the dynamic object, which could
3980       // result in miscompilation when the leaked pointer is compared.
3981       auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
3982       addr = Address(stripped, addr.getAlignment());
3983     }
3984   }
3985
3986   unsigned RecordCVR = base.getVRQualifiers();
3987   if (rec->isUnion()) {
3988     // For unions, there is no pointer adjustment.
3989     assert(!FieldType->isReferenceType() && "union has reference member");
3990     if (CGM.getCodeGenOpts().StrictVTablePointers &&
3991         hasAnyVptr(FieldType, getContext()))
3992       // Because unions can easily skip invariant.group barriers, we need to
3993       // add a barrier every time a CXXRecord field with a vptr is referenced.
3994       addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
3995                      addr.getAlignment());
3996   } else {
3997     // For structs, we GEP to the field that the record layout suggests.
3998     addr = emitAddrOfFieldStorage(*this, addr, field);
3999
4000     // If this is a reference field, load the reference right now.
4001     if (FieldType->isReferenceType()) {
4002       LValue RefLVal = MakeAddrLValue(addr, FieldType, FieldBaseInfo,
4003                                       FieldTBAAInfo);
4004       if (RecordCVR & Qualifiers::Volatile)
4005         RefLVal.getQuals().addVolatile();
4006       addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4007
4008       // Qualifiers on the struct don't apply to the referencee.
4009       RecordCVR = 0;
4010       FieldType = FieldType->getPointeeType();
4011     }
4012   }
4013
4014   // Make sure that the address is pointing to the right type. This is critical
4015   // for both unions and structs. A union needs a bitcast; a struct element
4016   // needs a bitcast if the LLVM type laid out doesn't match the desired
4017   // type.
4018   addr = Builder.CreateElementBitCast(
4019       addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
4020
4021   if (field->hasAttr<AnnotateAttr>())
4022     addr = EmitFieldAnnotations(field, addr);
4023
4024   LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4025   LV.getQuals().addCVRQualifiers(RecordCVR);
4026
4027   // __weak attribute on a field is ignored.
4028   if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
4029     LV.getQuals().removeObjCGCAttr();
4030
4031   return LV;
4032 }
4033
4034 LValue
4035 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
4036                                                   const FieldDecl *Field) {
4037   QualType FieldType = Field->getType();
4038
4039   if (!FieldType->isReferenceType())
4040     return EmitLValueForField(Base, Field);
4041
4042   Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
4043
4044   // Make sure that the address is pointing to the right type.
4045   llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4046   V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
4047
4048   // TODO: Generate TBAA information that describes this access as a structure
4049   // member access and not just an access to an object of the field's type. This
4050   // should be similar to what we do in EmitLValueForField().
4051 LValueBaseInfo BaseInfo = Base.getBaseInfo(); 4052 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource(); 4053 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource)); 4054 return MakeAddrLValue(V, FieldType, FieldBaseInfo, 4055 CGM.getTBAAInfoForSubobject(Base, FieldType)); 4056 } 4057 4058 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ 4059 if (E->isFileScope()) { 4060 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); 4061 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl); 4062 } 4063 if (E->getType()->isVariablyModifiedType()) 4064 // make sure to emit the VLA size. 4065 EmitVariablyModifiedType(E->getType()); 4066 4067 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); 4068 const Expr *InitExpr = E->getInitializer(); 4069 LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl); 4070 4071 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), 4072 /*Init*/ true); 4073 4074 return Result; 4075 } 4076 4077 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { 4078 if (!E->isGLValue()) 4079 // Initializing an aggregate temporary in C++11: T{...}. 4080 return EmitAggExprToLValue(E); 4081 4082 // An lvalue initializer list must be initializing a reference. 4083 assert(E->isTransparent() && "non-transparent glvalue init list"); 4084 return EmitLValue(E->getInit(0)); 4085 } 4086 4087 /// Emit the operand of a glvalue conditional operator. This is either a glvalue 4088 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no 4089 /// LValue is returned and the current block has been terminated. 4090 static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, 4091 const Expr *Operand) { 4092 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) { 4093 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false); 4094 return None; 4095 } 4096 4097 return CGF.EmitLValue(Operand); 4098 } 4099 4100 LValue CodeGenFunction:: 4101 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { 4102 if (!expr->isGLValue()) { 4103 // ?: here should be an aggregate. 4104 assert(hasAggregateEvaluationKind(expr->getType()) && 4105 "Unexpected conditional operator!"); 4106 return EmitAggExprToLValue(expr); 4107 } 4108 4109 OpaqueValueMapping binding(*this, expr); 4110 4111 const Expr *condExpr = expr->getCond(); 4112 bool CondExprBool; 4113 if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { 4114 const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); 4115 if (!CondExprBool) std::swap(live, dead); 4116 4117 if (!ContainsLabel(dead)) { 4118 // If the true case is live, we need to track its region. 4119 if (CondExprBool) 4120 incrementProfileCounter(expr); 4121 return EmitLValue(live); 4122 } 4123 } 4124 4125 llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); 4126 llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); 4127 llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); 4128 4129 ConditionalEvaluation eval(*this); 4130 EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr)); 4131 4132 // Any temporaries created here are conditional. 
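  // For example, in "cond ? x : throw e", the throw arm terminates its block
  // and yields no lvalue, so only the "x" arm reaches the join block below.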
4133 EmitBlock(lhsBlock); 4134 incrementProfileCounter(expr); 4135 eval.begin(*this); 4136 Optional<LValue> lhs = 4137 EmitLValueOrThrowExpression(*this, expr->getTrueExpr()); 4138 eval.end(*this); 4139 4140 if (lhs && !lhs->isSimple()) 4141 return EmitUnsupportedLValue(expr, "conditional operator"); 4142 4143 lhsBlock = Builder.GetInsertBlock(); 4144 if (lhs) 4145 Builder.CreateBr(contBlock); 4146 4147 // Any temporaries created here are conditional. 4148 EmitBlock(rhsBlock); 4149 eval.begin(*this); 4150 Optional<LValue> rhs = 4151 EmitLValueOrThrowExpression(*this, expr->getFalseExpr()); 4152 eval.end(*this); 4153 if (rhs && !rhs->isSimple()) 4154 return EmitUnsupportedLValue(expr, "conditional operator"); 4155 rhsBlock = Builder.GetInsertBlock(); 4156 4157 EmitBlock(contBlock); 4158 4159 if (lhs && rhs) { 4160 llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(), 4161 2, "cond-lvalue"); 4162 phi->addIncoming(lhs->getPointer(), lhsBlock); 4163 phi->addIncoming(rhs->getPointer(), rhsBlock); 4164 Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment())); 4165 AlignmentSource alignSource = 4166 std::max(lhs->getBaseInfo().getAlignmentSource(), 4167 rhs->getBaseInfo().getAlignmentSource()); 4168 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator( 4169 lhs->getTBAAInfo(), rhs->getTBAAInfo()); 4170 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource), 4171 TBAAInfo); 4172 } else { 4173 assert((lhs || rhs) && 4174 "both operands of glvalue conditional are throw-expressions?"); 4175 return lhs ? *lhs : *rhs; 4176 } 4177 } 4178 4179 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference 4180 /// type. If the cast is to a reference, we can have the usual lvalue result, 4181 /// otherwise if a cast is needed by the code generator in an lvalue context, 4182 /// then it must mean that we need the address of an aggregate in order to 4183 /// access one of its members. This can happen for all the reasons that casts 4184 /// are permitted with aggregate result, including noop aggregate casts, and 4185 /// cast from scalar to union. 
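/// For example, "(Base &)d" on an object d of derived type is a
/// DerivedToBase cast in an lvalue context; the result is the address of
/// d's Base subobject.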
4186 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { 4187 switch (E->getCastKind()) { 4188 case CK_ToVoid: 4189 case CK_BitCast: 4190 case CK_ArrayToPointerDecay: 4191 case CK_FunctionToPointerDecay: 4192 case CK_NullToMemberPointer: 4193 case CK_NullToPointer: 4194 case CK_IntegralToPointer: 4195 case CK_PointerToIntegral: 4196 case CK_PointerToBoolean: 4197 case CK_VectorSplat: 4198 case CK_IntegralCast: 4199 case CK_BooleanToSignedIntegral: 4200 case CK_IntegralToBoolean: 4201 case CK_IntegralToFloating: 4202 case CK_FloatingToIntegral: 4203 case CK_FloatingToBoolean: 4204 case CK_FloatingCast: 4205 case CK_FloatingRealToComplex: 4206 case CK_FloatingComplexToReal: 4207 case CK_FloatingComplexToBoolean: 4208 case CK_FloatingComplexCast: 4209 case CK_FloatingComplexToIntegralComplex: 4210 case CK_IntegralRealToComplex: 4211 case CK_IntegralComplexToReal: 4212 case CK_IntegralComplexToBoolean: 4213 case CK_IntegralComplexCast: 4214 case CK_IntegralComplexToFloatingComplex: 4215 case CK_DerivedToBaseMemberPointer: 4216 case CK_BaseToDerivedMemberPointer: 4217 case CK_MemberPointerToBoolean: 4218 case CK_ReinterpretMemberPointer: 4219 case CK_AnyPointerToBlockPointerCast: 4220 case CK_ARCProduceObject: 4221 case CK_ARCConsumeObject: 4222 case CK_ARCReclaimReturnedObject: 4223 case CK_ARCExtendBlockObject: 4224 case CK_CopyAndAutoreleaseBlockObject: 4225 case CK_IntToOCLSampler: 4226 case CK_FixedPointCast: 4227 case CK_FixedPointToBoolean: 4228 case CK_FixedPointToIntegral: 4229 case CK_IntegralToFixedPoint: 4230 return EmitUnsupportedLValue(E, "unexpected cast lvalue"); 4231 4232 case CK_Dependent: 4233 llvm_unreachable("dependent cast kind in IR gen!"); 4234 4235 case CK_BuiltinFnToFnPtr: 4236 llvm_unreachable("builtin functions are handled elsewhere"); 4237 4238 // These are never l-values; just use the aggregate emission code. 4239 case CK_NonAtomicToAtomic: 4240 case CK_AtomicToNonAtomic: 4241 return EmitAggExprToLValue(E); 4242 4243 case CK_Dynamic: { 4244 LValue LV = EmitLValue(E->getSubExpr()); 4245 Address V = LV.getAddress(); 4246 const auto *DCE = cast<CXXDynamicCastExpr>(E); 4247 return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType()); 4248 } 4249 4250 case CK_ConstructorConversion: 4251 case CK_UserDefinedConversion: 4252 case CK_CPointerToObjCPointerCast: 4253 case CK_BlockPointerToObjCPointerCast: 4254 case CK_NoOp: 4255 case CK_LValueToRValue: 4256 return EmitLValue(E->getSubExpr()); 4257 4258 case CK_UncheckedDerivedToBase: 4259 case CK_DerivedToBase: { 4260 const RecordType *DerivedClassTy = 4261 E->getSubExpr()->getType()->getAs<RecordType>(); 4262 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 4263 4264 LValue LV = EmitLValue(E->getSubExpr()); 4265 Address This = LV.getAddress(); 4266 4267 // Perform the derived-to-base conversion 4268 Address Base = GetAddressOfBaseClass( 4269 This, DerivedClassDecl, E->path_begin(), E->path_end(), 4270 /*NullCheckValue=*/false, E->getExprLoc()); 4271 4272 // TODO: Support accesses to members of base classes in TBAA. For now, we 4273 // conservatively pretend that the complete object is of the base class 4274 // type. 
4275 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(), 4276 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4277 } 4278 case CK_ToUnion: 4279 return EmitAggExprToLValue(E); 4280 case CK_BaseToDerived: { 4281 const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); 4282 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); 4283 4284 LValue LV = EmitLValue(E->getSubExpr()); 4285 4286 // Perform the base-to-derived conversion 4287 Address Derived = 4288 GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, 4289 E->path_begin(), E->path_end(), 4290 /*NullCheckValue=*/false); 4291 4292 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is 4293 // performed and the object is not of the derived type. 4294 if (sanitizePerformTypeCheck()) 4295 EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), 4296 Derived.getPointer(), E->getType()); 4297 4298 if (SanOpts.has(SanitizerKind::CFIDerivedCast)) 4299 EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(), 4300 /*MayBeNull=*/false, CFITCK_DerivedCast, 4301 E->getBeginLoc()); 4302 4303 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(), 4304 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4305 } 4306 case CK_LValueBitCast: { 4307 // This must be a reinterpret_cast (or c-style equivalent). 4308 const auto *CE = cast<ExplicitCastExpr>(E); 4309 4310 CGM.EmitExplicitCastExprType(CE, this); 4311 LValue LV = EmitLValue(E->getSubExpr()); 4312 Address V = Builder.CreateBitCast(LV.getAddress(), 4313 ConvertType(CE->getTypeAsWritten())); 4314 4315 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) 4316 EmitVTablePtrCheckForCast(E->getType(), V.getPointer(), 4317 /*MayBeNull=*/false, CFITCK_UnrelatedCast, 4318 E->getBeginLoc()); 4319 4320 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), 4321 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4322 } 4323 case CK_AddressSpaceConversion: { 4324 LValue LV = EmitLValue(E->getSubExpr()); 4325 QualType DestTy = getContext().getPointerType(E->getType()); 4326 llvm::Value *V = getTargetHooks().performAddrSpaceCast( 4327 *this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(), 4328 E->getType().getAddressSpace(), ConvertType(DestTy)); 4329 return MakeAddrLValue(Address(V, LV.getAddress().getAlignment()), 4330 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo()); 4331 } 4332 case CK_ObjCObjectLValueCast: { 4333 LValue LV = EmitLValue(E->getSubExpr()); 4334 Address V = Builder.CreateElementBitCast(LV.getAddress(), 4335 ConvertType(E->getType())); 4336 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(), 4337 CGM.getTBAAInfoForSubobject(LV, E->getType())); 4338 } 4339 case CK_ZeroToOCLOpaqueType: 4340 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid"); 4341 } 4342 4343 llvm_unreachable("Unhandled lvalue cast kind?"); 4344 } 4345 4346 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { 4347 assert(OpaqueValueMappingData::shouldBindAsLValue(e)); 4348 return getOrCreateOpaqueLValueMapping(e); 4349 } 4350 4351 LValue 4352 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) { 4353 assert(OpaqueValueMapping::shouldBindAsLValue(e)); 4354 4355 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator 4356 it = OpaqueLValues.find(e); 4357 4358 if (it != OpaqueLValues.end()) 4359 return it->second; 4360 4361 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted"); 4362 return EmitLValue(e->getSourceExpr()); 4363 } 4364 4365 RValue 4366 
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));

  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
    it = OpaqueRValues.find(e);

  if (it != OpaqueRValues.end())
    return it->second;

  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
  return EmitAnyExpr(e->getSourceExpr());
}

RValue CodeGenFunction::EmitRValueForField(LValue LV,
                                           const FieldDecl *FD,
                                           SourceLocation Loc) {
  QualType FT = FD->getType();
  LValue FieldLV = EmitLValueForField(LV, FD);
  switch (getEvaluationKind(FT)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
  case TEK_Aggregate:
    return FieldLV.asAggregateRValue();
  case TEK_Scalar:
    // This routine is used to load fields one-by-one to perform a copy, so
    // don't load reference fields.
    if (FD->getType()->isReferenceType())
      return RValue::get(FieldLV.getPointer());
    return EmitLoadOfLValue(FieldLV, Loc);
  }
  llvm_unreachable("bad evaluation kind");
}

//===--------------------------------------------------------------------===//
//                             Expression Emission
//===--------------------------------------------------------------------===//

RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
                                     ReturnValueSlot ReturnValue) {
  // Builtins never have block type.
  if (E->getCallee()->getType()->isBlockPointerType())
    return EmitBlockCallExpr(E, ReturnValue);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
    return EmitCXXMemberCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
    return EmitCUDAKernelCallExpr(CE, ReturnValue);

  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
    if (const CXXMethodDecl *MD =
            dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);

  CGCallee callee = EmitCallee(E->getCallee());

  if (callee.isBuiltin()) {
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
                           E, ReturnValue);
  }

  if (callee.isPseudoDestructor()) {
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
  }

  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
}

/// Emit a CallExpr without considering whether it might be a subclass of
/// CallExpr (such as a member or operator call).
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
                                           ReturnValueSlot ReturnValue) {
  CGCallee Callee = EmitCallee(E->getCallee());
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}

static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
  if (auto builtinID = FD->getBuiltinID()) {
    return CGCallee::forBuiltin(builtinID, FD);
  }

  llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
  return CGCallee::forDirect(calleePtr, GlobalDecl(FD));
}

CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
  E = E->IgnoreParens();

  // Look through function-to-pointer decay.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
      return EmitCallee(ICE->getSubExpr());
    }

  // Resolve direct calls.
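  // (Illustrative: in a plain call like 'f(1)', the callee is a DeclRefExpr
  // naming the FunctionDecl, so no indirect function pointer is needed.)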
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
      return EmitDirectCallee(*this, FD);
    }
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
      EmitIgnoredExpr(ME->getBase());
      return EmitDirectCallee(*this, FD);
    }

  // Look through template substitutions.
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
    return EmitCallee(NTTP->getReplacement());

  // Treat pseudo-destructor calls differently.
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
    return CGCallee::forPseudoDestructor(PDE);
  }

  // Otherwise, we have an indirect reference.
  llvm::Value *calleePtr;
  QualType functionType;
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
    calleePtr = EmitScalarExpr(E);
    functionType = ptrType->getPointeeType();
  } else {
    functionType = E->getType();
    calleePtr = EmitLValue(E).getPointer();
  }
  assert(functionType->isFunctionType());

  GlobalDecl GD;
  if (const auto *VD =
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
    GD = GlobalDecl(VD);

  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
  CGCallee callee(calleeInfo, calleePtr);
  return callee;
}

LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
  // Comma expressions just emit their LHS then their RHS as an l-value.
  if (E->getOpcode() == BO_Comma) {
    EmitIgnoredExpr(E->getLHS());
    EnsureInsertPoint();
    return EmitLValue(E->getRHS());
  }

  if (E->getOpcode() == BO_PtrMemD ||
      E->getOpcode() == BO_PtrMemI)
    return EmitPointerToDataMemberBinaryExpr(E);

  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");

  // Note that in all of these cases, __block variables need the RHS
  // evaluated first just in case the variable gets moved by the RHS.

  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar: {
    switch (E->getLHS()->getType().getObjCLifetime()) {
    case Qualifiers::OCL_Strong:
      return EmitARCStoreStrong(E, /*ignored*/ false).first;

    case Qualifiers::OCL_Autoreleasing:
      return EmitARCStoreAutoreleasing(E).first;

    // No reason to do any of these differently.
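    // (Unqualified, __unsafe_unretained, and __weak stores all fall through
    // to the common store emission below.)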
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Weak:
      break;
    }

    RValue RV = EmitAnyExpr(E->getRHS());
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
    if (RV.isScalar())
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
    EmitStoreThroughLValue(RV, LV);
    return LV;
  }

  case TEK_Complex:
    return EmitComplexAssignmentLValue(E);

  case TEK_Aggregate:
    return EmitAggExprToLValue(E);
  }
  llvm_unreachable("bad evaluation kind");
}

LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
  RValue RV = EmitCallExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getCallReturnType(getContext())->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
  // FIXME: This shouldn't require another copy.
  return EmitAggExprToLValue(E);
}

LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
         && "binding l-value to type which needs a temporary");
  AggValueSlot Slot = CreateAggTemp(E->getType());
  EmitCXXConstructExpr(E, Slot);
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
  return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
}

Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
  return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
                                      ConvertType(E->getType()));
}

LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
                        AlignmentSource::Decl);
}

LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
  Slot.setExternallyDestructed();
  EmitAggExpr(E->getSubExpr(), Slot);
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
}

LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
  RValue RV = EmitObjCMessageExpr(E);

  if (!RV.isScalar())
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                          AlignmentSource::Decl);

  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
         "Can't have a scalar return unless the return type is a "
         "reference type!");

  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
}

LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
  Address V =
    CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
}

llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
                                             const ObjCIvarDecl *Ivar) {
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}

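/// Emit an l-value for an Objective-C instance variable; for example
/// (illustrative), an access such as 'self->_count' in a method body ends
/// up here. The layout computation is delegated to the configured
/// Objective-C runtime.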
LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
                                          llvm::Value *BaseValue,
                                          const ObjCIvarDecl *Ivar,
                                          unsigned CVRQualifiers) {
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
                                                   Ivar, CVRQualifiers);
}

LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
  llvm::Value *BaseValue = nullptr;
  const Expr *BaseExpr = E->getBase();
  Qualifiers BaseQuals;
  QualType ObjectTy;
  if (E->isArrow()) {
    BaseValue = EmitScalarExpr(BaseExpr);
    ObjectTy = BaseExpr->getType()->getPointeeType();
    BaseQuals = ObjectTy.getQualifiers();
  } else {
    LValue BaseLV = EmitLValue(BaseExpr);
    BaseValue = BaseLV.getPointer();
    ObjectTy = BaseExpr->getType();
    BaseQuals = ObjectTy.getQualifiers();
  }

  LValue LV =
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
                      BaseQuals.getCVRQualifiers());
  setObjCGCLValueClass(getContext(), E, LV);
  return LV;
}

LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
  // Can only get an l-value for a statement expression returning
  // aggregate type.
  RValue RV = EmitAnyExprToTemp(E);
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
                        AlignmentSource::Decl);
}

RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
                                 const CallExpr *E, ReturnValueSlot ReturnValue,
                                 llvm::Value *Chain) {
  // Get the actual function type. The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  const Decl *TargetDecl =
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes, so only
    // check when we have both always_inline and target; otherwise we could
    // be making a conditional call after a check for the proper cpu features
    // (and it won't cause code generation issues due to function-based code
    // generation).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(E, FD);

  CalleeType = getContext().getCanonicalType(CalleeType);

  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();

  CGCallee Callee = OrigCallee;

  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      // Remove any (C++17) exception specifications, to allow calling e.g. a
      // noexcept function through a non-noexcept pointer.
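      // Illustrative example: 'void f() noexcept;' may be called through a
      // plain 'void (*)()', so the expected RTTI below is computed from the
      // function type with its exception specification stripped.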
      auto ProtoTy =
          getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
      llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty};
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);

      llvm::Value *CalleePtr = Callee.getFunctionPointer();

      llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
          CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
      llvm::Value *CalleeSig =
          Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeRTTIPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
      llvm::Value *CalleeRTTIEncoded =
          Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
      llvm::Value *CalleeRTTI =
          DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
      llvm::Value *CalleeRTTIMatch =
          Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
                                      EmitCheckTypeDescriptor(CalleeType)};
      EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
                SanitizerHandler::FunctionTypeMismatch, StaticData,
                {CalleePtr, CalleeRTTI, FTRTTIConst});

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  const auto *FnType = cast<FunctionType>(PointeeType);

  // If we are checking indirect calls and this call is indirect, check that
  // the function pointer is a member of the bit set for the function type.
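  // Illustrative example: for 'void (*fp)(int);', the call '(*fp)(42)' is
  // preceded by an llvm.type.test of 'fp' against the type id recorded for
  // 'void (int)'.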
  if (SanOpts.has(SanitizerKind::CFIICall) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    SanitizerScope SanScope(this);
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);

    llvm::Metadata *MD;
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
    else
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));

    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
    llvm::Value *TypeTest = Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});

    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
    llvm::Constant *StaticData[] = {
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
        EmitCheckSourceLocation(E->getBeginLoc()),
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
    };
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
                           CastedCallee, StaticData);
    } else {
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
                SanitizerHandler::CFICheckFail, StaticData,
                {CastedCallee, llvm::UndefValue::get(IntPtrTy)});
    }
  }

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
             CGM.getContext().VoidPtrTy);

  // C++17 requires that we evaluate arguments to a call using assignment
  // syntax right-to-left, and that we evaluate arguments to certain other
  // operators left-to-right. Note that we allow this to override the order
  // dictated by the calling convention on the MS ABI, which means that
  // parameter destruction order is not necessarily reverse construction order.
  // FIXME: Revisit this based on C++ committee response to unimplementability.
  EvaluationOrder Order = EvaluationOrder::Default;
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
    if (OCE->isAssignmentOp())
      Order = EvaluationOrder::ForceRightToLeft;
    else {
      switch (OCE->getOperator()) {
      case OO_LessLess:
      case OO_GreaterGreater:
      case OO_AmpAmp:
      case OO_PipePipe:
      case OO_Comma:
      case OO_ArrowStar:
        Order = EvaluationOrder::ForceLeftToRight;
        break;
      default:
        break;
      }
    }
  }

  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
               E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*isChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();

    llvm::Value *CalleePtr = Callee.getFunctionPointer();
    CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
    Callee.setFunctionPointer(CalleePtr);
  }

  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr, E->getExprLoc());
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  Address BaseAddr = Address::invalid();
  if (E->getOpcode() == BO_PtrMemI) {
    BaseAddr = EmitPointerWithAlignment(E->getLHS());
  } else {
    BaseAddr = EmitLValue(E->getLHS()).getAddress();
  }

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT
    = E->getRHS()->getType()->getAs<MemberPointerType>();

  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  Address MemberAddr =
    EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
                                    &TBAAInfo);

  return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(Address addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);

  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
} // namespace

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
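  // Illustrative example: an Objective-C property update such as
  // 'obj.count += 1' is a PseudoObjectExpr whose semantic form binds 'obj'
  // to an OpaqueValueExpr and expresses the update as getter/setter calls.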
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // Skip unique OVEs.
      if (ov->isUnique()) {
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;
      if (ov == resultExpr && ov->isRValue() && !forLValue &&
          CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
        CGF.EmitAggExpr(ov->getSourceExpr(), slot);
        LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
                                       AlignmentSource::Decl);
        opaqueData = OVMA::bind(CGF, ov, LV);
        result.RV = slot.asRValue();

      // Otherwise, emit as normal.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          if (forLValue)
            result.LV = CGF.EmitLValue(ov);
          else
            result.RV = CGF.EmitAnyExpr(ov, slot);
        }
      }

      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      if (forLValue)
        result.LV = CGF.EmitLValue(semantic);
      else
        result.RV = CGF.EmitAnyExpr(semantic, slot);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
                                               AggValueSlot slot) {
  return emitPseudoObjectExpr(*this, E, false, slot).RV;
}

LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
  return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
}