//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  SetFastMathFlags(CurFPFeatures);
  SetFPModel();
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot.
We do it here, as opposed to the deletion 91 // time of the CodeGenModule, because we have to ensure the IR has not yet 92 // been "emitted" to the outside, thus, modifications are still sensible. 93 if (CGM.getLangOpts().OpenMPIRBuilder) 94 CGM.getOpenMPRuntime().getOMPBuilder().finalize(); 95 } 96 97 // Map the LangOption for exception behavior into 98 // the corresponding enum in the IR. 99 llvm::fp::ExceptionBehavior 100 clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) { 101 102 switch (Kind) { 103 case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore; 104 case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap; 105 case LangOptions::FPE_Strict: return llvm::fp::ebStrict; 106 } 107 llvm_unreachable("Unsupported FP Exception Behavior"); 108 } 109 110 void CodeGenFunction::SetFPModel() { 111 llvm::RoundingMode RM = getLangOpts().getFPRoundingMode(); 112 auto fpExceptionBehavior = ToConstrainedExceptMD( 113 getLangOpts().getFPExceptionMode()); 114 115 Builder.setDefaultConstrainedRounding(RM); 116 Builder.setDefaultConstrainedExcept(fpExceptionBehavior); 117 Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore || 118 RM != llvm::RoundingMode::NearestTiesToEven); 119 } 120 121 void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) { 122 llvm::FastMathFlags FMF; 123 FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate()); 124 FMF.setNoNaNs(FPFeatures.getNoHonorNaNs()); 125 FMF.setNoInfs(FPFeatures.getNoHonorInfs()); 126 FMF.setNoSignedZeros(FPFeatures.getNoSignedZero()); 127 FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal()); 128 FMF.setApproxFunc(FPFeatures.getAllowApproxFunc()); 129 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement()); 130 Builder.setFastMathFlags(FMF); 131 } 132 133 CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF, 134 FPOptions FPFeatures) 135 : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) { 136 CGF.CurFPFeatures = FPFeatures; 137 138 if (OldFPFeatures == FPFeatures) 139 return; 140 141 FMFGuard.emplace(CGF.Builder); 142 143 llvm::RoundingMode NewRoundingBehavior = 144 static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode()); 145 CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior); 146 auto NewExceptionBehavior = 147 ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>( 148 FPFeatures.getFPExceptionMode())); 149 CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior); 150 151 CGF.SetFastMathFlags(FPFeatures); 152 153 assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() || 154 isa<CXXConstructorDecl>(CGF.CurFuncDecl) || 155 isa<CXXDestructorDecl>(CGF.CurFuncDecl) || 156 (NewExceptionBehavior == llvm::fp::ebIgnore && 157 NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) && 158 "FPConstrained should be enabled on entire function"); 159 160 auto mergeFnAttrValue = [&](StringRef Name, bool Value) { 161 auto OldValue = 162 CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true"; 163 auto NewValue = OldValue & Value; 164 if (OldValue != NewValue) 165 CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue)); 166 }; 167 mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs()); 168 mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs()); 169 mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero()); 170 mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() && 171 FPFeatures.getAllowReciprocal() && 172 FPFeatures.getAllowApproxFunc() && 173 FPFeatures.getNoSignedZero()); 174 } 175 
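// An illustrative sketch (not part of this file's logic): when expression
// emission enters a region with different FP semantics, e.g. one governed by
// `#pragma clang fp reassociate(on)`, a CGFPOptionsRAII is placed on the
// stack so the builder's fast-math flags and constrained-FP defaults follow
// that scope and are restored on exit:
//
//   {
//     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, NewFPFeatures);
//     // FP instructions emitted here pick up e.g. the `reassoc` flag.
//   } // prior FPOptions restored by the destructor below
//
// The pragma spelling and the `NewFPFeatures` value above are illustrative
// assumptions; the authoritative flag mapping is SetFastMathFlags() above.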
CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::ExtInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
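    // For instance (a hypothetical case, for illustration): in a function
    // whose body is a single `return x;`, nothing ever branches to the
    // return block, so the check below folds the epilogue into the current
    // block and deletes the unused return block entirely.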
274 if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) { 275 ReturnBlock.getBlock()->replaceAllUsesWith(CurBB); 276 delete ReturnBlock.getBlock(); 277 ReturnBlock = JumpDest(); 278 } else 279 EmitBlock(ReturnBlock.getBlock()); 280 return llvm::DebugLoc(); 281 } 282 283 // Otherwise, if the return block is the target of a single direct 284 // branch then we can just put the code in that block instead. This 285 // cleans up functions which started with a unified return block. 286 if (ReturnBlock.getBlock()->hasOneUse()) { 287 llvm::BranchInst *BI = 288 dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin()); 289 if (BI && BI->isUnconditional() && 290 BI->getSuccessor(0) == ReturnBlock.getBlock()) { 291 // Record/return the DebugLoc of the simple 'return' expression to be used 292 // later by the actual 'ret' instruction. 293 llvm::DebugLoc Loc = BI->getDebugLoc(); 294 Builder.SetInsertPoint(BI->getParent()); 295 BI->eraseFromParent(); 296 delete ReturnBlock.getBlock(); 297 ReturnBlock = JumpDest(); 298 return Loc; 299 } 300 } 301 302 // FIXME: We are at an unreachable point, there is no reason to emit the block 303 // unless it has uses. However, we still need a place to put the debug 304 // region.end for now. 305 306 EmitBlock(ReturnBlock.getBlock()); 307 return llvm::DebugLoc(); 308 } 309 310 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) { 311 if (!BB) return; 312 if (!BB->use_empty()) 313 return CGF.CurFn->getBasicBlockList().push_back(BB); 314 delete BB; 315 } 316 317 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { 318 assert(BreakContinueStack.empty() && 319 "mismatched push/pop in break/continue stack!"); 320 321 bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 322 && NumSimpleReturnExprs == NumReturnExprs 323 && ReturnBlock.getBlock()->use_empty(); 324 // Usually the return expression is evaluated before the cleanup 325 // code. If the function contains only a simple return statement, 326 // such as a constant, the location before the cleanup code becomes 327 // the last useful breakpoint in the function, because the simple 328 // return expression will be evaluated after the cleanup code. To be 329 // safe, set the debug location for cleanup code to the location of 330 // the return statement. Otherwise the cleanup code should be at the 331 // end of the function's lexical scope. 332 // 333 // If there are multiple branches to the return block, the branch 334 // instructions will get the location of the return statements and 335 // all will be fine. 336 if (CGDebugInfo *DI = getDebugInfo()) { 337 if (OnlySimpleReturnStmts) 338 DI->EmitLocation(Builder, LastStopPoint); 339 else 340 DI->EmitLocation(Builder, EndLoc); 341 } 342 343 // Pop any cleanups that might have been associated with the 344 // parameters. Do this in whatever block we're currently in; it's 345 // important to do this before we enter the return block or return 346 // edges will be *really* confused. 347 bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth; 348 bool HasOnlyLifetimeMarkers = 349 HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth); 350 bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers; 351 if (HasCleanups) { 352 // Make sure the line table doesn't jump back into the body for 353 // the ret after it's been at EndLoc. 
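    // A hedged example of the problem: for `int f() { Guard g; return 1; }`
    // the cleanup for `g` runs after the `return` expression is evaluated,
    // so without this the line table would step from the closing '}' back
    // into the body. (`Guard` here is a hypothetical type with a
    // destructor.)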
354 Optional<ApplyDebugLocation> AL; 355 if (CGDebugInfo *DI = getDebugInfo()) { 356 if (OnlySimpleReturnStmts) 357 DI->EmitLocation(Builder, EndLoc); 358 else 359 // We may not have a valid end location. Try to apply it anyway, and 360 // fall back to an artificial location if needed. 361 AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc); 362 } 363 364 PopCleanupBlocks(PrologueCleanupDepth); 365 } 366 367 // Emit function epilog (to return). 368 llvm::DebugLoc Loc = EmitReturnBlock(); 369 370 if (ShouldInstrumentFunction()) { 371 if (CGM.getCodeGenOpts().InstrumentFunctions) 372 CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit"); 373 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining) 374 CurFn->addFnAttr("instrument-function-exit-inlined", 375 "__cyg_profile_func_exit"); 376 } 377 378 // Emit debug descriptor for function end. 379 if (CGDebugInfo *DI = getDebugInfo()) 380 DI->EmitFunctionEnd(Builder, CurFn); 381 382 // Reset the debug location to that of the simple 'return' expression, if any 383 // rather than that of the end of the function's scope '}'. 384 ApplyDebugLocation AL(*this, Loc); 385 EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc); 386 EmitEndEHSpec(CurCodeDecl); 387 388 assert(EHStack.empty() && 389 "did not remove all scopes from cleanup stack!"); 390 391 // If someone did an indirect goto, emit the indirect goto block at the end of 392 // the function. 393 if (IndirectBranch) { 394 EmitBlock(IndirectBranch->getParent()); 395 Builder.ClearInsertionPoint(); 396 } 397 398 // If some of our locals escaped, insert a call to llvm.localescape in the 399 // entry block. 400 if (!EscapedLocals.empty()) { 401 // Invert the map from local to index into a simple vector. There should be 402 // no holes. 403 SmallVector<llvm::Value *, 4> EscapeArgs; 404 EscapeArgs.resize(EscapedLocals.size()); 405 for (auto &Pair : EscapedLocals) 406 EscapeArgs[Pair.second] = Pair.first; 407 llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration( 408 &CGM.getModule(), llvm::Intrinsic::localescape); 409 CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs); 410 } 411 412 // Remove the AllocaInsertPt instruction, which is just a convenience for us. 413 llvm::Instruction *Ptr = AllocaInsertPt; 414 AllocaInsertPt = nullptr; 415 Ptr->eraseFromParent(); 416 417 // If someone took the address of a label but never did an indirect goto, we 418 // made a zero entry PHI node, which is illegal, zap it now. 419 if (IndirectBranch) { 420 llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress()); 421 if (PN->getNumIncomingValues() == 0) { 422 PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType())); 423 PN->eraseFromParent(); 424 } 425 } 426 427 EmitIfUsed(*this, EHResumeBlock); 428 EmitIfUsed(*this, TerminateLandingPad); 429 EmitIfUsed(*this, TerminateHandler); 430 EmitIfUsed(*this, UnreachableBlock); 431 432 for (const auto &FuncletAndParent : TerminateFunclets) 433 EmitIfUsed(*this, FuncletAndParent.second); 434 435 if (CGM.getCodeGenOpts().EmitDeclMetadata) 436 EmitDeclMetadata(); 437 438 for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator 439 I = DeferredReplacements.begin(), 440 E = DeferredReplacements.end(); 441 I != E; ++I) { 442 I->first->replaceAllUsesWith(I->second); 443 I->first->eraseFromParent(); 444 } 445 446 // Eliminate CleanupDestSlot alloca by replacing it with SSA values and 447 // PHIs if the current function is a coroutine. 
  // We don't do it for all functions, as it may result in a slight increase
  // in the number of instructions if compiled with no optimizations. We do it
  // for coroutines because the lifetime of the CleanupDestSlot alloca makes
  // correct coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Add the min-legal-vector-width attribute. This contains the max width
  // from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls
/// to the __xray_customevent(...) builtin, when doing XRay instrumentation.
515 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const { 516 return CGM.getCodeGenOpts().XRayInstrumentFunctions && 517 (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents || 518 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask == 519 XRayInstrKind::Custom); 520 } 521 522 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const { 523 return CGM.getCodeGenOpts().XRayInstrumentFunctions && 524 (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents || 525 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask == 526 XRayInstrKind::Typed); 527 } 528 529 llvm::Constant * 530 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F, 531 llvm::Constant *Addr) { 532 // Addresses stored in prologue data can't require run-time fixups and must 533 // be PC-relative. Run-time fixups are undesirable because they necessitate 534 // writable text segments, which are unsafe. And absolute addresses are 535 // undesirable because they break PIE mode. 536 537 // Add a layer of indirection through a private global. Taking its address 538 // won't result in a run-time fixup, even if Addr has linkonce_odr linkage. 539 auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(), 540 /*isConstant=*/true, 541 llvm::GlobalValue::PrivateLinkage, Addr); 542 543 // Create a PC-relative address. 544 auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy); 545 auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy); 546 auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt); 547 return (IntPtrTy == Int32Ty) 548 ? PCRelAsInt 549 : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty); 550 } 551 552 llvm::Value * 553 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F, 554 llvm::Value *EncodedAddr) { 555 // Reconstruct the address of the global. 556 auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy); 557 auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int"); 558 auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int"); 559 auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr"); 560 561 // Load the original pointer through the global. 562 return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()), 563 "decoded_addr"); 564 } 565 566 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD, 567 llvm::Function *Fn) 568 { 569 if (!FD->hasAttr<OpenCLKernelAttr>()) 570 return; 571 572 llvm::LLVMContext &Context = getLLVMContext(); 573 574 CGM.GenOpenCLArgMetadata(Fn, FD, this); 575 576 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) { 577 QualType HintQTy = A->getTypeHint(); 578 const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>(); 579 bool IsSignedInteger = 580 HintQTy->isSignedIntegerType() || 581 (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType()); 582 llvm::Metadata *AttrMDArgs[] = { 583 llvm::ConstantAsMetadata::get(llvm::UndefValue::get( 584 CGM.getTypes().ConvertType(A->getTypeHint()))), 585 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( 586 llvm::IntegerType::get(Context, 32), 587 llvm::APInt(32, (uint64_t)(IsSignedInteger ? 
1 : 0))))}; 588 Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs)); 589 } 590 591 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) { 592 llvm::Metadata *AttrMDArgs[] = { 593 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())), 594 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())), 595 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))}; 596 Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs)); 597 } 598 599 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) { 600 llvm::Metadata *AttrMDArgs[] = { 601 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())), 602 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())), 603 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))}; 604 Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs)); 605 } 606 607 if (const OpenCLIntelReqdSubGroupSizeAttr *A = 608 FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) { 609 llvm::Metadata *AttrMDArgs[] = { 610 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))}; 611 Fn->setMetadata("intel_reqd_sub_group_size", 612 llvm::MDNode::get(Context, AttrMDArgs)); 613 } 614 } 615 616 /// Determine whether the function F ends with a return stmt. 617 static bool endsWithReturn(const Decl* F) { 618 const Stmt *Body = nullptr; 619 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F)) 620 Body = FD->getBody(); 621 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F)) 622 Body = OMD->getBody(); 623 624 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) { 625 auto LastStmt = CS->body_rbegin(); 626 if (LastStmt != CS->body_rend()) 627 return isa<ReturnStmt>(*LastStmt); 628 } 629 return false; 630 } 631 632 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) { 633 if (SanOpts.has(SanitizerKind::Thread)) { 634 Fn->addFnAttr("sanitize_thread_no_checking_at_run_time"); 635 Fn->removeFnAttr(llvm::Attribute::SanitizeThread); 636 } 637 } 638 639 /// Check if the return value of this function requires sanitization. 640 bool CodeGenFunction::requiresReturnValueCheck() const { 641 return requiresReturnValueNullabilityCheck() || 642 (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl && 643 CurCodeDecl->getAttr<ReturnsNonNullAttr>()); 644 } 645 646 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) { 647 auto *MD = dyn_cast_or_null<CXXMethodDecl>(D); 648 if (!MD || !MD->getDeclName().getAsIdentifierInfo() || 649 !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") || 650 (MD->getNumParams() != 1 && MD->getNumParams() != 2)) 651 return false; 652 653 if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType()) 654 return false; 655 656 if (MD->getNumParams() == 2) { 657 auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>(); 658 if (!PT || !PT->isVoidPointerType() || 659 !PT->getPointeeType().isConstQualified()) 660 return false; 661 } 662 663 return true; 664 } 665 666 /// Return the UBSan prologue signature for \p FD if one is available. 
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                       SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::MemTag))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
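  // For example (illustrative): in
  //
  //   - (void)dealloc { [_child release]; ... }
  //
  // the synchronization performed by the Objective-C runtime is invisible to
  // TSan and would produce false positives, so instead of instrumenting such
  // methods we mark them with the "sanitize_thread_no_checking_at_run_time"
  // attribute via markAsIgnoreThreadCheckingAtRuntime() in the code below.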
749 if (SanOpts.has(SanitizerKind::Thread)) { 750 if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) { 751 IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0); 752 if (OMD->getMethodFamily() == OMF_dealloc || 753 OMD->getMethodFamily() == OMF_initialize || 754 (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) { 755 markAsIgnoreThreadCheckingAtRuntime(Fn); 756 } 757 } 758 } 759 760 // Ignore unrelated casts in STL allocate() since the allocator must cast 761 // from void* to T* before object initialization completes. Don't match on the 762 // namespace because not all allocators are in std:: 763 if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) { 764 if (matchesStlAllocatorFn(D, getContext())) 765 SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast; 766 } 767 768 // Ignore null checks in coroutine functions since the coroutines passes 769 // are not aware of how to move the extra UBSan instructions across the split 770 // coroutine boundaries. 771 if (D && SanOpts.has(SanitizerKind::Null)) 772 if (const auto *FD = dyn_cast<FunctionDecl>(D)) 773 if (FD->getBody() && 774 FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass) 775 SanOpts.Mask &= ~SanitizerKind::Null; 776 777 // Apply xray attributes to the function (as a string, for now) 778 bool AlwaysXRayAttr = false; 779 if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) { 780 if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has( 781 XRayInstrKind::FunctionEntry) || 782 CGM.getCodeGenOpts().XRayInstrumentationBundle.has( 783 XRayInstrKind::FunctionExit)) { 784 if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) { 785 Fn->addFnAttr("function-instrument", "xray-always"); 786 AlwaysXRayAttr = true; 787 } 788 if (XRayAttr->neverXRayInstrument()) 789 Fn->addFnAttr("function-instrument", "xray-never"); 790 if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>()) 791 if (ShouldXRayInstrumentFunction()) 792 Fn->addFnAttr("xray-log-args", 793 llvm::utostr(LogArgs->getArgumentCount())); 794 } 795 } else { 796 if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc)) 797 Fn->addFnAttr( 798 "xray-instruction-threshold", 799 llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold)); 800 } 801 802 if (ShouldXRayInstrumentFunction()) { 803 if (CGM.getCodeGenOpts().XRayIgnoreLoops) 804 Fn->addFnAttr("xray-ignore-loops"); 805 806 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( 807 XRayInstrKind::FunctionExit)) 808 Fn->addFnAttr("xray-skip-exit"); 809 810 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has( 811 XRayInstrKind::FunctionEntry)) 812 Fn->addFnAttr("xray-skip-entry"); 813 814 auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups; 815 if (FuncGroups > 1) { 816 auto FuncName = llvm::makeArrayRef<uint8_t>( 817 CurFn->getName().bytes_begin(), CurFn->getName().bytes_end()); 818 auto Group = crc32(FuncName) % FuncGroups; 819 if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup && 820 !AlwaysXRayAttr) 821 Fn->addFnAttr("function-instrument", "xray-never"); 822 } 823 } 824 825 unsigned Count, Offset; 826 if (const auto *Attr = 827 D ? 
D->getAttr<PatchableFunctionEntryAttr>() : nullptr) { 828 Count = Attr->getCount(); 829 Offset = Attr->getOffset(); 830 } else { 831 Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount; 832 Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset; 833 } 834 if (Count && Offset <= Count) { 835 Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset)); 836 if (Offset) 837 Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset)); 838 } 839 840 // Add no-jump-tables value. 841 Fn->addFnAttr("no-jump-tables", 842 llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables)); 843 844 // Add no-inline-line-tables value. 845 if (CGM.getCodeGenOpts().NoInlineLineTables) 846 Fn->addFnAttr("no-inline-line-tables"); 847 848 // Add profile-sample-accurate value. 849 if (CGM.getCodeGenOpts().ProfileSampleAccurate) 850 Fn->addFnAttr("profile-sample-accurate"); 851 852 if (!CGM.getCodeGenOpts().SampleProfileFile.empty()) 853 Fn->addFnAttr("use-sample-profile"); 854 855 if (D && D->hasAttr<CFICanonicalJumpTableAttr>()) 856 Fn->addFnAttr("cfi-canonical-jump-table"); 857 858 if (getLangOpts().OpenCL) { 859 // Add metadata for a kernel function. 860 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) 861 EmitOpenCLKernelMetadata(FD, Fn); 862 } 863 864 // If we are checking function types, emit a function type signature as 865 // prologue data. 866 if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) { 867 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 868 if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) { 869 // Remove any (C++17) exception specifications, to allow calling e.g. a 870 // noexcept function through a non-noexcept pointer. 871 auto ProtoTy = 872 getContext().getFunctionTypeWithExceptionSpec(FD->getType(), 873 EST_None); 874 llvm::Constant *FTRTTIConst = 875 CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true); 876 llvm::Constant *FTRTTIConstEncoded = 877 EncodeAddrForUseInPrologue(Fn, FTRTTIConst); 878 llvm::Constant *PrologueStructElems[] = {PrologueSig, 879 FTRTTIConstEncoded}; 880 llvm::Constant *PrologueStructConst = 881 llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true); 882 Fn->setPrologueData(PrologueStructConst); 883 } 884 } 885 } 886 887 // If we're checking nullability, we need to know whether we can check the 888 // return value. Initialize the flag to 'true' and refine it in EmitParmDecl. 889 if (SanOpts.has(SanitizerKind::NullabilityReturn)) { 890 auto Nullability = FnRetTy->getNullability(getContext()); 891 if (Nullability && *Nullability == NullabilityKind::NonNull) { 892 if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && 893 CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>())) 894 RetValNullabilityPrecondition = 895 llvm::ConstantInt::getTrue(getLLVMContext()); 896 } 897 } 898 899 // If we're in C++ mode and the function name is "main", it is guaranteed 900 // to be norecurse by the standard (3.6.1.3 "The function main shall not be 901 // used within a program"). 902 // 903 // OpenCL C 2.0 v2.2-11 s6.9.i: 904 // Recursion is not supported. 905 // 906 // SYCL v1.2.1 s3.10: 907 // kernels cannot include RTTI information, exception classes, 908 // recursive code, virtual functions or make use of C++ libraries that 909 // are not compiled for the device. 
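  // A minimal sketch of the effect on the emitted IR (attribute spelling as
  // printed by LLVM):
  //
  //   define i32 @main() #0 { ... }
  //   attributes #0 = { norecurse ... }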
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
        getLangOpts().SYCLIsDevice ||
        (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
      Fn->addFnAttr(llvm::Attribute::NoRecurse);
  }

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    Builder.setIsFPConstrained(FD->hasAttr<StrictFPAttr>());
    if (FD->hasAttr<StrictFPAttr>())
      Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
        CGM.getCodeGenOpts().StackAlignment)
      Fn->addFnAttr("stackrealign");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the
    // calling convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute is set to the mcount function name, which is
  // architecture-dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if the function has the
    // no_instrument_function attribute.
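    // For example (a sketch; the exact symbol comes from
    // getTarget().getMCountName() and is target-dependent): plain `-pg` on a
    // typical ELF target yields
    //
    //   "instrument-function-entry-inlined"="mcount"
    //
    // while `-pg -mfentry` instead yields "fentry-call"="true", and the
    // backend emits the profiling call before the prologue.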
985 if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) { 986 if (CGM.getCodeGenOpts().CallFEntry) 987 Fn->addFnAttr("fentry-call", "true"); 988 else { 989 Fn->addFnAttr("instrument-function-entry-inlined", 990 getTarget().getMCountName()); 991 } 992 if (CGM.getCodeGenOpts().MNopMCount) { 993 if (!CGM.getCodeGenOpts().CallFEntry) 994 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt) 995 << "-mnop-mcount" << "-mfentry"; 996 Fn->addFnAttr("mnop-mcount"); 997 } 998 999 if (CGM.getCodeGenOpts().RecordMCount) { 1000 if (!CGM.getCodeGenOpts().CallFEntry) 1001 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt) 1002 << "-mrecord-mcount" << "-mfentry"; 1003 Fn->addFnAttr("mrecord-mcount"); 1004 } 1005 } 1006 } 1007 1008 if (CGM.getCodeGenOpts().PackedStack) { 1009 if (getContext().getTargetInfo().getTriple().getArch() != 1010 llvm::Triple::systemz) 1011 CGM.getDiags().Report(diag::err_opt_not_valid_on_target) 1012 << "-mpacked-stack"; 1013 Fn->addFnAttr("packed-stack"); 1014 } 1015 1016 if (RetTy->isVoidType()) { 1017 // Void type; nothing to return. 1018 ReturnValue = Address::invalid(); 1019 1020 // Count the implicit return. 1021 if (!endsWithReturn(D)) 1022 ++NumReturnExprs; 1023 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) { 1024 // Indirect return; emit returned value directly into sret slot. 1025 // This reduces code size, and affects correctness in C++. 1026 auto AI = CurFn->arg_begin(); 1027 if (CurFnInfo->getReturnInfo().isSRetAfterThis()) 1028 ++AI; 1029 ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign()); 1030 if (!CurFnInfo->getReturnInfo().getIndirectByVal()) { 1031 ReturnValuePointer = 1032 CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr"); 1033 Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast( 1034 ReturnValue.getPointer(), Int8PtrTy), 1035 ReturnValuePointer); 1036 } 1037 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca && 1038 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) { 1039 // Load the sret pointer from the argument struct and return into that. 1040 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex(); 1041 llvm::Function::arg_iterator EI = CurFn->arg_end(); 1042 --EI; 1043 llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx); 1044 ReturnValuePointer = Address(Addr, getPointerAlign()); 1045 Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result"); 1046 ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy)); 1047 } else { 1048 ReturnValue = CreateIRTemp(RetTy, "retval"); 1049 1050 // Tell the epilog emitter to autorelease the result. We do this 1051 // now so that various specialized functions can suppress it 1052 // during their IR-generation. 1053 if (getLangOpts().ObjCAutoRefCount && 1054 !CurFnInfo->isReturnsRetained() && 1055 RetTy->isObjCRetainableType()) 1056 AutoreleaseResult = true; 1057 } 1058 1059 EmitStartEHSpec(CurCodeDecl); 1060 1061 PrologueCleanupDepth = EHStack.stable_begin(); 1062 1063 // Emit OpenMP specific initialization of the device functions. 
1064 if (getLangOpts().OpenMP && CurCodeDecl) 1065 CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl); 1066 1067 EmitFunctionProlog(*CurFnInfo, CurFn, Args); 1068 1069 if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) { 1070 CGM.getCXXABI().EmitInstanceFunctionProlog(*this); 1071 const CXXMethodDecl *MD = cast<CXXMethodDecl>(D); 1072 if (MD->getParent()->isLambda() && 1073 MD->getOverloadedOperator() == OO_Call) { 1074 // We're in a lambda; figure out the captures. 1075 MD->getParent()->getCaptureFields(LambdaCaptureFields, 1076 LambdaThisCaptureField); 1077 if (LambdaThisCaptureField) { 1078 // If the lambda captures the object referred to by '*this' - either by 1079 // value or by reference, make sure CXXThisValue points to the correct 1080 // object. 1081 1082 // Get the lvalue for the field (which is a copy of the enclosing object 1083 // or contains the address of the enclosing object). 1084 LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField); 1085 if (!LambdaThisCaptureField->getType()->isPointerType()) { 1086 // If the enclosing object was captured by value, just use its address. 1087 CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer(); 1088 } else { 1089 // Load the lvalue pointed to by the field, since '*this' was captured 1090 // by reference. 1091 CXXThisValue = 1092 EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal(); 1093 } 1094 } 1095 for (auto *FD : MD->getParent()->fields()) { 1096 if (FD->hasCapturedVLAType()) { 1097 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD), 1098 SourceLocation()).getScalarVal(); 1099 auto VAT = FD->getCapturedVLAType(); 1100 VLASizeMap[VAT->getSizeExpr()] = ExprArg; 1101 } 1102 } 1103 } else { 1104 // Not in a lambda; just use 'this' from the method. 1105 // FIXME: Should we generate a new load for each use of 'this'? The 1106 // fast register allocator would be happier... 1107 CXXThisValue = CXXABIThisValue; 1108 } 1109 1110 // Check the 'this' pointer once per function, if it's available. 1111 if (CXXABIThisValue) { 1112 SanitizerSet SkippedChecks; 1113 SkippedChecks.set(SanitizerKind::ObjectSize, true); 1114 QualType ThisTy = MD->getThisType(); 1115 1116 // If this is the call operator of a lambda with no capture-default, it 1117 // may have a static invoker function, which may call this operator with 1118 // a null 'this' pointer. 1119 if (isLambdaCallOperator(MD) && 1120 MD->getParent()->getLambdaCaptureDefault() == LCD_None) 1121 SkippedChecks.set(SanitizerKind::Null, true); 1122 1123 EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall 1124 : TCK_MemberCall, 1125 Loc, CXXABIThisValue, ThisTy, 1126 getContext().getTypeAlignInChars(ThisTy->getPointeeType()), 1127 SkippedChecks); 1128 } 1129 } 1130 1131 // If any of the arguments have a variably modified type, make sure to 1132 // emit the type size. 1133 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 1134 i != e; ++i) { 1135 const VarDecl *VD = *i; 1136 1137 // Dig out the type as written from ParmVarDecls; it's unclear whether 1138 // the standard (C99 6.9.1p10) requires this, but we're following the 1139 // precedent set by gcc. 1140 QualType Ty; 1141 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) 1142 Ty = PVD->getOriginalType(); 1143 else 1144 Ty = VD->getType(); 1145 1146 if (Ty->isVariablyModifiedType()) 1147 EmitVariablyModifiedType(Ty); 1148 } 1149 // Emit a location at the end of the prologue. 
1150 if (CGDebugInfo *DI = getDebugInfo()) 1151 DI->EmitLocation(Builder, StartLoc); 1152 1153 // TODO: Do we need to handle this in two places like we do with 1154 // target-features/target-cpu? 1155 if (CurFuncDecl) 1156 if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>()) 1157 LargestVectorWidth = VecWidth->getVectorWidth(); 1158 } 1159 1160 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) { 1161 incrementProfileCounter(Body); 1162 if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body)) 1163 EmitCompoundStmtWithoutScope(*S); 1164 else 1165 EmitStmt(Body); 1166 } 1167 1168 /// When instrumenting to collect profile data, the counts for some blocks 1169 /// such as switch cases need to not include the fall-through counts, so 1170 /// emit a branch around the instrumentation code. When not instrumenting, 1171 /// this just calls EmitBlock(). 1172 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB, 1173 const Stmt *S) { 1174 llvm::BasicBlock *SkipCountBB = nullptr; 1175 if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) { 1176 // When instrumenting for profiling, the fallthrough to certain 1177 // statements needs to skip over the instrumentation code so that we 1178 // get an accurate count. 1179 SkipCountBB = createBasicBlock("skipcount"); 1180 EmitBranch(SkipCountBB); 1181 } 1182 EmitBlock(BB); 1183 uint64_t CurrentCount = getCurrentProfileCount(); 1184 incrementProfileCounter(S); 1185 setCurrentProfileCount(getCurrentProfileCount() + CurrentCount); 1186 if (SkipCountBB) 1187 EmitBlock(SkipCountBB); 1188 } 1189 1190 /// Tries to mark the given function nounwind based on the 1191 /// non-existence of any throwing calls within it. We believe this is 1192 /// lightweight enough to do at -O0. 1193 static void TryMarkNoThrow(llvm::Function *F) { 1194 // LLVM treats 'nounwind' on a function as part of the type, so we 1195 // can't do this on functions that can be overwritten. 1196 if (F->isInterposable()) return; 1197 1198 for (llvm::BasicBlock &BB : *F) 1199 for (llvm::Instruction &I : BB) 1200 if (I.mayThrow()) 1201 return; 1202 1203 F->setDoesNotThrow(); 1204 } 1205 1206 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD, 1207 FunctionArgList &Args) { 1208 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 1209 QualType ResTy = FD->getReturnType(); 1210 1211 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); 1212 if (MD && MD->isInstance()) { 1213 if (CGM.getCXXABI().HasThisReturn(GD)) 1214 ResTy = MD->getThisType(); 1215 else if (CGM.getCXXABI().hasMostDerivedReturn(GD)) 1216 ResTy = CGM.getContext().VoidPtrTy; 1217 CGM.getCXXABI().buildThisParam(*this, Args); 1218 } 1219 1220 // The base version of an inheriting constructor whose constructed base is a 1221 // virtual base is not passed any arguments (because it doesn't actually call 1222 // the inherited constructor). 
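  // A minimal illustration (hypothetical types):
  //
  //   struct A { A(int); };
  //   struct B : virtual A { using A::A; };
  //
  // When B is constructed as a base subobject, the virtual base A has
  // already been constructed by the most-derived object, so the base variant
  // of B's inheriting constructor takes no forwarded parameters.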
1223 bool PassedParams = true; 1224 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) 1225 if (auto Inherited = CD->getInheritedConstructor()) 1226 PassedParams = 1227 getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType()); 1228 1229 if (PassedParams) { 1230 for (auto *Param : FD->parameters()) { 1231 Args.push_back(Param); 1232 if (!Param->hasAttr<PassObjectSizeAttr>()) 1233 continue; 1234 1235 auto *Implicit = ImplicitParamDecl::Create( 1236 getContext(), Param->getDeclContext(), Param->getLocation(), 1237 /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other); 1238 SizeArguments[Param] = Implicit; 1239 Args.push_back(Implicit); 1240 } 1241 } 1242 1243 if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))) 1244 CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args); 1245 1246 return ResTy; 1247 } 1248 1249 static bool 1250 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD, 1251 const ASTContext &Context) { 1252 QualType T = FD->getReturnType(); 1253 // Avoid the optimization for functions that return a record type with a 1254 // trivial destructor or another trivially copyable type. 1255 if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) { 1256 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) 1257 return !ClassDecl->hasTrivialDestructor(); 1258 } 1259 return !T.isTriviallyCopyableType(Context); 1260 } 1261 1262 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, 1263 const CGFunctionInfo &FnInfo) { 1264 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 1265 CurGD = GD; 1266 1267 FunctionArgList Args; 1268 QualType ResTy = BuildFunctionArgList(GD, Args); 1269 1270 // Check if we should generate debug info for this function. 1271 if (FD->hasAttr<NoDebugAttr>()) 1272 DebugInfo = nullptr; // disable debug info indefinitely for this function 1273 1274 // The function might not have a body if we're generating thunks for a 1275 // function declaration. 1276 SourceRange BodyRange; 1277 if (Stmt *Body = FD->getBody()) 1278 BodyRange = Body->getSourceRange(); 1279 else 1280 BodyRange = FD->getLocation(); 1281 CurEHLocation = BodyRange.getEnd(); 1282 1283 // Use the location of the start of the function to determine where 1284 // the function definition is located. By default use the location 1285 // of the declaration as the location for the subprogram. A function 1286 // may lack a declaration in the source code if it is created by code 1287 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk). 1288 SourceLocation Loc = FD->getLocation(); 1289 1290 // If this is a function specialization then use the pattern body 1291 // as the location for the function. 1292 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern()) 1293 if (SpecDecl->hasBody(SpecDecl)) 1294 Loc = SpecDecl->getLocation(); 1295 1296 Stmt *Body = FD->getBody(); 1297 1298 // Initialize helper which will detect jumps which can cause invalid lifetime 1299 // markers. 1300 if (Body && ShouldEmitLifetimeMarkers) 1301 Bypasses.Init(Body); 1302 1303 // Emit the standard function prologue. 1304 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin()); 1305 1306 // Generate the body of the function. 
1307 PGO.assignRegionCounters(GD, CurFn); 1308 if (isa<CXXDestructorDecl>(FD)) 1309 EmitDestructorBody(Args); 1310 else if (isa<CXXConstructorDecl>(FD)) 1311 EmitConstructorBody(Args); 1312 else if (getLangOpts().CUDA && 1313 !getLangOpts().CUDAIsDevice && 1314 FD->hasAttr<CUDAGlobalAttr>()) 1315 CGM.getCUDARuntime().emitDeviceStub(*this, Args); 1316 else if (isa<CXXMethodDecl>(FD) && 1317 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) { 1318 // The lambda static invoker function is special, because it forwards or 1319 // clones the body of the function call operator (but is actually static). 1320 EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD)); 1321 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) && 1322 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() || 1323 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) { 1324 // Implicit copy-assignment gets the same special treatment as implicit 1325 // copy-constructors. 1326 emitImplicitAssignmentOperatorBody(Args); 1327 } else if (Body) { 1328 EmitFunctionBody(Body); 1329 } else 1330 llvm_unreachable("no definition for emitted function"); 1331 1332 // C++11 [stmt.return]p2: 1333 // Flowing off the end of a function [...] results in undefined behavior in 1334 // a value-returning function. 1335 // C11 6.9.1p12: 1336 // If the '}' that terminates a function is reached, and the value of the 1337 // function call is used by the caller, the behavior is undefined. 1338 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock && 1339 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) { 1340 bool ShouldEmitUnreachable = 1341 CGM.getCodeGenOpts().StrictReturn || 1342 shouldUseUndefinedBehaviorReturnOptimization(FD, getContext()); 1343 if (SanOpts.has(SanitizerKind::Return)) { 1344 SanitizerScope SanScope(this); 1345 llvm::Value *IsFalse = Builder.getFalse(); 1346 EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return), 1347 SanitizerHandler::MissingReturn, 1348 EmitCheckSourceLocation(FD->getLocation()), None); 1349 } else if (ShouldEmitUnreachable) { 1350 if (CGM.getCodeGenOpts().OptimizationLevel == 0) 1351 EmitTrapCall(llvm::Intrinsic::trap); 1352 } 1353 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) { 1354 Builder.CreateUnreachable(); 1355 Builder.ClearInsertionPoint(); 1356 } 1357 } 1358 1359 // Emit the standard function epilogue. 1360 FinishFunction(BodyRange.getEnd()); 1361 1362 // If we haven't marked the function nothrow through other means, do 1363 // a quick pass now to see if we can. 1364 if (!CurFn->doesNotThrow()) 1365 TryMarkNoThrow(CurFn); 1366 } 1367 1368 /// ContainsLabel - Return true if the statement contains a label in it. If 1369 /// this statement is not executed normally, it not containing a label means 1370 /// that we can just remove the code. 1371 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { 1372 // Null statement, not a label! 1373 if (!S) return false; 1374 1375 // If this is a label, we have to emit the code, consider something like: 1376 // if (0) { ... foo: bar(); } goto foo; 1377 // 1378 // TODO: If anyone cared, we could track __label__'s, since we know that you 1379 // can't jump to one from outside their declared region. 1380 if (isa<LabelStmt>(S)) 1381 return true; 1382 1383 // If this is a case/default statement, and we haven't seen a switch, we have 1384 // to emit the code. 
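  // The classic instance (illustrative) is Duff's device, where `case`
  // labels of an enclosing switch appear inside a nested statement:
  //
  //   switch (count % 2) {
  //   case 0: do { *to = *from++;
  //   case 1:      *to = *from++;
  //           } while (--count > 0);
  //   }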
1385 if (isa<SwitchCase>(S) && !IgnoreCaseStmts) 1386 return true; 1387 1388 // If this is a switch statement, we want to ignore cases below it. 1389 if (isa<SwitchStmt>(S)) 1390 IgnoreCaseStmts = true; 1391 1392 // Scan subexpressions for verboten labels. 1393 for (const Stmt *SubStmt : S->children()) 1394 if (ContainsLabel(SubStmt, IgnoreCaseStmts)) 1395 return true; 1396 1397 return false; 1398 } 1399 1400 /// containsBreak - Return true if the statement contains a break out of it. 1401 /// If the statement (recursively) contains a switch or loop with a break 1402 /// inside of it, this is fine. 1403 bool CodeGenFunction::containsBreak(const Stmt *S) { 1404 // Null statement, not a label! 1405 if (!S) return false; 1406 1407 // If this is a switch or loop that defines its own break scope, then we can 1408 // include it and anything inside of it. 1409 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) || 1410 isa<ForStmt>(S)) 1411 return false; 1412 1413 if (isa<BreakStmt>(S)) 1414 return true; 1415 1416 // Scan subexpressions for verboten breaks. 1417 for (const Stmt *SubStmt : S->children()) 1418 if (containsBreak(SubStmt)) 1419 return true; 1420 1421 return false; 1422 } 1423 1424 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) { 1425 if (!S) return false; 1426 1427 // Some statement kinds add a scope and thus never add a decl to the current 1428 // scope. Note, this list is longer than the list of statements that might 1429 // have an unscoped decl nested within them, but this way is conservatively 1430 // correct even if more statement kinds are added. 1431 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) || 1432 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) || 1433 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) || 1434 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S)) 1435 return false; 1436 1437 if (isa<DeclStmt>(S)) 1438 return true; 1439 1440 for (const Stmt *SubStmt : S->children()) 1441 if (mightAddDeclToScope(SubStmt)) 1442 return true; 1443 1444 return false; 1445 } 1446 1447 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 1448 /// to a constant, or if it does but contains a label, return false. If it 1449 /// constant folds return true and set the boolean result in Result. 1450 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, 1451 bool &ResultBool, 1452 bool AllowLabels) { 1453 llvm::APSInt ResultInt; 1454 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels)) 1455 return false; 1456 1457 ResultBool = ResultInt.getBoolValue(); 1458 return true; 1459 } 1460 1461 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 1462 /// to a constant, or if it does but contains a label, return false. If it 1463 /// constant folds return true and set the folded value. 1464 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, 1465 llvm::APSInt &ResultInt, 1466 bool AllowLabels) { 1467 // FIXME: Rename and handle conversion of other evaluatable things 1468 // to bool. 1469 Expr::EvalResult Result; 1470 if (!Cond->EvaluateAsInt(Result, getContext())) 1471 return false; // Not foldable, not integer or not fully evaluatable. 1472 1473 llvm::APSInt Int = Result.Val.getInt(); 1474 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond)) 1475 return false; // Contains a label. 1476 1477 ResultInt = Int; 1478 return true; 1479 } 1480 1481 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. 
1481 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1482 /// statement) to the specified blocks. Depending on the form of the condition,
1483 /// this may simplify the branches that are emitted for the conditional.
1484 /// \param LH The value of the likelihood attribute on the True branch.
1485 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1486                                            llvm::BasicBlock *TrueBlock,
1487                                            llvm::BasicBlock *FalseBlock,
1488                                            uint64_t TrueCount,
1489                                            Stmt::Likelihood LH) {
1490   Cond = Cond->IgnoreParens();
1491
1492   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1493
1494     // Handle X && Y in a condition.
1495     if (CondBOp->getOpcode() == BO_LAnd) {
1496       // If we have "1 && X", simplify the code. "0 && X" would have constant
1497       // folded if the case was simple enough.
1498       bool ConstantBool = false;
1499       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1500           ConstantBool) {
1501         // br(1 && X) -> br(X).
1502         incrementProfileCounter(CondBOp);
1503         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1504                                     TrueCount, LH);
1505       }
1506
1507       // If we have "X && 1", simplify the code to use an uncond branch.
1508       // "X && 0" would have been constant folded to 0.
1509       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1510           ConstantBool) {
1511         // br(X && 1) -> br(X).
1512         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1513                                     TrueCount, LH);
1514       }
1515
1516       // Emit the LHS as a conditional. If the LHS conditional is false, we
1517       // want to jump to the FalseBlock.
1518       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1519       // The counter tells us how often we evaluate RHS, and all of TrueCount
1520       // can be propagated to that branch.
1521       uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1522
1523       ConditionalEvaluation eval(*this);
1524       {
1525         ApplyDebugLocation DL(*this, Cond);
1526         // Propagate the likelihood attribute like __builtin_expect
1527         // __builtin_expect(X && Y, 1) -> X and Y are likely
1528         // __builtin_expect(X && Y, 0) -> only Y is unlikely
1529         EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1530                              LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1531         EmitBlock(LHSTrue);
1532       }
1533
1534       incrementProfileCounter(CondBOp);
1535       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1536
1537       // Any temporaries created here are conditional.
1538       eval.begin(*this);
1539       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount,
1540                            LH);
1541       eval.end(*this);
1542
1543       return;
1544     }
1545
1546     if (CondBOp->getOpcode() == BO_LOr) {
1547       // If we have "0 || X", simplify the code. "1 || X" would have constant
1548       // folded if the case was simple enough.
1549       bool ConstantBool = false;
1550       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1551           !ConstantBool) {
1552         // br(0 || X) -> br(X).
1553         incrementProfileCounter(CondBOp);
1554         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1555                                     TrueCount, LH);
1556       }
1557
1558       // If we have "X || 0", simplify the code to use an uncond branch.
1559       // "X || 1" would have been constant folded to 1.
1560       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1561           !ConstantBool) {
1562         // br(X || 0) -> br(X).
1563         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1564                                     TrueCount, LH);
1565       }
1566
1567       // Emit the LHS as a conditional. If the LHS conditional is true, we
1568       // want to jump to the TrueBlock.
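      // Illustrative CFG for 'br(x || y, T, F)' as built below (the block name
      // matches the createBasicBlock call):
      //   <current block>: br x, T, lor.lhs.false
      //   lor.lhs.false:   br y, T, F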
1569       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1570       // We have the count for entry to the RHS and for the whole expression
1571       // being true, so we can divvy up the true count between the short
1572       // circuit and the RHS.
1573       uint64_t LHSCount =
1574           getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1575       uint64_t RHSCount = TrueCount - LHSCount;
1576
1577       ConditionalEvaluation eval(*this);
1578       {
1579         // Propagate the likelihood attribute like __builtin_expect
1580         // __builtin_expect(X || Y, 1) -> only Y is likely
1581         // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1582         ApplyDebugLocation DL(*this, Cond);
1583         EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1584                              LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1585         EmitBlock(LHSFalse);
1586       }
1587
1588       incrementProfileCounter(CondBOp);
1589       setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1590
1591       // Any temporaries created here are conditional.
1592       eval.begin(*this);
1593       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount,
1594                            LH);
1595
1596       eval.end(*this);
1597
1598       return;
1599     }
1600   }
1601
1602   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1603     // br(!x, t, f) -> br(x, f, t)
1604     if (CondUOp->getOpcode() == UO_LNot) {
1605       // Negate the count.
1606       uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1607       // The values of the enum are chosen to make this negation possible.
1608       LH = static_cast<Stmt::Likelihood>(-LH);
1609       // Negate the condition and swap the destination blocks.
1610       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1611                                   FalseCount, LH);
1612     }
1613   }
1614
1615   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1616     // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1617     llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1618     llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1619
1620     // The ConditionalOperator itself has no likelihood information for its
1621     // true and false branches. This matches the behavior of __builtin_expect.
1622     ConditionalEvaluation cond(*this);
1623     EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1624                          getProfileCount(CondOp), Stmt::LH_None);
1625
1626     // When computing PGO branch weights, we only know the overall count for
1627     // the true block. This code is essentially doing tail duplication of the
1628     // naive code-gen, introducing new edges for which counts are not
1629     // available. Divide the counts proportionally between the LHS and RHS of
1630     // the conditional operator.
1631     uint64_t LHSScaledTrueCount = 0;
1632     if (TrueCount) {
1633       double LHSRatio =
1634           getProfileCount(CondOp) / (double)getCurrentProfileCount();
1635       LHSScaledTrueCount = TrueCount * LHSRatio;
1636     }
1637
1638     cond.begin(*this);
1639     EmitBlock(LHSBlock);
1640     incrementProfileCounter(CondOp);
1641     {
1642       ApplyDebugLocation DL(*this, Cond);
1643       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1644                            LHSScaledTrueCount, LH);
1645     }
1646     cond.end(*this);
1647
1648     cond.begin(*this);
1649     EmitBlock(RHSBlock);
1650     EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1651                          TrueCount - LHSScaledTrueCount, LH);
1652     cond.end(*this);
1653
1654     return;
1655   }
1656
1657   if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1658     // Conditional operator handling can give us a throw expression as a
1659     // condition for a case like:
1660     //   br(c ?
throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f) 1661 // Fold this to: 1662 // br(c, throw x, br(y, t, f)) 1663 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false); 1664 return; 1665 } 1666 1667 // If the branch has a condition wrapped by __builtin_unpredictable, 1668 // create metadata that specifies that the branch is unpredictable. 1669 // Don't bother if not optimizing because that metadata would not be used. 1670 llvm::MDNode *Unpredictable = nullptr; 1671 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts()); 1672 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) { 1673 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl()); 1674 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) { 1675 llvm::MDBuilder MDHelper(getLLVMContext()); 1676 Unpredictable = MDHelper.createUnpredictable(); 1677 } 1678 } 1679 1680 llvm::MDNode *Weights = createBranchWeights(LH); 1681 if (!Weights) { 1682 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount); 1683 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount); 1684 } 1685 1686 // Emit the code with the fully general case. 1687 llvm::Value *CondV; 1688 { 1689 ApplyDebugLocation DL(*this, Cond); 1690 CondV = EvaluateExprAsBool(Cond); 1691 } 1692 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable); 1693 } 1694 1695 /// ErrorUnsupported - Print out an error that codegen doesn't support the 1696 /// specified stmt yet. 1697 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) { 1698 CGM.ErrorUnsupported(S, Type); 1699 } 1700 1701 /// emitNonZeroVLAInit - Emit the "zero" initialization of a 1702 /// variable-length array whose elements have a non-zero bit-pattern. 1703 /// 1704 /// \param baseType the inner-most element type of the array 1705 /// \param src - a char* pointing to the bit-pattern for a single 1706 /// base element of the array 1707 /// \param sizeInChars - the total size of the VLA, in chars 1708 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, 1709 Address dest, Address src, 1710 llvm::Value *sizeInChars) { 1711 CGBuilderTy &Builder = CGF.Builder; 1712 1713 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType); 1714 llvm::Value *baseSizeInChars 1715 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity()); 1716 1717 Address begin = 1718 Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin"); 1719 llvm::Value *end = 1720 Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end"); 1721 1722 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock(); 1723 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop"); 1724 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont"); 1725 1726 // Make a loop over the VLA. C99 guarantees that the VLA element 1727 // count must be nonzero. 1728 CGF.EmitBlock(loopBB); 1729 1730 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur"); 1731 cur->addIncoming(begin.getPointer(), originBB); 1732 1733 CharUnits curAlign = 1734 dest.getAlignment().alignmentOfArrayElement(baseSize); 1735 1736 // memcpy the individual element bit-pattern. 1737 Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars, 1738 /*volatile*/ false); 1739 1740 // Go to the next element. 1741 llvm::Value *next = 1742 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next"); 1743 1744 // Leave if that's the end of the VLA. 
1745 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone"); 1746 Builder.CreateCondBr(done, contBB, loopBB); 1747 cur->addIncoming(next, loopBB); 1748 1749 CGF.EmitBlock(contBB); 1750 } 1751 1752 void 1753 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) { 1754 // Ignore empty classes in C++. 1755 if (getLangOpts().CPlusPlus) { 1756 if (const RecordType *RT = Ty->getAs<RecordType>()) { 1757 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty()) 1758 return; 1759 } 1760 } 1761 1762 // Cast the dest ptr to the appropriate i8 pointer type. 1763 if (DestPtr.getElementType() != Int8Ty) 1764 DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty); 1765 1766 // Get size and alignment info for this aggregate. 1767 CharUnits size = getContext().getTypeSizeInChars(Ty); 1768 1769 llvm::Value *SizeVal; 1770 const VariableArrayType *vla; 1771 1772 // Don't bother emitting a zero-byte memset. 1773 if (size.isZero()) { 1774 // But note that getTypeInfo returns 0 for a VLA. 1775 if (const VariableArrayType *vlaType = 1776 dyn_cast_or_null<VariableArrayType>( 1777 getContext().getAsArrayType(Ty))) { 1778 auto VlaSize = getVLASize(vlaType); 1779 SizeVal = VlaSize.NumElts; 1780 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type); 1781 if (!eltSize.isOne()) 1782 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize)); 1783 vla = vlaType; 1784 } else { 1785 return; 1786 } 1787 } else { 1788 SizeVal = CGM.getSize(size); 1789 vla = nullptr; 1790 } 1791 1792 // If the type contains a pointer to data member we can't memset it to zero. 1793 // Instead, create a null constant and copy it to the destination. 1794 // TODO: there are other patterns besides zero that we can usefully memset, 1795 // like -1, which happens to be the pattern used by member-pointers. 1796 if (!CGM.getTypes().isZeroInitializable(Ty)) { 1797 // For a VLA, emit a single element, then splat that over the VLA. 1798 if (vla) Ty = getContext().getBaseElementType(vla); 1799 1800 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty); 1801 1802 llvm::GlobalVariable *NullVariable = 1803 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(), 1804 /*isConstant=*/true, 1805 llvm::GlobalVariable::PrivateLinkage, 1806 NullConstant, Twine()); 1807 CharUnits NullAlign = DestPtr.getAlignment(); 1808 NullVariable->setAlignment(NullAlign.getAsAlign()); 1809 Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()), 1810 NullAlign); 1811 1812 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal); 1813 1814 // Get and call the appropriate llvm.memcpy overload. 1815 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false); 1816 return; 1817 } 1818 1819 // Otherwise, just memset the whole thing to zero. This is legal 1820 // because in LLVM, all default initializers (other than the ones we just 1821 // handled above) are guaranteed to have a bit pattern of all zeros. 1822 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false); 1823 } 1824 1825 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) { 1826 // Make sure that there is a block for the indirect goto. 1827 if (!IndirectBranch) 1828 GetIndirectGotoBlock(); 1829 1830 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock(); 1831 1832 // Make sure the indirect branch includes all of the address-taken blocks. 
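  // Illustrative source construct (hypothetical user code) that reaches this
  // point via the GNU address-of-label extension:
  //   static void *dests[] = { &&again, &&done };
  //   goto *dests[i];
  //   again: ...; done: ...;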
1833 IndirectBranch->addDestination(BB); 1834 return llvm::BlockAddress::get(CurFn, BB); 1835 } 1836 1837 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() { 1838 // If we already made the indirect branch for indirect goto, return its block. 1839 if (IndirectBranch) return IndirectBranch->getParent(); 1840 1841 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto")); 1842 1843 // Create the PHI node that indirect gotos will add entries to. 1844 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0, 1845 "indirect.goto.dest"); 1846 1847 // Create the indirect branch instruction. 1848 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal); 1849 return IndirectBranch->getParent(); 1850 } 1851 1852 /// Computes the length of an array in elements, as well as the base 1853 /// element type and a properly-typed first element pointer. 1854 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType, 1855 QualType &baseType, 1856 Address &addr) { 1857 const ArrayType *arrayType = origArrayType; 1858 1859 // If it's a VLA, we have to load the stored size. Note that 1860 // this is the size of the VLA in bytes, not its size in elements. 1861 llvm::Value *numVLAElements = nullptr; 1862 if (isa<VariableArrayType>(arrayType)) { 1863 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts; 1864 1865 // Walk into all VLAs. This doesn't require changes to addr, 1866 // which has type T* where T is the first non-VLA element type. 1867 do { 1868 QualType elementType = arrayType->getElementType(); 1869 arrayType = getContext().getAsArrayType(elementType); 1870 1871 // If we only have VLA components, 'addr' requires no adjustment. 1872 if (!arrayType) { 1873 baseType = elementType; 1874 return numVLAElements; 1875 } 1876 } while (isa<VariableArrayType>(arrayType)); 1877 1878 // We get out here only if we find a constant array type 1879 // inside the VLA. 1880 } 1881 1882 // We have some number of constant-length arrays, so addr should 1883 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks 1884 // down to the first element of addr. 1885 SmallVector<llvm::Value*, 8> gepIndices; 1886 1887 // GEP down to the array type. 1888 llvm::ConstantInt *zero = Builder.getInt32(0); 1889 gepIndices.push_back(zero); 1890 1891 uint64_t countFromCLAs = 1; 1892 QualType eltType; 1893 1894 llvm::ArrayType *llvmArrayType = 1895 dyn_cast<llvm::ArrayType>(addr.getElementType()); 1896 while (llvmArrayType) { 1897 assert(isa<ConstantArrayType>(arrayType)); 1898 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue() 1899 == llvmArrayType->getNumElements()); 1900 1901 gepIndices.push_back(zero); 1902 countFromCLAs *= llvmArrayType->getNumElements(); 1903 eltType = arrayType->getElementType(); 1904 1905 llvmArrayType = 1906 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType()); 1907 arrayType = getContext().getAsArrayType(arrayType->getElementType()); 1908 assert((!llvmArrayType || arrayType) && 1909 "LLVM and Clang types are out-of-synch"); 1910 } 1911 1912 if (arrayType) { 1913 // From this point onwards, the Clang array type has been emitted 1914 // as some other type (probably a packed struct). Compute the array 1915 // size, and just emit the 'begin' expression as a bitcast. 
1916 while (arrayType) { 1917 countFromCLAs *= 1918 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue(); 1919 eltType = arrayType->getElementType(); 1920 arrayType = getContext().getAsArrayType(eltType); 1921 } 1922 1923 llvm::Type *baseType = ConvertType(eltType); 1924 addr = Builder.CreateElementBitCast(addr, baseType, "array.begin"); 1925 } else { 1926 // Create the actual GEP. 1927 addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(), 1928 gepIndices, "array.begin"), 1929 addr.getAlignment()); 1930 } 1931 1932 baseType = eltType; 1933 1934 llvm::Value *numElements 1935 = llvm::ConstantInt::get(SizeTy, countFromCLAs); 1936 1937 // If we had any VLA dimensions, factor them in. 1938 if (numVLAElements) 1939 numElements = Builder.CreateNUWMul(numVLAElements, numElements); 1940 1941 return numElements; 1942 } 1943 1944 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) { 1945 const VariableArrayType *vla = getContext().getAsVariableArrayType(type); 1946 assert(vla && "type was not a variable array type!"); 1947 return getVLASize(vla); 1948 } 1949 1950 CodeGenFunction::VlaSizePair 1951 CodeGenFunction::getVLASize(const VariableArrayType *type) { 1952 // The number of elements so far; always size_t. 1953 llvm::Value *numElements = nullptr; 1954 1955 QualType elementType; 1956 do { 1957 elementType = type->getElementType(); 1958 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()]; 1959 assert(vlaSize && "no size for VLA!"); 1960 assert(vlaSize->getType() == SizeTy); 1961 1962 if (!numElements) { 1963 numElements = vlaSize; 1964 } else { 1965 // It's undefined behavior if this wraps around, so mark it that way. 1966 // FIXME: Teach -fsanitize=undefined to trap this. 1967 numElements = Builder.CreateNUWMul(numElements, vlaSize); 1968 } 1969 } while ((type = getContext().getAsVariableArrayType(elementType))); 1970 1971 return { numElements, elementType }; 1972 } 1973 1974 CodeGenFunction::VlaSizePair 1975 CodeGenFunction::getVLAElements1D(QualType type) { 1976 const VariableArrayType *vla = getContext().getAsVariableArrayType(type); 1977 assert(vla && "type was not a variable array type!"); 1978 return getVLAElements1D(vla); 1979 } 1980 1981 CodeGenFunction::VlaSizePair 1982 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) { 1983 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()]; 1984 assert(VlaSize && "no size for VLA!"); 1985 assert(VlaSize->getType() == SizeTy); 1986 return { VlaSize, Vla->getElementType() }; 1987 } 1988 1989 void CodeGenFunction::EmitVariablyModifiedType(QualType type) { 1990 assert(type->isVariablyModifiedType() && 1991 "Must pass variably modified type to EmitVLASizes!"); 1992 1993 EnsureInsertPoint(); 1994 1995 // We're going to walk down into the type and look for VLA 1996 // expressions. 1997 do { 1998 assert(type->isVariablyModifiedType()); 1999 2000 const Type *ty = type.getTypePtr(); 2001 switch (ty->getTypeClass()) { 2002 2003 #define TYPE(Class, Base) 2004 #define ABSTRACT_TYPE(Class, Base) 2005 #define NON_CANONICAL_TYPE(Class, Base) 2006 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 2007 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) 2008 #include "clang/AST/TypeNodes.inc" 2009 llvm_unreachable("unexpected dependent type!"); 2010 2011 // These types are never variably-modified. 
2012 case Type::Builtin: 2013 case Type::Complex: 2014 case Type::Vector: 2015 case Type::ExtVector: 2016 case Type::ConstantMatrix: 2017 case Type::Record: 2018 case Type::Enum: 2019 case Type::Elaborated: 2020 case Type::TemplateSpecialization: 2021 case Type::ObjCTypeParam: 2022 case Type::ObjCObject: 2023 case Type::ObjCInterface: 2024 case Type::ObjCObjectPointer: 2025 case Type::ExtInt: 2026 llvm_unreachable("type class is never variably-modified!"); 2027 2028 case Type::Adjusted: 2029 type = cast<AdjustedType>(ty)->getAdjustedType(); 2030 break; 2031 2032 case Type::Decayed: 2033 type = cast<DecayedType>(ty)->getPointeeType(); 2034 break; 2035 2036 case Type::Pointer: 2037 type = cast<PointerType>(ty)->getPointeeType(); 2038 break; 2039 2040 case Type::BlockPointer: 2041 type = cast<BlockPointerType>(ty)->getPointeeType(); 2042 break; 2043 2044 case Type::LValueReference: 2045 case Type::RValueReference: 2046 type = cast<ReferenceType>(ty)->getPointeeType(); 2047 break; 2048 2049 case Type::MemberPointer: 2050 type = cast<MemberPointerType>(ty)->getPointeeType(); 2051 break; 2052 2053 case Type::ConstantArray: 2054 case Type::IncompleteArray: 2055 // Losing element qualification here is fine. 2056 type = cast<ArrayType>(ty)->getElementType(); 2057 break; 2058 2059 case Type::VariableArray: { 2060 // Losing element qualification here is fine. 2061 const VariableArrayType *vat = cast<VariableArrayType>(ty); 2062 2063 // Unknown size indication requires no size computation. 2064 // Otherwise, evaluate and record it. 2065 if (const Expr *size = vat->getSizeExpr()) { 2066 // It's possible that we might have emitted this already, 2067 // e.g. with a typedef and a pointer to it. 2068 llvm::Value *&entry = VLASizeMap[size]; 2069 if (!entry) { 2070 llvm::Value *Size = EmitScalarExpr(size); 2071 2072 // C11 6.7.6.2p5: 2073 // If the size is an expression that is not an integer constant 2074 // expression [...] each time it is evaluated it shall have a value 2075 // greater than zero. 2076 if (SanOpts.has(SanitizerKind::VLABound) && 2077 size->getType()->isSignedIntegerType()) { 2078 SanitizerScope SanScope(this); 2079 llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType()); 2080 llvm::Constant *StaticArgs[] = { 2081 EmitCheckSourceLocation(size->getBeginLoc()), 2082 EmitCheckTypeDescriptor(size->getType())}; 2083 EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero), 2084 SanitizerKind::VLABound), 2085 SanitizerHandler::VLABoundNotPositive, StaticArgs, Size); 2086 } 2087 2088 // Always zexting here would be wrong if it weren't 2089 // undefined behavior to have a negative bound. 2090 entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false); 2091 } 2092 } 2093 type = vat->getElementType(); 2094 break; 2095 } 2096 2097 case Type::FunctionProto: 2098 case Type::FunctionNoProto: 2099 type = cast<FunctionType>(ty)->getReturnType(); 2100 break; 2101 2102 case Type::Paren: 2103 case Type::TypeOf: 2104 case Type::UnaryTransform: 2105 case Type::Attributed: 2106 case Type::SubstTemplateTypeParm: 2107 case Type::MacroQualified: 2108 // Keep walking after single level desugaring. 2109 type = type.getSingleStepDesugaredType(getContext()); 2110 break; 2111 2112 case Type::Typedef: 2113 case Type::Decltype: 2114 case Type::Auto: 2115 case Type::DeducedTemplateSpecialization: 2116 // Stop walking: nothing to do. 2117 return; 2118 2119 case Type::TypeOfExpr: 2120 // Stop walking: emit typeof expression. 
2121 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr()); 2122 return; 2123 2124 case Type::Atomic: 2125 type = cast<AtomicType>(ty)->getValueType(); 2126 break; 2127 2128 case Type::Pipe: 2129 type = cast<PipeType>(ty)->getElementType(); 2130 break; 2131 } 2132 } while (type->isVariablyModifiedType()); 2133 } 2134 2135 Address CodeGenFunction::EmitVAListRef(const Expr* E) { 2136 if (getContext().getBuiltinVaListType()->isArrayType()) 2137 return EmitPointerWithAlignment(E); 2138 return EmitLValue(E).getAddress(*this); 2139 } 2140 2141 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) { 2142 return EmitLValue(E).getAddress(*this); 2143 } 2144 2145 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E, 2146 const APValue &Init) { 2147 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!"); 2148 if (CGDebugInfo *Dbg = getDebugInfo()) 2149 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) 2150 Dbg->EmitGlobalVariable(E->getDecl(), Init); 2151 } 2152 2153 CodeGenFunction::PeepholeProtection 2154 CodeGenFunction::protectFromPeepholes(RValue rvalue) { 2155 // At the moment, the only aggressive peephole we do in IR gen 2156 // is trunc(zext) folding, but if we add more, we can easily 2157 // extend this protection. 2158 2159 if (!rvalue.isScalar()) return PeepholeProtection(); 2160 llvm::Value *value = rvalue.getScalarVal(); 2161 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection(); 2162 2163 // Just make an extra bitcast. 2164 assert(HaveInsertPoint()); 2165 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "", 2166 Builder.GetInsertBlock()); 2167 2168 PeepholeProtection protection; 2169 protection.Inst = inst; 2170 return protection; 2171 } 2172 2173 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) { 2174 if (!protection.Inst) return; 2175 2176 // In theory, we could try to duplicate the peepholes now, but whatever. 
2177 protection.Inst->eraseFromParent(); 2178 } 2179 2180 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, 2181 QualType Ty, SourceLocation Loc, 2182 SourceLocation AssumptionLoc, 2183 llvm::Value *Alignment, 2184 llvm::Value *OffsetValue) { 2185 if (Alignment->getType() != IntPtrTy) 2186 Alignment = 2187 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align"); 2188 if (OffsetValue && OffsetValue->getType() != IntPtrTy) 2189 OffsetValue = 2190 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset"); 2191 llvm::Value *TheCheck = nullptr; 2192 if (SanOpts.has(SanitizerKind::Alignment)) { 2193 llvm::Value *PtrIntValue = 2194 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint"); 2195 2196 if (OffsetValue) { 2197 bool IsOffsetZero = false; 2198 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue)) 2199 IsOffsetZero = CI->isZero(); 2200 2201 if (!IsOffsetZero) 2202 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr"); 2203 } 2204 2205 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0); 2206 llvm::Value *Mask = 2207 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1)); 2208 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr"); 2209 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond"); 2210 } 2211 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption( 2212 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue); 2213 2214 if (!SanOpts.has(SanitizerKind::Alignment)) 2215 return; 2216 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment, 2217 OffsetValue, TheCheck, Assumption); 2218 } 2219 2220 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue, 2221 const Expr *E, 2222 SourceLocation AssumptionLoc, 2223 llvm::Value *Alignment, 2224 llvm::Value *OffsetValue) { 2225 if (auto *CE = dyn_cast<CastExpr>(E)) 2226 E = CE->getSubExprAsWritten(); 2227 QualType Ty = E->getType(); 2228 SourceLocation Loc = E->getExprLoc(); 2229 2230 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment, 2231 OffsetValue); 2232 } 2233 2234 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn, 2235 llvm::Value *AnnotatedVal, 2236 StringRef AnnotationStr, 2237 SourceLocation Location, 2238 const AnnotateAttr *Attr) { 2239 SmallVector<llvm::Value *, 5> Args = { 2240 AnnotatedVal, 2241 Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy), 2242 Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy), 2243 CGM.EmitAnnotationLineNo(Location), 2244 }; 2245 if (Attr) 2246 Args.push_back(CGM.EmitAnnotationArgs(Attr)); 2247 return Builder.CreateCall(AnnotationFn, Args); 2248 } 2249 2250 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) { 2251 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute"); 2252 // FIXME We create a new bitcast for every annotation because that's what 2253 // llvm-gcc was doing. 
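  // Illustrative trigger (hypothetical user code):
  //   int counter __attribute__((annotate("observed")));
  // Each annotate attribute on the declaration lowers to one call to the
  // llvm.var.annotation intrinsic in the loop below.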
2254   for (const auto *I : D->specific_attrs<AnnotateAttr>())
2255     EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2256                        Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2257                        I->getAnnotation(), D->getLocation(), I);
2258 }
2259
2260 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2261                                               Address Addr) {
2262   assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2263   llvm::Value *V = Addr.getPointer();
2264   llvm::Type *VTy = V->getType();
2265   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2266                                        CGM.Int8PtrTy);
2267
2268   for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2269     // FIXME Always emit the cast inst so we can differentiate between
2270     // annotation on the first field of a struct and annotation on the struct
2271     // itself.
2272     if (VTy != CGM.Int8PtrTy)
2273       V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2274     V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2275     V = Builder.CreateBitCast(V, VTy);
2276   }
2277
2278   return Address(V, Addr.getAlignment());
2279 }
2280
2281 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2282
2283 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2284     : CGF(CGF) {
2285   assert(!CGF->IsSanitizerScope);
2286   CGF->IsSanitizerScope = true;
2287 }
2288
2289 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2290   CGF->IsSanitizerScope = false;
2291 }
2292
2293 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2294                                    const llvm::Twine &Name,
2295                                    llvm::BasicBlock *BB,
2296                                    llvm::BasicBlock::iterator InsertPt) const {
2297   LoopStack.InsertHelper(I);
2298   if (IsSanitizerScope)
2299     CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2300 }
2301
2302 void CGBuilderInserter::InsertHelper(
2303     llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2304     llvm::BasicBlock::iterator InsertPt) const {
2305   llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2306   if (CGF)
2307     CGF->InsertHelper(I, Name, BB, InsertPt);
2308 }
2309
2310 static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2311                                 CodeGenModule &CGM, const FunctionDecl *FD,
2312                                 std::string &FirstMissing) {
2313   // If there aren't any required features listed, there is nothing to check.
2314   if (ReqFeatures.empty())
2315     return false;
2316
2317   // Now build up the set of caller features and verify that all the required
2318   // features are there.
2319   llvm::StringMap<bool> CallerFeatureMap;
2320   CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2321
2322   // Return true only if every required feature is present in the caller; a
2323   // feature written as "a|b" is satisfied if either alternative is enabled.
2324   return std::all_of(
2325       ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2326         SmallVector<StringRef, 1> OrFeatures;
2327         Feature.split(OrFeatures, '|');
2328         return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2329           if (!CallerFeatureMap.lookup(Feature)) {
2330             FirstMissing = Feature.str();
2331             return false;
2332           }
2333           return true;
2334         });
2335       });
2336 }
2337
2338 // Emits an error if we don't have a valid set of target features for the
2339 // called function.
2340 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2341                                           const FunctionDecl *TargetDecl) {
2342   return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2343 }
2344
2345 // Emits an error if we don't have a valid set of target features for the
2346 // called function.
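// For example (illustrative): a caller compiled without AVX2 that calls an
// always_inline callee declared __attribute__((target("avx2"))) is diagnosed
// here via err_function_needs_feature.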
2347 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2348                                           const FunctionDecl *TargetDecl) {
2349   // Early exit if this is an indirect call.
2350   if (!TargetDecl)
2351     return;
2352
2353   // Get the current enclosing function if it exists. If it doesn't
2354   // we can't check the target features anyhow.
2355   const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2356   if (!FD)
2357     return;
2358
2359   // Grab the required features for the call. For a builtin, these are listed
2360   // in the .td file with the default CPU; for an always_inline function, they
2361   // are any listed CPU and any listed features.
2362   unsigned BuiltinID = TargetDecl->getBuiltinID();
2363   std::string MissingFeature;
2364   if (BuiltinID) {
2365     SmallVector<StringRef, 1> ReqFeatures;
2366     const char *FeatureList =
2367         CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2368     // Return if the builtin doesn't have any required features.
2369     if (!FeatureList || StringRef(FeatureList) == "")
2370       return;
2371     StringRef(FeatureList).split(ReqFeatures, ',');
2372     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2373       CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2374           << TargetDecl->getDeclName()
2375           << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2376
2377   } else if (!TargetDecl->isMultiVersion() &&
2378              TargetDecl->hasAttr<TargetAttr>()) {
2379     // Get the required features for the callee.
2380
2381     const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2382     ParsedTargetAttr ParsedAttr =
2383         CGM.getContext().filterFunctionTargetAttrs(TD);
2384
2385     SmallVector<StringRef, 1> ReqFeatures;
2386     llvm::StringMap<bool> CalleeFeatureMap;
2387     CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2388
2389     for (const auto &F : ParsedAttr.Features) {
2390       if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2391         ReqFeatures.push_back(StringRef(F).substr(1));
2392     }
2393
2394     for (const auto &F : CalleeFeatureMap) {
2395       // Only positive features are "required".
2396       if (F.getValue())
2397         ReqFeatures.push_back(F.getKey());
2398     }
2399     if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2400       CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2401           << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2402   }
2403 }
2404
2405 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2406   if (!CGM.getCodeGenOpts().SanitizeStats)
2407     return;
2408
2409   llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2410   IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2411   CGM.getSanStats().create(IRB, SSK);
2412 }
2413
2414 llvm::Value *
2415 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2416   llvm::Value *Condition = nullptr;
2417
2418   if (!RO.Conditions.Architecture.empty())
2419     Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2420
2421   if (!RO.Conditions.Features.empty()) {
2422     llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2423     Condition = Condition ? Builder.CreateAnd(Condition, FeatureCond)
2424                           : FeatureCond;
2425   }
2426   return Condition;
2427 }
2428
2429 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2430                                              llvm::Function *Resolver,
2431                                              CGBuilderTy &Builder,
2432                                              llvm::Function *FuncToReturn,
2433                                              bool SupportsIFunc) {
2434   if (SupportsIFunc) {
2435     Builder.CreateRet(FuncToReturn);
2436     return;
2437   }
2438
2439   llvm::SmallVector<llvm::Value *, 10> Args;
2440   llvm::for_each(Resolver->args(),
2441                  [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2442
2443   llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2444   Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2445
2446   if (Resolver->getReturnType()->isVoidTy())
2447     Builder.CreateRetVoid();
2448   else
2449     Builder.CreateRet(Result);
2450 }
2451
2452 void CodeGenFunction::EmitMultiVersionResolver(
2453     llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2454   assert(getContext().getTargetInfo().getTriple().isX86() &&
2455          "Only implemented for x86 targets");
2456
2457   bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2458
2459   // The resolver function's entry block.
2460   llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2461   Builder.SetInsertPoint(CurBlock);
2462   EmitX86CpuInit();
2463
2464   for (const MultiVersionResolverOption &RO : Options) {
2465     Builder.SetInsertPoint(CurBlock);
2466     llvm::Value *Condition = FormResolverCondition(RO);
2467
2468     // The 'default' or 'generic' case.
2469     if (!Condition) {
2470       assert(&RO == Options.end() - 1 &&
2471              "Default or Generic case must be last");
2472       CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2473                                        SupportsIFunc);
2474       return;
2475     }
2476
2477     llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2478     CGBuilderTy RetBuilder(*this, RetBlock);
2479     CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2480                                      SupportsIFunc);
2481     CurBlock = createBasicBlock("resolver_else", Resolver);
2482     Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2483   }
2484
2485   // If no generic/default, emit an unreachable.
2486   Builder.SetInsertPoint(CurBlock);
2487   llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2488   TrapCall->setDoesNotReturn();
2489   TrapCall->setDoesNotThrow();
2490   Builder.CreateUnreachable();
2491   Builder.ClearInsertionPoint();
2492 }
2493
2494 // Loc - where the diagnostic will point, where in the source code this
2495 //  alignment has failed.
2496 // SecondaryLoc - if present (will be present if sufficiently different from
2497 //  Loc), the diagnostic will additionally emit a "Note:" pointing to this
2498 //  location. It should be the location where the
2499 //  __attribute__((assume_aligned)) attribute was written.
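// Illustrative trigger (hypothetical user code):
//   void *q = __builtin_assume_aligned(p, 64);
// With -fsanitize=alignment, the check emitted below fires at run time if 'p'
// is not in fact 64-byte aligned.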
2500 void CodeGenFunction::emitAlignmentAssumptionCheck(
2501     llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2502     SourceLocation SecondaryLoc, llvm::Value *Alignment,
2503     llvm::Value *OffsetValue, llvm::Value *TheCheck,
2504     llvm::Instruction *Assumption) {
2505   assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2506          cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2507              llvm::Intrinsic::getDeclaration(
2508                  Builder.GetInsertBlock()->getParent()->getParent(),
2509                  llvm::Intrinsic::assume) &&
2510          "Assumption should be a call to llvm.assume().");
2511   assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2512          "Assumption should be the last instruction of the basic block, "
2513          "since the basic block is still being generated.");
2514
2515   if (!SanOpts.has(SanitizerKind::Alignment))
2516     return;
2517
2518   // Don't check pointers to volatile data. The behavior here is implementation-
2519   // defined.
2520   if (Ty->getPointeeType().isVolatileQualified())
2521     return;
2522
2523   // We need to temporarily remove the assumption so we can insert the
2524   // sanitizer check before it; otherwise the check will be dropped by optimizations.
2525   Assumption->removeFromParent();
2526
2527   {
2528     SanitizerScope SanScope(this);
2529
2530     if (!OffsetValue)
2531       OffsetValue = Builder.getInt1(0); // no offset.
2532
2533     llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2534                                     EmitCheckSourceLocation(SecondaryLoc),
2535                                     EmitCheckTypeDescriptor(Ty)};
2536     llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2537                                   EmitCheckValue(Alignment),
2538                                   EmitCheckValue(OffsetValue)};
2539     EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2540               SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2541   }
2542
2543   // We are now in the (new, empty) "cont" basic block.
2544   // Reintroduce the assumption.
2545   Builder.Insert(Assumption);
2546   // FIXME: Assumption still has its original basic block as its Parent.
2547 }
2548
2549 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2550   if (CGDebugInfo *DI = getDebugInfo())
2551     return DI->SourceLocToDebugLoc(Location);
2552
2553   return llvm::DebugLoc();
2554 }
2555
2556 static Optional<std::pair<uint32_t, uint32_t>>
2557 getLikelihoodWeights(Stmt::Likelihood LH) {
2558   switch (LH) {
2559   case Stmt::LH_Unlikely:
2560     return std::pair<uint32_t, uint32_t>(llvm::UnlikelyBranchWeight,
2561                                          llvm::LikelyBranchWeight);
2562   case Stmt::LH_None:
2563     return None;
2564   case Stmt::LH_Likely:
2565     return std::pair<uint32_t, uint32_t>(llvm::LikelyBranchWeight,
2566                                          llvm::UnlikelyBranchWeight);
2567   }
2568   llvm_unreachable("Unknown Likelihood");
2569 }
2570
2571 llvm::MDNode *CodeGenFunction::createBranchWeights(Stmt::Likelihood LH) const {
2572   Optional<std::pair<uint32_t, uint32_t>> LHW = getLikelihoodWeights(LH);
2573   if (!LHW)
2574     return nullptr;
2575
2576   llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2577   return MDHelper.createBranchWeights(LHW->first, LHW->second);
2578 }
2579
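// Illustrative mapping (hypothetical user code): a branch written as
//   if (x) [[likely]] { ... }
// reaches createBranchWeights above with Stmt::LH_Likely and yields
// "branch_weights" profile metadata of (llvm::LikelyBranchWeight,
// llvm::UnlikelyBranchWeight).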