1 //===-- InstrProfiling.cpp - Frontend instrumentation based profiling -----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass lowers instrprof_* intrinsics emitted by a frontend for profiling. 10 // It also builds the data structures and initialization code needed for 11 // updating execution counts and emitting the profile at runtime. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Transforms/Instrumentation/InstrProfiling.h" 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/ADT/StringRef.h" 19 #include "llvm/ADT/Triple.h" 20 #include "llvm/ADT/Twine.h" 21 #include "llvm/Analysis/BlockFrequencyInfo.h" 22 #include "llvm/Analysis/BranchProbabilityInfo.h" 23 #include "llvm/Analysis/LoopInfo.h" 24 #include "llvm/Analysis/TargetLibraryInfo.h" 25 #include "llvm/IR/Attributes.h" 26 #include "llvm/IR/BasicBlock.h" 27 #include "llvm/IR/Constant.h" 28 #include "llvm/IR/Constants.h" 29 #include "llvm/IR/DerivedTypes.h" 30 #include "llvm/IR/Dominators.h" 31 #include "llvm/IR/Function.h" 32 #include "llvm/IR/GlobalValue.h" 33 #include "llvm/IR/GlobalVariable.h" 34 #include "llvm/IR/IRBuilder.h" 35 #include "llvm/IR/Instruction.h" 36 #include "llvm/IR/Instructions.h" 37 #include "llvm/IR/IntrinsicInst.h" 38 #include "llvm/IR/Module.h" 39 #include "llvm/IR/Type.h" 40 #include "llvm/InitializePasses.h" 41 #include "llvm/Pass.h" 42 #include "llvm/ProfileData/InstrProf.h" 43 #include "llvm/Support/Casting.h" 44 #include "llvm/Support/CommandLine.h" 45 #include "llvm/Support/Error.h" 46 #include "llvm/Support/ErrorHandling.h" 47 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 48 #include 
"llvm/Transforms/Utils/ModuleUtils.h" 49 #include "llvm/Transforms/Utils/SSAUpdater.h" 50 #include <algorithm> 51 #include <cassert> 52 #include <cstddef> 53 #include <cstdint> 54 #include <string> 55 56 using namespace llvm; 57 58 #define DEBUG_TYPE "instrprof" 59 60 namespace { 61 62 cl::opt<bool> DoHashBasedCounterSplit( 63 "hash-based-counter-split", 64 cl::desc("Rename counter variable of a comdat function based on cfg hash"), 65 cl::init(true)); 66 67 cl::opt<bool> RuntimeCounterRelocation( 68 "runtime-counter-relocation", 69 cl::desc("Enable relocating counters at runtime."), 70 cl::init(false)); 71 72 cl::opt<bool> CounterLinkOrder( 73 "counter-link-order", 74 cl::desc("Set counter associated metadata to enable garbage collection at link time."), 75 cl::init(false)); 76 77 cl::opt<bool> ValueProfileStaticAlloc( 78 "vp-static-alloc", 79 cl::desc("Do static counter allocation for value profiler"), 80 cl::init(true)); 81 82 cl::opt<double> NumCountersPerValueSite( 83 "vp-counters-per-site", 84 cl::desc("The average number of profile counters allocated " 85 "per value profiling site."), 86 // This is set to a very small value because in real programs, only 87 // a very small percentage of value sites have non-zero targets, e.g, 1/30. 88 // For those sites with non-zero profile, the average number of targets 89 // is usually smaller than 2. 
90 cl::init(1.0)); 91 92 cl::opt<bool> AtomicCounterUpdateAll( 93 "instrprof-atomic-counter-update-all", cl::ZeroOrMore, 94 cl::desc("Make all profile counter updates atomic (for testing only)"), 95 cl::init(false)); 96 97 cl::opt<bool> AtomicCounterUpdatePromoted( 98 "atomic-counter-update-promoted", cl::ZeroOrMore, 99 cl::desc("Do counter update using atomic fetch add " 100 " for promoted counters only"), 101 cl::init(false)); 102 103 cl::opt<bool> AtomicFirstCounter( 104 "atomic-first-counter", cl::ZeroOrMore, 105 cl::desc("Use atomic fetch add for first counter in a function (usually " 106 "the entry counter)"), 107 cl::init(false)); 108 109 // If the option is not specified, the default behavior about whether 110 // counter promotion is done depends on how instrumentaiton lowering 111 // pipeline is setup, i.e., the default value of true of this option 112 // does not mean the promotion will be done by default. Explicitly 113 // setting this option can override the default behavior. 
114 cl::opt<bool> DoCounterPromotion("do-counter-promotion", cl::ZeroOrMore, 115 cl::desc("Do counter register promotion"), 116 cl::init(false)); 117 cl::opt<unsigned> MaxNumOfPromotionsPerLoop( 118 cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(20), 119 cl::desc("Max number counter promotions per loop to avoid" 120 " increasing register pressure too much")); 121 122 // A debug option 123 cl::opt<int> 124 MaxNumOfPromotions(cl::ZeroOrMore, "max-counter-promotions", cl::init(-1), 125 cl::desc("Max number of allowed counter promotions")); 126 127 cl::opt<unsigned> SpeculativeCounterPromotionMaxExiting( 128 cl::ZeroOrMore, "speculative-counter-promotion-max-exiting", cl::init(3), 129 cl::desc("The max number of exiting blocks of a loop to allow " 130 " speculative counter promotion")); 131 132 cl::opt<bool> SpeculativeCounterPromotionToLoop( 133 cl::ZeroOrMore, "speculative-counter-promotion-to-loop", cl::init(false), 134 cl::desc("When the option is false, if the target block is in a loop, " 135 "the promotion will be disallowed unless the promoted counter " 136 " update can be further/iteratively promoted into an acyclic " 137 " region.")); 138 139 cl::opt<bool> IterativeCounterPromotion( 140 cl::ZeroOrMore, "iterative-counter-promotion", cl::init(true), 141 cl::desc("Allow counter promotion across the whole loop nest.")); 142 143 cl::opt<bool> SkipRetExitBlock( 144 cl::ZeroOrMore, "skip-ret-exit-block", cl::init(true), 145 cl::desc("Suppress counter promotion if exit blocks contain ret.")); 146 147 class InstrProfilingLegacyPass : public ModulePass { 148 InstrProfiling InstrProf; 149 150 public: 151 static char ID; 152 153 InstrProfilingLegacyPass() : ModulePass(ID) {} 154 InstrProfilingLegacyPass(const InstrProfOptions &Options, bool IsCS = false) 155 : ModulePass(ID), InstrProf(Options, IsCS) { 156 initializeInstrProfilingLegacyPassPass(*PassRegistry::getPassRegistry()); 157 } 158 159 StringRef getPassName() const override { 160 return "Frontend 
instrumentation-based coverage lowering"; 161 } 162 163 bool runOnModule(Module &M) override { 164 auto GetTLI = [this](Function &F) -> TargetLibraryInfo & { 165 return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 166 }; 167 return InstrProf.run(M, GetTLI); 168 } 169 170 void getAnalysisUsage(AnalysisUsage &AU) const override { 171 AU.setPreservesCFG(); 172 AU.addRequired<TargetLibraryInfoWrapperPass>(); 173 } 174 }; 175 176 /// 177 /// A helper class to promote one counter RMW operation in the loop 178 /// into register update. 179 /// 180 /// RWM update for the counter will be sinked out of the loop after 181 /// the transformation. 182 /// 183 class PGOCounterPromoterHelper : public LoadAndStorePromoter { 184 public: 185 PGOCounterPromoterHelper( 186 Instruction *L, Instruction *S, SSAUpdater &SSA, Value *Init, 187 BasicBlock *PH, ArrayRef<BasicBlock *> ExitBlocks, 188 ArrayRef<Instruction *> InsertPts, 189 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands, 190 LoopInfo &LI) 191 : LoadAndStorePromoter({L, S}, SSA), Store(S), ExitBlocks(ExitBlocks), 192 InsertPts(InsertPts), LoopToCandidates(LoopToCands), LI(LI) { 193 assert(isa<LoadInst>(L)); 194 assert(isa<StoreInst>(S)); 195 SSA.AddAvailableValue(PH, Init); 196 } 197 198 void doExtraRewritesBeforeFinalDeletion() override { 199 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) { 200 BasicBlock *ExitBlock = ExitBlocks[i]; 201 Instruction *InsertPos = InsertPts[i]; 202 // Get LiveIn value into the ExitBlock. If there are multiple 203 // predecessors, the value is defined by a PHI node in this 204 // block. 205 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock); 206 Value *Addr = cast<StoreInst>(Store)->getPointerOperand(); 207 Type *Ty = LiveInValue->getType(); 208 IRBuilder<> Builder(InsertPos); 209 if (AtomicCounterUpdatePromoted) 210 // automic update currently can only be promoted across the current 211 // loop, not the whole loop nest. 
212 Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue, 213 AtomicOrdering::SequentiallyConsistent); 214 else { 215 LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted"); 216 auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue); 217 auto *NewStore = Builder.CreateStore(NewVal, Addr); 218 219 // Now update the parent loop's candidate list: 220 if (IterativeCounterPromotion) { 221 auto *TargetLoop = LI.getLoopFor(ExitBlock); 222 if (TargetLoop) 223 LoopToCandidates[TargetLoop].emplace_back(OldVal, NewStore); 224 } 225 } 226 } 227 } 228 229 private: 230 Instruction *Store; 231 ArrayRef<BasicBlock *> ExitBlocks; 232 ArrayRef<Instruction *> InsertPts; 233 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates; 234 LoopInfo &LI; 235 }; 236 237 /// A helper class to do register promotion for all profile counter 238 /// updates in a loop. 239 /// 240 class PGOCounterPromoter { 241 public: 242 PGOCounterPromoter( 243 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands, 244 Loop &CurLoop, LoopInfo &LI, BlockFrequencyInfo *BFI) 245 : LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop), 246 LI(LI), BFI(BFI) { 247 248 // Skip collection of ExitBlocks and InsertPts for loops that will not be 249 // able to have counters promoted. 250 SmallVector<BasicBlock *, 8> LoopExitBlocks; 251 SmallPtrSet<BasicBlock *, 8> BlockSet; 252 253 L.getExitBlocks(LoopExitBlocks); 254 if (!isPromotionPossible(&L, LoopExitBlocks)) 255 return; 256 257 for (BasicBlock *ExitBlock : LoopExitBlocks) { 258 if (BlockSet.insert(ExitBlock).second) { 259 ExitBlocks.push_back(ExitBlock); 260 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt()); 261 } 262 } 263 } 264 265 bool run(int64_t *NumPromoted) { 266 // Skip 'infinite' loops: 267 if (ExitBlocks.size() == 0) 268 return false; 269 270 // Skip if any of the ExitBlocks contains a ret instruction. 
271 // This is to prevent dumping of incomplete profile -- if the 272 // the loop is a long running loop and dump is called in the middle 273 // of the loop, the result profile is incomplete. 274 // FIXME: add other heuristics to detect long running loops. 275 if (SkipRetExitBlock) { 276 for (auto BB : ExitBlocks) 277 if (isa<ReturnInst>(BB->getTerminator())) 278 return false; 279 } 280 281 unsigned MaxProm = getMaxNumOfPromotionsInLoop(&L); 282 if (MaxProm == 0) 283 return false; 284 285 unsigned Promoted = 0; 286 for (auto &Cand : LoopToCandidates[&L]) { 287 288 SmallVector<PHINode *, 4> NewPHIs; 289 SSAUpdater SSA(&NewPHIs); 290 Value *InitVal = ConstantInt::get(Cand.first->getType(), 0); 291 292 // If BFI is set, we will use it to guide the promotions. 293 if (BFI) { 294 auto *BB = Cand.first->getParent(); 295 auto InstrCount = BFI->getBlockProfileCount(BB); 296 if (!InstrCount) 297 continue; 298 auto PreheaderCount = BFI->getBlockProfileCount(L.getLoopPreheader()); 299 // If the average loop trip count is not greater than 1.5, we skip 300 // promotion. 301 if (PreheaderCount && 302 (PreheaderCount.getValue() * 3) >= (InstrCount.getValue() * 2)) 303 continue; 304 } 305 306 PGOCounterPromoterHelper Promoter(Cand.first, Cand.second, SSA, InitVal, 307 L.getLoopPreheader(), ExitBlocks, 308 InsertPts, LoopToCandidates, LI); 309 Promoter.run(SmallVector<Instruction *, 2>({Cand.first, Cand.second})); 310 Promoted++; 311 if (Promoted >= MaxProm) 312 break; 313 314 (*NumPromoted)++; 315 if (MaxNumOfPromotions != -1 && *NumPromoted >= MaxNumOfPromotions) 316 break; 317 } 318 319 LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth=" 320 << L.getLoopDepth() << ")\n"); 321 return Promoted != 0; 322 } 323 324 private: 325 bool allowSpeculativeCounterPromotion(Loop *LP) { 326 SmallVector<BasicBlock *, 8> ExitingBlocks; 327 L.getExitingBlocks(ExitingBlocks); 328 // Not considierered speculative. 
329 if (ExitingBlocks.size() == 1) 330 return true; 331 if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting) 332 return false; 333 return true; 334 } 335 336 // Check whether the loop satisfies the basic conditions needed to perform 337 // Counter Promotions. 338 bool isPromotionPossible(Loop *LP, 339 const SmallVectorImpl<BasicBlock *> &LoopExitBlocks) { 340 // We can't insert into a catchswitch. 341 if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) { 342 return isa<CatchSwitchInst>(Exit->getTerminator()); 343 })) 344 return false; 345 346 if (!LP->hasDedicatedExits()) 347 return false; 348 349 BasicBlock *PH = LP->getLoopPreheader(); 350 if (!PH) 351 return false; 352 353 return true; 354 } 355 356 // Returns the max number of Counter Promotions for LP. 357 unsigned getMaxNumOfPromotionsInLoop(Loop *LP) { 358 SmallVector<BasicBlock *, 8> LoopExitBlocks; 359 LP->getExitBlocks(LoopExitBlocks); 360 if (!isPromotionPossible(LP, LoopExitBlocks)) 361 return 0; 362 363 SmallVector<BasicBlock *, 8> ExitingBlocks; 364 LP->getExitingBlocks(ExitingBlocks); 365 366 // If BFI is set, we do more aggressive promotions based on BFI. 367 if (BFI) 368 return (unsigned)-1; 369 370 // Not considierered speculative. 
371 if (ExitingBlocks.size() == 1) 372 return MaxNumOfPromotionsPerLoop; 373 374 if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting) 375 return 0; 376 377 // Whether the target block is in a loop does not matter: 378 if (SpeculativeCounterPromotionToLoop) 379 return MaxNumOfPromotionsPerLoop; 380 381 // Now check the target block: 382 unsigned MaxProm = MaxNumOfPromotionsPerLoop; 383 for (auto *TargetBlock : LoopExitBlocks) { 384 auto *TargetLoop = LI.getLoopFor(TargetBlock); 385 if (!TargetLoop) 386 continue; 387 unsigned MaxPromForTarget = getMaxNumOfPromotionsInLoop(TargetLoop); 388 unsigned PendingCandsInTarget = LoopToCandidates[TargetLoop].size(); 389 MaxProm = 390 std::min(MaxProm, std::max(MaxPromForTarget, PendingCandsInTarget) - 391 PendingCandsInTarget); 392 } 393 return MaxProm; 394 } 395 396 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates; 397 SmallVector<BasicBlock *, 8> ExitBlocks; 398 SmallVector<Instruction *, 8> InsertPts; 399 Loop &L; 400 LoopInfo &LI; 401 BlockFrequencyInfo *BFI; 402 }; 403 404 enum class ValueProfilingCallType { 405 // Individual values are tracked. Currently used for indiret call target 406 // profiling. 407 Default, 408 409 // MemOp: the memop size value profiling. 
410 MemOp 411 }; 412 413 } // end anonymous namespace 414 415 PreservedAnalyses InstrProfiling::run(Module &M, ModuleAnalysisManager &AM) { 416 FunctionAnalysisManager &FAM = 417 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 418 auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & { 419 return FAM.getResult<TargetLibraryAnalysis>(F); 420 }; 421 if (!run(M, GetTLI)) 422 return PreservedAnalyses::all(); 423 424 return PreservedAnalyses::none(); 425 } 426 427 char InstrProfilingLegacyPass::ID = 0; 428 INITIALIZE_PASS_BEGIN( 429 InstrProfilingLegacyPass, "instrprof", 430 "Frontend instrumentation-based coverage lowering.", false, false) 431 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 432 INITIALIZE_PASS_END( 433 InstrProfilingLegacyPass, "instrprof", 434 "Frontend instrumentation-based coverage lowering.", false, false) 435 436 ModulePass * 437 llvm::createInstrProfilingLegacyPass(const InstrProfOptions &Options, 438 bool IsCS) { 439 return new InstrProfilingLegacyPass(Options, IsCS); 440 } 441 442 static InstrProfIncrementInst *castToIncrementInst(Instruction *Instr) { 443 InstrProfIncrementInst *Inc = dyn_cast<InstrProfIncrementInstStep>(Instr); 444 if (Inc) 445 return Inc; 446 return dyn_cast<InstrProfIncrementInst>(Instr); 447 } 448 449 bool InstrProfiling::lowerIntrinsics(Function *F) { 450 bool MadeChange = false; 451 PromotionCandidates.clear(); 452 for (BasicBlock &BB : *F) { 453 for (auto I = BB.begin(), E = BB.end(); I != E;) { 454 auto Instr = I++; 455 InstrProfIncrementInst *Inc = castToIncrementInst(&*Instr); 456 if (Inc) { 457 lowerIncrement(Inc); 458 MadeChange = true; 459 } else if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(Instr)) { 460 lowerValueProfileInst(Ind); 461 MadeChange = true; 462 } 463 } 464 } 465 466 if (!MadeChange) 467 return false; 468 469 promoteCounterLoadStores(F); 470 return true; 471 } 472 473 bool InstrProfiling::isRuntimeCounterRelocationEnabled() const { 474 if 
(RuntimeCounterRelocation.getNumOccurrences() > 0) 475 return RuntimeCounterRelocation; 476 477 return TT.isOSFuchsia(); 478 } 479 480 bool InstrProfiling::isCounterPromotionEnabled() const { 481 if (DoCounterPromotion.getNumOccurrences() > 0) 482 return DoCounterPromotion; 483 484 return Options.DoCounterPromotion; 485 } 486 487 bool InstrProfiling::isCounterLinkOrderEnabled() const { 488 if (CounterLinkOrder.getNumOccurrences() > 0) 489 return CounterLinkOrder; 490 491 return Options.CounterLinkOrder; 492 } 493 494 void InstrProfiling::promoteCounterLoadStores(Function *F) { 495 if (!isCounterPromotionEnabled()) 496 return; 497 498 DominatorTree DT(*F); 499 LoopInfo LI(DT); 500 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> LoopPromotionCandidates; 501 502 std::unique_ptr<BlockFrequencyInfo> BFI; 503 if (Options.UseBFIInPromotion) { 504 std::unique_ptr<BranchProbabilityInfo> BPI; 505 BPI.reset(new BranchProbabilityInfo(*F, LI, &GetTLI(*F))); 506 BFI.reset(new BlockFrequencyInfo(*F, *BPI, LI)); 507 } 508 509 for (const auto &LoadStore : PromotionCandidates) { 510 auto *CounterLoad = LoadStore.first; 511 auto *CounterStore = LoadStore.second; 512 BasicBlock *BB = CounterLoad->getParent(); 513 Loop *ParentLoop = LI.getLoopFor(BB); 514 if (!ParentLoop) 515 continue; 516 LoopPromotionCandidates[ParentLoop].emplace_back(CounterLoad, CounterStore); 517 } 518 519 SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder(); 520 521 // Do a post-order traversal of the loops so that counter updates can be 522 // iteratively hoisted outside the loop nest. 523 for (auto *Loop : llvm::reverse(Loops)) { 524 PGOCounterPromoter Promoter(LoopPromotionCandidates, *Loop, LI, BFI.get()); 525 Promoter.run(&TotalCountersPromoted); 526 } 527 } 528 529 /// Check if the module contains uses of any profiling intrinsics. 
530 static bool containsProfilingIntrinsics(Module &M) { 531 if (auto *F = M.getFunction( 532 Intrinsic::getName(llvm::Intrinsic::instrprof_increment))) 533 if (!F->use_empty()) 534 return true; 535 if (auto *F = M.getFunction( 536 Intrinsic::getName(llvm::Intrinsic::instrprof_increment_step))) 537 if (!F->use_empty()) 538 return true; 539 if (auto *F = M.getFunction( 540 Intrinsic::getName(llvm::Intrinsic::instrprof_value_profile))) 541 if (!F->use_empty()) 542 return true; 543 return false; 544 } 545 546 bool InstrProfiling::run( 547 Module &M, std::function<const TargetLibraryInfo &(Function &F)> GetTLI) { 548 this->M = &M; 549 this->GetTLI = std::move(GetTLI); 550 NamesVar = nullptr; 551 NamesSize = 0; 552 ProfileDataMap.clear(); 553 UsedVars.clear(); 554 TT = Triple(M.getTargetTriple()); 555 556 // Emit the runtime hook even if no counters are present. 557 bool MadeChange = emitRuntimeHook(); 558 559 // Improve compile time by avoiding linear scans when there is no work. 560 GlobalVariable *CoverageNamesVar = 561 M.getNamedGlobal(getCoverageUnusedNamesVarName()); 562 if (!containsProfilingIntrinsics(M) && !CoverageNamesVar) 563 return MadeChange; 564 565 // We did not know how many value sites there would be inside 566 // the instrumented function. This is counting the number of instrumented 567 // target value sites to enter it as field in the profile data variable. 568 for (Function &F : M) { 569 InstrProfIncrementInst *FirstProfIncInst = nullptr; 570 for (BasicBlock &BB : F) 571 for (auto I = BB.begin(), E = BB.end(); I != E; I++) 572 if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(I)) 573 computeNumValueSiteCounts(Ind); 574 else if (FirstProfIncInst == nullptr) 575 FirstProfIncInst = dyn_cast<InstrProfIncrementInst>(I); 576 577 // Value profiling intrinsic lowering requires per-function profile data 578 // variable to be created first. 
579 if (FirstProfIncInst != nullptr) 580 static_cast<void>(getOrCreateRegionCounters(FirstProfIncInst)); 581 } 582 583 for (Function &F : M) 584 MadeChange |= lowerIntrinsics(&F); 585 586 if (CoverageNamesVar) { 587 lowerCoverageData(CoverageNamesVar); 588 MadeChange = true; 589 } 590 591 if (!MadeChange) 592 return false; 593 594 emitVNodes(); 595 emitNameData(); 596 emitRegistration(); 597 emitUses(); 598 emitInitialization(); 599 return true; 600 } 601 602 static FunctionCallee getOrInsertValueProfilingCall( 603 Module &M, const TargetLibraryInfo &TLI, 604 ValueProfilingCallType CallType = ValueProfilingCallType::Default) { 605 LLVMContext &Ctx = M.getContext(); 606 auto *ReturnTy = Type::getVoidTy(M.getContext()); 607 608 AttributeList AL; 609 if (auto AK = TLI.getExtAttrForI32Param(false)) 610 AL = AL.addParamAttribute(M.getContext(), 2, AK); 611 612 assert((CallType == ValueProfilingCallType::Default || 613 CallType == ValueProfilingCallType::MemOp) && 614 "Must be Default or MemOp"); 615 Type *ParamTypes[] = { 616 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType 617 #include "llvm/ProfileData/InstrProfData.inc" 618 }; 619 auto *ValueProfilingCallTy = 620 FunctionType::get(ReturnTy, makeArrayRef(ParamTypes), false); 621 StringRef FuncName = CallType == ValueProfilingCallType::Default 622 ? 
getInstrProfValueProfFuncName() 623 : getInstrProfValueProfMemOpFuncName(); 624 return M.getOrInsertFunction(FuncName, ValueProfilingCallTy, AL); 625 } 626 627 void InstrProfiling::computeNumValueSiteCounts(InstrProfValueProfileInst *Ind) { 628 GlobalVariable *Name = Ind->getName(); 629 uint64_t ValueKind = Ind->getValueKind()->getZExtValue(); 630 uint64_t Index = Ind->getIndex()->getZExtValue(); 631 auto It = ProfileDataMap.find(Name); 632 if (It == ProfileDataMap.end()) { 633 PerFunctionProfileData PD; 634 PD.NumValueSites[ValueKind] = Index + 1; 635 ProfileDataMap[Name] = PD; 636 } else if (It->second.NumValueSites[ValueKind] <= Index) 637 It->second.NumValueSites[ValueKind] = Index + 1; 638 } 639 640 void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) { 641 GlobalVariable *Name = Ind->getName(); 642 auto It = ProfileDataMap.find(Name); 643 assert(It != ProfileDataMap.end() && It->second.DataVar && 644 "value profiling detected in function with no counter incerement"); 645 646 GlobalVariable *DataVar = It->second.DataVar; 647 uint64_t ValueKind = Ind->getValueKind()->getZExtValue(); 648 uint64_t Index = Ind->getIndex()->getZExtValue(); 649 for (uint32_t Kind = IPVK_First; Kind < ValueKind; ++Kind) 650 Index += It->second.NumValueSites[Kind]; 651 652 IRBuilder<> Builder(Ind); 653 bool IsMemOpSize = (Ind->getValueKind()->getZExtValue() == 654 llvm::InstrProfValueKind::IPVK_MemOPSize); 655 CallInst *Call = nullptr; 656 auto *TLI = &GetTLI(*Ind->getFunction()); 657 658 // To support value profiling calls within Windows exception handlers, funclet 659 // information contained within operand bundles needs to be copied over to 660 // the library call. This is required for the IR to be processed by the 661 // WinEHPrepare pass. 
662 SmallVector<OperandBundleDef, 1> OpBundles; 663 Ind->getOperandBundlesAsDefs(OpBundles); 664 if (!IsMemOpSize) { 665 Value *Args[3] = {Ind->getTargetValue(), 666 Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()), 667 Builder.getInt32(Index)}; 668 Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args, 669 OpBundles); 670 } else { 671 Value *Args[3] = {Ind->getTargetValue(), 672 Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()), 673 Builder.getInt32(Index)}; 674 Call = Builder.CreateCall( 675 getOrInsertValueProfilingCall(*M, *TLI, ValueProfilingCallType::MemOp), 676 Args, OpBundles); 677 } 678 if (auto AK = TLI->getExtAttrForI32Param(false)) 679 Call->addParamAttr(2, AK); 680 Ind->replaceAllUsesWith(Call); 681 Ind->eraseFromParent(); 682 } 683 684 void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) { 685 GlobalVariable *Counters = getOrCreateRegionCounters(Inc); 686 687 IRBuilder<> Builder(Inc); 688 uint64_t Index = Inc->getIndex()->getZExtValue(); 689 Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters->getValueType(), 690 Counters, 0, Index); 691 692 if (isRuntimeCounterRelocationEnabled()) { 693 Type *Int64Ty = Type::getInt64Ty(M->getContext()); 694 Type *Int64PtrTy = Type::getInt64PtrTy(M->getContext()); 695 Function *Fn = Inc->getParent()->getParent(); 696 Instruction &I = Fn->getEntryBlock().front(); 697 LoadInst *LI = dyn_cast<LoadInst>(&I); 698 if (!LI) { 699 IRBuilder<> Builder(&I); 700 Type *Int64Ty = Type::getInt64Ty(M->getContext()); 701 GlobalVariable *Bias = M->getGlobalVariable(getInstrProfCounterBiasVarName()); 702 if (!Bias) { 703 Bias = new GlobalVariable(*M, Int64Ty, false, GlobalValue::LinkOnceODRLinkage, 704 Constant::getNullValue(Int64Ty), 705 getInstrProfCounterBiasVarName()); 706 Bias->setVisibility(GlobalVariable::HiddenVisibility); 707 } 708 LI = Builder.CreateLoad(Int64Ty, Bias); 709 } 710 auto *Add = Builder.CreateAdd(Builder.CreatePtrToInt(Addr, Int64Ty), LI); 711 Addr = 
Builder.CreateIntToPtr(Add, Int64PtrTy); 712 } 713 714 if (Options.Atomic || AtomicCounterUpdateAll || 715 (Index == 0 && AtomicFirstCounter)) { 716 Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(), 717 AtomicOrdering::Monotonic); 718 } else { 719 Value *IncStep = Inc->getStep(); 720 Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount"); 721 auto *Count = Builder.CreateAdd(Load, Inc->getStep()); 722 auto *Store = Builder.CreateStore(Count, Addr); 723 if (isCounterPromotionEnabled()) 724 PromotionCandidates.emplace_back(cast<Instruction>(Load), Store); 725 } 726 Inc->eraseFromParent(); 727 } 728 729 void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) { 730 ConstantArray *Names = 731 cast<ConstantArray>(CoverageNamesVar->getInitializer()); 732 for (unsigned I = 0, E = Names->getNumOperands(); I < E; ++I) { 733 Constant *NC = Names->getOperand(I); 734 Value *V = NC->stripPointerCasts(); 735 assert(isa<GlobalVariable>(V) && "Missing reference to function name"); 736 GlobalVariable *Name = cast<GlobalVariable>(V); 737 738 Name->setLinkage(GlobalValue::PrivateLinkage); 739 ReferencedNames.push_back(Name); 740 NC->dropAllReferences(); 741 } 742 CoverageNamesVar->eraseFromParent(); 743 } 744 745 /// Get the name of a profiling variable for a particular function. 746 static std::string getVarName(InstrProfIncrementInst *Inc, StringRef Prefix) { 747 StringRef NamePrefix = getInstrProfNameVarPrefix(); 748 StringRef Name = Inc->getName()->getName().substr(NamePrefix.size()); 749 Function *F = Inc->getParent()->getParent(); 750 Module *M = F->getParent(); 751 if (!DoHashBasedCounterSplit || !isIRPGOFlagSet(M) || 752 !canRenameComdatFunc(*F)) 753 return (Prefix + Name).str(); 754 uint64_t FuncHash = Inc->getHash()->getZExtValue(); 755 SmallVector<char, 24> HashPostfix; 756 if (Name.endswith((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix))) 757 return (Prefix + Name).str(); 758 return (Prefix + Name + "." 
+ Twine(FuncHash)).str(); 759 } 760 761 static inline bool shouldRecordFunctionAddr(Function *F) { 762 // Check the linkage 763 bool HasAvailableExternallyLinkage = F->hasAvailableExternallyLinkage(); 764 if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() && 765 !HasAvailableExternallyLinkage) 766 return true; 767 768 // A function marked 'alwaysinline' with available_externally linkage can't 769 // have its address taken. Doing so would create an undefined external ref to 770 // the function, which would fail to link. 771 if (HasAvailableExternallyLinkage && 772 F->hasFnAttribute(Attribute::AlwaysInline)) 773 return false; 774 775 // Prohibit function address recording if the function is both internal and 776 // COMDAT. This avoids the profile data variable referencing internal symbols 777 // in COMDAT. 778 if (F->hasLocalLinkage() && F->hasComdat()) 779 return false; 780 781 // Check uses of this function for other than direct calls or invokes to it. 782 // Inline virtual functions have linkeOnceODR linkage. When a key method 783 // exists, the vtable will only be emitted in the TU where the key method 784 // is defined. In a TU where vtable is not available, the function won't 785 // be 'addresstaken'. If its address is not recorded here, the profile data 786 // with missing address may be picked by the linker leading to missing 787 // indirect call target info. 788 return F->hasAddressTaken() || F->hasLinkOnceLinkage(); 789 } 790 791 static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) { 792 // Don't do this for Darwin. compiler-rt uses linker magic. 793 if (TT.isOSDarwin()) 794 return false; 795 // Use linker script magic to get data/cnts/name start/end. 
// Tail of a predicate whose head is above this chunk (callers below invoke
// needsRuntimeRegistrationOfSectionRange): for the targets listed here the
// linker can locate the profile sections itself, so no runtime registration
// is needed — NOTE(review): head not visible; role inferred from call sites.
  if (TT.isOSLinux() || TT.isOSFreeBSD() || TT.isOSNetBSD() ||
      TT.isOSSolaris() || TT.isOSFuchsia() || TT.isPS4CPU() ||
      TT.isOSWindows())
    return false;

  return true;
}

/// Returns (creating on first use) the per-function counter array for the
/// function containing \p Inc, and as a side effect creates the associated
/// __llvm_prf_data record and, when statically allocated, the value-profile
/// pointer array.
GlobalVariable *
InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
  // The name global acts as the map key: one set of profile globals per
  // instrumented function.
  GlobalVariable *NamePtr = Inc->getName();
  auto It = ProfileDataMap.find(NamePtr);
  PerFunctionProfileData PD;
  if (It != ProfileDataMap.end()) {
    if (It->second.RegionCounters)
      return It->second.RegionCounters;
    // An entry without counters may already carry NumValueSites data; keep it
    // so the data record below reflects the value sites.
    PD = It->second;
  }

  // Match the linkage and visibility of the name global. COFF supports using
  // comdats with internal symbols, so do that if we can.
  Function *Fn = Inc->getParent()->getParent();
  GlobalValue::LinkageTypes Linkage = NamePtr->getLinkage();
  GlobalValue::VisibilityTypes Visibility = NamePtr->getVisibility();
  if (TT.isOSBinFormatCOFF()) {
    Linkage = GlobalValue::InternalLinkage;
    Visibility = GlobalValue::DefaultVisibility;
  }

  // Move the name variable to the right section. Place them in a COMDAT group
  // if the associated function is a COMDAT. This will make sure that only one
  // copy of counters of the COMDAT function will be emitted after linking. Keep
  // in mind that this pass may run before the inliner, so we need to create a
  // new comdat group for the counters and profiling data. If we use the comdat
  // of the parent function, that will result in relocations against discarded
  // sections.
  bool NeedComdat = needsComdatForCounter(*Fn, *M);
  if (NeedComdat) {
    if (TT.isOSBinFormatCOFF()) {
      // For COFF, put the counters, data, and values each into their own
      // comdats. We can't use a group because the Visual C++ linker will
      // report duplicate symbol errors if there are multiple external symbols
      // with the same name marked IMAGE_COMDAT_SELECT_ASSOCIATIVE.
      Linkage = GlobalValue::LinkOnceODRLinkage;
      Visibility = GlobalValue::HiddenVisibility;
    }
  }
  std::string DataVarName = getVarName(Inc, getInstrProfDataVarPrefix());
  // On COFF each variable gets its own comdat (keyed by its own name); on
  // other formats the data variable's name keys a shared comdat group.
  auto MaybeSetComdat = [=](GlobalVariable *GV) {
    if (NeedComdat)
      GV->setComdat(M->getOrInsertComdat(TT.isOSBinFormatCOFF() ? GV->getName()
                                                                : DataVarName));
  };

  uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
  LLVMContext &Ctx = M->getContext();
  ArrayType *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters);

  // Create the counters variable.
  auto *CounterPtr =
      new GlobalVariable(*M, CounterTy, false, Linkage,
                         Constant::getNullValue(CounterTy),
                         getVarName(Inc, getInstrProfCountersVarPrefix()));
  CounterPtr->setVisibility(Visibility);
  CounterPtr->setSection(
      getInstrProfSectionName(IPSK_cnts, TT.getObjectFormat()));
  CounterPtr->setAlignment(Align(8));
  MaybeSetComdat(CounterPtr);
  CounterPtr->setLinkage(Linkage);
  // We need a self-link for the counter variable because the ELF section name
  // (that is __llvm_prf_cnts) is a C identifier and considered a GC root in the
  // absence of the SHF_LINK_ORDER flag.
  if (isCounterLinkOrderEnabled())
    CounterPtr->setMetadata(LLVMContext::MD_associated,
                            MDNode::get(Ctx, ValueAsMetadata::get(Fn)));

  auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
  // Allocate statically the array of pointers to value profile nodes for
  // the current function.
  Constant *ValuesPtrExpr = ConstantPointerNull::get(Int8PtrTy);
  if (ValueProfileStaticAlloc && !needsRuntimeRegistrationOfSectionRange(TT)) {
    uint64_t NS = 0;
    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
      NS += PD.NumValueSites[Kind];
    if (NS) {
      // One i64 (pointer-sized) slot per value site.
      ArrayType *ValuesTy = ArrayType::get(Type::getInt64Ty(Ctx), NS);

      auto *ValuesVar =
          new GlobalVariable(*M, ValuesTy, false, Linkage,
                             Constant::getNullValue(ValuesTy),
                             getVarName(Inc, getInstrProfValuesVarPrefix()));
      ValuesVar->setVisibility(Visibility);
      ValuesVar->setSection(
          getInstrProfSectionName(IPSK_vals, TT.getObjectFormat()));
      ValuesVar->setAlignment(Align(8));
      MaybeSetComdat(ValuesVar);
      if (isCounterLinkOrderEnabled())
        ValuesVar->setMetadata(
            LLVMContext::MD_associated,
            MDNode::get(Ctx, ValueAsMetadata::get(CounterPtr)));
      ValuesPtrExpr =
          ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx));
    }
  }

  // Create data variable. Its layout is defined once in InstrProfData.inc
  // (shared with the profile runtime); the X-macro expansions below must
  // stay in sync with that file.
  auto *Int16Ty = Type::getInt16Ty(Ctx);
  auto *Int16ArrayTy = ArrayType::get(Int16Ty, IPVK_Last + 1);
  Type *DataTypes[] = {
#define INSTR_PROF_DATA(Type, LLVMType, Name, Init) LLVMType,
#include "llvm/ProfileData/InstrProfData.inc"
  };
  auto *DataTy = StructType::get(Ctx, makeArrayRef(DataTypes));

  // FunctionAddr, Int16ArrayTy/Int16ArrayVals, ValuesPtrExpr, CounterPtr and
  // NamePtr look unused below but are referenced by the Init expressions
  // expanded from InstrProfData.inc into DataVals.
  Constant *FunctionAddr = shouldRecordFunctionAddr(Fn)
                               ? ConstantExpr::getBitCast(Fn, Int8PtrTy)
                               : ConstantPointerNull::get(Int8PtrTy);

  Constant *Int16ArrayVals[IPVK_Last + 1];
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    Int16ArrayVals[Kind] = ConstantInt::get(Int16Ty, PD.NumValueSites[Kind]);

  Constant *DataVals[] = {
#define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Init,
#include "llvm/ProfileData/InstrProfData.inc"
  };
  auto *Data =
      new GlobalVariable(*M, DataTy, false, Linkage,
                         ConstantStruct::get(DataTy, DataVals), DataVarName);
  Data->setVisibility(Visibility);
  Data->setSection(getInstrProfSectionName(IPSK_data, TT.getObjectFormat()));
  Data->setAlignment(Align(INSTR_PROF_DATA_ALIGNMENT));
  MaybeSetComdat(Data);
  Data->setLinkage(Linkage);
  if (isCounterLinkOrderEnabled())
    Data->setMetadata(LLVMContext::MD_associated,
                      MDNode::get(Ctx, ValueAsMetadata::get(CounterPtr)));

  PD.RegionCounters = CounterPtr;
  PD.DataVar = Data;
  ProfileDataMap[NamePtr] = PD;

  // Mark the data variable as used so that it isn't stripped out.
  UsedVars.push_back(Data);
  // Now that the linkage set by the FE has been passed to the data and counter
  // variables, reset Name variable's linkage and visibility to private so that
  // it can be removed later by the compiler.
  NamePtr->setLinkage(GlobalValue::PrivateLinkage);
  // Collect the referenced names to be used by emitNameData.
  ReferencedNames.push_back(NamePtr);

  return CounterPtr;
}

/// Emits one module-level array of value-profile nodes, sized from the total
/// number of value sites, for the runtime to carve up among sites instead of
/// allocating nodes dynamically.
void InstrProfiling::emitVNodes() {
  if (!ValueProfileStaticAlloc)
    return;

  // For now only support this on platforms that do
  // not require runtime registration to discover
  // named section start/end.
  if (needsRuntimeRegistrationOfSectionRange(TT))
    return;

  // Total value sites across all instrumented functions and all value kinds.
  size_t TotalNS = 0;
  for (auto &PD : ProfileDataMap) {
    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
      TotalNS += PD.second.NumValueSites[Kind];
  }

  if (!TotalNS)
    return;

  uint64_t NumCounters = TotalNS * NumCountersPerValueSite;
  // Heuristic for small programs with very few total value sites.
  // The default value of vp-counters-per-site is chosen based on
  // the observation that large apps usually have a low percentage
  // of value sites that actually have any profile data, and thus
  // the average number of counters per site is low. For small
  // apps with very few sites, this may not be true. Bump up the
  // number of counters in this case.
#define INSTR_PROF_MIN_VAL_COUNTS 10
  if (NumCounters < INSTR_PROF_MIN_VAL_COUNTS)
    NumCounters = std::max(INSTR_PROF_MIN_VAL_COUNTS, (int)NumCounters * 2);

  auto &Ctx = M->getContext();
  // Node layout comes from InstrProfData.inc, shared with the runtime.
  Type *VNodeTypes[] = {
#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Init) LLVMType,
#include "llvm/ProfileData/InstrProfData.inc"
  };
  auto *VNodeTy = StructType::get(Ctx, makeArrayRef(VNodeTypes));

  ArrayType *VNodesTy = ArrayType::get(VNodeTy, NumCounters);
  auto *VNodesVar = new GlobalVariable(
      *M, VNodesTy, false, GlobalValue::PrivateLinkage,
      Constant::getNullValue(VNodesTy), getInstrProfVNodesVarName());
  VNodesVar->setSection(
      getInstrProfSectionName(IPSK_vnodes, TT.getObjectFormat()));
  UsedVars.push_back(VNodesVar);
}

/// Concatenates all referenced function-name globals into a single
/// (optionally compressed) constant in the names section, records its size in
/// NamesSize, and erases the now-redundant individual name globals.
void InstrProfiling::emitNameData() {
  std::string UncompressedData; // NOTE(review): unused local, kept as-is.

  if (ReferencedNames.empty())
    return;

  std::string CompressedNameStr;
  if (Error E = collectPGOFuncNameStrings(ReferencedNames, CompressedNameStr,
                                          DoInstrProfNameCompression)) {
    report_fatal_error(toString(std::move(E)), false);
  }

  auto &Ctx = M->getContext();
  // Final `false` = no trailing NUL: the blob's length is carried explicitly
  // via NamesSize, and padding would corrupt the packed names section.
  auto *NamesVal = ConstantDataArray::getString(
      Ctx, StringRef(CompressedNameStr), false);
  NamesVar = new GlobalVariable(*M, NamesVal->getType(), true,
                                GlobalValue::PrivateLinkage, NamesVal,
                                getInstrProfNamesVarName());
  NamesSize = CompressedNameStr.size();
  NamesVar->setSection(
      getInstrProfSectionName(IPSK_name, TT.getObjectFormat()));
  // On COFF, it's important to reduce the alignment down to 1 to prevent the
  // linker from inserting padding before the start of the names section or
  // between names entries.
  NamesVar->setAlignment(Align(1));
  UsedVars.push_back(NamesVar);

  // The per-function name globals are fully subsumed by the merged blob.
  for (auto *NamePtr : ReferencedNames)
    NamePtr->eraseFromParent();
}

/// For platforms that need runtime registration of the profile sections,
/// synthesizes an internal registration function that hands every profile
/// variable (and the names blob together with its size) to the runtime's
/// external registration entry points.
void InstrProfiling::emitRegistration() {
  if (!needsRuntimeRegistrationOfSectionRange(TT))
    return;

  // Construct the function.
  auto *VoidTy = Type::getVoidTy(M->getContext());
  auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext());
  auto *Int64Ty = Type::getInt64Ty(M->getContext());
  auto *RegisterFTy = FunctionType::get(VoidTy, false);
  auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage,
                                     getInstrProfRegFuncsName(), M);
  RegisterF->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  if (Options.NoRedZone)
    RegisterF->addFnAttr(Attribute::NoRedZone);

  auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false);
  auto *RuntimeRegisterF =
      Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage,
                       getInstrProfRegFuncName(), M);

  IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF));
  // Skip NamesVar (registered separately below, since it needs a size) and
  // any Functions in UsedVars (e.g. the runtime-hook user function).
  for (Value *Data : UsedVars)
    if (Data != NamesVar && !isa<Function>(Data))
      IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));

  if (NamesVar) {
    Type *ParamTypes[] = {VoidPtrTy, Int64Ty};
    auto *NamesRegisterTy =
        FunctionType::get(VoidTy, makeArrayRef(ParamTypes), false);
    auto *NamesRegisterF =
        Function::Create(NamesRegisterTy, GlobalVariable::ExternalLinkage,
                         getInstrProfNamesRegFuncName(), M);
    IRB.CreateCall(NamesRegisterF, {IRB.CreateBitCast(NamesVar, VoidPtrTy),
                                    IRB.getInt64(NamesSize)});
  }

  IRB.CreateRetVoid();
}

/// Forces the profiling runtime to be linked in by emitting a hidden
/// link-once function that loads an external runtime hook variable.
/// \returns true if the hook-user function was emitted.
bool InstrProfiling::emitRuntimeHook() {
  // We expect the linker to be invoked with -u<hook_var> flag for Linux or
  // Fuchsia, in which case there is no need to emit the user function.
  if (TT.isOSLinux() || TT.isOSFuchsia())
    return false;

  // If the module's provided its own runtime, we don't need to do anything.
  if (M->getGlobalVariable(getInstrProfRuntimeHookVarName()))
    return false;

  // Declare an external variable that will pull in the runtime initialization.
  auto *Int32Ty = Type::getInt32Ty(M->getContext());
  auto *Var =
      new GlobalVariable(*M, Int32Ty, false, GlobalValue::ExternalLinkage,
                         nullptr, getInstrProfRuntimeHookVarName());

  // Make a function that uses it.
  auto *User = Function::Create(FunctionType::get(Int32Ty, false),
                                GlobalValue::LinkOnceODRLinkage,
                                getInstrProfRuntimeHookVarUseFuncName(), M);
  User->addFnAttr(Attribute::NoInline);
  if (Options.NoRedZone)
    User->addFnAttr(Attribute::NoRedZone);
  User->setVisibility(GlobalValue::HiddenVisibility);
  if (TT.supportsCOMDAT())
    User->setComdat(M->getOrInsertComdat(User->getName()));

  IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
  auto *Load = IRB.CreateLoad(Int32Ty, Var);
  IRB.CreateRet(Load);

  // Mark the user variable as used so that it isn't stripped out.
  UsedVars.push_back(User);
  return true;
}

/// Appends every synthesized profile global to llvm.used so neither the
/// optimizer nor the linker strips them.
void InstrProfiling::emitUses() {
  if (!UsedVars.empty())
    appendToUsed(*M, UsedVars);
}

/// Emits the profile file-name variable (except in context-sensitive mode)
/// and, when a registration function exists, a global constructor that calls
/// it at startup.
void InstrProfiling::emitInitialization() {
  // Create ProfileFileName variable. Don't do this for the
  // context-sensitive instrumentation lowering: This lowering is after
  // LTO/ThinLTO linking. Pass PGOInstrumentationGenCreateVar should
  // have already created the variable before LTO/ThinLTO linking.
  if (!IsCS)
    createProfileFileNameVar(*M, Options.InstrProfileOutput);
  // No registration function (see emitRegistration) means nothing to call.
  Function *RegisterF = M->getFunction(getInstrProfRegFuncsName());
  if (!RegisterF)
    return;

  // Create the initialization function.
  auto *VoidTy = Type::getVoidTy(M->getContext());
  auto *F = Function::Create(FunctionType::get(VoidTy, false),
                             GlobalValue::InternalLinkage,
                             getInstrProfInitFuncName(), M);
  F->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  F->addFnAttr(Attribute::NoInline);
  if (Options.NoRedZone)
    F->addFnAttr(Attribute::NoRedZone);

  // Add the basic block and the necessary calls.
  IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F));
  IRB.CreateCall(RegisterF, {});
  IRB.CreateRetVoid();

  // Priority 0: run registration before user constructors.
  appendToGlobalCtors(*M, F, 0);
}