1 //===-- InstrProfiling.cpp - Frontend instrumentation based profiling -----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass lowers instrprof_* intrinsics emitted by a frontend for profiling. 10 // It also builds the data structures and initialization code needed for 11 // updating execution counts and emitting the profile at runtime. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Transforms/Instrumentation/InstrProfiling.h" 16 #include "llvm/ADT/ArrayRef.h" 17 #include "llvm/ADT/SmallVector.h" 18 #include "llvm/ADT/StringRef.h" 19 #include "llvm/ADT/Triple.h" 20 #include "llvm/ADT/Twine.h" 21 #include "llvm/Analysis/BlockFrequencyInfo.h" 22 #include "llvm/Analysis/BranchProbabilityInfo.h" 23 #include "llvm/Analysis/LoopInfo.h" 24 #include "llvm/Analysis/TargetLibraryInfo.h" 25 #include "llvm/IR/Attributes.h" 26 #include "llvm/IR/BasicBlock.h" 27 #include "llvm/IR/Constant.h" 28 #include "llvm/IR/Constants.h" 29 #include "llvm/IR/DerivedTypes.h" 30 #include "llvm/IR/Dominators.h" 31 #include "llvm/IR/Function.h" 32 #include "llvm/IR/GlobalValue.h" 33 #include "llvm/IR/GlobalVariable.h" 34 #include "llvm/IR/IRBuilder.h" 35 #include "llvm/IR/Instruction.h" 36 #include "llvm/IR/Instructions.h" 37 #include "llvm/IR/IntrinsicInst.h" 38 #include "llvm/IR/Module.h" 39 #include "llvm/IR/Type.h" 40 #include "llvm/InitializePasses.h" 41 #include "llvm/Pass.h" 42 #include "llvm/ProfileData/InstrProf.h" 43 #include "llvm/Support/Casting.h" 44 #include "llvm/Support/CommandLine.h" 45 #include "llvm/Support/Error.h" 46 #include "llvm/Support/ErrorHandling.h" 47 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 48 #include "llvm/Transforms/Utils/ModuleUtils.h" 49 #include "llvm/Transforms/Utils/SSAUpdater.h" 50 #include <algorithm> 51 #include <cassert> 52 #include <cstddef> 53 #include <cstdint> 54 #include <string> 55 56 using namespace llvm; 57 58 #define DEBUG_TYPE "instrprof" 59 60 // FIXME: These are to be removed after switching to the new memop value 61 // profiling. 62 // The start and end values of precise value profile range for memory 63 // intrinsic sizes 64 cl::opt<std::string> MemOPSizeRange( 65 "memop-size-range", 66 cl::desc("Set the range of size in memory intrinsic calls to be profiled " 67 "precisely, in a format of <start_val>:<end_val>"), 68 cl::init("")); 69 70 // The value that considered to be large value in memory intrinsic. 71 cl::opt<unsigned> MemOPSizeLarge( 72 "memop-size-large", 73 cl::desc("Set large value thresthold in memory intrinsic size profiling. " 74 "Value of 0 disables the large value profiling."), 75 cl::init(8192)); 76 77 cl::opt<bool> UseOldMemOpValueProf( 78 "use-old-memop-value-prof", 79 cl::desc("Use the old memop value profiling buckets. This is " 80 "transitional and to be removed after switching. "), 81 cl::init(false)); 82 83 namespace { 84 85 cl::opt<bool> DoHashBasedCounterSplit( 86 "hash-based-counter-split", 87 cl::desc("Rename counter variable of a comdat function based on cfg hash"), 88 cl::init(true)); 89 90 cl::opt<bool> RuntimeCounterRelocation( 91 "runtime-counter-relocation", 92 cl::desc("Enable relocating counters at runtime."), 93 cl::init(false)); 94 95 cl::opt<bool> ValueProfileStaticAlloc( 96 "vp-static-alloc", 97 cl::desc("Do static counter allocation for value profiler"), 98 cl::init(true)); 99 100 cl::opt<double> NumCountersPerValueSite( 101 "vp-counters-per-site", 102 cl::desc("The average number of profile counters allocated " 103 "per value profiling site."), 104 // This is set to a very small value because in real programs, only 105 // a very small percentage of value sites have non-zero targets, e.g, 1/30. 106 // For those sites with non-zero profile, the average number of targets 107 // is usually smaller than 2. 108 cl::init(1.0)); 109 110 cl::opt<bool> AtomicCounterUpdateAll( 111 "instrprof-atomic-counter-update-all", cl::ZeroOrMore, 112 cl::desc("Make all profile counter updates atomic (for testing only)"), 113 cl::init(false)); 114 115 cl::opt<bool> AtomicCounterUpdatePromoted( 116 "atomic-counter-update-promoted", cl::ZeroOrMore, 117 cl::desc("Do counter update using atomic fetch add " 118 " for promoted counters only"), 119 cl::init(false)); 120 121 cl::opt<bool> AtomicFirstCounter( 122 "atomic-first-counter", cl::ZeroOrMore, 123 cl::desc("Use atomic fetch add for first counter in a function (usually " 124 "the entry counter)"), 125 cl::init(false)); 126 127 // If the option is not specified, the default behavior about whether 128 // counter promotion is done depends on how instrumentaiton lowering 129 // pipeline is setup, i.e., the default value of true of this option 130 // does not mean the promotion will be done by default. Explicitly 131 // setting this option can override the default behavior. 132 cl::opt<bool> DoCounterPromotion("do-counter-promotion", cl::ZeroOrMore, 133 cl::desc("Do counter register promotion"), 134 cl::init(false)); 135 cl::opt<unsigned> MaxNumOfPromotionsPerLoop( 136 cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(20), 137 cl::desc("Max number counter promotions per loop to avoid" 138 " increasing register pressure too much")); 139 140 // A debug option 141 cl::opt<int> 142 MaxNumOfPromotions(cl::ZeroOrMore, "max-counter-promotions", cl::init(-1), 143 cl::desc("Max number of allowed counter promotions")); 144 145 cl::opt<unsigned> SpeculativeCounterPromotionMaxExiting( 146 cl::ZeroOrMore, "speculative-counter-promotion-max-exiting", cl::init(3), 147 cl::desc("The max number of exiting blocks of a loop to allow " 148 " speculative counter promotion")); 149 150 cl::opt<bool> SpeculativeCounterPromotionToLoop( 151 cl::ZeroOrMore, "speculative-counter-promotion-to-loop", cl::init(false), 152 cl::desc("When the option is false, if the target block is in a loop, " 153 "the promotion will be disallowed unless the promoted counter " 154 " update can be further/iteratively promoted into an acyclic " 155 " region.")); 156 157 cl::opt<bool> IterativeCounterPromotion( 158 cl::ZeroOrMore, "iterative-counter-promotion", cl::init(true), 159 cl::desc("Allow counter promotion across the whole loop nest.")); 160 161 class InstrProfilingLegacyPass : public ModulePass { 162 InstrProfiling InstrProf; 163 164 public: 165 static char ID; 166 167 InstrProfilingLegacyPass() : ModulePass(ID) {} 168 InstrProfilingLegacyPass(const InstrProfOptions &Options, bool IsCS = false) 169 : ModulePass(ID), InstrProf(Options, IsCS) { 170 initializeInstrProfilingLegacyPassPass(*PassRegistry::getPassRegistry()); 171 } 172 173 StringRef getPassName() const override { 174 return "Frontend instrumentation-based coverage lowering"; 175 } 176 177 bool runOnModule(Module &M) override { 178 auto GetTLI = [this](Function &F) -> TargetLibraryInfo & { 179 return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 180 }; 181 return InstrProf.run(M, GetTLI); 182 } 183 184 void getAnalysisUsage(AnalysisUsage &AU) const override { 185 AU.setPreservesCFG(); 186 AU.addRequired<TargetLibraryInfoWrapperPass>(); 187 } 188 }; 189 190 /// 191 /// A helper class to promote one counter RMW operation in the loop 192 /// into register update. 193 /// 194 /// RWM update for the counter will be sinked out of the loop after 195 /// the transformation. 196 /// 197 class PGOCounterPromoterHelper : public LoadAndStorePromoter { 198 public: 199 PGOCounterPromoterHelper( 200 Instruction *L, Instruction *S, SSAUpdater &SSA, Value *Init, 201 BasicBlock *PH, ArrayRef<BasicBlock *> ExitBlocks, 202 ArrayRef<Instruction *> InsertPts, 203 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands, 204 LoopInfo &LI) 205 : LoadAndStorePromoter({L, S}, SSA), Store(S), ExitBlocks(ExitBlocks), 206 InsertPts(InsertPts), LoopToCandidates(LoopToCands), LI(LI) { 207 assert(isa<LoadInst>(L)); 208 assert(isa<StoreInst>(S)); 209 SSA.AddAvailableValue(PH, Init); 210 } 211 212 void doExtraRewritesBeforeFinalDeletion() override { 213 for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) { 214 BasicBlock *ExitBlock = ExitBlocks[i]; 215 Instruction *InsertPos = InsertPts[i]; 216 // Get LiveIn value into the ExitBlock. If there are multiple 217 // predecessors, the value is defined by a PHI node in this 218 // block. 219 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock); 220 Value *Addr = cast<StoreInst>(Store)->getPointerOperand(); 221 Type *Ty = LiveInValue->getType(); 222 IRBuilder<> Builder(InsertPos); 223 if (AtomicCounterUpdatePromoted) 224 // automic update currently can only be promoted across the current 225 // loop, not the whole loop nest. 226 Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue, 227 AtomicOrdering::SequentiallyConsistent); 228 else { 229 LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted"); 230 auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue); 231 auto *NewStore = Builder.CreateStore(NewVal, Addr); 232 233 // Now update the parent loop's candidate list: 234 if (IterativeCounterPromotion) { 235 auto *TargetLoop = LI.getLoopFor(ExitBlock); 236 if (TargetLoop) 237 LoopToCandidates[TargetLoop].emplace_back(OldVal, NewStore); 238 } 239 } 240 } 241 } 242 243 private: 244 Instruction *Store; 245 ArrayRef<BasicBlock *> ExitBlocks; 246 ArrayRef<Instruction *> InsertPts; 247 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates; 248 LoopInfo &LI; 249 }; 250 251 /// A helper class to do register promotion for all profile counter 252 /// updates in a loop. 253 /// 254 class PGOCounterPromoter { 255 public: 256 PGOCounterPromoter( 257 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands, 258 Loop &CurLoop, LoopInfo &LI, BlockFrequencyInfo *BFI) 259 : LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop), 260 LI(LI), BFI(BFI) { 261 262 // Skip collection of ExitBlocks and InsertPts for loops that will not be 263 // able to have counters promoted. 264 SmallVector<BasicBlock *, 8> LoopExitBlocks; 265 SmallPtrSet<BasicBlock *, 8> BlockSet; 266 267 L.getExitBlocks(LoopExitBlocks); 268 if (!isPromotionPossible(&L, LoopExitBlocks)) 269 return; 270 271 for (BasicBlock *ExitBlock : LoopExitBlocks) { 272 if (BlockSet.insert(ExitBlock).second) { 273 ExitBlocks.push_back(ExitBlock); 274 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt()); 275 } 276 } 277 } 278 279 bool run(int64_t *NumPromoted) { 280 // Skip 'infinite' loops: 281 if (ExitBlocks.size() == 0) 282 return false; 283 unsigned MaxProm = getMaxNumOfPromotionsInLoop(&L); 284 if (MaxProm == 0) 285 return false; 286 287 unsigned Promoted = 0; 288 for (auto &Cand : LoopToCandidates[&L]) { 289 290 SmallVector<PHINode *, 4> NewPHIs; 291 SSAUpdater SSA(&NewPHIs); 292 Value *InitVal = ConstantInt::get(Cand.first->getType(), 0); 293 294 // If BFI is set, we will use it to guide the promotions. 295 if (BFI) { 296 auto *BB = Cand.first->getParent(); 297 auto InstrCount = BFI->getBlockProfileCount(BB); 298 if (!InstrCount) 299 continue; 300 auto PreheaderCount = BFI->getBlockProfileCount(L.getLoopPreheader()); 301 // If the average loop trip count is not greater than 1.5, we skip 302 // promotion. 303 if (PreheaderCount && 304 (PreheaderCount.getValue() * 3) >= (InstrCount.getValue() * 2)) 305 continue; 306 } 307 308 PGOCounterPromoterHelper Promoter(Cand.first, Cand.second, SSA, InitVal, 309 L.getLoopPreheader(), ExitBlocks, 310 InsertPts, LoopToCandidates, LI); 311 Promoter.run(SmallVector<Instruction *, 2>({Cand.first, Cand.second})); 312 Promoted++; 313 if (Promoted >= MaxProm) 314 break; 315 316 (*NumPromoted)++; 317 if (MaxNumOfPromotions != -1 && *NumPromoted >= MaxNumOfPromotions) 318 break; 319 } 320 321 LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth=" 322 << L.getLoopDepth() << ")\n"); 323 return Promoted != 0; 324 } 325 326 private: 327 bool allowSpeculativeCounterPromotion(Loop *LP) { 328 SmallVector<BasicBlock *, 8> ExitingBlocks; 329 L.getExitingBlocks(ExitingBlocks); 330 // Not considierered speculative. 331 if (ExitingBlocks.size() == 1) 332 return true; 333 if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting) 334 return false; 335 return true; 336 } 337 338 // Check whether the loop satisfies the basic conditions needed to perform 339 // Counter Promotions. 340 bool isPromotionPossible(Loop *LP, 341 const SmallVectorImpl<BasicBlock *> &LoopExitBlocks) { 342 // We can't insert into a catchswitch. 343 if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) { 344 return isa<CatchSwitchInst>(Exit->getTerminator()); 345 })) 346 return false; 347 348 if (!LP->hasDedicatedExits()) 349 return false; 350 351 BasicBlock *PH = LP->getLoopPreheader(); 352 if (!PH) 353 return false; 354 355 return true; 356 } 357 358 // Returns the max number of Counter Promotions for LP. 359 unsigned getMaxNumOfPromotionsInLoop(Loop *LP) { 360 SmallVector<BasicBlock *, 8> LoopExitBlocks; 361 LP->getExitBlocks(LoopExitBlocks); 362 if (!isPromotionPossible(LP, LoopExitBlocks)) 363 return 0; 364 365 SmallVector<BasicBlock *, 8> ExitingBlocks; 366 LP->getExitingBlocks(ExitingBlocks); 367 368 // If BFI is set, we do more aggressive promotions based on BFI. 369 if (BFI) 370 return (unsigned)-1; 371 372 // Not considierered speculative. 373 if (ExitingBlocks.size() == 1) 374 return MaxNumOfPromotionsPerLoop; 375 376 if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting) 377 return 0; 378 379 // Whether the target block is in a loop does not matter: 380 if (SpeculativeCounterPromotionToLoop) 381 return MaxNumOfPromotionsPerLoop; 382 383 // Now check the target block: 384 unsigned MaxProm = MaxNumOfPromotionsPerLoop; 385 for (auto *TargetBlock : LoopExitBlocks) { 386 auto *TargetLoop = LI.getLoopFor(TargetBlock); 387 if (!TargetLoop) 388 continue; 389 unsigned MaxPromForTarget = getMaxNumOfPromotionsInLoop(TargetLoop); 390 unsigned PendingCandsInTarget = LoopToCandidates[TargetLoop].size(); 391 MaxProm = 392 std::min(MaxProm, std::max(MaxPromForTarget, PendingCandsInTarget) - 393 PendingCandsInTarget); 394 } 395 return MaxProm; 396 } 397 398 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates; 399 SmallVector<BasicBlock *, 8> ExitBlocks; 400 SmallVector<Instruction *, 8> InsertPts; 401 Loop &L; 402 LoopInfo &LI; 403 BlockFrequencyInfo *BFI; 404 }; 405 406 enum class ValueProfilingCallType { 407 // Individual values are tracked. Currently used for indiret call target 408 // profiling. 409 Default, 410 411 // The old memop size value profiling. FIXME: To be removed after switching to 412 // the new one. 413 OldMemOp, 414 415 // MemOp: the (new) memop size value profiling with extended buckets. 416 MemOp 417 }; 418 419 } // end anonymous namespace 420 421 PreservedAnalyses InstrProfiling::run(Module &M, ModuleAnalysisManager &AM) { 422 FunctionAnalysisManager &FAM = 423 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 424 auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & { 425 return FAM.getResult<TargetLibraryAnalysis>(F); 426 }; 427 if (!run(M, GetTLI)) 428 return PreservedAnalyses::all(); 429 430 return PreservedAnalyses::none(); 431 } 432 433 char InstrProfilingLegacyPass::ID = 0; 434 INITIALIZE_PASS_BEGIN( 435 InstrProfilingLegacyPass, "instrprof", 436 "Frontend instrumentation-based coverage lowering.", false, false) 437 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 438 INITIALIZE_PASS_END( 439 InstrProfilingLegacyPass, "instrprof", 440 "Frontend instrumentation-based coverage lowering.", false, false) 441 442 ModulePass * 443 llvm::createInstrProfilingLegacyPass(const InstrProfOptions &Options, 444 bool IsCS) { 445 return new InstrProfilingLegacyPass(Options, IsCS); 446 } 447 448 static InstrProfIncrementInst *castToIncrementInst(Instruction *Instr) { 449 InstrProfIncrementInst *Inc = dyn_cast<InstrProfIncrementInstStep>(Instr); 450 if (Inc) 451 return Inc; 452 return dyn_cast<InstrProfIncrementInst>(Instr); 453 } 454 455 bool InstrProfiling::lowerIntrinsics(Function *F) { 456 bool MadeChange = false; 457 PromotionCandidates.clear(); 458 for (BasicBlock &BB : *F) { 459 for (auto I = BB.begin(), E = BB.end(); I != E;) { 460 auto Instr = I++; 461 InstrProfIncrementInst *Inc = castToIncrementInst(&*Instr); 462 if (Inc) { 463 lowerIncrement(Inc); 464 MadeChange = true; 465 } else if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(Instr)) { 466 lowerValueProfileInst(Ind); 467 MadeChange = true; 468 } 469 } 470 } 471 472 if (!MadeChange) 473 return false; 474 475 promoteCounterLoadStores(F); 476 return true; 477 } 478 479 bool InstrProfiling::isRuntimeCounterRelocationEnabled() const { 480 if (RuntimeCounterRelocation.getNumOccurrences() > 0) 481 return RuntimeCounterRelocation; 482 483 return TT.isOSFuchsia(); 484 } 485 486 bool InstrProfiling::isCounterPromotionEnabled() const { 487 if (DoCounterPromotion.getNumOccurrences() > 0) 488 return DoCounterPromotion; 489 490 return Options.DoCounterPromotion; 491 } 492 493 void InstrProfiling::promoteCounterLoadStores(Function *F) { 494 if (!isCounterPromotionEnabled()) 495 return; 496 497 DominatorTree DT(*F); 498 LoopInfo LI(DT); 499 DenseMap<Loop *, SmallVector<LoadStorePair, 8>> LoopPromotionCandidates; 500 501 std::unique_ptr<BlockFrequencyInfo> BFI; 502 if (Options.UseBFIInPromotion) { 503 std::unique_ptr<BranchProbabilityInfo> BPI; 504 BPI.reset(new BranchProbabilityInfo(*F, LI, &GetTLI(*F))); 505 BFI.reset(new BlockFrequencyInfo(*F, *BPI, LI)); 506 } 507 508 for (const auto &LoadStore : PromotionCandidates) { 509 auto *CounterLoad = LoadStore.first; 510 auto *CounterStore = LoadStore.second; 511 BasicBlock *BB = CounterLoad->getParent(); 512 Loop *ParentLoop = LI.getLoopFor(BB); 513 if (!ParentLoop) 514 continue; 515 LoopPromotionCandidates[ParentLoop].emplace_back(CounterLoad, CounterStore); 516 } 517 518 SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder(); 519 520 // Do a post-order traversal of the loops so that counter updates can be 521 // iteratively hoisted outside the loop nest. 522 for (auto *Loop : llvm::reverse(Loops)) { 523 PGOCounterPromoter Promoter(LoopPromotionCandidates, *Loop, LI, BFI.get()); 524 Promoter.run(&TotalCountersPromoted); 525 } 526 } 527 528 /// Check if the module contains uses of any profiling intrinsics. 529 static bool containsProfilingIntrinsics(Module &M) { 530 if (auto *F = M.getFunction( 531 Intrinsic::getName(llvm::Intrinsic::instrprof_increment))) 532 if (!F->use_empty()) 533 return true; 534 if (auto *F = M.getFunction( 535 Intrinsic::getName(llvm::Intrinsic::instrprof_increment_step))) 536 if (!F->use_empty()) 537 return true; 538 if (auto *F = M.getFunction( 539 Intrinsic::getName(llvm::Intrinsic::instrprof_value_profile))) 540 if (!F->use_empty()) 541 return true; 542 return false; 543 } 544 545 bool InstrProfiling::run( 546 Module &M, std::function<const TargetLibraryInfo &(Function &F)> GetTLI) { 547 this->M = &M; 548 this->GetTLI = std::move(GetTLI); 549 NamesVar = nullptr; 550 NamesSize = 0; 551 ProfileDataMap.clear(); 552 UsedVars.clear(); 553 getMemOPSizeRangeFromOption(MemOPSizeRange, MemOPSizeRangeStart, 554 MemOPSizeRangeLast); 555 TT = Triple(M.getTargetTriple()); 556 557 // Emit the runtime hook even if no counters are present. 558 bool MadeChange = emitRuntimeHook(); 559 560 // Improve compile time by avoiding linear scans when there is no work. 561 GlobalVariable *CoverageNamesVar = 562 M.getNamedGlobal(getCoverageUnusedNamesVarName()); 563 if (!containsProfilingIntrinsics(M) && !CoverageNamesVar) 564 return MadeChange; 565 566 // We did not know how many value sites there would be inside 567 // the instrumented function. This is counting the number of instrumented 568 // target value sites to enter it as field in the profile data variable. 569 for (Function &F : M) { 570 InstrProfIncrementInst *FirstProfIncInst = nullptr; 571 for (BasicBlock &BB : F) 572 for (auto I = BB.begin(), E = BB.end(); I != E; I++) 573 if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(I)) 574 computeNumValueSiteCounts(Ind); 575 else if (FirstProfIncInst == nullptr) 576 FirstProfIncInst = dyn_cast<InstrProfIncrementInst>(I); 577 578 // Value profiling intrinsic lowering requires per-function profile data 579 // variable to be created first. 580 if (FirstProfIncInst != nullptr) 581 static_cast<void>(getOrCreateRegionCounters(FirstProfIncInst)); 582 } 583 584 for (Function &F : M) 585 MadeChange |= lowerIntrinsics(&F); 586 587 if (CoverageNamesVar) { 588 lowerCoverageData(CoverageNamesVar); 589 MadeChange = true; 590 } 591 592 if (!MadeChange) 593 return false; 594 595 emitVNodes(); 596 emitNameData(); 597 emitRegistration(); 598 emitUses(); 599 emitInitialization(); 600 return true; 601 } 602 603 static FunctionCallee getOrInsertValueProfilingCall( 604 Module &M, const TargetLibraryInfo &TLI, 605 ValueProfilingCallType CallType = ValueProfilingCallType::Default) { 606 LLVMContext &Ctx = M.getContext(); 607 auto *ReturnTy = Type::getVoidTy(M.getContext()); 608 609 AttributeList AL; 610 if (auto AK = TLI.getExtAttrForI32Param(false)) 611 AL = AL.addParamAttribute(M.getContext(), 2, AK); 612 613 if (CallType == ValueProfilingCallType::Default || 614 CallType == ValueProfilingCallType::MemOp) { 615 Type *ParamTypes[] = { 616 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType 617 #include "llvm/ProfileData/InstrProfData.inc" 618 }; 619 auto *ValueProfilingCallTy = 620 FunctionType::get(ReturnTy, makeArrayRef(ParamTypes), false); 621 StringRef FuncName = CallType == ValueProfilingCallType::Default 622 ? getInstrProfValueProfFuncName() 623 : getInstrProfValueProfMemOpFuncName(); 624 return M.getOrInsertFunction(FuncName, ValueProfilingCallTy, AL); 625 } else { 626 // FIXME: This code is to be removed after switching to the new memop value 627 // profiling. 628 assert(CallType == ValueProfilingCallType::OldMemOp); 629 Type *RangeParamTypes[] = { 630 #define VALUE_RANGE_PROF 1 631 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType 632 #include "llvm/ProfileData/InstrProfData.inc" 633 #undef VALUE_RANGE_PROF 634 }; 635 auto *ValueRangeProfilingCallTy = 636 FunctionType::get(ReturnTy, makeArrayRef(RangeParamTypes), false); 637 return M.getOrInsertFunction(getInstrProfValueRangeProfFuncName(), 638 ValueRangeProfilingCallTy, AL); 639 } 640 } 641 642 void InstrProfiling::computeNumValueSiteCounts(InstrProfValueProfileInst *Ind) { 643 GlobalVariable *Name = Ind->getName(); 644 uint64_t ValueKind = Ind->getValueKind()->getZExtValue(); 645 uint64_t Index = Ind->getIndex()->getZExtValue(); 646 auto It = ProfileDataMap.find(Name); 647 if (It == ProfileDataMap.end()) { 648 PerFunctionProfileData PD; 649 PD.NumValueSites[ValueKind] = Index + 1; 650 ProfileDataMap[Name] = PD; 651 } else if (It->second.NumValueSites[ValueKind] <= Index) 652 It->second.NumValueSites[ValueKind] = Index + 1; 653 } 654 655 void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) { 656 GlobalVariable *Name = Ind->getName(); 657 auto It = ProfileDataMap.find(Name); 658 assert(It != ProfileDataMap.end() && It->second.DataVar && 659 "value profiling detected in function with no counter incerement"); 660 661 GlobalVariable *DataVar = It->second.DataVar; 662 uint64_t ValueKind = Ind->getValueKind()->getZExtValue(); 663 uint64_t Index = Ind->getIndex()->getZExtValue(); 664 for (uint32_t Kind = IPVK_First; Kind < ValueKind; ++Kind) 665 Index += It->second.NumValueSites[Kind]; 666 667 IRBuilder<> Builder(Ind); 668 bool IsMemOpSize = (Ind->getValueKind()->getZExtValue() == 669 llvm::InstrProfValueKind::IPVK_MemOPSize); 670 CallInst *Call = nullptr; 671 auto *TLI = &GetTLI(*Ind->getFunction()); 672 673 // To support value profiling calls within Windows exception handlers, funclet 674 // information contained within operand bundles needs to be copied over to 675 // the library call. This is required for the IR to be processed by the 676 // WinEHPrepare pass. 677 SmallVector<OperandBundleDef, 1> OpBundles; 678 Ind->getOperandBundlesAsDefs(OpBundles); 679 if (!IsMemOpSize) { 680 Value *Args[3] = {Ind->getTargetValue(), 681 Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()), 682 Builder.getInt32(Index)}; 683 Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args, 684 OpBundles); 685 } else if (!UseOldMemOpValueProf) { 686 Value *Args[3] = {Ind->getTargetValue(), 687 Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()), 688 Builder.getInt32(Index)}; 689 Call = Builder.CreateCall( 690 getOrInsertValueProfilingCall(*M, *TLI, ValueProfilingCallType::MemOp), 691 Args, OpBundles); 692 } else { 693 Value *Args[6] = { 694 Ind->getTargetValue(), 695 Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()), 696 Builder.getInt32(Index), 697 Builder.getInt64(MemOPSizeRangeStart), 698 Builder.getInt64(MemOPSizeRangeLast), 699 Builder.getInt64(MemOPSizeLarge == 0 ? INT64_MIN : MemOPSizeLarge)}; 700 Call = Builder.CreateCall(getOrInsertValueProfilingCall( 701 *M, *TLI, ValueProfilingCallType::OldMemOp), 702 Args, OpBundles); 703 } 704 if (auto AK = TLI->getExtAttrForI32Param(false)) 705 Call->addParamAttr(2, AK); 706 Ind->replaceAllUsesWith(Call); 707 Ind->eraseFromParent(); 708 } 709 710 void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) { 711 GlobalVariable *Counters = getOrCreateRegionCounters(Inc); 712 713 IRBuilder<> Builder(Inc); 714 uint64_t Index = Inc->getIndex()->getZExtValue(); 715 Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters->getValueType(), 716 Counters, 0, Index); 717 718 if (isRuntimeCounterRelocationEnabled()) { 719 Type *Int64Ty = Type::getInt64Ty(M->getContext()); 720 Type *Int64PtrTy = Type::getInt64PtrTy(M->getContext()); 721 Function *Fn = Inc->getParent()->getParent(); 722 Instruction &I = Fn->getEntryBlock().front(); 723 LoadInst *LI = dyn_cast<LoadInst>(&I); 724 if (!LI) { 725 IRBuilder<> Builder(&I); 726 Type *Int64Ty = Type::getInt64Ty(M->getContext()); 727 GlobalVariable *Bias = M->getGlobalVariable(getInstrProfCounterBiasVarName()); 728 if (!Bias) { 729 Bias = new GlobalVariable(*M, Int64Ty, false, GlobalValue::LinkOnceODRLinkage, 730 Constant::getNullValue(Int64Ty), 731 getInstrProfCounterBiasVarName()); 732 Bias->setVisibility(GlobalVariable::HiddenVisibility); 733 } 734 LI = Builder.CreateLoad(Int64Ty, Bias); 735 } 736 auto *Add = Builder.CreateAdd(Builder.CreatePtrToInt(Addr, Int64Ty), LI); 737 Addr = Builder.CreateIntToPtr(Add, Int64PtrTy); 738 } 739 740 if (Options.Atomic || AtomicCounterUpdateAll || 741 (Index == 0 && AtomicFirstCounter)) { 742 Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(), 743 AtomicOrdering::Monotonic); 744 } else { 745 Value *IncStep = Inc->getStep(); 746 Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount"); 747 auto *Count = Builder.CreateAdd(Load, Inc->getStep()); 748 auto *Store = Builder.CreateStore(Count, Addr); 749 if (isCounterPromotionEnabled()) 750 PromotionCandidates.emplace_back(cast<Instruction>(Load), Store); 751 } 752 Inc->eraseFromParent(); 753 } 754 755 void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) { 756 ConstantArray *Names = 757 cast<ConstantArray>(CoverageNamesVar->getInitializer()); 758 for (unsigned I = 0, E = Names->getNumOperands(); I < E; ++I) { 759 Constant *NC = Names->getOperand(I); 760 Value *V = NC->stripPointerCasts(); 761 assert(isa<GlobalVariable>(V) && "Missing reference to function name"); 762 GlobalVariable *Name = cast<GlobalVariable>(V); 763 764 Name->setLinkage(GlobalValue::PrivateLinkage); 765 ReferencedNames.push_back(Name); 766 NC->dropAllReferences(); 767 } 768 CoverageNamesVar->eraseFromParent(); 769 } 770 771 /// Get the name of a profiling variable for a particular function. 772 static std::string getVarName(InstrProfIncrementInst *Inc, StringRef Prefix) { 773 StringRef NamePrefix = getInstrProfNameVarPrefix(); 774 StringRef Name = Inc->getName()->getName().substr(NamePrefix.size()); 775 Function *F = Inc->getParent()->getParent(); 776 Module *M = F->getParent(); 777 if (!DoHashBasedCounterSplit || !isIRPGOFlagSet(M) || 778 !canRenameComdatFunc(*F)) 779 return (Prefix + Name).str(); 780 uint64_t FuncHash = Inc->getHash()->getZExtValue(); 781 SmallVector<char, 24> HashPostfix; 782 if (Name.endswith((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix))) 783 return (Prefix + Name).str(); 784 return (Prefix + Name + "." + Twine(FuncHash)).str(); 785 } 786 787 static inline bool shouldRecordFunctionAddr(Function *F) { 788 // Check the linkage 789 bool HasAvailableExternallyLinkage = F->hasAvailableExternallyLinkage(); 790 if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() && 791 !HasAvailableExternallyLinkage) 792 return true; 793 794 // A function marked 'alwaysinline' with available_externally linkage can't 795 // have its address taken. Doing so would create an undefined external ref to 796 // the function, which would fail to link. 797 if (HasAvailableExternallyLinkage && 798 F->hasFnAttribute(Attribute::AlwaysInline)) 799 return false; 800 801 // Prohibit function address recording if the function is both internal and 802 // COMDAT. This avoids the profile data variable referencing internal symbols 803 // in COMDAT. 804 if (F->hasLocalLinkage() && F->hasComdat()) 805 return false; 806 807 // Check uses of this function for other than direct calls or invokes to it. 808 // Inline virtual functions have linkeOnceODR linkage. When a key method 809 // exists, the vtable will only be emitted in the TU where the key method 810 // is defined. In a TU where vtable is not available, the function won't 811 // be 'addresstaken'. If its address is not recorded here, the profile data 812 // with missing address may be picked by the linker leading to missing 813 // indirect call target info. 814 return F->hasAddressTaken() || F->hasLinkOnceLinkage(); 815 } 816 817 static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) { 818 // Don't do this for Darwin. compiler-rt uses linker magic. 819 if (TT.isOSDarwin()) 820 return false; 821 // Use linker script magic to get data/cnts/name start/end. 822 if (TT.isOSLinux() || TT.isOSFreeBSD() || TT.isOSNetBSD() || 823 TT.isOSSolaris() || TT.isOSFuchsia() || TT.isPS4CPU() || 824 TT.isOSWindows()) 825 return false; 826 827 return true; 828 } 829 830 GlobalVariable * 831 InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) { 832 GlobalVariable *NamePtr = Inc->getName(); 833 auto It = ProfileDataMap.find(NamePtr); 834 PerFunctionProfileData PD; 835 if (It != ProfileDataMap.end()) { 836 if (It->second.RegionCounters) 837 return It->second.RegionCounters; 838 PD = It->second; 839 } 840 841 // Match the linkage and visibility of the name global. COFF supports using 842 // comdats with internal symbols, so do that if we can. 843 Function *Fn = Inc->getParent()->getParent(); 844 GlobalValue::LinkageTypes Linkage = NamePtr->getLinkage(); 845 GlobalValue::VisibilityTypes Visibility = NamePtr->getVisibility(); 846 if (TT.isOSBinFormatCOFF()) { 847 Linkage = GlobalValue::InternalLinkage; 848 Visibility = GlobalValue::DefaultVisibility; 849 } 850 851 // Move the name variable to the right section. Place them in a COMDAT group 852 // if the associated function is a COMDAT. This will make sure that only one 853 // copy of counters of the COMDAT function will be emitted after linking. Keep 854 // in mind that this pass may run before the inliner, so we need to create a 855 // new comdat group for the counters and profiling data. If we use the comdat 856 // of the parent function, that will result in relocations against discarded 857 // sections. 858 bool NeedComdat = needsComdatForCounter(*Fn, *M); 859 if (NeedComdat) { 860 if (TT.isOSBinFormatCOFF()) { 861 // For COFF, put the counters, data, and values each into their own 862 // comdats. We can't use a group because the Visual C++ linker will 863 // report duplicate symbol errors if there are multiple external symbols 864 // with the same name marked IMAGE_COMDAT_SELECT_ASSOCIATIVE. 865 Linkage = GlobalValue::LinkOnceODRLinkage; 866 Visibility = GlobalValue::HiddenVisibility; 867 } 868 } 869 auto MaybeSetComdat = [=](GlobalVariable *GV) { 870 if (NeedComdat) 871 GV->setComdat(M->getOrInsertComdat(GV->getName())); 872 }; 873 874 uint64_t NumCounters = Inc->getNumCounters()->getZExtValue(); 875 LLVMContext &Ctx = M->getContext(); 876 ArrayType *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters); 877 878 // Create the counters variable. 879 auto *CounterPtr = 880 new GlobalVariable(*M, CounterTy, false, Linkage, 881 Constant::getNullValue(CounterTy), 882 getVarName(Inc, getInstrProfCountersVarPrefix())); 883 CounterPtr->setVisibility(Visibility); 884 CounterPtr->setSection( 885 getInstrProfSectionName(IPSK_cnts, TT.getObjectFormat())); 886 CounterPtr->setAlignment(Align(8)); 887 MaybeSetComdat(CounterPtr); 888 CounterPtr->setLinkage(Linkage); 889 890 auto *Int8PtrTy = Type::getInt8PtrTy(Ctx); 891 // Allocate statically the array of pointers to value profile nodes for 892 // the current function. 893 Constant *ValuesPtrExpr = ConstantPointerNull::get(Int8PtrTy); 894 if (ValueProfileStaticAlloc && !needsRuntimeRegistrationOfSectionRange(TT)) { 895 uint64_t NS = 0; 896 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) 897 NS += PD.NumValueSites[Kind]; 898 if (NS) { 899 ArrayType *ValuesTy = ArrayType::get(Type::getInt64Ty(Ctx), NS); 900 901 auto *ValuesVar = 902 new GlobalVariable(*M, ValuesTy, false, Linkage, 903 Constant::getNullValue(ValuesTy), 904 getVarName(Inc, getInstrProfValuesVarPrefix())); 905 ValuesVar->setVisibility(Visibility); 906 ValuesVar->setSection( 907 getInstrProfSectionName(IPSK_vals, TT.getObjectFormat())); 908 ValuesVar->setAlignment(Align(8)); 909 MaybeSetComdat(ValuesVar); 910 ValuesPtrExpr = 911 ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx)); 912 } 913 } 914 915 // Create data variable. 916 auto *Int16Ty = Type::getInt16Ty(Ctx); 917 auto *Int16ArrayTy = ArrayType::get(Int16Ty, IPVK_Last + 1); 918 Type *DataTypes[] = { 919 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) LLVMType, 920 #include "llvm/ProfileData/InstrProfData.inc" 921 }; 922 auto *DataTy = StructType::get(Ctx, makeArrayRef(DataTypes)); 923 924 Constant *FunctionAddr = shouldRecordFunctionAddr(Fn) 925 ? ConstantExpr::getBitCast(Fn, Int8PtrTy) 926 : ConstantPointerNull::get(Int8PtrTy); 927 928 Constant *Int16ArrayVals[IPVK_Last + 1]; 929 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) 930 Int16ArrayVals[Kind] = ConstantInt::get(Int16Ty, PD.NumValueSites[Kind]); 931 932 Constant *DataVals[] = { 933 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Init, 934 #include "llvm/ProfileData/InstrProfData.inc" 935 }; 936 auto *Data = new GlobalVariable(*M, DataTy, false, Linkage, 937 ConstantStruct::get(DataTy, DataVals), 938 getVarName(Inc, getInstrProfDataVarPrefix())); 939 Data->setVisibility(Visibility); 940 Data->setSection(getInstrProfSectionName(IPSK_data, TT.getObjectFormat())); 941 Data->setAlignment(Align(INSTR_PROF_DATA_ALIGNMENT)); 942 MaybeSetComdat(Data); 943 Data->setLinkage(Linkage); 944 945 PD.RegionCounters = CounterPtr; 946 PD.DataVar = Data; 947 ProfileDataMap[NamePtr] = PD; 948 949 // Mark the data variable as used so that it isn't stripped out. 950 UsedVars.push_back(Data); 951 // Now that the linkage set by the FE has been passed to the data and counter 952 // variables, reset Name variable's linkage and visibility to private so that 953 // it can be removed later by the compiler. 954 NamePtr->setLinkage(GlobalValue::PrivateLinkage); 955 // Collect the referenced names to be used by emitNameData. 956 ReferencedNames.push_back(NamePtr); 957 958 return CounterPtr; 959 } 960 961 void InstrProfiling::emitVNodes() { 962 if (!ValueProfileStaticAlloc) 963 return; 964 965 // For now only support this on platforms that do 966 // not require runtime registration to discover 967 // named section start/end. 968 if (needsRuntimeRegistrationOfSectionRange(TT)) 969 return; 970 971 size_t TotalNS = 0; 972 for (auto &PD : ProfileDataMap) { 973 for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) 974 TotalNS += PD.second.NumValueSites[Kind]; 975 } 976 977 if (!TotalNS) 978 return; 979 980 uint64_t NumCounters = TotalNS * NumCountersPerValueSite; 981 // Heuristic for small programs with very few total value sites. 982 // The default value of vp-counters-per-site is chosen based on 983 // the observation that large apps usually have a low percentage 984 // of value sites that actually have any profile data, and thus 985 // the average number of counters per site is low. For small 986 // apps with very few sites, this may not be true. Bump up the 987 // number of counters in this case. 988 #define INSTR_PROF_MIN_VAL_COUNTS 10 989 if (NumCounters < INSTR_PROF_MIN_VAL_COUNTS) 990 NumCounters = std::max(INSTR_PROF_MIN_VAL_COUNTS, (int)NumCounters * 2); 991 992 auto &Ctx = M->getContext(); 993 Type *VNodeTypes[] = { 994 #define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Init) LLVMType, 995 #include "llvm/ProfileData/InstrProfData.inc" 996 }; 997 auto *VNodeTy = StructType::get(Ctx, makeArrayRef(VNodeTypes)); 998 999 ArrayType *VNodesTy = ArrayType::get(VNodeTy, NumCounters); 1000 auto *VNodesVar = new GlobalVariable( 1001 *M, VNodesTy, false, GlobalValue::PrivateLinkage, 1002 Constant::getNullValue(VNodesTy), getInstrProfVNodesVarName()); 1003 VNodesVar->setSection( 1004 getInstrProfSectionName(IPSK_vnodes, TT.getObjectFormat())); 1005 UsedVars.push_back(VNodesVar); 1006 } 1007 1008 void InstrProfiling::emitNameData() { 1009 std::string UncompressedData; 1010 1011 if (ReferencedNames.empty()) 1012 return; 1013 1014 std::string CompressedNameStr; 1015 if (Error E = collectPGOFuncNameStrings(ReferencedNames, CompressedNameStr, 1016 DoInstrProfNameCompression)) { 1017 report_fatal_error(toString(std::move(E)), false); 1018 } 1019 1020 auto &Ctx = M->getContext(); 1021 auto *NamesVal = ConstantDataArray::getString( 1022 Ctx, StringRef(CompressedNameStr), false); 1023 NamesVar = new GlobalVariable(*M, NamesVal->getType(), true, 1024 GlobalValue::PrivateLinkage, NamesVal, 1025 getInstrProfNamesVarName()); 1026 NamesSize = CompressedNameStr.size(); 1027 NamesVar->setSection( 1028 getInstrProfSectionName(IPSK_name, TT.getObjectFormat())); 1029 // On COFF, it's important to reduce the alignment down to 1 to prevent the 1030 // linker from inserting padding before the start of the names section or 1031 // between names entries. 1032 NamesVar->setAlignment(Align(1)); 1033 UsedVars.push_back(NamesVar); 1034 1035 for (auto *NamePtr : ReferencedNames) 1036 NamePtr->eraseFromParent(); 1037 } 1038 1039 void InstrProfiling::emitRegistration() { 1040 if (!needsRuntimeRegistrationOfSectionRange(TT)) 1041 return; 1042 1043 // Construct the function. 1044 auto *VoidTy = Type::getVoidTy(M->getContext()); 1045 auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext()); 1046 auto *Int64Ty = Type::getInt64Ty(M->getContext()); 1047 auto *RegisterFTy = FunctionType::get(VoidTy, false); 1048 auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage, 1049 getInstrProfRegFuncsName(), M); 1050 RegisterF->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); 1051 if (Options.NoRedZone) 1052 RegisterF->addFnAttr(Attribute::NoRedZone); 1053 1054 auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false); 1055 auto *RuntimeRegisterF = 1056 Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage, 1057 getInstrProfRegFuncName(), M); 1058 1059 IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF)); 1060 for (Value *Data : UsedVars) 1061 if (Data != NamesVar && !isa<Function>(Data)) 1062 IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy)); 1063 1064 if (NamesVar) { 1065 Type *ParamTypes[] = {VoidPtrTy, Int64Ty}; 1066 auto *NamesRegisterTy = 1067 FunctionType::get(VoidTy, makeArrayRef(ParamTypes), false); 1068 auto *NamesRegisterF = 1069 Function::Create(NamesRegisterTy, GlobalVariable::ExternalLinkage, 1070 getInstrProfNamesRegFuncName(), M); 1071 IRB.CreateCall(NamesRegisterF, {IRB.CreateBitCast(NamesVar, VoidPtrTy), 1072 IRB.getInt64(NamesSize)}); 1073 } 1074 1075 IRB.CreateRetVoid(); 1076 } 1077 1078 bool InstrProfiling::emitRuntimeHook() { 1079 // We expect the linker to be invoked with -u<hook_var> flag for Linux or 1080 // Fuchsia, in which case there is no need to emit the user function. 1081 if (TT.isOSLinux() || TT.isOSFuchsia()) 1082 return false; 1083 1084 // If the module's provided its own runtime, we don't need to do anything. 1085 if (M->getGlobalVariable(getInstrProfRuntimeHookVarName())) 1086 return false; 1087 1088 // Declare an external variable that will pull in the runtime initialization. 1089 auto *Int32Ty = Type::getInt32Ty(M->getContext()); 1090 auto *Var = 1091 new GlobalVariable(*M, Int32Ty, false, GlobalValue::ExternalLinkage, 1092 nullptr, getInstrProfRuntimeHookVarName()); 1093 1094 // Make a function that uses it. 1095 auto *User = Function::Create(FunctionType::get(Int32Ty, false), 1096 GlobalValue::LinkOnceODRLinkage, 1097 getInstrProfRuntimeHookVarUseFuncName(), M); 1098 User->addFnAttr(Attribute::NoInline); 1099 if (Options.NoRedZone) 1100 User->addFnAttr(Attribute::NoRedZone); 1101 User->setVisibility(GlobalValue::HiddenVisibility); 1102 if (TT.supportsCOMDAT()) 1103 User->setComdat(M->getOrInsertComdat(User->getName())); 1104 1105 IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User)); 1106 auto *Load = IRB.CreateLoad(Int32Ty, Var); 1107 IRB.CreateRet(Load); 1108 1109 // Mark the user variable as used so that it isn't stripped out. 1110 UsedVars.push_back(User); 1111 return true; 1112 } 1113 1114 void InstrProfiling::emitUses() { 1115 if (!UsedVars.empty()) 1116 appendToUsed(*M, UsedVars); 1117 } 1118 1119 void InstrProfiling::emitInitialization() { 1120 // Create ProfileFileName variable. Don't don't this for the 1121 // context-sensitive instrumentation lowering: This lowering is after 1122 // LTO/ThinLTO linking. Pass PGOInstrumentationGenCreateVar should 1123 // have already create the variable before LTO/ThinLTO linking. 1124 if (!IsCS) 1125 createProfileFileNameVar(*M, Options.InstrProfileOutput); 1126 Function *RegisterF = M->getFunction(getInstrProfRegFuncsName()); 1127 if (!RegisterF) 1128 return; 1129 1130 // Create the initialization function. 1131 auto *VoidTy = Type::getVoidTy(M->getContext()); 1132 auto *F = Function::Create(FunctionType::get(VoidTy, false), 1133 GlobalValue::InternalLinkage, 1134 getInstrProfInitFuncName(), M); 1135 F->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); 1136 F->addFnAttr(Attribute::NoInline); 1137 if (Options.NoRedZone) 1138 F->addFnAttr(Attribute::NoRedZone); 1139 1140 // Add the basic block and the necessary calls. 1141 IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F)); 1142 IRB.CreateCall(RegisterF, {}); 1143 IRB.CreateRetVoid(); 1144 1145 appendToGlobalCtors(*M, F, 0); 1146 } 1147