1 //===-- InstrProfiling.cpp - Frontend instrumentation based profiling -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass lowers instrprof_* intrinsics emitted by a frontend for profiling.
10 // It also builds the data structures and initialization code needed for
11 // updating execution counts and emitting the profile at runtime.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Instrumentation/InstrProfiling.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/Analysis/BlockFrequencyInfo.h"
22 #include "llvm/Analysis/BranchProbabilityInfo.h"
23 #include "llvm/Analysis/LoopInfo.h"
24 #include "llvm/Analysis/TargetLibraryInfo.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/BasicBlock.h"
27 #include "llvm/IR/Constant.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalValue.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/Type.h"
40 #include "llvm/InitializePasses.h"
41 #include "llvm/Pass.h"
42 #include "llvm/ProfileData/InstrProf.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/Error.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
48 #include "llvm/Transforms/Utils/ModuleUtils.h"
49 #include "llvm/Transforms/Utils/SSAUpdater.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <cstddef>
53 #include <cstdint>
54 #include <string>
55 
56 using namespace llvm;
57 
58 #define DEBUG_TYPE "instrprof"
59 
60 // FIXME: These are to be removed after switching to the new memop value
61 // profiling.
62 // The start and end values of the precise value profile range for memory
63 // intrinsic sizes.
64 cl::opt<std::string> MemOPSizeRange(
65     "memop-size-range",
66     cl::desc("Set the range of size in memory intrinsic calls to be profiled "
67              "precisely, in a format of <start_val>:<end_val>"),
68     cl::init(""));
69 
70 // The value that is considered to be a large value in a memory intrinsic.
71 cl::opt<unsigned> MemOPSizeLarge(
72     "memop-size-large",
73     cl::desc("Set the large value threshold in memory intrinsic size profiling. "
74              "Value of 0 disables the large value profiling."),
75     cl::init(8192));
76 
77 cl::opt<bool> UseOldMemOpValueProf(
78     "use-old-memop-value-prof",
79     cl::desc("Use the old memop value profiling buckets. This is "
80              "transitional and to be removed after switching. "),
81     cl::init(false));
82 
83 namespace {
84 
85 cl::opt<bool> DoHashBasedCounterSplit(
86     "hash-based-counter-split",
87     cl::desc("Rename counter variable of a comdat function based on cfg hash"),
88     cl::init(true));
89 
90 cl::opt<bool> RuntimeCounterRelocation(
91     "runtime-counter-relocation",
92     cl::desc("Enable relocating counters at runtime."),
93     cl::init(false));
94 
95 cl::opt<bool> ValueProfileStaticAlloc(
96     "vp-static-alloc",
97     cl::desc("Do static counter allocation for value profiler"),
98     cl::init(true));
99 
100 cl::opt<double> NumCountersPerValueSite(
101     "vp-counters-per-site",
102     cl::desc("The average number of profile counters allocated "
103              "per value profiling site."),
104     // This is set to a very small value because in real programs, only
105     // a very small percentage of value sites have non-zero targets, e.g., 1/30.
106     // For those sites with non-zero profile, the average number of targets
107     // is usually smaller than 2.
108     cl::init(1.0));
109 
110 cl::opt<bool> AtomicCounterUpdateAll(
111     "instrprof-atomic-counter-update-all", cl::ZeroOrMore,
112     cl::desc("Make all profile counter updates atomic (for testing only)"),
113     cl::init(false));
114 
115 cl::opt<bool> AtomicCounterUpdatePromoted(
116     "atomic-counter-update-promoted", cl::ZeroOrMore,
117     cl::desc("Do counter update using atomic fetch add "
118              "for promoted counters only"),
119     cl::init(false));
120 
121 cl::opt<bool> AtomicFirstCounter(
122     "atomic-first-counter", cl::ZeroOrMore,
123     cl::desc("Use atomic fetch add for first counter in a function (usually "
124              "the entry counter)"),
125     cl::init(false));
126 
127 // If the option is not specified, the default behavior about whether
128 // counter promotion is done depends on how the instrumentation lowering
129 // pipeline is set up, i.e., the default value of this option does not by
130 // itself mean the promotion will be done by default. Explicitly
131 // setting this option can override the default behavior.
132 cl::opt<bool> DoCounterPromotion("do-counter-promotion", cl::ZeroOrMore,
133                                  cl::desc("Do counter register promotion"),
134                                  cl::init(false));
135 cl::opt<unsigned> MaxNumOfPromotionsPerLoop(
136     cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(20),
137     cl::desc("Max number of counter promotions per loop, to avoid"
138              " increasing register pressure too much"));
139 
140 // A debug option
141 cl::opt<int>
142     MaxNumOfPromotions(cl::ZeroOrMore, "max-counter-promotions", cl::init(-1),
143                        cl::desc("Max number of allowed counter promotions"));
144 
145 cl::opt<unsigned> SpeculativeCounterPromotionMaxExiting(
146     cl::ZeroOrMore, "speculative-counter-promotion-max-exiting", cl::init(3),
147     cl::desc("The max number of exiting blocks of a loop to allow "
148              "speculative counter promotion"));
149 
150 cl::opt<bool> SpeculativeCounterPromotionToLoop(
151     cl::ZeroOrMore, "speculative-counter-promotion-to-loop", cl::init(false),
152     cl::desc("When the option is false, if the target block is in a loop, "
153              "the promotion will be disallowed unless the promoted counter "
154              "update can be further/iteratively promoted into an acyclic "
155              "region."));
156 
157 cl::opt<bool> IterativeCounterPromotion(
158     cl::ZeroOrMore, "iterative-counter-promotion", cl::init(true),
159     cl::desc("Allow counter promotion across the whole loop nest."));
160 
161 cl::opt<bool> SkipRetExitBlock(
162     cl::ZeroOrMore, "skip-ret-exit-block", cl::init(true),
163     cl::desc("Suppress counter promotion if exit blocks contain ret."));
164 
165 class InstrProfilingLegacyPass : public ModulePass {
166   InstrProfiling InstrProf;
167 
168 public:
169   static char ID;
170 
171   InstrProfilingLegacyPass() : ModulePass(ID) {}
172   InstrProfilingLegacyPass(const InstrProfOptions &Options, bool IsCS = false)
173       : ModulePass(ID), InstrProf(Options, IsCS) {
174     initializeInstrProfilingLegacyPassPass(*PassRegistry::getPassRegistry());
175   }
176 
177   StringRef getPassName() const override {
178     return "Frontend instrumentation-based coverage lowering";
179   }
180 
181   bool runOnModule(Module &M) override {
182     auto GetTLI = [this](Function &F) -> TargetLibraryInfo & {
183       return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
184     };
185     return InstrProf.run(M, GetTLI);
186   }
187 
188   void getAnalysisUsage(AnalysisUsage &AU) const override {
189     AU.setPreservesCFG();
190     AU.addRequired<TargetLibraryInfoWrapperPass>();
191   }
192 };
193 
194 ///
195 /// A helper class to promote one counter RMW operation in the loop
196 /// into a register update.
197 ///
198 /// The RMW update for the counter will be sunk out of the loop after
199 /// the transformation.
200 ///
201 class PGOCounterPromoterHelper : public LoadAndStorePromoter {
202 public:
203   PGOCounterPromoterHelper(
204       Instruction *L, Instruction *S, SSAUpdater &SSA, Value *Init,
205       BasicBlock *PH, ArrayRef<BasicBlock *> ExitBlocks,
206       ArrayRef<Instruction *> InsertPts,
207       DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
208       LoopInfo &LI)
209       : LoadAndStorePromoter({L, S}, SSA), Store(S), ExitBlocks(ExitBlocks),
210         InsertPts(InsertPts), LoopToCandidates(LoopToCands), LI(LI) {
211     assert(isa<LoadInst>(L));
212     assert(isa<StoreInst>(S));
213     SSA.AddAvailableValue(PH, Init);
214   }
215 
216   void doExtraRewritesBeforeFinalDeletion() override {
217     for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
218       BasicBlock *ExitBlock = ExitBlocks[i];
219       Instruction *InsertPos = InsertPts[i];
220       // Get LiveIn value into the ExitBlock. If there are multiple
221       // predecessors, the value is defined by a PHI node in this
222       // block.
223       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
224       Value *Addr = cast<StoreInst>(Store)->getPointerOperand();
225       Type *Ty = LiveInValue->getType();
226       IRBuilder<> Builder(InsertPos);
227       if (AtomicCounterUpdatePromoted)
228         // An atomic update currently can only be promoted across the current
229         // loop, not the whole loop nest.
230         Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue,
231                                 AtomicOrdering::SequentiallyConsistent);
232       else {
233         LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted");
234         auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue);
235         auto *NewStore = Builder.CreateStore(NewVal, Addr);
236 
237         // Now update the parent loop's candidate list:
238         if (IterativeCounterPromotion) {
239           auto *TargetLoop = LI.getLoopFor(ExitBlock);
240           if (TargetLoop)
241             LoopToCandidates[TargetLoop].emplace_back(OldVal, NewStore);
242         }
243       }
244     }
245   }
246 
247 private:
248   Instruction *Store;
249   ArrayRef<BasicBlock *> ExitBlocks;
250   ArrayRef<Instruction *> InsertPts;
251   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
252   LoopInfo &LI;
253 };
254 
255 /// A helper class to do register promotion for all profile counter
256 /// updates in a loop.
257 ///
258 class PGOCounterPromoter {
259 public:
260   PGOCounterPromoter(
261       DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
262       Loop &CurLoop, LoopInfo &LI, BlockFrequencyInfo *BFI)
263       : LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop),
264         LI(LI), BFI(BFI) {
265 
266     // Skip collection of ExitBlocks and InsertPts for loops that will not be
267     // able to have counters promoted.
268     SmallVector<BasicBlock *, 8> LoopExitBlocks;
269     SmallPtrSet<BasicBlock *, 8> BlockSet;
270 
271     L.getExitBlocks(LoopExitBlocks);
272     if (!isPromotionPossible(&L, LoopExitBlocks))
273       return;
274 
275     for (BasicBlock *ExitBlock : LoopExitBlocks) {
276       if (BlockSet.insert(ExitBlock).second) {
277         ExitBlocks.push_back(ExitBlock);
278         InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
279       }
280     }
281   }
282 
283   bool run(int64_t *NumPromoted) {
284     // Skip 'infinite' loops:
285     if (ExitBlocks.size() == 0)
286       return false;
287 
288     // Skip if any of the ExitBlocks contains a ret instruction.
289     // This is to prevent dumping an incomplete profile -- if the loop
290     // is a long-running loop and dump is called in the middle of it,
291     // the resulting profile is incomplete.
292     // FIXME: add other heuristics to detect long running loops.
293     if (SkipRetExitBlock) {
294       for (auto BB : ExitBlocks)
295         if (isa<ReturnInst>(BB->getTerminator()))
296           return false;
297     }
298 
299     unsigned MaxProm = getMaxNumOfPromotionsInLoop(&L);
300     if (MaxProm == 0)
301       return false;
302 
303     unsigned Promoted = 0;
304     for (auto &Cand : LoopToCandidates[&L]) {
305 
306       SmallVector<PHINode *, 4> NewPHIs;
307       SSAUpdater SSA(&NewPHIs);
308       Value *InitVal = ConstantInt::get(Cand.first->getType(), 0);
309 
310       // If BFI is set, we will use it to guide the promotions.
311       if (BFI) {
312         auto *BB = Cand.first->getParent();
313         auto InstrCount = BFI->getBlockProfileCount(BB);
314         if (!InstrCount)
315           continue;
316         auto PreheaderCount = BFI->getBlockProfileCount(L.getLoopPreheader());
317         // If the average loop trip count is not greater than 1.5, we skip
318         // promotion.
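        // (PreheaderCount * 3 >= InstrCount * 2 is just the integer form of
        //  InstrCount / PreheaderCount <= 1.5.)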
319         if (PreheaderCount &&
320             (PreheaderCount.getValue() * 3) >= (InstrCount.getValue() * 2))
321           continue;
322       }
323 
324       PGOCounterPromoterHelper Promoter(Cand.first, Cand.second, SSA, InitVal,
325                                         L.getLoopPreheader(), ExitBlocks,
326                                         InsertPts, LoopToCandidates, LI);
327       Promoter.run(SmallVector<Instruction *, 2>({Cand.first, Cand.second}));
328       Promoted++;
329       if (Promoted >= MaxProm)
330         break;
331 
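      // Promoted/MaxProm cap promotions within this loop; NumPromoted is the
      // running module-wide total, optionally capped by MaxNumOfPromotions.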
332       (*NumPromoted)++;
333       if (MaxNumOfPromotions != -1 && *NumPromoted >= MaxNumOfPromotions)
334         break;
335     }
336 
337     LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth="
338                       << L.getLoopDepth() << ")\n");
339     return Promoted != 0;
340   }
341 
342 private:
343   bool allowSpeculativeCounterPromotion(Loop *LP) {
344     SmallVector<BasicBlock *, 8> ExitingBlocks;
345     L.getExitingBlocks(ExitingBlocks);
346     // Not considered speculative.
347     if (ExitingBlocks.size() == 1)
348       return true;
349     if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
350       return false;
351     return true;
352   }
353 
354   // Check whether the loop satisfies the basic conditions needed to perform
355   // Counter Promotions.
356   bool isPromotionPossible(Loop *LP,
357                            const SmallVectorImpl<BasicBlock *> &LoopExitBlocks) {
358     // We can't insert into a catchswitch.
359     if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) {
360           return isa<CatchSwitchInst>(Exit->getTerminator());
361         }))
362       return false;
363 
364     if (!LP->hasDedicatedExits())
365       return false;
366 
367     BasicBlock *PH = LP->getLoopPreheader();
368     if (!PH)
369       return false;
370 
371     return true;
372   }
373 
374   // Returns the max number of Counter Promotions for LP.
375   unsigned getMaxNumOfPromotionsInLoop(Loop *LP) {
376     SmallVector<BasicBlock *, 8> LoopExitBlocks;
377     LP->getExitBlocks(LoopExitBlocks);
378     if (!isPromotionPossible(LP, LoopExitBlocks))
379       return 0;
380 
381     SmallVector<BasicBlock *, 8> ExitingBlocks;
382     LP->getExitingBlocks(ExitingBlocks);
383 
384     // If BFI is set, we do more aggressive promotions based on BFI.
385     if (BFI)
386       return (unsigned)-1;
387 
388     // Not considered speculative.
389     if (ExitingBlocks.size() == 1)
390       return MaxNumOfPromotionsPerLoop;
391 
392     if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
393       return 0;
394 
395     // Whether the target block is in a loop does not matter:
396     if (SpeculativeCounterPromotionToLoop)
397       return MaxNumOfPromotionsPerLoop;
398 
399     // Now check the target block:
400     unsigned MaxProm = MaxNumOfPromotionsPerLoop;
401     for (auto *TargetBlock : LoopExitBlocks) {
402       auto *TargetLoop = LI.getLoopFor(TargetBlock);
403       if (!TargetLoop)
404         continue;
405       unsigned MaxPromForTarget = getMaxNumOfPromotionsInLoop(TargetLoop);
406       unsigned PendingCandsInTarget = LoopToCandidates[TargetLoop].size();
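      // Subtract the candidates already pending in the target loop; the
      // std::max keeps the unsigned subtraction from wrapping when there are
      // more pending candidates than the target loop's own budget.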
407       MaxProm =
408           std::min(MaxProm, std::max(MaxPromForTarget, PendingCandsInTarget) -
409                                 PendingCandsInTarget);
410     }
411     return MaxProm;
412   }
413 
414   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
415   SmallVector<BasicBlock *, 8> ExitBlocks;
416   SmallVector<Instruction *, 8> InsertPts;
417   Loop &L;
418   LoopInfo &LI;
419   BlockFrequencyInfo *BFI;
420 };
421 
422 enum class ValueProfilingCallType {
423   // Individual values are tracked. Currently used for indirect call target
424   // profiling.
425   Default,
426 
427   // The old memop size value profiling. FIXME: To be removed after switching to
428   // the new one.
429   OldMemOp,
430 
431   // MemOp: the (new) memop size value profiling with extended buckets.
432   MemOp
433 };
434 
435 } // end anonymous namespace
436 
437 PreservedAnalyses InstrProfiling::run(Module &M, ModuleAnalysisManager &AM) {
438   FunctionAnalysisManager &FAM =
439       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
440   auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
441     return FAM.getResult<TargetLibraryAnalysis>(F);
442   };
443   if (!run(M, GetTLI))
444     return PreservedAnalyses::all();
445 
446   return PreservedAnalyses::none();
447 }
448 
449 char InstrProfilingLegacyPass::ID = 0;
450 INITIALIZE_PASS_BEGIN(
451     InstrProfilingLegacyPass, "instrprof",
452     "Frontend instrumentation-based coverage lowering.", false, false)
453 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
454 INITIALIZE_PASS_END(
455     InstrProfilingLegacyPass, "instrprof",
456     "Frontend instrumentation-based coverage lowering.", false, false)
457 
458 ModulePass *
459 llvm::createInstrProfilingLegacyPass(const InstrProfOptions &Options,
460                                      bool IsCS) {
461   return new InstrProfilingLegacyPass(Options, IsCS);
462 }
463 
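// InstrProfIncrementInstStep is a subclass of InstrProfIncrementInst, so try
// the stepped variant first; both are lowered through the same path.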
464 static InstrProfIncrementInst *castToIncrementInst(Instruction *Instr) {
465   InstrProfIncrementInst *Inc = dyn_cast<InstrProfIncrementInstStep>(Instr);
466   if (Inc)
467     return Inc;
468   return dyn_cast<InstrProfIncrementInst>(Instr);
469 }
470 
471 bool InstrProfiling::lowerIntrinsics(Function *F) {
472   bool MadeChange = false;
473   PromotionCandidates.clear();
474   for (BasicBlock &BB : *F) {
475     for (auto I = BB.begin(), E = BB.end(); I != E;) {
476       auto Instr = I++;
477       InstrProfIncrementInst *Inc = castToIncrementInst(&*Instr);
478       if (Inc) {
479         lowerIncrement(Inc);
480         MadeChange = true;
481       } else if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(Instr)) {
482         lowerValueProfileInst(Ind);
483         MadeChange = true;
484       }
485     }
486   }
487 
488   if (!MadeChange)
489     return false;
490 
491   promoteCounterLoadStores(F);
492   return true;
493 }
494 
495 bool InstrProfiling::isRuntimeCounterRelocationEnabled() const {
496   if (RuntimeCounterRelocation.getNumOccurrences() > 0)
497     return RuntimeCounterRelocation;
498 
499   return TT.isOSFuchsia();
500 }
501 
502 bool InstrProfiling::isCounterPromotionEnabled() const {
503   if (DoCounterPromotion.getNumOccurrences() > 0)
504     return DoCounterPromotion;
505 
506   return Options.DoCounterPromotion;
507 }
508 
509 void InstrProfiling::promoteCounterLoadStores(Function *F) {
510   if (!isCounterPromotionEnabled())
511     return;
512 
513   DominatorTree DT(*F);
514   LoopInfo LI(DT);
515   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> LoopPromotionCandidates;
516 
517   std::unique_ptr<BlockFrequencyInfo> BFI;
518   if (Options.UseBFIInPromotion) {
519     std::unique_ptr<BranchProbabilityInfo> BPI;
520     BPI.reset(new BranchProbabilityInfo(*F, LI, &GetTLI(*F)));
521     BFI.reset(new BlockFrequencyInfo(*F, *BPI, LI));
522   }
523 
524   for (const auto &LoadStore : PromotionCandidates) {
525     auto *CounterLoad = LoadStore.first;
526     auto *CounterStore = LoadStore.second;
527     BasicBlock *BB = CounterLoad->getParent();
528     Loop *ParentLoop = LI.getLoopFor(BB);
529     if (!ParentLoop)
530       continue;
531     LoopPromotionCandidates[ParentLoop].emplace_back(CounterLoad, CounterStore);
532   }
533 
534   SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder();
535 
536   // Do a post-order traversal of the loops so that counter updates can be
537   // iteratively hoisted outside the loop nest.
538   for (auto *Loop : llvm::reverse(Loops)) {
539     PGOCounterPromoter Promoter(LoopPromotionCandidates, *Loop, LI, BFI.get());
540     Promoter.run(&TotalCountersPromoted);
541   }
542 }
543 
544 /// Check if the module contains uses of any profiling intrinsics.
545 static bool containsProfilingIntrinsics(Module &M) {
546   if (auto *F = M.getFunction(
547           Intrinsic::getName(llvm::Intrinsic::instrprof_increment)))
548     if (!F->use_empty())
549       return true;
550   if (auto *F = M.getFunction(
551           Intrinsic::getName(llvm::Intrinsic::instrprof_increment_step)))
552     if (!F->use_empty())
553       return true;
554   if (auto *F = M.getFunction(
555           Intrinsic::getName(llvm::Intrinsic::instrprof_value_profile)))
556     if (!F->use_empty())
557       return true;
558   return false;
559 }
560 
561 bool InstrProfiling::run(
562     Module &M, std::function<const TargetLibraryInfo &(Function &F)> GetTLI) {
563   this->M = &M;
564   this->GetTLI = std::move(GetTLI);
565   NamesVar = nullptr;
566   NamesSize = 0;
567   ProfileDataMap.clear();
568   UsedVars.clear();
569   getMemOPSizeRangeFromOption(MemOPSizeRange, MemOPSizeRangeStart,
570                               MemOPSizeRangeLast);
571   TT = Triple(M.getTargetTriple());
572 
573   // Emit the runtime hook even if no counters are present.
574   bool MadeChange = emitRuntimeHook();
575 
576   // Improve compile time by avoiding linear scans when there is no work.
577   GlobalVariable *CoverageNamesVar =
578       M.getNamedGlobal(getCoverageUnusedNamesVarName());
579   if (!containsProfilingIntrinsics(M) && !CoverageNamesVar)
580     return MadeChange;
581 
582   // We did not know how many value sites there would be inside
583   // the instrumented function. Count the number of instrumented target
584   // value sites here so it can be recorded in the profile data variable.
585   for (Function &F : M) {
586     InstrProfIncrementInst *FirstProfIncInst = nullptr;
587     for (BasicBlock &BB : F)
588       for (auto I = BB.begin(), E = BB.end(); I != E; I++)
589         if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(I))
590           computeNumValueSiteCounts(Ind);
591         else if (FirstProfIncInst == nullptr)
592           FirstProfIncInst = dyn_cast<InstrProfIncrementInst>(I);
593 
594     // Value profiling intrinsic lowering requires the per-function profile
595     // data variable to be created first.
596     if (FirstProfIncInst != nullptr)
597       static_cast<void>(getOrCreateRegionCounters(FirstProfIncInst));
598   }
599 
600   for (Function &F : M)
601     MadeChange |= lowerIntrinsics(&F);
602 
603   if (CoverageNamesVar) {
604     lowerCoverageData(CoverageNamesVar);
605     MadeChange = true;
606   }
607 
608   if (!MadeChange)
609     return false;
610 
611   emitVNodes();
612   emitNameData();
613   emitRegistration();
614   emitUses();
615   emitInitialization();
616   return true;
617 }
618 
619 static FunctionCallee getOrInsertValueProfilingCall(
620     Module &M, const TargetLibraryInfo &TLI,
621     ValueProfilingCallType CallType = ValueProfilingCallType::Default) {
622   LLVMContext &Ctx = M.getContext();
623   auto *ReturnTy = Type::getVoidTy(M.getContext());
624 
625   AttributeList AL;
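  // If the target ABI requires i32 arguments to be sign- or zero-extended,
  // attach that attribute to the counter-index parameter (parameter index 2).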
626   if (auto AK = TLI.getExtAttrForI32Param(false))
627     AL = AL.addParamAttribute(M.getContext(), 2, AK);
628 
629   if (CallType == ValueProfilingCallType::Default ||
630       CallType == ValueProfilingCallType::MemOp) {
631     Type *ParamTypes[] = {
632 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType
633 #include "llvm/ProfileData/InstrProfData.inc"
634     };
635     auto *ValueProfilingCallTy =
636         FunctionType::get(ReturnTy, makeArrayRef(ParamTypes), false);
637     StringRef FuncName = CallType == ValueProfilingCallType::Default
638                              ? getInstrProfValueProfFuncName()
639                              : getInstrProfValueProfMemOpFuncName();
640     return M.getOrInsertFunction(FuncName, ValueProfilingCallTy, AL);
641   } else {
642     // FIXME: This code is to be removed after switching to the new memop value
643     // profiling.
644     assert(CallType == ValueProfilingCallType::OldMemOp);
645     Type *RangeParamTypes[] = {
646 #define VALUE_RANGE_PROF 1
647 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType
648 #include "llvm/ProfileData/InstrProfData.inc"
649 #undef VALUE_RANGE_PROF
650     };
651     auto *ValueRangeProfilingCallTy =
652         FunctionType::get(ReturnTy, makeArrayRef(RangeParamTypes), false);
653     return M.getOrInsertFunction(getInstrProfValueRangeProfFuncName(),
654                                  ValueRangeProfilingCallTy, AL);
655   }
656 }
657 
658 void InstrProfiling::computeNumValueSiteCounts(InstrProfValueProfileInst *Ind) {
659   GlobalVariable *Name = Ind->getName();
660   uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
661   uint64_t Index = Ind->getIndex()->getZExtValue();
662   auto It = ProfileDataMap.find(Name);
663   if (It == ProfileDataMap.end()) {
664     PerFunctionProfileData PD;
665     PD.NumValueSites[ValueKind] = Index + 1;
666     ProfileDataMap[Name] = PD;
667   } else if (It->second.NumValueSites[ValueKind] <= Index)
668     It->second.NumValueSites[ValueKind] = Index + 1;
669 }
670 
671 void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
672   GlobalVariable *Name = Ind->getName();
673   auto It = ProfileDataMap.find(Name);
674   assert(It != ProfileDataMap.end() && It->second.DataVar &&
675          "value profiling detected in function with no counter increment");
676 
677   GlobalVariable *DataVar = It->second.DataVar;
678   uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
679   uint64_t Index = Ind->getIndex()->getZExtValue();
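  // Value-profile sites are laid out kind by kind, so offset the site index
  // by the number of sites of all preceding value kinds.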
680   for (uint32_t Kind = IPVK_First; Kind < ValueKind; ++Kind)
681     Index += It->second.NumValueSites[Kind];
682 
683   IRBuilder<> Builder(Ind);
684   bool IsMemOpSize = (Ind->getValueKind()->getZExtValue() ==
685                       llvm::InstrProfValueKind::IPVK_MemOPSize);
686   CallInst *Call = nullptr;
687   auto *TLI = &GetTLI(*Ind->getFunction());
688 
689   // To support value profiling calls within Windows exception handlers, funclet
690   // information contained within operand bundles needs to be copied over to
691   // the library call. This is required for the IR to be processed by the
692   // WinEHPrepare pass.
693   SmallVector<OperandBundleDef, 1> OpBundles;
694   Ind->getOperandBundlesAsDefs(OpBundles);
695   if (!IsMemOpSize) {
696     Value *Args[3] = {Ind->getTargetValue(),
697                       Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
698                       Builder.getInt32(Index)};
699     Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args,
700                               OpBundles);
701   } else if (!UseOldMemOpValueProf) {
702     Value *Args[3] = {Ind->getTargetValue(),
703                       Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
704                       Builder.getInt32(Index)};
705     Call = Builder.CreateCall(
706         getOrInsertValueProfilingCall(*M, *TLI, ValueProfilingCallType::MemOp),
707         Args, OpBundles);
708   } else {
709     Value *Args[6] = {
710         Ind->getTargetValue(),
711         Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
712         Builder.getInt32(Index),
713         Builder.getInt64(MemOPSizeRangeStart),
714         Builder.getInt64(MemOPSizeRangeLast),
715         Builder.getInt64(MemOPSizeLarge == 0 ? INT64_MIN : MemOPSizeLarge)};
716     Call = Builder.CreateCall(getOrInsertValueProfilingCall(
717                                   *M, *TLI, ValueProfilingCallType::OldMemOp),
718                               Args, OpBundles);
719   }
720   if (auto AK = TLI->getExtAttrForI32Param(false))
721     Call->addParamAttr(2, AK);
722   Ind->replaceAllUsesWith(Call);
723   Ind->eraseFromParent();
724 }
725 
726 void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
727   GlobalVariable *Counters = getOrCreateRegionCounters(Inc);
728 
729   IRBuilder<> Builder(Inc);
730   uint64_t Index = Inc->getIndex()->getZExtValue();
731   Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters->getValueType(),
732                                                    Counters, 0, Index);
733 
734   if (isRuntimeCounterRelocationEnabled()) {
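    // With runtime counter relocation, the counter address is not used
    // directly: a bias value, kept in a global the runtime can update, is
    // loaded once in the entry block and added to the counter address below.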
735     Type *Int64Ty = Type::getInt64Ty(M->getContext());
736     Type *Int64PtrTy = Type::getInt64PtrTy(M->getContext());
737     Function *Fn = Inc->getParent()->getParent();
738     Instruction &I = Fn->getEntryBlock().front();
739     LoadInst *LI = dyn_cast<LoadInst>(&I);
740     if (!LI) {
741       IRBuilder<> Builder(&I);
742       Type *Int64Ty = Type::getInt64Ty(M->getContext());
743       GlobalVariable *Bias = M->getGlobalVariable(getInstrProfCounterBiasVarName());
744       if (!Bias) {
745         Bias = new GlobalVariable(*M, Int64Ty, false, GlobalValue::LinkOnceODRLinkage,
746                                   Constant::getNullValue(Int64Ty),
747                                   getInstrProfCounterBiasVarName());
748         Bias->setVisibility(GlobalVariable::HiddenVisibility);
749       }
750       LI = Builder.CreateLoad(Int64Ty, Bias);
751     }
752     auto *Add = Builder.CreateAdd(Builder.CreatePtrToInt(Addr, Int64Ty), LI);
753     Addr = Builder.CreateIntToPtr(Add, Int64PtrTy);
754   }
755 
756   if (Options.Atomic || AtomicCounterUpdateAll ||
757       (Index == 0 && AtomicFirstCounter)) {
758     Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(),
759                             AtomicOrdering::Monotonic);
760   } else {
761     Value *IncStep = Inc->getStep();
762     Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount");
763     auto *Count = Builder.CreateAdd(Load, Inc->getStep());
764     auto *Store = Builder.CreateStore(Count, Addr);
765     if (isCounterPromotionEnabled())
766       PromotionCandidates.emplace_back(cast<Instruction>(Load), Store);
767   }
768   Inc->eraseFromParent();
769 }
770 
771 void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) {
772   ConstantArray *Names =
773       cast<ConstantArray>(CoverageNamesVar->getInitializer());
774   for (unsigned I = 0, E = Names->getNumOperands(); I < E; ++I) {
775     Constant *NC = Names->getOperand(I);
776     Value *V = NC->stripPointerCasts();
777     assert(isa<GlobalVariable>(V) && "Missing reference to function name");
778     GlobalVariable *Name = cast<GlobalVariable>(V);
779 
780     Name->setLinkage(GlobalValue::PrivateLinkage);
781     ReferencedNames.push_back(Name);
782     NC->dropAllReferences();
783   }
784   CoverageNamesVar->eraseFromParent();
785 }
786 
787 /// Get the name of a profiling variable for a particular function.
788 static std::string getVarName(InstrProfIncrementInst *Inc, StringRef Prefix) {
789   StringRef NamePrefix = getInstrProfNameVarPrefix();
790   StringRef Name = Inc->getName()->getName().substr(NamePrefix.size());
791   Function *F = Inc->getParent()->getParent();
792   Module *M = F->getParent();
793   if (!DoHashBasedCounterSplit || !isIRPGOFlagSet(M) ||
794       !canRenameComdatFunc(*F))
795     return (Prefix + Name).str();
796   uint64_t FuncHash = Inc->getHash()->getZExtValue();
797   SmallVector<char, 24> HashPostfix;
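  // If the name already ends with the function hash, do not append it again.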
798   if (Name.endswith((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix)))
799     return (Prefix + Name).str();
800   return (Prefix + Name + "." + Twine(FuncHash)).str();
801 }
802 
803 static inline bool shouldRecordFunctionAddr(Function *F) {
804   // Check the linkage
805   bool HasAvailableExternallyLinkage = F->hasAvailableExternallyLinkage();
806   if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() &&
807       !HasAvailableExternallyLinkage)
808     return true;
809 
810   // A function marked 'alwaysinline' with available_externally linkage can't
811   // have its address taken. Doing so would create an undefined external ref to
812   // the function, which would fail to link.
813   if (HasAvailableExternallyLinkage &&
814       F->hasFnAttribute(Attribute::AlwaysInline))
815     return false;
816 
817   // Prohibit function address recording if the function is both internal and
818   // COMDAT. This avoids the profile data variable referencing internal symbols
819   // in COMDAT.
820   if (F->hasLocalLinkage() && F->hasComdat())
821     return false;
822 
823   // Check uses of this function for other than direct calls or invokes to it.
824   // Inline virtual functions have linkonce_odr linkage. When a key method
825   // exists, the vtable will only be emitted in the TU where the key method
826   // is defined. In a TU where the vtable is not available, the function won't
827   // be 'addresstaken'. If its address is not recorded here, the profile data
828   // with the missing address may be picked by the linker, leading to missing
829   // indirect call target info.
830   return F->hasAddressTaken() || F->hasLinkOnceLinkage();
831 }
832 
833 static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
834   // Don't do this for Darwin; compiler-rt uses linker magic there.
835   if (TT.isOSDarwin())
836     return false;
837   // Use linker script magic to get data/cnts/name start/end.
838   if (TT.isOSLinux() || TT.isOSFreeBSD() || TT.isOSNetBSD() ||
839       TT.isOSSolaris() || TT.isOSFuchsia() || TT.isPS4CPU() ||
840       TT.isOSWindows())
841     return false;
842 
843   return true;
844 }
845 
846 GlobalVariable *
847 InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
848   GlobalVariable *NamePtr = Inc->getName();
849   auto It = ProfileDataMap.find(NamePtr);
850   PerFunctionProfileData PD;
851   if (It != ProfileDataMap.end()) {
852     if (It->second.RegionCounters)
853       return It->second.RegionCounters;
854     PD = It->second;
855   }
856 
857   // Match the linkage and visibility of the name global. COFF supports using
858   // comdats with internal symbols, so do that if we can.
859   Function *Fn = Inc->getParent()->getParent();
860   GlobalValue::LinkageTypes Linkage = NamePtr->getLinkage();
861   GlobalValue::VisibilityTypes Visibility = NamePtr->getVisibility();
862   if (TT.isOSBinFormatCOFF()) {
863     Linkage = GlobalValue::InternalLinkage;
864     Visibility = GlobalValue::DefaultVisibility;
865   }
866 
867   // Move the name variable to the right section. Place them in a COMDAT group
868   // if the associated function is a COMDAT. This will make sure that only one
869   // copy of counters of the COMDAT function will be emitted after linking. Keep
870   // in mind that this pass may run before the inliner, so we need to create a
871   // new comdat group for the counters and profiling data. If we use the comdat
872   // of the parent function, that will result in relocations against discarded
873   // sections.
874   bool NeedComdat = needsComdatForCounter(*Fn, *M);
875   if (NeedComdat) {
876     if (TT.isOSBinFormatCOFF()) {
877       // For COFF, put the counters, data, and values each into their own
878       // comdats. We can't use a group because the Visual C++ linker will
879       // report duplicate symbol errors if there are multiple external symbols
880       // with the same name marked IMAGE_COMDAT_SELECT_ASSOCIATIVE.
881       Linkage = GlobalValue::LinkOnceODRLinkage;
882       Visibility = GlobalValue::HiddenVisibility;
883     }
884   }
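  // On COFF each variable below gets its own comdat keyed by its own name; on
  // other object formats all of them share one comdat keyed by the data
  // variable's name, so the group is kept or discarded as a unit.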
885   std::string DataVarName = getVarName(Inc, getInstrProfDataVarPrefix());
886   auto MaybeSetComdat = [=](GlobalVariable *GV) {
887     if (NeedComdat)
888       GV->setComdat(M->getOrInsertComdat(TT.isOSBinFormatCOFF() ? GV->getName()
889                                                                 : DataVarName));
890   };
891 
892   uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
893   LLVMContext &Ctx = M->getContext();
894   ArrayType *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters);
895 
896   // Create the counters variable.
897   auto *CounterPtr =
898       new GlobalVariable(*M, CounterTy, false, Linkage,
899                          Constant::getNullValue(CounterTy),
900                          getVarName(Inc, getInstrProfCountersVarPrefix()));
901   CounterPtr->setVisibility(Visibility);
902   CounterPtr->setSection(
903       getInstrProfSectionName(IPSK_cnts, TT.getObjectFormat()));
904   CounterPtr->setAlignment(Align(8));
905   MaybeSetComdat(CounterPtr);
906   CounterPtr->setLinkage(Linkage);
907 
908   auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
909   // Allocate statically the array of pointers to value profile nodes for
910   // the current function.
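  // The values pointer defaults to null and is replaced below when value
  // profile nodes are statically allocated for this function.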
911   Constant *ValuesPtrExpr = ConstantPointerNull::get(Int8PtrTy);
912   if (ValueProfileStaticAlloc && !needsRuntimeRegistrationOfSectionRange(TT)) {
913     uint64_t NS = 0;
914     for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
915       NS += PD.NumValueSites[Kind];
916     if (NS) {
917       ArrayType *ValuesTy = ArrayType::get(Type::getInt64Ty(Ctx), NS);
918 
919       auto *ValuesVar =
920           new GlobalVariable(*M, ValuesTy, false, Linkage,
921                              Constant::getNullValue(ValuesTy),
922                              getVarName(Inc, getInstrProfValuesVarPrefix()));
923       ValuesVar->setVisibility(Visibility);
924       ValuesVar->setSection(
925           getInstrProfSectionName(IPSK_vals, TT.getObjectFormat()));
926       ValuesVar->setAlignment(Align(8));
927       MaybeSetComdat(ValuesVar);
928       ValuesPtrExpr =
929           ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx));
930     }
931   }
932 
933   // Create data variable.
934   auto *Int16Ty = Type::getInt16Ty(Ctx);
935   auto *Int16ArrayTy = ArrayType::get(Int16Ty, IPVK_Last + 1);
936   Type *DataTypes[] = {
937 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) LLVMType,
938 #include "llvm/ProfileData/InstrProfData.inc"
939   };
940   auto *DataTy = StructType::get(Ctx, makeArrayRef(DataTypes));
941 
942   Constant *FunctionAddr = shouldRecordFunctionAddr(Fn)
943                                ? ConstantExpr::getBitCast(Fn, Int8PtrTy)
944                                : ConstantPointerNull::get(Int8PtrTy);
945 
946   Constant *Int16ArrayVals[IPVK_Last + 1];
947   for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
948     Int16ArrayVals[Kind] = ConstantInt::get(Int16Ty, PD.NumValueSites[Kind]);
949 
950   Constant *DataVals[] = {
951 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Init,
952 #include "llvm/ProfileData/InstrProfData.inc"
953   };
954   auto *Data =
955       new GlobalVariable(*M, DataTy, false, Linkage,
956                          ConstantStruct::get(DataTy, DataVals), DataVarName);
957   Data->setVisibility(Visibility);
958   Data->setSection(getInstrProfSectionName(IPSK_data, TT.getObjectFormat()));
959   Data->setAlignment(Align(INSTR_PROF_DATA_ALIGNMENT));
960   MaybeSetComdat(Data);
961   Data->setLinkage(Linkage);
962 
963   PD.RegionCounters = CounterPtr;
964   PD.DataVar = Data;
965   ProfileDataMap[NamePtr] = PD;
966 
967   // Mark the data variable as used so that it isn't stripped out.
968   UsedVars.push_back(Data);
969   // Now that the linkage set by the FE has been passed to the data and counter
970   // variables, reset Name variable's linkage and visibility to private so that
971   // it can be removed later by the compiler.
972   NamePtr->setLinkage(GlobalValue::PrivateLinkage);
973   // Collect the referenced names to be used by emitNameData.
974   ReferencedNames.push_back(NamePtr);
975 
976   return CounterPtr;
977 }
978 
979 void InstrProfiling::emitVNodes() {
980   if (!ValueProfileStaticAlloc)
981     return;
982 
983   // For now only support this on platforms that do
984   // not require runtime registration to discover
985   // named section start/end.
986   if (needsRuntimeRegistrationOfSectionRange(TT))
987     return;
988 
989   size_t TotalNS = 0;
990   for (auto &PD : ProfileDataMap) {
991     for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
992       TotalNS += PD.second.NumValueSites[Kind];
993   }
994 
995   if (!TotalNS)
996     return;
997 
998   uint64_t NumCounters = TotalNS * NumCountersPerValueSite;
999 // Heuristic for small programs with very few total value sites.
1000 // The default value of vp-counters-per-site is chosen based on
1001 // the observation that large apps usually have a low percentage
1002 // of value sites that actually have any profile data, and thus
1003 // the average number of counters per site is low. For small
1004 // apps with very few sites, this may not be true. Bump up the
1005 // number of counters in this case.
1006 #define INSTR_PROF_MIN_VAL_COUNTS 10
1007   if (NumCounters < INSTR_PROF_MIN_VAL_COUNTS)
1008     NumCounters = std::max(INSTR_PROF_MIN_VAL_COUNTS, (int)NumCounters * 2);
1009 
1010   auto &Ctx = M->getContext();
1011   Type *VNodeTypes[] = {
1012 #define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Init) LLVMType,
1013 #include "llvm/ProfileData/InstrProfData.inc"
1014   };
1015   auto *VNodeTy = StructType::get(Ctx, makeArrayRef(VNodeTypes));
1016 
1017   ArrayType *VNodesTy = ArrayType::get(VNodeTy, NumCounters);
1018   auto *VNodesVar = new GlobalVariable(
1019       *M, VNodesTy, false, GlobalValue::PrivateLinkage,
1020       Constant::getNullValue(VNodesTy), getInstrProfVNodesVarName());
1021   VNodesVar->setSection(
1022       getInstrProfSectionName(IPSK_vnodes, TT.getObjectFormat()));
1023   UsedVars.push_back(VNodesVar);
1024 }
1025 
1026 void InstrProfiling::emitNameData() {
1027   std::string UncompressedData;
1028 
1029   if (ReferencedNames.empty())
1030     return;
1031 
1032   std::string CompressedNameStr;
1033   if (Error E = collectPGOFuncNameStrings(ReferencedNames, CompressedNameStr,
1034                                           DoInstrProfNameCompression)) {
1035     report_fatal_error(toString(std::move(E)), false);
1036   }
1037 
1038   auto &Ctx = M->getContext();
1039   auto *NamesVal = ConstantDataArray::getString(
1040       Ctx, StringRef(CompressedNameStr), false);
1041   NamesVar = new GlobalVariable(*M, NamesVal->getType(), true,
1042                                 GlobalValue::PrivateLinkage, NamesVal,
1043                                 getInstrProfNamesVarName());
1044   NamesSize = CompressedNameStr.size();
1045   NamesVar->setSection(
1046       getInstrProfSectionName(IPSK_name, TT.getObjectFormat()));
1047   // On COFF, it's important to reduce the alignment down to 1 to prevent the
1048   // linker from inserting padding before the start of the names section or
1049   // between names entries.
1050   NamesVar->setAlignment(Align(1));
1051   UsedVars.push_back(NamesVar);
1052 
1053   for (auto *NamePtr : ReferencedNames)
1054     NamePtr->eraseFromParent();
1055 }
1056 
1057 void InstrProfiling::emitRegistration() {
1058   if (!needsRuntimeRegistrationOfSectionRange(TT))
1059     return;
1060 
1061   // Construct the function.
1062   auto *VoidTy = Type::getVoidTy(M->getContext());
1063   auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext());
1064   auto *Int64Ty = Type::getInt64Ty(M->getContext());
1065   auto *RegisterFTy = FunctionType::get(VoidTy, false);
1066   auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage,
1067                                      getInstrProfRegFuncsName(), M);
1068   RegisterF->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1069   if (Options.NoRedZone)
1070     RegisterF->addFnAttr(Attribute::NoRedZone);
1071 
1072   auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false);
1073   auto *RuntimeRegisterF =
1074       Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage,
1075                        getInstrProfRegFuncName(), M);
1076 
1077   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF));
1078   for (Value *Data : UsedVars)
1079     if (Data != NamesVar && !isa<Function>(Data))
1080       IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));
1081 
1082   if (NamesVar) {
1083     Type *ParamTypes[] = {VoidPtrTy, Int64Ty};
1084     auto *NamesRegisterTy =
1085         FunctionType::get(VoidTy, makeArrayRef(ParamTypes), false);
1086     auto *NamesRegisterF =
1087         Function::Create(NamesRegisterTy, GlobalVariable::ExternalLinkage,
1088                          getInstrProfNamesRegFuncName(), M);
1089     IRB.CreateCall(NamesRegisterF, {IRB.CreateBitCast(NamesVar, VoidPtrTy),
1090                                     IRB.getInt64(NamesSize)});
1091   }
1092 
1093   IRB.CreateRetVoid();
1094 }
1095 
1096 bool InstrProfiling::emitRuntimeHook() {
1097   // We expect the linker to be invoked with -u<hook_var> flag for Linux or
1098   // Fuchsia, in which case there is no need to emit the user function.
1099   if (TT.isOSLinux() || TT.isOSFuchsia())
1100     return false;
1101 
1102   // If the module's provided its own runtime, we don't need to do anything.
1103   if (M->getGlobalVariable(getInstrProfRuntimeHookVarName()))
1104     return false;
1105 
1106   // Declare an external variable that will pull in the runtime initialization.
1107   auto *Int32Ty = Type::getInt32Ty(M->getContext());
1108   auto *Var =
1109       new GlobalVariable(*M, Int32Ty, false, GlobalValue::ExternalLinkage,
1110                          nullptr, getInstrProfRuntimeHookVarName());
1111 
1112   // Make a function that uses it.
1113   auto *User = Function::Create(FunctionType::get(Int32Ty, false),
1114                                 GlobalValue::LinkOnceODRLinkage,
1115                                 getInstrProfRuntimeHookVarUseFuncName(), M);
1116   User->addFnAttr(Attribute::NoInline);
1117   if (Options.NoRedZone)
1118     User->addFnAttr(Attribute::NoRedZone);
1119   User->setVisibility(GlobalValue::HiddenVisibility);
1120   if (TT.supportsCOMDAT())
1121     User->setComdat(M->getOrInsertComdat(User->getName()));
1122 
1123   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
1124   auto *Load = IRB.CreateLoad(Int32Ty, Var);
1125   IRB.CreateRet(Load);
1126 
1127   // Mark the user variable as used so that it isn't stripped out.
1128   UsedVars.push_back(User);
1129   return true;
1130 }
1131 
1132 void InstrProfiling::emitUses() {
1133   if (!UsedVars.empty())
1134     appendToUsed(*M, UsedVars);
1135 }
1136 
1137 void InstrProfiling::emitInitialization() {
1138   // Create the ProfileFileName variable. Don't do this for the
1139   // context-sensitive instrumentation lowering: that lowering runs after
1140   // LTO/ThinLTO linking, and the PGOInstrumentationGenCreateVar pass should
1141   // have already created the variable before LTO/ThinLTO linking.
1142   if (!IsCS)
1143     createProfileFileNameVar(*M, Options.InstrProfileOutput);
1144   Function *RegisterF = M->getFunction(getInstrProfRegFuncsName());
1145   if (!RegisterF)
1146     return;
1147 
1148   // Create the initialization function.
1149   auto *VoidTy = Type::getVoidTy(M->getContext());
1150   auto *F = Function::Create(FunctionType::get(VoidTy, false),
1151                              GlobalValue::InternalLinkage,
1152                              getInstrProfInitFuncName(), M);
1153   F->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1154   F->addFnAttr(Attribute::NoInline);
1155   if (Options.NoRedZone)
1156     F->addFnAttr(Attribute::NoRedZone);
1157 
1158   // Add the basic block and the necessary calls.
1159   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F));
1160   IRB.CreateCall(RegisterF, {});
1161   IRB.CreateRetVoid();
1162 
1163   appendToGlobalCtors(*M, F, 0);
1164 }
1165