1 //===-- InstrProfiling.cpp - Frontend instrumentation based profiling -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass lowers instrprof_* intrinsics emitted by a frontend for profiling.
10 // It also builds the data structures and initialization code needed for
11 // updating execution counts and emitting the profile at runtime.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Instrumentation/InstrProfiling.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/Analysis/BlockFrequencyInfo.h"
22 #include "llvm/Analysis/BranchProbabilityInfo.h"
23 #include "llvm/Analysis/LoopInfo.h"
24 #include "llvm/Analysis/TargetLibraryInfo.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/BasicBlock.h"
27 #include "llvm/IR/Constant.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DIBuilder.h"
30 #include "llvm/IR/DerivedTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/GlobalValue.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/IRBuilder.h"
37 #include "llvm/IR/Instruction.h"
38 #include "llvm/IR/Instructions.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Module.h"
41 #include "llvm/IR/Type.h"
42 #include "llvm/InitializePasses.h"
43 #include "llvm/Pass.h"
44 #include "llvm/ProfileData/InstrProf.h"
45 #include "llvm/ProfileData/InstrProfCorrelator.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/CommandLine.h"
48 #include "llvm/Support/Error.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
51 #include "llvm/Transforms/Utils/ModuleUtils.h"
52 #include "llvm/Transforms/Utils/SSAUpdater.h"
53 #include <algorithm>
54 #include <cassert>
55 #include <cstddef>
56 #include <cstdint>
57 #include <string>
58 
59 using namespace llvm;
60 
61 #define DEBUG_TYPE "instrprof"
62 
63 namespace llvm {
64 cl::opt<bool>
65     DebugInfoCorrelate("debug-info-correlate", cl::ZeroOrMore,
66                        cl::desc("Use debug info to correlate profiles."),
67                        cl::init(false));
68 } // namespace llvm
69 
70 namespace {
71 
72 cl::opt<bool> DoHashBasedCounterSplit(
73     "hash-based-counter-split",
74     cl::desc("Rename counter variable of a comdat function based on cfg hash"),
75     cl::init(true));
76 
77 cl::opt<bool>
78     RuntimeCounterRelocation("runtime-counter-relocation",
79                              cl::desc("Enable relocating counters at runtime."),
80                              cl::init(false));
81 
82 cl::opt<bool> ValueProfileStaticAlloc(
83     "vp-static-alloc",
84     cl::desc("Do static counter allocation for value profiler"),
85     cl::init(true));
86 
87 cl::opt<double> NumCountersPerValueSite(
88     "vp-counters-per-site",
89     cl::desc("The average number of profile counters allocated "
90              "per value profiling site."),
91     // This is set to a very small value because in real programs, only
    // a very small percentage of value sites have non-zero targets, e.g., 1/30.
93     // For those sites with non-zero profile, the average number of targets
94     // is usually smaller than 2.
95     cl::init(1.0));
96 
97 cl::opt<bool> AtomicCounterUpdateAll(
98     "instrprof-atomic-counter-update-all", cl::ZeroOrMore,
99     cl::desc("Make all profile counter updates atomic (for testing only)"),
100     cl::init(false));
101 
102 cl::opt<bool> AtomicCounterUpdatePromoted(
103     "atomic-counter-update-promoted", cl::ZeroOrMore,
    cl::desc("Do counter update using atomic fetch add "
             "for promoted counters only"),
106     cl::init(false));
107 
108 cl::opt<bool> AtomicFirstCounter(
109     "atomic-first-counter", cl::ZeroOrMore,
110     cl::desc("Use atomic fetch add for first counter in a function (usually "
111              "the entry counter)"),
112     cl::init(false));
113 
// If the option is not specified, the default behavior about whether
// counter promotion is done depends on how the instrumentation lowering
// pipeline is set up, i.e., the default value of this option alone does not
// mean the promotion will be done by default. Explicitly setting this
// option can override the default behavior.
119 cl::opt<bool> DoCounterPromotion("do-counter-promotion", cl::ZeroOrMore,
120                                  cl::desc("Do counter register promotion"),
121                                  cl::init(false));
122 cl::opt<unsigned> MaxNumOfPromotionsPerLoop(
123     cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(20),
    cl::desc("Max number of counter promotions per loop to avoid"
             " increasing register pressure too much"));
126 
127 // A debug option
128 cl::opt<int>
129     MaxNumOfPromotions(cl::ZeroOrMore, "max-counter-promotions", cl::init(-1),
130                        cl::desc("Max number of allowed counter promotions"));
131 
132 cl::opt<unsigned> SpeculativeCounterPromotionMaxExiting(
133     cl::ZeroOrMore, "speculative-counter-promotion-max-exiting", cl::init(3),
    cl::desc("The max number of exiting blocks of a loop to allow "
             "speculative counter promotion"));
136 
137 cl::opt<bool> SpeculativeCounterPromotionToLoop(
138     cl::ZeroOrMore, "speculative-counter-promotion-to-loop", cl::init(false),
    cl::desc("When the option is false, if the target block is in a loop, "
             "the promotion will be disallowed unless the promoted counter "
             "update can be further/iteratively promoted into an acyclic "
             "region."));
143 
144 cl::opt<bool> IterativeCounterPromotion(
145     cl::ZeroOrMore, "iterative-counter-promotion", cl::init(true),
146     cl::desc("Allow counter promotion across the whole loop nest."));
147 
148 cl::opt<bool> SkipRetExitBlock(
149     cl::ZeroOrMore, "skip-ret-exit-block", cl::init(true),
150     cl::desc("Suppress counter promotion if exit blocks contain ret."));
151 
152 class InstrProfilingLegacyPass : public ModulePass {
153   InstrProfiling InstrProf;
154 
155 public:
156   static char ID;
157 
158   InstrProfilingLegacyPass() : ModulePass(ID) {}
159   InstrProfilingLegacyPass(const InstrProfOptions &Options, bool IsCS = false)
160       : ModulePass(ID), InstrProf(Options, IsCS) {
161     initializeInstrProfilingLegacyPassPass(*PassRegistry::getPassRegistry());
162   }
163 
164   StringRef getPassName() const override {
165     return "Frontend instrumentation-based coverage lowering";
166   }
167 
168   bool runOnModule(Module &M) override {
169     auto GetTLI = [this](Function &F) -> TargetLibraryInfo & {
170       return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
171     };
172     return InstrProf.run(M, GetTLI);
173   }
174 
175   void getAnalysisUsage(AnalysisUsage &AU) const override {
176     AU.setPreservesCFG();
177     AU.addRequired<TargetLibraryInfoWrapperPass>();
178   }
179 };
180 
///
/// A helper class to promote one counter RMW operation in the loop
/// into a register update.
///
/// The RMW update of the counter will be sunk out of the loop after
/// the transformation.
///
188 class PGOCounterPromoterHelper : public LoadAndStorePromoter {
189 public:
190   PGOCounterPromoterHelper(
191       Instruction *L, Instruction *S, SSAUpdater &SSA, Value *Init,
192       BasicBlock *PH, ArrayRef<BasicBlock *> ExitBlocks,
193       ArrayRef<Instruction *> InsertPts,
194       DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
195       LoopInfo &LI)
196       : LoadAndStorePromoter({L, S}, SSA), Store(S), ExitBlocks(ExitBlocks),
197         InsertPts(InsertPts), LoopToCandidates(LoopToCands), LI(LI) {
198     assert(isa<LoadInst>(L));
199     assert(isa<StoreInst>(S));
200     SSA.AddAvailableValue(PH, Init);
201   }
202 
203   void doExtraRewritesBeforeFinalDeletion() override {
204     for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
205       BasicBlock *ExitBlock = ExitBlocks[i];
206       Instruction *InsertPos = InsertPts[i];
207       // Get LiveIn value into the ExitBlock. If there are multiple
208       // predecessors, the value is defined by a PHI node in this
209       // block.
210       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
211       Value *Addr = cast<StoreInst>(Store)->getPointerOperand();
212       Type *Ty = LiveInValue->getType();
213       IRBuilder<> Builder(InsertPos);
214       if (AtomicCounterUpdatePromoted)
        // Atomic update currently can only be promoted across the current
        // loop, not the whole loop nest.
217         Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue,
218                                 MaybeAlign(),
219                                 AtomicOrdering::SequentiallyConsistent);
220       else {
221         LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted");
222         auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue);
223         auto *NewStore = Builder.CreateStore(NewVal, Addr);
224 
225         // Now update the parent loop's candidate list:
226         if (IterativeCounterPromotion) {
227           auto *TargetLoop = LI.getLoopFor(ExitBlock);
228           if (TargetLoop)
229             LoopToCandidates[TargetLoop].emplace_back(OldVal, NewStore);
230         }
231       }
232     }
233   }
234 
235 private:
236   Instruction *Store;
237   ArrayRef<BasicBlock *> ExitBlocks;
238   ArrayRef<Instruction *> InsertPts;
239   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
240   LoopInfo &LI;
241 };
242 
243 /// A helper class to do register promotion for all profile counter
244 /// updates in a loop.
245 ///
246 class PGOCounterPromoter {
247 public:
248   PGOCounterPromoter(
249       DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
250       Loop &CurLoop, LoopInfo &LI, BlockFrequencyInfo *BFI)
251       : LoopToCandidates(LoopToCands), L(CurLoop), LI(LI), BFI(BFI) {
252 
253     // Skip collection of ExitBlocks and InsertPts for loops that will not be
254     // able to have counters promoted.
255     SmallVector<BasicBlock *, 8> LoopExitBlocks;
256     SmallPtrSet<BasicBlock *, 8> BlockSet;
257 
258     L.getExitBlocks(LoopExitBlocks);
259     if (!isPromotionPossible(&L, LoopExitBlocks))
260       return;
261 
262     for (BasicBlock *ExitBlock : LoopExitBlocks) {
263       if (BlockSet.insert(ExitBlock).second) {
264         ExitBlocks.push_back(ExitBlock);
265         InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
266       }
267     }
268   }
269 
270   bool run(int64_t *NumPromoted) {
271     // Skip 'infinite' loops:
272     if (ExitBlocks.size() == 0)
273       return false;
274 
275     // Skip if any of the ExitBlocks contains a ret instruction.
276     // This is to prevent dumping of incomplete profile -- if the
    // loop is a long-running loop and dump is called in the middle
    // of the loop, the resulting profile is incomplete.
279     // FIXME: add other heuristics to detect long running loops.
280     if (SkipRetExitBlock) {
281       for (auto BB : ExitBlocks)
282         if (isa<ReturnInst>(BB->getTerminator()))
283           return false;
284     }
285 
286     unsigned MaxProm = getMaxNumOfPromotionsInLoop(&L);
287     if (MaxProm == 0)
288       return false;
289 
290     unsigned Promoted = 0;
291     for (auto &Cand : LoopToCandidates[&L]) {
292 
293       SmallVector<PHINode *, 4> NewPHIs;
294       SSAUpdater SSA(&NewPHIs);
295       Value *InitVal = ConstantInt::get(Cand.first->getType(), 0);
296 
297       // If BFI is set, we will use it to guide the promotions.
298       if (BFI) {
299         auto *BB = Cand.first->getParent();
300         auto InstrCount = BFI->getBlockProfileCount(BB);
301         if (!InstrCount)
302           continue;
303         auto PreheaderCount = BFI->getBlockProfileCount(L.getLoopPreheader());
304         // If the average loop trip count is not greater than 1.5, we skip
305         // promotion.
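        // That is, skip when PreheaderCount * 3 >= InstrCount * 2, i.e. when
        // the estimated trip count InstrCount / PreheaderCount is at most 1.5.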
306         if (PreheaderCount &&
307             (PreheaderCount.getValue() * 3) >= (InstrCount.getValue() * 2))
308           continue;
309       }
310 
311       PGOCounterPromoterHelper Promoter(Cand.first, Cand.second, SSA, InitVal,
312                                         L.getLoopPreheader(), ExitBlocks,
313                                         InsertPts, LoopToCandidates, LI);
314       Promoter.run(SmallVector<Instruction *, 2>({Cand.first, Cand.second}));
315       Promoted++;
316       if (Promoted >= MaxProm)
317         break;
318 
319       (*NumPromoted)++;
320       if (MaxNumOfPromotions != -1 && *NumPromoted >= MaxNumOfPromotions)
321         break;
322     }
323 
324     LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth="
325                       << L.getLoopDepth() << ")\n");
326     return Promoted != 0;
327   }
328 
329 private:
330   bool allowSpeculativeCounterPromotion(Loop *LP) {
331     SmallVector<BasicBlock *, 8> ExitingBlocks;
332     L.getExitingBlocks(ExitingBlocks);
    // Not considered speculative.
334     if (ExitingBlocks.size() == 1)
335       return true;
336     if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
337       return false;
338     return true;
339   }
340 
341   // Check whether the loop satisfies the basic conditions needed to perform
342   // Counter Promotions.
343   bool
344   isPromotionPossible(Loop *LP,
345                       const SmallVectorImpl<BasicBlock *> &LoopExitBlocks) {
346     // We can't insert into a catchswitch.
347     if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) {
348           return isa<CatchSwitchInst>(Exit->getTerminator());
349         }))
350       return false;
351 
352     if (!LP->hasDedicatedExits())
353       return false;
354 
355     BasicBlock *PH = LP->getLoopPreheader();
356     if (!PH)
357       return false;
358 
359     return true;
360   }
361 
362   // Returns the max number of Counter Promotions for LP.
363   unsigned getMaxNumOfPromotionsInLoop(Loop *LP) {
364     SmallVector<BasicBlock *, 8> LoopExitBlocks;
365     LP->getExitBlocks(LoopExitBlocks);
366     if (!isPromotionPossible(LP, LoopExitBlocks))
367       return 0;
368 
369     SmallVector<BasicBlock *, 8> ExitingBlocks;
370     LP->getExitingBlocks(ExitingBlocks);
371 
372     // If BFI is set, we do more aggressive promotions based on BFI.
373     if (BFI)
374       return (unsigned)-1;
375 
    // Not considered speculative.
377     if (ExitingBlocks.size() == 1)
378       return MaxNumOfPromotionsPerLoop;
379 
380     if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
381       return 0;
382 
383     // Whether the target block is in a loop does not matter:
384     if (SpeculativeCounterPromotionToLoop)
385       return MaxNumOfPromotionsPerLoop;
386 
387     // Now check the target block:
388     unsigned MaxProm = MaxNumOfPromotionsPerLoop;
389     for (auto *TargetBlock : LoopExitBlocks) {
390       auto *TargetLoop = LI.getLoopFor(TargetBlock);
391       if (!TargetLoop)
392         continue;
393       unsigned MaxPromForTarget = getMaxNumOfPromotionsInLoop(TargetLoop);
394       unsigned PendingCandsInTarget = LoopToCandidates[TargetLoop].size();
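      // Deduct the candidates already pending in the target loop from its
      // budget; the std::max keeps the unsigned subtraction from wrapping when
      // the pending count exceeds that budget.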
395       MaxProm =
396           std::min(MaxProm, std::max(MaxPromForTarget, PendingCandsInTarget) -
397                                 PendingCandsInTarget);
398     }
399     return MaxProm;
400   }
401 
402   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
403   SmallVector<BasicBlock *, 8> ExitBlocks;
404   SmallVector<Instruction *, 8> InsertPts;
405   Loop &L;
406   LoopInfo &LI;
407   BlockFrequencyInfo *BFI;
408 };
409 
410 enum class ValueProfilingCallType {
  // Individual values are tracked. Currently used for indirect call target
  // profiling.
413   Default,
414 
415   // MemOp: the memop size value profiling.
416   MemOp
417 };
418 
419 } // end anonymous namespace
420 
421 PreservedAnalyses InstrProfiling::run(Module &M, ModuleAnalysisManager &AM) {
422   FunctionAnalysisManager &FAM =
423       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
424   auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
425     return FAM.getResult<TargetLibraryAnalysis>(F);
426   };
427   if (!run(M, GetTLI))
428     return PreservedAnalyses::all();
429 
430   return PreservedAnalyses::none();
431 }
432 
433 char InstrProfilingLegacyPass::ID = 0;
434 INITIALIZE_PASS_BEGIN(InstrProfilingLegacyPass, "instrprof",
435                       "Frontend instrumentation-based coverage lowering.",
436                       false, false)
437 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
438 INITIALIZE_PASS_END(InstrProfilingLegacyPass, "instrprof",
439                     "Frontend instrumentation-based coverage lowering.", false,
440                     false)
441 
442 ModulePass *
443 llvm::createInstrProfilingLegacyPass(const InstrProfOptions &Options,
444                                      bool IsCS) {
445   return new InstrProfilingLegacyPass(Options, IsCS);
446 }
447 
448 bool InstrProfiling::lowerIntrinsics(Function *F) {
449   bool MadeChange = false;
450   PromotionCandidates.clear();
451   for (BasicBlock &BB : *F) {
452     for (Instruction &Instr : llvm::make_early_inc_range(BB)) {
453       if (auto *IPIS = dyn_cast<InstrProfIncrementInstStep>(&Instr)) {
454         lowerIncrement(IPIS);
455         MadeChange = true;
456       } else if (auto *IPI = dyn_cast<InstrProfIncrementInst>(&Instr)) {
457         lowerIncrement(IPI);
458         MadeChange = true;
459       } else if (auto *IPVP = dyn_cast<InstrProfValueProfileInst>(&Instr)) {
460         lowerValueProfileInst(IPVP);
461         MadeChange = true;
462       }
463     }
464   }
465 
466   if (!MadeChange)
467     return false;
468 
469   promoteCounterLoadStores(F);
470   return true;
471 }
472 
473 bool InstrProfiling::isRuntimeCounterRelocationEnabled() const {
  // Mach-O doesn't support weak external references.
475   if (TT.isOSBinFormatMachO())
476     return false;
477 
478   if (RuntimeCounterRelocation.getNumOccurrences() > 0)
479     return RuntimeCounterRelocation;
480 
481   // Fuchsia uses runtime counter relocation by default.
482   return TT.isOSFuchsia();
483 }
484 
485 bool InstrProfiling::isCounterPromotionEnabled() const {
486   if (DoCounterPromotion.getNumOccurrences() > 0)
487     return DoCounterPromotion;
488 
489   return Options.DoCounterPromotion;
490 }
491 
492 void InstrProfiling::promoteCounterLoadStores(Function *F) {
493   if (!isCounterPromotionEnabled())
494     return;
495 
496   DominatorTree DT(*F);
497   LoopInfo LI(DT);
498   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> LoopPromotionCandidates;
499 
500   std::unique_ptr<BlockFrequencyInfo> BFI;
501   if (Options.UseBFIInPromotion) {
502     std::unique_ptr<BranchProbabilityInfo> BPI;
503     BPI.reset(new BranchProbabilityInfo(*F, LI, &GetTLI(*F)));
504     BFI.reset(new BlockFrequencyInfo(*F, *BPI, LI));
505   }
506 
507   for (const auto &LoadStore : PromotionCandidates) {
508     auto *CounterLoad = LoadStore.first;
509     auto *CounterStore = LoadStore.second;
510     BasicBlock *BB = CounterLoad->getParent();
511     Loop *ParentLoop = LI.getLoopFor(BB);
512     if (!ParentLoop)
513       continue;
514     LoopPromotionCandidates[ParentLoop].emplace_back(CounterLoad, CounterStore);
515   }
516 
517   SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder();
518 
519   // Do a post-order traversal of the loops so that counter updates can be
520   // iteratively hoisted outside the loop nest.
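  // (Reversing the preorder visits every loop before its parent, so candidates
  // that an inner promotion adds to an enclosing loop are still processed.)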
521   for (auto *Loop : llvm::reverse(Loops)) {
522     PGOCounterPromoter Promoter(LoopPromotionCandidates, *Loop, LI, BFI.get());
523     Promoter.run(&TotalCountersPromoted);
524   }
525 }
526 
527 static bool needsRuntimeHookUnconditionally(const Triple &TT) {
  // On Fuchsia, we only need the runtime hook if any counters are present.
529   if (TT.isOSFuchsia())
530     return false;
531 
532   return true;
533 }
534 
535 /// Check if the module contains uses of any profiling intrinsics.
536 static bool containsProfilingIntrinsics(Module &M) {
537   auto containsIntrinsic = [&](int ID) {
538     if (auto *F = M.getFunction(Intrinsic::getName(ID)))
539       return !F->use_empty();
540     return false;
541   };
542   return containsIntrinsic(llvm::Intrinsic::instrprof_increment) ||
543          containsIntrinsic(llvm::Intrinsic::instrprof_increment_step) ||
544          containsIntrinsic(llvm::Intrinsic::instrprof_value_profile);
545 }
546 
547 bool InstrProfiling::run(
548     Module &M, std::function<const TargetLibraryInfo &(Function &F)> GetTLI) {
549   this->M = &M;
550   this->GetTLI = std::move(GetTLI);
551   NamesVar = nullptr;
552   NamesSize = 0;
553   ProfileDataMap.clear();
554   CompilerUsedVars.clear();
555   UsedVars.clear();
556   TT = Triple(M.getTargetTriple());
557 
558   bool MadeChange = false;
559 
560   // Emit the runtime hook even if no counters are present.
561   if (needsRuntimeHookUnconditionally(TT))
562     MadeChange = emitRuntimeHook();
563 
564   // Improve compile time by avoiding linear scans when there is no work.
565   GlobalVariable *CoverageNamesVar =
566       M.getNamedGlobal(getCoverageUnusedNamesVarName());
567   if (!containsProfilingIntrinsics(M) && !CoverageNamesVar)
568     return MadeChange;
569 
  // The frontend did not know how many value sites there would be inside
  // an instrumented function, so count the instrumented value sites here and
  // record the number as a field in the profile data variable.
573   for (Function &F : M) {
574     InstrProfIncrementInst *FirstProfIncInst = nullptr;
575     for (BasicBlock &BB : F)
576       for (auto I = BB.begin(), E = BB.end(); I != E; I++)
577         if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(I))
578           computeNumValueSiteCounts(Ind);
579         else if (FirstProfIncInst == nullptr)
580           FirstProfIncInst = dyn_cast<InstrProfIncrementInst>(I);
581 
582     // Value profiling intrinsic lowering requires per-function profile data
583     // variable to be created first.
584     if (FirstProfIncInst != nullptr)
585       static_cast<void>(getOrCreateRegionCounters(FirstProfIncInst));
586   }
587 
588   for (Function &F : M)
589     MadeChange |= lowerIntrinsics(&F);
590 
591   if (CoverageNamesVar) {
592     lowerCoverageData(CoverageNamesVar);
593     MadeChange = true;
594   }
595 
596   if (!MadeChange)
597     return false;
598 
599   emitVNodes();
600   emitNameData();
601   emitRuntimeHook();
602   emitRegistration();
603   emitUses();
604   emitInitialization();
605   return true;
606 }
607 
608 static FunctionCallee getOrInsertValueProfilingCall(
609     Module &M, const TargetLibraryInfo &TLI,
610     ValueProfilingCallType CallType = ValueProfilingCallType::Default) {
611   LLVMContext &Ctx = M.getContext();
612   auto *ReturnTy = Type::getVoidTy(M.getContext());
613 
614   AttributeList AL;
615   if (auto AK = TLI.getExtAttrForI32Param(false))
616     AL = AL.addParamAttribute(M.getContext(), 2, AK);
617 
618   assert((CallType == ValueProfilingCallType::Default ||
619           CallType == ValueProfilingCallType::MemOp) &&
620          "Must be Default or MemOp");
621   Type *ParamTypes[] = {
622 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType
623 #include "llvm/ProfileData/InstrProfData.inc"
624   };
625   auto *ValueProfilingCallTy =
626       FunctionType::get(ReturnTy, makeArrayRef(ParamTypes), false);
627   StringRef FuncName = CallType == ValueProfilingCallType::Default
628                            ? getInstrProfValueProfFuncName()
629                            : getInstrProfValueProfMemOpFuncName();
630   return M.getOrInsertFunction(FuncName, ValueProfilingCallTy, AL);
631 }
632 
633 void InstrProfiling::computeNumValueSiteCounts(InstrProfValueProfileInst *Ind) {
634   GlobalVariable *Name = Ind->getName();
635   uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
636   uint64_t Index = Ind->getIndex()->getZExtValue();
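  // NumValueSites records, per value kind, the highest site index seen plus
  // one, i.e. the number of instrumented value sites of that kind.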
637   auto &PD = ProfileDataMap[Name];
638   PD.NumValueSites[ValueKind] =
639       std::max(PD.NumValueSites[ValueKind], (uint32_t)(Index + 1));
640 }
641 
642 void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
643   // TODO: Value profiling heavily depends on the data section which is omitted
644   // in lightweight mode. We need to move the value profile pointer to the
645   // Counter struct to get this working.
646   assert(
647       !DebugInfoCorrelate &&
648       "Value profiling is not yet supported with lightweight instrumentation");
649   GlobalVariable *Name = Ind->getName();
650   auto It = ProfileDataMap.find(Name);
651   assert(It != ProfileDataMap.end() && It->second.DataVar &&
         "value profiling detected in function with no counter increment");
653 
654   GlobalVariable *DataVar = It->second.DataVar;
655   uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
656   uint64_t Index = Ind->getIndex()->getZExtValue();
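  // Flatten the per-kind site index into a single index by adding the site
  // counts of all preceding value kinds.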
657   for (uint32_t Kind = IPVK_First; Kind < ValueKind; ++Kind)
658     Index += It->second.NumValueSites[Kind];
659 
660   IRBuilder<> Builder(Ind);
661   bool IsMemOpSize = (Ind->getValueKind()->getZExtValue() ==
662                       llvm::InstrProfValueKind::IPVK_MemOPSize);
663   CallInst *Call = nullptr;
664   auto *TLI = &GetTLI(*Ind->getFunction());
665 
666   // To support value profiling calls within Windows exception handlers, funclet
667   // information contained within operand bundles needs to be copied over to
668   // the library call. This is required for the IR to be processed by the
669   // WinEHPrepare pass.
670   SmallVector<OperandBundleDef, 1> OpBundles;
671   Ind->getOperandBundlesAsDefs(OpBundles);
672   if (!IsMemOpSize) {
673     Value *Args[3] = {Ind->getTargetValue(),
674                       Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
675                       Builder.getInt32(Index)};
676     Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args,
677                               OpBundles);
678   } else {
679     Value *Args[3] = {Ind->getTargetValue(),
680                       Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
681                       Builder.getInt32(Index)};
682     Call = Builder.CreateCall(
683         getOrInsertValueProfilingCall(*M, *TLI, ValueProfilingCallType::MemOp),
684         Args, OpBundles);
685   }
686   if (auto AK = TLI->getExtAttrForI32Param(false))
687     Call->addParamAttr(2, AK);
688   Ind->replaceAllUsesWith(Call);
689   Ind->eraseFromParent();
690 }
691 
692 void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
693   GlobalVariable *Counters = getOrCreateRegionCounters(Inc);
694 
695   IRBuilder<> Builder(Inc);
696   uint64_t Index = Inc->getIndex()->getZExtValue();
697   Value *Addr = Builder.CreateConstInBoundsGEP2_32(Counters->getValueType(),
698                                                    Counters, 0, Index);
699 
700   if (isRuntimeCounterRelocationEnabled()) {
701     Type *Int64Ty = Type::getInt64Ty(M->getContext());
702     Type *Int64PtrTy = Type::getInt64PtrTy(M->getContext());
703     Function *Fn = Inc->getParent()->getParent();
704     Instruction &I = Fn->getEntryBlock().front();
705     LoadInst *LI = dyn_cast<LoadInst>(&I);
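    // The bias load is created at the front of the entry block, so if the
    // first instruction is already a load, assume it is that bias load and
    // reuse it for this counter.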
706     if (!LI) {
707       IRBuilder<> Builder(&I);
708       GlobalVariable *Bias =
709           M->getGlobalVariable(getInstrProfCounterBiasVarName());
710       if (!Bias) {
711         // Compiler must define this variable when runtime counter relocation
712         // is being used. Runtime has a weak external reference that is used
713         // to check whether that's the case or not.
714         Bias = new GlobalVariable(
715             *M, Int64Ty, false, GlobalValue::LinkOnceODRLinkage,
716             Constant::getNullValue(Int64Ty), getInstrProfCounterBiasVarName());
717         Bias->setVisibility(GlobalVariable::HiddenVisibility);
718         // A definition that's weak (linkonce_odr) without being in a COMDAT
719         // section wouldn't lead to link errors, but it would lead to a dead
720         // data word from every TU but one. Putting it in COMDAT ensures there
721         // will be exactly one data slot in the link.
722         if (TT.supportsCOMDAT())
723           Bias->setComdat(M->getOrInsertComdat(Bias->getName()));
724       }
725       LI = Builder.CreateLoad(Int64Ty, Bias);
726     }
727     auto *Add = Builder.CreateAdd(Builder.CreatePtrToInt(Addr, Int64Ty), LI);
728     Addr = Builder.CreateIntToPtr(Add, Int64PtrTy);
729   }
730 
731   if (Options.Atomic || AtomicCounterUpdateAll ||
732       (Index == 0 && AtomicFirstCounter)) {
733     Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(),
734                             MaybeAlign(), AtomicOrdering::Monotonic);
735   } else {
736     Value *IncStep = Inc->getStep();
737     Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount");
738     auto *Count = Builder.CreateAdd(Load, Inc->getStep());
739     auto *Store = Builder.CreateStore(Count, Addr);
740     if (isCounterPromotionEnabled())
741       PromotionCandidates.emplace_back(cast<Instruction>(Load), Store);
742   }
743   Inc->eraseFromParent();
744 }
745 
746 void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) {
747   ConstantArray *Names =
748       cast<ConstantArray>(CoverageNamesVar->getInitializer());
749   for (unsigned I = 0, E = Names->getNumOperands(); I < E; ++I) {
750     Constant *NC = Names->getOperand(I);
751     Value *V = NC->stripPointerCasts();
752     assert(isa<GlobalVariable>(V) && "Missing reference to function name");
753     GlobalVariable *Name = cast<GlobalVariable>(V);
754 
755     Name->setLinkage(GlobalValue::PrivateLinkage);
756     ReferencedNames.push_back(Name);
757     NC->dropAllReferences();
758   }
759   CoverageNamesVar->eraseFromParent();
760 }
761 
762 /// Get the name of a profiling variable for a particular function.
763 static std::string getVarName(InstrProfInstBase *Inc, StringRef Prefix,
764                               bool &Renamed) {
765   StringRef NamePrefix = getInstrProfNameVarPrefix();
766   StringRef Name = Inc->getName()->getName().substr(NamePrefix.size());
767   Function *F = Inc->getParent()->getParent();
768   Module *M = F->getParent();
769   if (!DoHashBasedCounterSplit || !isIRPGOFlagSet(M) ||
770       !canRenameComdatFunc(*F)) {
771     Renamed = false;
772     return (Prefix + Name).str();
773   }
774   Renamed = true;
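  // E.g., for a hypothetical function "foo" with CFG hash 123, the counters
  // variable becomes "__profc_foo.123"; names already ending in the hash
  // suffix are returned unchanged below.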
775   uint64_t FuncHash = Inc->getHash()->getZExtValue();
776   SmallVector<char, 24> HashPostfix;
777   if (Name.endswith((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix)))
778     return (Prefix + Name).str();
779   return (Prefix + Name + "." + Twine(FuncHash)).str();
780 }
781 
782 static uint64_t getIntModuleFlagOrZero(const Module &M, StringRef Flag) {
783   auto *MD = dyn_cast_or_null<ConstantAsMetadata>(M.getModuleFlag(Flag));
784   if (!MD)
785     return 0;
786 
787   // If the flag is a ConstantAsMetadata, it should be an integer representable
788   // in 64-bits.
789   return cast<ConstantInt>(MD->getValue())->getZExtValue();
790 }
791 
792 static bool enablesValueProfiling(const Module &M) {
793   return isIRPGOFlagSet(&M) ||
794          getIntModuleFlagOrZero(M, "EnableValueProfiling") != 0;
795 }
796 
797 // Conservatively returns true if data variables may be referenced by code.
798 static bool profDataReferencedByCode(const Module &M) {
799   return enablesValueProfiling(M);
800 }
801 
802 static inline bool shouldRecordFunctionAddr(Function *F) {
803   // Only record function addresses if IR PGO is enabled or if clang value
804   // profiling is enabled. Recording function addresses greatly increases object
805   // file size, because it prevents the inliner from deleting functions that
806   // have been inlined everywhere.
807   if (!profDataReferencedByCode(*F->getParent()))
808     return false;
809 
810   // Check the linkage
811   bool HasAvailableExternallyLinkage = F->hasAvailableExternallyLinkage();
812   if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() &&
813       !HasAvailableExternallyLinkage)
814     return true;
815 
816   // A function marked 'alwaysinline' with available_externally linkage can't
817   // have its address taken. Doing so would create an undefined external ref to
818   // the function, which would fail to link.
819   if (HasAvailableExternallyLinkage &&
820       F->hasFnAttribute(Attribute::AlwaysInline))
821     return false;
822 
823   // Prohibit function address recording if the function is both internal and
824   // COMDAT. This avoids the profile data variable referencing internal symbols
825   // in COMDAT.
826   if (F->hasLocalLinkage() && F->hasComdat())
827     return false;
828 
829   // Check uses of this function for other than direct calls or invokes to it.
  // Inline virtual functions have linkonce_odr linkage. When a key method
  // exists, the vtable will only be emitted in the TU where the key method
  // is defined. In a TU where the vtable is not available, the function won't
  // be 'addresstaken'. If its address is not recorded here, the profile data
  // with a missing address may be picked by the linker, leading to missing
  // indirect call target info.
836   return F->hasAddressTaken() || F->hasLinkOnceLinkage();
837 }
838 
839 static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
  // Don't do this for Darwin; compiler-rt uses linker magic.
841   if (TT.isOSDarwin())
842     return false;
843   // Use linker script magic to get data/cnts/name start/end.
844   if (TT.isOSLinux() || TT.isOSFreeBSD() || TT.isOSNetBSD() ||
845       TT.isOSSolaris() || TT.isOSFuchsia() || TT.isPS4CPU() || TT.isOSWindows())
846     return false;
847 
848   return true;
849 }
850 
851 GlobalVariable *
852 InstrProfiling::getOrCreateRegionCounters(InstrProfInstBase *Inc) {
853   GlobalVariable *NamePtr = Inc->getName();
854   auto &PD = ProfileDataMap[NamePtr];
855   if (PD.RegionCounters)
856     return PD.RegionCounters;
857 
858   // Match the linkage and visibility of the name global.
859   Function *Fn = Inc->getParent()->getParent();
860   GlobalValue::LinkageTypes Linkage = NamePtr->getLinkage();
861   GlobalValue::VisibilityTypes Visibility = NamePtr->getVisibility();
862 
863   // Use internal rather than private linkage so the counter variable shows up
864   // in the symbol table when using debug info for correlation.
865   if (DebugInfoCorrelate && TT.isOSBinFormatMachO() &&
866       Linkage == GlobalValue::PrivateLinkage)
867     Linkage = GlobalValue::InternalLinkage;
868 
  // Due to a limitation of the binder as of 2021/09/28, duplicate weak
  // symbols in the same csect won't be discarded. When there are duplicate weak
  // symbols, we can NOT guarantee that the relocations get resolved to the
  // intended weak symbol, so we cannot ensure the correctness of the relative
  // CounterPtr; therefore we have to use private linkage for counter and data
  // symbols.
874   if (TT.isOSBinFormatXCOFF()) {
875     Linkage = GlobalValue::PrivateLinkage;
876     Visibility = GlobalValue::DefaultVisibility;
877   }
878   // Move the name variable to the right section. Place them in a COMDAT group
879   // if the associated function is a COMDAT. This will make sure that only one
880   // copy of counters of the COMDAT function will be emitted after linking. Keep
881   // in mind that this pass may run before the inliner, so we need to create a
882   // new comdat group for the counters and profiling data. If we use the comdat
883   // of the parent function, that will result in relocations against discarded
884   // sections.
885   //
  // If the data variable is referenced by code, counters and data have to be
887   // in different comdats for COFF because the Visual C++ linker will report
888   // duplicate symbol errors if there are multiple external symbols with the
889   // same name marked IMAGE_COMDAT_SELECT_ASSOCIATIVE.
890   //
891   // For ELF, when not using COMDAT, put counters, data and values into a
892   // nodeduplicate COMDAT which is lowered to a zero-flag section group. This
893   // allows -z start-stop-gc to discard the entire group when the function is
894   // discarded.
895   bool DataReferencedByCode = profDataReferencedByCode(*M);
896   bool NeedComdat = needsComdatForCounter(*Fn, *M);
897   bool Renamed;
898   std::string CntsVarName =
899       getVarName(Inc, getInstrProfCountersVarPrefix(), Renamed);
900   std::string DataVarName =
901       getVarName(Inc, getInstrProfDataVarPrefix(), Renamed);
902   auto MaybeSetComdat = [&](GlobalVariable *GV) {
903     bool UseComdat = (NeedComdat || TT.isOSBinFormatELF());
904     if (UseComdat) {
905       StringRef GroupName = TT.isOSBinFormatCOFF() && DataReferencedByCode
906                                 ? GV->getName()
907                                 : CntsVarName;
908       Comdat *C = M->getOrInsertComdat(GroupName);
909       if (!NeedComdat)
910         C->setSelectionKind(Comdat::NoDeduplicate);
911       GV->setComdat(C);
912     }
913   };
914 
915   uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
916   LLVMContext &Ctx = M->getContext();
917   ArrayType *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters);
918 
919   // Create the counters variable.
920   auto *CounterPtr =
921       new GlobalVariable(*M, CounterTy, false, Linkage,
922                          Constant::getNullValue(CounterTy), CntsVarName);
923   CounterPtr->setVisibility(Visibility);
924   CounterPtr->setSection(
925       getInstrProfSectionName(IPSK_cnts, TT.getObjectFormat()));
926   CounterPtr->setAlignment(Align(8));
927   MaybeSetComdat(CounterPtr);
928   CounterPtr->setLinkage(Linkage);
929   PD.RegionCounters = CounterPtr;
930   if (DebugInfoCorrelate) {
931     if (auto *SP = Fn->getSubprogram()) {
932       DIBuilder DB(*M, true, SP->getUnit());
933       Metadata *FunctionNameAnnotation[] = {
934           MDString::get(Ctx, InstrProfCorrelator::FunctionNameAttributeName),
935           MDString::get(Ctx, getPGOFuncNameVarInitializer(NamePtr)),
936       };
937       Metadata *CFGHashAnnotation[] = {
938           MDString::get(Ctx, InstrProfCorrelator::CFGHashAttributeName),
939           ConstantAsMetadata::get(Inc->getHash()),
940       };
941       Metadata *NumCountersAnnotation[] = {
942           MDString::get(Ctx, InstrProfCorrelator::NumCountersAttributeName),
943           ConstantAsMetadata::get(Inc->getNumCounters()),
944       };
945       auto Annotations = DB.getOrCreateArray({
946           MDNode::get(Ctx, FunctionNameAnnotation),
947           MDNode::get(Ctx, CFGHashAnnotation),
948           MDNode::get(Ctx, NumCountersAnnotation),
949       });
950       auto *DICounter = DB.createGlobalVariableExpression(
951           SP, CounterPtr->getName(), /*LinkageName=*/StringRef(), SP->getFile(),
952           /*LineNo=*/0, DB.createUnspecifiedType("Profile Data Type"),
953           CounterPtr->hasLocalLinkage(), /*IsDefined=*/true, /*Expr=*/nullptr,
954           /*Decl=*/nullptr, /*TemplateParams=*/nullptr, /*AlignInBits=*/0,
955           Annotations);
956       CounterPtr->addDebugInfo(DICounter);
957       DB.finalize();
958     } else {
959       std::string Msg = ("Missing debug info for function " + Fn->getName() +
960                          "; required for profile correlation.")
961                             .str();
962       Ctx.diagnose(
963           DiagnosticInfoPGOProfile(M->getName().data(), Msg, DS_Warning));
964     }
965   }
966 
967   auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
968   // Allocate statically the array of pointers to value profile nodes for
969   // the current function.
970   Constant *ValuesPtrExpr = ConstantPointerNull::get(Int8PtrTy);
971   uint64_t NS = 0;
972   for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
973     NS += PD.NumValueSites[Kind];
974   if (NS > 0 && ValueProfileStaticAlloc &&
975       !needsRuntimeRegistrationOfSectionRange(TT)) {
976     ArrayType *ValuesTy = ArrayType::get(Type::getInt64Ty(Ctx), NS);
977     auto *ValuesVar = new GlobalVariable(
978         *M, ValuesTy, false, Linkage, Constant::getNullValue(ValuesTy),
979         getVarName(Inc, getInstrProfValuesVarPrefix(), Renamed));
980     ValuesVar->setVisibility(Visibility);
981     ValuesVar->setSection(
982         getInstrProfSectionName(IPSK_vals, TT.getObjectFormat()));
983     ValuesVar->setAlignment(Align(8));
984     MaybeSetComdat(ValuesVar);
985     ValuesPtrExpr =
986         ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx));
987   }
988 
989   if (DebugInfoCorrelate) {
990     // Mark the counter variable as used so that it isn't optimized out.
991     CompilerUsedVars.push_back(PD.RegionCounters);
992     return PD.RegionCounters;
993   }
994 
995   // Create data variable.
996   auto *IntPtrTy = M->getDataLayout().getIntPtrType(M->getContext());
997   auto *Int16Ty = Type::getInt16Ty(Ctx);
998   auto *Int16ArrayTy = ArrayType::get(Int16Ty, IPVK_Last + 1);
999   Type *DataTypes[] = {
1000 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) LLVMType,
1001 #include "llvm/ProfileData/InstrProfData.inc"
1002   };
1003   auto *DataTy = StructType::get(Ctx, makeArrayRef(DataTypes));
1004 
1005   Constant *FunctionAddr = shouldRecordFunctionAddr(Fn)
1006                                ? ConstantExpr::getBitCast(Fn, Int8PtrTy)
1007                                : ConstantPointerNull::get(Int8PtrTy);
1008 
1009   Constant *Int16ArrayVals[IPVK_Last + 1];
1010   for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
1011     Int16ArrayVals[Kind] = ConstantInt::get(Int16Ty, PD.NumValueSites[Kind]);
1012 
1013   // If the data variable is not referenced by code (if we don't emit
1014   // @llvm.instrprof.value.profile, NS will be 0), and the counter keeps the
1015   // data variable live under linker GC, the data variable can be private. This
1016   // optimization applies to ELF.
1017   //
1018   // On COFF, a comdat leader cannot be local so we require DataReferencedByCode
1019   // to be false.
1020   //
1021   // If profd is in a deduplicate comdat, NS==0 with a hash suffix guarantees
1022   // that other copies must have the same CFG and cannot have value profiling.
1023   // If no hash suffix, other profd copies may be referenced by code.
1024   if (NS == 0 && !(DataReferencedByCode && NeedComdat && !Renamed) &&
1025       (TT.isOSBinFormatELF() ||
1026        (!DataReferencedByCode && TT.isOSBinFormatCOFF()))) {
1027     Linkage = GlobalValue::PrivateLinkage;
1028     Visibility = GlobalValue::DefaultVisibility;
1029   }
1030   auto *Data =
1031       new GlobalVariable(*M, DataTy, false, Linkage, nullptr, DataVarName);
1032   // Reference the counter variable with a label difference (link-time
1033   // constant).
1034   auto *RelativeCounterPtr =
1035       ConstantExpr::getSub(ConstantExpr::getPtrToInt(CounterPtr, IntPtrTy),
1036                            ConstantExpr::getPtrToInt(Data, IntPtrTy));
1037 
1038   Constant *DataVals[] = {
1039 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Init,
1040 #include "llvm/ProfileData/InstrProfData.inc"
1041   };
1042   Data->setInitializer(ConstantStruct::get(DataTy, DataVals));
1043 
1044   Data->setVisibility(Visibility);
1045   Data->setSection(getInstrProfSectionName(IPSK_data, TT.getObjectFormat()));
1046   Data->setAlignment(Align(INSTR_PROF_DATA_ALIGNMENT));
1047   MaybeSetComdat(Data);
1048   Data->setLinkage(Linkage);
1049 
1050   PD.DataVar = Data;
1051 
1052   // Mark the data variable as used so that it isn't stripped out.
1053   CompilerUsedVars.push_back(Data);
1054   // Now that the linkage set by the FE has been passed to the data and counter
  // variables, reset the Name variable's linkage and visibility to private so
  // that it can be removed later by the compiler.
1057   NamePtr->setLinkage(GlobalValue::PrivateLinkage);
1058   // Collect the referenced names to be used by emitNameData.
1059   ReferencedNames.push_back(NamePtr);
1060 
1061   return PD.RegionCounters;
1062 }
1063 
1064 void InstrProfiling::emitVNodes() {
1065   if (!ValueProfileStaticAlloc)
1066     return;
1067 
1068   // For now only support this on platforms that do
1069   // not require runtime registration to discover
1070   // named section start/end.
1071   if (needsRuntimeRegistrationOfSectionRange(TT))
1072     return;
1073 
1074   size_t TotalNS = 0;
1075   for (auto &PD : ProfileDataMap) {
1076     for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
1077       TotalNS += PD.second.NumValueSites[Kind];
1078   }
1079 
1080   if (!TotalNS)
1081     return;
1082 
1083   uint64_t NumCounters = TotalNS * NumCountersPerValueSite;
1084 // Heuristic for small programs with very few total value sites.
1085 // The default value of vp-counters-per-site is chosen based on
1086 // the observation that large apps usually have a low percentage
1087 // of value sites that actually have any profile data, and thus
1088 // the average number of counters per site is low. For small
1089 // apps with very few sites, this may not be true. Bump up the
1090 // number of counters in this case.
1091 #define INSTR_PROF_MIN_VAL_COUNTS 10
1092   if (NumCounters < INSTR_PROF_MIN_VAL_COUNTS)
1093     NumCounters = std::max(INSTR_PROF_MIN_VAL_COUNTS, (int)NumCounters * 2);
1094 
1095   auto &Ctx = M->getContext();
1096   Type *VNodeTypes[] = {
1097 #define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Init) LLVMType,
1098 #include "llvm/ProfileData/InstrProfData.inc"
1099   };
1100   auto *VNodeTy = StructType::get(Ctx, makeArrayRef(VNodeTypes));
1101 
1102   ArrayType *VNodesTy = ArrayType::get(VNodeTy, NumCounters);
1103   auto *VNodesVar = new GlobalVariable(
1104       *M, VNodesTy, false, GlobalValue::PrivateLinkage,
1105       Constant::getNullValue(VNodesTy), getInstrProfVNodesVarName());
1106   VNodesVar->setSection(
1107       getInstrProfSectionName(IPSK_vnodes, TT.getObjectFormat()));
1108   // VNodesVar is used by runtime but not referenced via relocation by other
1109   // sections. Conservatively make it linker retained.
1110   UsedVars.push_back(VNodesVar);
1111 }
1112 
1113 void InstrProfiling::emitNameData() {
1114   std::string UncompressedData;
1115 
1116   if (ReferencedNames.empty())
1117     return;
1118 
1119   std::string CompressedNameStr;
1120   if (Error E = collectPGOFuncNameStrings(ReferencedNames, CompressedNameStr,
1121                                           DoInstrProfNameCompression)) {
1122     report_fatal_error(Twine(toString(std::move(E))), false);
1123   }
1124 
1125   auto &Ctx = M->getContext();
1126   auto *NamesVal =
1127       ConstantDataArray::getString(Ctx, StringRef(CompressedNameStr), false);
1128   NamesVar = new GlobalVariable(*M, NamesVal->getType(), true,
1129                                 GlobalValue::PrivateLinkage, NamesVal,
1130                                 getInstrProfNamesVarName());
1131   NamesSize = CompressedNameStr.size();
1132   NamesVar->setSection(
1133       getInstrProfSectionName(IPSK_name, TT.getObjectFormat()));
1134   // On COFF, it's important to reduce the alignment down to 1 to prevent the
1135   // linker from inserting padding before the start of the names section or
1136   // between names entries.
1137   NamesVar->setAlignment(Align(1));
1138   // NamesVar is used by runtime but not referenced via relocation by other
1139   // sections. Conservatively make it linker retained.
1140   UsedVars.push_back(NamesVar);
1141 
1142   for (auto *NamePtr : ReferencedNames)
1143     NamePtr->eraseFromParent();
1144 }
1145 
1146 void InstrProfiling::emitRegistration() {
1147   if (!needsRuntimeRegistrationOfSectionRange(TT))
1148     return;
1149 
1150   // Construct the function.
1151   auto *VoidTy = Type::getVoidTy(M->getContext());
1152   auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext());
1153   auto *Int64Ty = Type::getInt64Ty(M->getContext());
1154   auto *RegisterFTy = FunctionType::get(VoidTy, false);
1155   auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage,
1156                                      getInstrProfRegFuncsName(), M);
1157   RegisterF->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1158   if (Options.NoRedZone)
1159     RegisterF->addFnAttr(Attribute::NoRedZone);
1160 
1161   auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false);
1162   auto *RuntimeRegisterF =
1163       Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage,
1164                        getInstrProfRegFuncName(), M);
1165 
1166   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF));
1167   for (Value *Data : CompilerUsedVars)
1168     if (!isa<Function>(Data))
1169       IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));
1170   for (Value *Data : UsedVars)
1171     if (Data != NamesVar && !isa<Function>(Data))
1172       IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));
1173 
1174   if (NamesVar) {
1175     Type *ParamTypes[] = {VoidPtrTy, Int64Ty};
1176     auto *NamesRegisterTy =
1177         FunctionType::get(VoidTy, makeArrayRef(ParamTypes), false);
1178     auto *NamesRegisterF =
1179         Function::Create(NamesRegisterTy, GlobalVariable::ExternalLinkage,
1180                          getInstrProfNamesRegFuncName(), M);
1181     IRB.CreateCall(NamesRegisterF, {IRB.CreateBitCast(NamesVar, VoidPtrTy),
1182                                     IRB.getInt64(NamesSize)});
1183   }
1184 
1185   IRB.CreateRetVoid();
1186 }
1187 
1188 bool InstrProfiling::emitRuntimeHook() {
  // We expect the linker to be invoked with the -u<hook_var> flag on Linux,
  // in which case there is no need to emit the external variable.
1191   if (TT.isOSLinux())
1192     return false;
1193 
  // If the module has provided its own runtime, we don't need to do anything.
1195   if (M->getGlobalVariable(getInstrProfRuntimeHookVarName()))
1196     return false;
1197 
1198   // Declare an external variable that will pull in the runtime initialization.
1199   auto *Int32Ty = Type::getInt32Ty(M->getContext());
1200   auto *Var =
1201       new GlobalVariable(*M, Int32Ty, false, GlobalValue::ExternalLinkage,
1202                          nullptr, getInstrProfRuntimeHookVarName());
1203 
1204   if (TT.isOSBinFormatELF()) {
1205     // Mark the user variable as used so that it isn't stripped out.
1206     CompilerUsedVars.push_back(Var);
1207   } else {
1208     // Make a function that uses it.
1209     auto *User = Function::Create(FunctionType::get(Int32Ty, false),
1210                                   GlobalValue::LinkOnceODRLinkage,
1211                                   getInstrProfRuntimeHookVarUseFuncName(), M);
1212     User->addFnAttr(Attribute::NoInline);
1213     if (Options.NoRedZone)
1214       User->addFnAttr(Attribute::NoRedZone);
1215     User->setVisibility(GlobalValue::HiddenVisibility);
1216     if (TT.supportsCOMDAT())
1217       User->setComdat(M->getOrInsertComdat(User->getName()));
1218 
1219     IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
1220     auto *Load = IRB.CreateLoad(Int32Ty, Var);
1221     IRB.CreateRet(Load);
1222 
1223     // Mark the function as used so that it isn't stripped out.
1224     CompilerUsedVars.push_back(User);
1225   }
1226   return true;
1227 }
1228 
1229 void InstrProfiling::emitUses() {
1230   // The metadata sections are parallel arrays. Optimizers (e.g.
1231   // GlobalOpt/ConstantMerge) may not discard associated sections as a unit, so
1232   // we conservatively retain all unconditionally in the compiler.
1233   //
1234   // On ELF and Mach-O, the linker can guarantee the associated sections will be
1235   // retained or discarded as a unit, so llvm.compiler.used is sufficient.
1236   // Similarly on COFF, if prof data is not referenced by code we use one comdat
1237   // and ensure this GC property as well. Otherwise, we have to conservatively
1238   // make all of the sections retained by the linker.
1239   if (TT.isOSBinFormatELF() || TT.isOSBinFormatMachO() ||
1240       (TT.isOSBinFormatCOFF() && !profDataReferencedByCode(*M)))
1241     appendToCompilerUsed(*M, CompilerUsedVars);
1242   else
1243     appendToUsed(*M, CompilerUsedVars);
1244 
1245   // We do not add proper references from used metadata sections to NamesVar and
1246   // VNodesVar, so we have to be conservative and place them in llvm.used
  // regardless of the target.
1248   appendToUsed(*M, UsedVars);
1249 }
1250 
1251 void InstrProfiling::emitInitialization() {
  // Create the ProfileFileName variable. Don't do this for the
  // context-sensitive instrumentation lowering: that lowering runs after
  // LTO/ThinLTO linking, and the PGOInstrumentationGenCreateVar pass should
  // have already created the variable before LTO/ThinLTO linking.
1256   if (!IsCS)
1257     createProfileFileNameVar(*M, Options.InstrProfileOutput);
1258   Function *RegisterF = M->getFunction(getInstrProfRegFuncsName());
1259   if (!RegisterF)
1260     return;
1261 
1262   // Create the initialization function.
1263   auto *VoidTy = Type::getVoidTy(M->getContext());
1264   auto *F = Function::Create(FunctionType::get(VoidTy, false),
1265                              GlobalValue::InternalLinkage,
1266                              getInstrProfInitFuncName(), M);
1267   F->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1268   F->addFnAttr(Attribute::NoInline);
1269   if (Options.NoRedZone)
1270     F->addFnAttr(Attribute::NoRedZone);
1271 
1272   // Add the basic block and the necessary calls.
1273   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F));
1274   IRB.CreateCall(RegisterF, {});
1275   IRB.CreateRetVoid();
1276 
1277   appendToGlobalCtors(*M, F, 0);
1278 }
1279