1 //===-- InstrProfiling.cpp - Frontend instrumentation based profiling -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass lowers instrprof_* intrinsics emitted by a frontend for profiling.
10 // It also builds the data structures and initialization code needed for
11 // updating execution counts and emitting the profile at runtime.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Instrumentation/InstrProfiling.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/Analysis/BlockFrequencyInfo.h"
22 #include "llvm/Analysis/BranchProbabilityInfo.h"
23 #include "llvm/Analysis/LoopInfo.h"
24 #include "llvm/Analysis/TargetLibraryInfo.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/BasicBlock.h"
27 #include "llvm/IR/Constant.h"
28 #include "llvm/IR/Constants.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/GlobalValue.h"
33 #include "llvm/IR/GlobalVariable.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/Type.h"
40 #include "llvm/InitializePasses.h"
41 #include "llvm/Pass.h"
42 #include "llvm/ProfileData/InstrProf.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/Error.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
48 #include "llvm/Transforms/Utils/ModuleUtils.h"
49 #include "llvm/Transforms/Utils/SSAUpdater.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <cstddef>
53 #include <cstdint>
54 #include <string>
55 
56 using namespace llvm;
57 
58 #define DEBUG_TYPE "instrprof"
59 
// The start and end values of the precise value profile range for memory
// intrinsic sizes.
cl::opt<std::string> MemOPSizeRange(
    "memop-size-range",
    cl::desc("Set the range of size in memory intrinsic calls to be profiled "
             "precisely, in the format <start_val>:<end_val>"),
    cl::init(""));
67 
// The value that is considered to be a large value in a memory intrinsic.
cl::opt<unsigned> MemOPSizeLarge(
    "memop-size-large",
    cl::desc("Set the large value threshold in memory intrinsic size "
             "profiling. A value of 0 disables large value profiling."),
    cl::init(8192));
74 
75 namespace {
76 
77 cl::opt<bool> DoHashBasedCounterSplit(
78     "hash-based-counter-split",
79     cl::desc("Rename counter variable of a comdat function based on cfg hash"),
80     cl::init(true));
81 
82 cl::opt<bool> RuntimeCounterRelocation(
83     "runtime-counter-relocation",
84     cl::desc("Enable relocating counters at runtime."),
85     cl::init(false));
86 
87 cl::opt<bool> ValueProfileStaticAlloc(
88     "vp-static-alloc",
89     cl::desc("Do static counter allocation for value profiler"),
90     cl::init(true));
91 
92 cl::opt<double> NumCountersPerValueSite(
93     "vp-counters-per-site",
94     cl::desc("The average number of profile counters allocated "
95              "per value profiling site."),
96     // This is set to a very small value because in real programs, only
    // a very small percentage of value sites have non-zero targets, e.g., 1/30.
98     // For those sites with non-zero profile, the average number of targets
99     // is usually smaller than 2.
100     cl::init(1.0));
101 
102 cl::opt<bool> AtomicCounterUpdateAll(
103     "instrprof-atomic-counter-update-all", cl::ZeroOrMore,
104     cl::desc("Make all profile counter updates atomic (for testing only)"),
105     cl::init(false));
106 
107 cl::opt<bool> AtomicCounterUpdatePromoted(
108     "atomic-counter-update-promoted", cl::ZeroOrMore,
    cl::desc("Do counter updates using atomic fetch add "
             "for promoted counters only"),
111     cl::init(false));
112 
113 cl::opt<bool> AtomicFirstCounter(
114     "atomic-first-counter", cl::ZeroOrMore,
115     cl::desc("Use atomic fetch add for first counter in a function (usually "
116              "the entry counter)"),
117     cl::init(false));
118 
// If the option is not specified, whether counter promotion is done by
// default depends on how the instrumentation lowering pipeline is set up,
// i.e., the initial value of this option does not by itself determine
// whether promotion happens. Explicitly setting this option overrides the
// default behavior.
124 cl::opt<bool> DoCounterPromotion("do-counter-promotion", cl::ZeroOrMore,
125                                  cl::desc("Do counter register promotion"),
126                                  cl::init(false));
127 cl::opt<unsigned> MaxNumOfPromotionsPerLoop(
128     cl::ZeroOrMore, "max-counter-promotions-per-loop", cl::init(20),
    cl::desc("Max number of counter promotions per loop to avoid"
130              " increasing register pressure too much"));
131 
132 // A debug option
133 cl::opt<int>
134     MaxNumOfPromotions(cl::ZeroOrMore, "max-counter-promotions", cl::init(-1),
135                        cl::desc("Max number of allowed counter promotions"));
136 
137 cl::opt<unsigned> SpeculativeCounterPromotionMaxExiting(
138     cl::ZeroOrMore, "speculative-counter-promotion-max-exiting", cl::init(3),
139     cl::desc("The max number of exiting blocks of a loop to allow "
             "speculative counter promotion"));
141 
142 cl::opt<bool> SpeculativeCounterPromotionToLoop(
143     cl::ZeroOrMore, "speculative-counter-promotion-to-loop", cl::init(false),
144     cl::desc("When the option is false, if the target block is in a loop, "
145              "the promotion will be disallowed unless the promoted counter "
             "update can be further/iteratively promoted into an acyclic "
             "region."));
148 
149 cl::opt<bool> IterativeCounterPromotion(
150     cl::ZeroOrMore, "iterative-counter-promotion", cl::init(true),
151     cl::desc("Allow counter promotion across the whole loop nest."));
152 
153 cl::opt<bool> SkipRetExitBlock(
154     cl::ZeroOrMore, "skip-ret-exit-block", cl::init(true),
155     cl::desc("Suppress counter promotion if exit blocks contain ret."));
156 
157 class InstrProfilingLegacyPass : public ModulePass {
158   InstrProfiling InstrProf;
159 
160 public:
161   static char ID;
162 
163   InstrProfilingLegacyPass() : ModulePass(ID) {}
164   InstrProfilingLegacyPass(const InstrProfOptions &Options, bool IsCS = false)
165       : ModulePass(ID), InstrProf(Options, IsCS) {
166     initializeInstrProfilingLegacyPassPass(*PassRegistry::getPassRegistry());
167   }
168 
169   StringRef getPassName() const override {
170     return "Frontend instrumentation-based coverage lowering";
171   }
172 
173   bool runOnModule(Module &M) override {
174     auto GetTLI = [this](Function &F) -> TargetLibraryInfo & {
175       return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
176     };
177     return InstrProf.run(M, GetTLI);
178   }
179 
180   void getAnalysisUsage(AnalysisUsage &AU) const override {
181     AU.setPreservesCFG();
182     AU.addRequired<TargetLibraryInfoWrapperPass>();
183   }
184 };
185 
///
/// A helper class to promote one counter RMW operation in the loop
/// into a register update.
///
/// The RMW update for the counter will be sunk out of the loop after
/// the transformation.
///
193 class PGOCounterPromoterHelper : public LoadAndStorePromoter {
194 public:
195   PGOCounterPromoterHelper(
196       Instruction *L, Instruction *S, SSAUpdater &SSA, Value *Init,
197       BasicBlock *PH, ArrayRef<BasicBlock *> ExitBlocks,
198       ArrayRef<Instruction *> InsertPts,
199       DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
200       LoopInfo &LI)
201       : LoadAndStorePromoter({L, S}, SSA), Store(S), ExitBlocks(ExitBlocks),
202         InsertPts(InsertPts), LoopToCandidates(LoopToCands), LI(LI) {
203     assert(isa<LoadInst>(L));
204     assert(isa<StoreInst>(S));
205     SSA.AddAvailableValue(PH, Init);
206   }
207 
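  // For each exit block, flush the promoted (in-register) count back into the
  // counter in memory, either atomically or with a load/add/store sequence
  // that may itself become a promotion candidate in the parent loop.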
208   void doExtraRewritesBeforeFinalDeletion() override {
209     for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
210       BasicBlock *ExitBlock = ExitBlocks[i];
211       Instruction *InsertPos = InsertPts[i];
212       // Get LiveIn value into the ExitBlock. If there are multiple
213       // predecessors, the value is defined by a PHI node in this
214       // block.
215       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
216       Value *Addr = cast<StoreInst>(Store)->getPointerOperand();
217       Type *Ty = LiveInValue->getType();
218       IRBuilder<> Builder(InsertPos);
219       if (AtomicCounterUpdatePromoted)
        // Atomic updates can currently only be promoted across the current
        // loop, not the whole loop nest.
222         Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, LiveInValue,
223                                 AtomicOrdering::SequentiallyConsistent);
224       else {
225         LoadInst *OldVal = Builder.CreateLoad(Ty, Addr, "pgocount.promoted");
226         auto *NewVal = Builder.CreateAdd(OldVal, LiveInValue);
227         auto *NewStore = Builder.CreateStore(NewVal, Addr);
228 
229         // Now update the parent loop's candidate list:
230         if (IterativeCounterPromotion) {
231           auto *TargetLoop = LI.getLoopFor(ExitBlock);
232           if (TargetLoop)
233             LoopToCandidates[TargetLoop].emplace_back(OldVal, NewStore);
234         }
235       }
236     }
237   }
238 
239 private:
240   Instruction *Store;
241   ArrayRef<BasicBlock *> ExitBlocks;
242   ArrayRef<Instruction *> InsertPts;
243   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
244   LoopInfo &LI;
245 };
246 
247 /// A helper class to do register promotion for all profile counter
248 /// updates in a loop.
249 ///
250 class PGOCounterPromoter {
251 public:
252   PGOCounterPromoter(
253       DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCands,
254       Loop &CurLoop, LoopInfo &LI, BlockFrequencyInfo *BFI)
255       : LoopToCandidates(LoopToCands), ExitBlocks(), InsertPts(), L(CurLoop),
256         LI(LI), BFI(BFI) {
257 
258     // Skip collection of ExitBlocks and InsertPts for loops that will not be
259     // able to have counters promoted.
260     SmallVector<BasicBlock *, 8> LoopExitBlocks;
261     SmallPtrSet<BasicBlock *, 8> BlockSet;
262 
263     L.getExitBlocks(LoopExitBlocks);
264     if (!isPromotionPossible(&L, LoopExitBlocks))
265       return;
266 
267     for (BasicBlock *ExitBlock : LoopExitBlocks) {
268       if (BlockSet.insert(ExitBlock).second) {
269         ExitBlocks.push_back(ExitBlock);
270         InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
271       }
272     }
273   }
274 
275   bool run(int64_t *NumPromoted) {
276     // Skip 'infinite' loops:
277     if (ExitBlocks.size() == 0)
278       return false;
279 
    // Skip if any of the ExitBlocks contains a ret instruction.
    // This is to prevent dumping an incomplete profile -- if the
    // loop is a long-running loop and the dump is called in the middle
    // of the loop, the resulting profile is incomplete.
284     // FIXME: add other heuristics to detect long running loops.
285     if (SkipRetExitBlock) {
      for (auto *BB : ExitBlocks)
        if (isa<ReturnInst>(BB->getTerminator()))
288           return false;
289     }
290 
291     unsigned MaxProm = getMaxNumOfPromotionsInLoop(&L);
292     if (MaxProm == 0)
293       return false;
294 
295     unsigned Promoted = 0;
296     for (auto &Cand : LoopToCandidates[&L]) {
297 
298       SmallVector<PHINode *, 4> NewPHIs;
299       SSAUpdater SSA(&NewPHIs);
300       Value *InitVal = ConstantInt::get(Cand.first->getType(), 0);
301 
302       // If BFI is set, we will use it to guide the promotions.
303       if (BFI) {
304         auto *BB = Cand.first->getParent();
305         auto InstrCount = BFI->getBlockProfileCount(BB);
306         if (!InstrCount)
307           continue;
308         auto PreheaderCount = BFI->getBlockProfileCount(L.getLoopPreheader());
309         // If the average loop trip count is not greater than 1.5, we skip
310         // promotion.
311         if (PreheaderCount &&
312             (PreheaderCount.getValue() * 3) >= (InstrCount.getValue() * 2))
313           continue;
314       }
315 
316       PGOCounterPromoterHelper Promoter(Cand.first, Cand.second, SSA, InitVal,
317                                         L.getLoopPreheader(), ExitBlocks,
318                                         InsertPts, LoopToCandidates, LI);
319       Promoter.run(SmallVector<Instruction *, 2>({Cand.first, Cand.second}));
320       Promoted++;
321       if (Promoted >= MaxProm)
322         break;
323 
324       (*NumPromoted)++;
325       if (MaxNumOfPromotions != -1 && *NumPromoted >= MaxNumOfPromotions)
326         break;
327     }
328 
329     LLVM_DEBUG(dbgs() << Promoted << " counters promoted for loop (depth="
330                       << L.getLoopDepth() << ")\n");
331     return Promoted != 0;
332   }
333 
334 private:
335   bool allowSpeculativeCounterPromotion(Loop *LP) {
336     SmallVector<BasicBlock *, 8> ExitingBlocks;
337     L.getExitingBlocks(ExitingBlocks);
    // Not considered speculative.
339     if (ExitingBlocks.size() == 1)
340       return true;
341     if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
342       return false;
343     return true;
344   }
345 
346   // Check whether the loop satisfies the basic conditions needed to perform
347   // Counter Promotions.
348   bool isPromotionPossible(Loop *LP,
349                            const SmallVectorImpl<BasicBlock *> &LoopExitBlocks) {
350     // We can't insert into a catchswitch.
351     if (llvm::any_of(LoopExitBlocks, [](BasicBlock *Exit) {
352           return isa<CatchSwitchInst>(Exit->getTerminator());
353         }))
354       return false;
355 
356     if (!LP->hasDedicatedExits())
357       return false;
358 
359     BasicBlock *PH = LP->getLoopPreheader();
360     if (!PH)
361       return false;
362 
363     return true;
364   }
365 
366   // Returns the max number of Counter Promotions for LP.
367   unsigned getMaxNumOfPromotionsInLoop(Loop *LP) {
368     SmallVector<BasicBlock *, 8> LoopExitBlocks;
369     LP->getExitBlocks(LoopExitBlocks);
370     if (!isPromotionPossible(LP, LoopExitBlocks))
371       return 0;
372 
373     SmallVector<BasicBlock *, 8> ExitingBlocks;
374     LP->getExitingBlocks(ExitingBlocks);
375 
376     // If BFI is set, we do more aggressive promotions based on BFI.
377     if (BFI)
378       return (unsigned)-1;
379 
    // Not considered speculative.
381     if (ExitingBlocks.size() == 1)
382       return MaxNumOfPromotionsPerLoop;
383 
384     if (ExitingBlocks.size() > SpeculativeCounterPromotionMaxExiting)
385       return 0;
386 
387     // Whether the target block is in a loop does not matter:
388     if (SpeculativeCounterPromotionToLoop)
389       return MaxNumOfPromotionsPerLoop;
390 
391     // Now check the target block:
392     unsigned MaxProm = MaxNumOfPromotionsPerLoop;
393     for (auto *TargetBlock : LoopExitBlocks) {
394       auto *TargetLoop = LI.getLoopFor(TargetBlock);
395       if (!TargetLoop)
396         continue;
397       unsigned MaxPromForTarget = getMaxNumOfPromotionsInLoop(TargetLoop);
398       unsigned PendingCandsInTarget = LoopToCandidates[TargetLoop].size();
399       MaxProm =
400           std::min(MaxProm, std::max(MaxPromForTarget, PendingCandsInTarget) -
401                                 PendingCandsInTarget);
402     }
403     return MaxProm;
404   }
405 
406   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> &LoopToCandidates;
407   SmallVector<BasicBlock *, 8> ExitBlocks;
408   SmallVector<Instruction *, 8> InsertPts;
409   Loop &L;
410   LoopInfo &LI;
411   BlockFrequencyInfo *BFI;
412 };
413 
414 } // end anonymous namespace
415 
416 PreservedAnalyses InstrProfiling::run(Module &M, ModuleAnalysisManager &AM) {
417   FunctionAnalysisManager &FAM =
418       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
419   auto GetTLI = [&FAM](Function &F) -> TargetLibraryInfo & {
420     return FAM.getResult<TargetLibraryAnalysis>(F);
421   };
422   if (!run(M, GetTLI))
423     return PreservedAnalyses::all();
424 
425   return PreservedAnalyses::none();
426 }
427 
428 char InstrProfilingLegacyPass::ID = 0;
429 INITIALIZE_PASS_BEGIN(
430     InstrProfilingLegacyPass, "instrprof",
431     "Frontend instrumentation-based coverage lowering.", false, false)
432 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
433 INITIALIZE_PASS_END(
434     InstrProfilingLegacyPass, "instrprof",
435     "Frontend instrumentation-based coverage lowering.", false, false)
436 
437 ModulePass *
438 llvm::createInstrProfilingLegacyPass(const InstrProfOptions &Options,
439                                      bool IsCS) {
440   return new InstrProfilingLegacyPass(Options, IsCS);
441 }
442 
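/// Return \p Instr as an increment intrinsic (with or without an explicit
/// step), or null if it is neither.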
443 static InstrProfIncrementInst *castToIncrementInst(Instruction *Instr) {
444   InstrProfIncrementInst *Inc = dyn_cast<InstrProfIncrementInstStep>(Instr);
445   if (Inc)
446     return Inc;
447   return dyn_cast<InstrProfIncrementInst>(Instr);
448 }
449 
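// Lower all instrprof increment and value-profiling intrinsics in F, then
// try to promote the resulting counter updates out of loops.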
450 bool InstrProfiling::lowerIntrinsics(Function *F) {
451   bool MadeChange = false;
452   PromotionCandidates.clear();
453   for (BasicBlock &BB : *F) {
454     for (auto I = BB.begin(), E = BB.end(); I != E;) {
455       auto Instr = I++;
456       InstrProfIncrementInst *Inc = castToIncrementInst(&*Instr);
457       if (Inc) {
458         lowerIncrement(Inc);
459         MadeChange = true;
460       } else if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(Instr)) {
461         lowerValueProfileInst(Ind);
462         MadeChange = true;
463       }
464     }
465   }
466 
467   if (!MadeChange)
468     return false;
469 
470   promoteCounterLoadStores(F);
471   return true;
472 }
473 
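// Runtime counter relocation defaults to on for Fuchsia; the command-line
// flag overrides the target-based default.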
474 bool InstrProfiling::isRuntimeCounterRelocationEnabled() const {
475   if (RuntimeCounterRelocation.getNumOccurrences() > 0)
476     return RuntimeCounterRelocation;
477 
478   return TT.isOSFuchsia();
479 }
480 
481 bool InstrProfiling::isCounterPromotionEnabled() const {
482   if (DoCounterPromotion.getNumOccurrences() > 0)
483     return DoCounterPromotion;
484 
485   return Options.DoCounterPromotion;
486 }
487 
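// Promote the counter load/store pairs recorded during lowering out of the
// loops that contain them.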
488 void InstrProfiling::promoteCounterLoadStores(Function *F) {
489   if (!isCounterPromotionEnabled())
490     return;
491 
492   DominatorTree DT(*F);
493   LoopInfo LI(DT);
494   DenseMap<Loop *, SmallVector<LoadStorePair, 8>> LoopPromotionCandidates;
495 
496   std::unique_ptr<BlockFrequencyInfo> BFI;
497   if (Options.UseBFIInPromotion) {
498     std::unique_ptr<BranchProbabilityInfo> BPI;
499     BPI.reset(new BranchProbabilityInfo(*F, LI, &GetTLI(*F)));
500     BFI.reset(new BlockFrequencyInfo(*F, *BPI, LI));
501   }
502 
503   for (const auto &LoadStore : PromotionCandidates) {
504     auto *CounterLoad = LoadStore.first;
505     auto *CounterStore = LoadStore.second;
506     BasicBlock *BB = CounterLoad->getParent();
507     Loop *ParentLoop = LI.getLoopFor(BB);
508     if (!ParentLoop)
509       continue;
510     LoopPromotionCandidates[ParentLoop].emplace_back(CounterLoad, CounterStore);
511   }
512 
513   SmallVector<Loop *, 4> Loops = LI.getLoopsInPreorder();
514 
515   // Do a post-order traversal of the loops so that counter updates can be
516   // iteratively hoisted outside the loop nest.
517   for (auto *Loop : llvm::reverse(Loops)) {
518     PGOCounterPromoter Promoter(LoopPromotionCandidates, *Loop, LI, BFI.get());
519     Promoter.run(&TotalCountersPromoted);
520   }
521 }
522 
523 /// Check if the module contains uses of any profiling intrinsics.
524 static bool containsProfilingIntrinsics(Module &M) {
525   if (auto *F = M.getFunction(
526           Intrinsic::getName(llvm::Intrinsic::instrprof_increment)))
527     if (!F->use_empty())
528       return true;
529   if (auto *F = M.getFunction(
530           Intrinsic::getName(llvm::Intrinsic::instrprof_increment_step)))
531     if (!F->use_empty())
532       return true;
533   if (auto *F = M.getFunction(
534           Intrinsic::getName(llvm::Intrinsic::instrprof_value_profile)))
535     if (!F->use_empty())
536       return true;
537   return false;
538 }
539 
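// Entry point shared by the legacy and new pass manager wrappers: lower all
// profiling intrinsics in the module and emit the supporting data structures.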
540 bool InstrProfiling::run(
541     Module &M, std::function<const TargetLibraryInfo &(Function &F)> GetTLI) {
542   this->M = &M;
543   this->GetTLI = std::move(GetTLI);
544   NamesVar = nullptr;
545   NamesSize = 0;
546   ProfileDataMap.clear();
547   UsedVars.clear();
548   getMemOPSizeRangeFromOption(MemOPSizeRange, MemOPSizeRangeStart,
549                               MemOPSizeRangeLast);
550   TT = Triple(M.getTargetTriple());
551 
552   // Emit the runtime hook even if no counters are present.
553   bool MadeChange = emitRuntimeHook();
554 
555   // Improve compile time by avoiding linear scans when there is no work.
556   GlobalVariable *CoverageNamesVar =
557       M.getNamedGlobal(getCoverageUnusedNamesVarName());
558   if (!containsProfilingIntrinsics(M) && !CoverageNamesVar)
559     return MadeChange;
560 
  // The frontend does not know how many value sites there will be inside an
  // instrumented function, so count the instrumented value sites here and
  // record the totals as fields in the per-function profile data variable.
564   for (Function &F : M) {
565     InstrProfIncrementInst *FirstProfIncInst = nullptr;
566     for (BasicBlock &BB : F)
567       for (auto I = BB.begin(), E = BB.end(); I != E; I++)
568         if (auto *Ind = dyn_cast<InstrProfValueProfileInst>(I))
569           computeNumValueSiteCounts(Ind);
570         else if (FirstProfIncInst == nullptr)
571           FirstProfIncInst = dyn_cast<InstrProfIncrementInst>(I);
572 
    // Value profiling intrinsic lowering requires the per-function profile
    // data variable to be created first.
575     if (FirstProfIncInst != nullptr)
576       static_cast<void>(getOrCreateRegionCounters(FirstProfIncInst));
577   }
578 
579   for (Function &F : M)
580     MadeChange |= lowerIntrinsics(&F);
581 
582   if (CoverageNamesVar) {
583     lowerCoverageData(CoverageNamesVar);
584     MadeChange = true;
585   }
586 
587   if (!MadeChange)
588     return false;
589 
590   emitVNodes();
591   emitNameData();
592   emitRegistration();
593   emitUses();
594   emitInitialization();
595   return true;
596 }
597 
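/// Declare (or reuse) the runtime callback that value-profiling intrinsics
/// are lowered to; the range variant takes additional arguments describing
/// the precise range and the large-value threshold.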
598 static FunctionCallee
599 getOrInsertValueProfilingCall(Module &M, const TargetLibraryInfo &TLI,
600                               bool IsRange = false) {
601   LLVMContext &Ctx = M.getContext();
602   auto *ReturnTy = Type::getVoidTy(M.getContext());
603 
604   AttributeList AL;
605   if (auto AK = TLI.getExtAttrForI32Param(false))
606     AL = AL.addParamAttribute(M.getContext(), 2, AK);
607 
608   if (!IsRange) {
609     Type *ParamTypes[] = {
610 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType
611 #include "llvm/ProfileData/InstrProfData.inc"
612     };
613     auto *ValueProfilingCallTy =
614         FunctionType::get(ReturnTy, makeArrayRef(ParamTypes), false);
615     return M.getOrInsertFunction(getInstrProfValueProfFuncName(),
616                                  ValueProfilingCallTy, AL);
617   } else {
618     Type *RangeParamTypes[] = {
619 #define VALUE_RANGE_PROF 1
620 #define VALUE_PROF_FUNC_PARAM(ParamType, ParamName, ParamLLVMType) ParamLLVMType
621 #include "llvm/ProfileData/InstrProfData.inc"
622 #undef VALUE_RANGE_PROF
623     };
624     auto *ValueRangeProfilingCallTy =
625         FunctionType::get(ReturnTy, makeArrayRef(RangeParamTypes), false);
626     return M.getOrInsertFunction(getInstrProfValueRangeProfFuncName(),
627                                  ValueRangeProfilingCallTy, AL);
628   }
629 }
630 
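// For the function named by this intrinsic, record the number of value sites
// of each kind (tracked as the highest site index seen plus one).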
631 void InstrProfiling::computeNumValueSiteCounts(InstrProfValueProfileInst *Ind) {
632   GlobalVariable *Name = Ind->getName();
633   uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
634   uint64_t Index = Ind->getIndex()->getZExtValue();
635   auto It = ProfileDataMap.find(Name);
636   if (It == ProfileDataMap.end()) {
637     PerFunctionProfileData PD;
638     PD.NumValueSites[ValueKind] = Index + 1;
639     ProfileDataMap[Name] = PD;
640   } else if (It->second.NumValueSites[ValueKind] <= Index)
641     It->second.NumValueSites[ValueKind] = Index + 1;
642 }
643 
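// Replace a value-profiling intrinsic with a call into the profiling runtime,
// passing the target value, the per-function data variable, and the
// flattened site index.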
644 void InstrProfiling::lowerValueProfileInst(InstrProfValueProfileInst *Ind) {
645   GlobalVariable *Name = Ind->getName();
646   auto It = ProfileDataMap.find(Name);
647   assert(It != ProfileDataMap.end() && It->second.DataVar &&
         "value profiling detected in function with no counter increment");
649 
650   GlobalVariable *DataVar = It->second.DataVar;
651   uint64_t ValueKind = Ind->getValueKind()->getZExtValue();
652   uint64_t Index = Ind->getIndex()->getZExtValue();
653   for (uint32_t Kind = IPVK_First; Kind < ValueKind; ++Kind)
654     Index += It->second.NumValueSites[Kind];
655 
656   IRBuilder<> Builder(Ind);
657   bool IsRange = (Ind->getValueKind()->getZExtValue() ==
658                   llvm::InstrProfValueKind::IPVK_MemOPSize);
659   CallInst *Call = nullptr;
660   auto *TLI = &GetTLI(*Ind->getFunction());
661 
662   // To support value profiling calls within Windows exception handlers, funclet
663   // information contained within operand bundles needs to be copied over to
664   // the library call. This is required for the IR to be processed by the
665   // WinEHPrepare pass.
666   SmallVector<OperandBundleDef, 1> OpBundles;
667   Ind->getOperandBundlesAsDefs(OpBundles);
668   if (!IsRange) {
669     Value *Args[3] = {Ind->getTargetValue(),
670                       Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
671                       Builder.getInt32(Index)};
672     Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI), Args,
673                               OpBundles);
674   } else {
675     Value *Args[6] = {
676         Ind->getTargetValue(),
677         Builder.CreateBitCast(DataVar, Builder.getInt8PtrTy()),
678         Builder.getInt32(Index),
679         Builder.getInt64(MemOPSizeRangeStart),
680         Builder.getInt64(MemOPSizeRangeLast),
681         Builder.getInt64(MemOPSizeLarge == 0 ? INT64_MIN : MemOPSizeLarge)};
682     Call = Builder.CreateCall(getOrInsertValueProfilingCall(*M, *TLI, true),
683                               Args, OpBundles);
684   }
685   if (auto AK = TLI->getExtAttrForI32Param(false))
686     Call->addParamAttr(2, AK);
687   Ind->replaceAllUsesWith(Call);
688   Ind->eraseFromParent();
689 }
690 
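// Replace an increment intrinsic with an update of its slot in the function's
// counter array, using either an atomic RMW or a plain load/add/store.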
691 void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
692   GlobalVariable *Counters = getOrCreateRegionCounters(Inc);
693 
694   IRBuilder<> Builder(Inc);
695   uint64_t Index = Inc->getIndex()->getZExtValue();
696   Value *Addr = Builder.CreateConstInBoundsGEP2_64(Counters->getValueType(),
697                                                    Counters, 0, Index);
698 
699   if (isRuntimeCounterRelocationEnabled()) {
700     Type *Int64Ty = Type::getInt64Ty(M->getContext());
701     Type *Int64PtrTy = Type::getInt64PtrTy(M->getContext());
702     Function *Fn = Inc->getParent()->getParent();
703     Instruction &I = Fn->getEntryBlock().front();
704     LoadInst *LI = dyn_cast<LoadInst>(&I);
705     if (!LI) {
706       IRBuilder<> Builder(&I);
      GlobalVariable *Bias =
          M->getGlobalVariable(getInstrProfCounterBiasVarName());
709       if (!Bias) {
710         Bias = new GlobalVariable(*M, Int64Ty, false, GlobalValue::LinkOnceODRLinkage,
711                                   Constant::getNullValue(Int64Ty),
712                                   getInstrProfCounterBiasVarName());
713         Bias->setVisibility(GlobalVariable::HiddenVisibility);
714       }
715       LI = Builder.CreateLoad(Int64Ty, Bias);
716     }
717     auto *Add = Builder.CreateAdd(Builder.CreatePtrToInt(Addr, Int64Ty), LI);
718     Addr = Builder.CreateIntToPtr(Add, Int64PtrTy);
719   }
720 
721   if (Options.Atomic || AtomicCounterUpdateAll ||
722       (Index == 0 && AtomicFirstCounter)) {
723     Builder.CreateAtomicRMW(AtomicRMWInst::Add, Addr, Inc->getStep(),
724                             AtomicOrdering::Monotonic);
725   } else {
726     Value *IncStep = Inc->getStep();
727     Value *Load = Builder.CreateLoad(IncStep->getType(), Addr, "pgocount");
728     auto *Count = Builder.CreateAdd(Load, Inc->getStep());
729     auto *Store = Builder.CreateStore(Count, Addr);
730     if (isCounterPromotionEnabled())
731       PromotionCandidates.emplace_back(cast<Instruction>(Load), Store);
732   }
733   Inc->eraseFromParent();
734 }
735 
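// Record the function name globals referenced by the coverage mapping so they
// are emitted with the other names, then remove the temporary variable that
// listed them.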
736 void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageNamesVar) {
737   ConstantArray *Names =
738       cast<ConstantArray>(CoverageNamesVar->getInitializer());
739   for (unsigned I = 0, E = Names->getNumOperands(); I < E; ++I) {
740     Constant *NC = Names->getOperand(I);
741     Value *V = NC->stripPointerCasts();
742     assert(isa<GlobalVariable>(V) && "Missing reference to function name");
743     GlobalVariable *Name = cast<GlobalVariable>(V);
744 
745     Name->setLinkage(GlobalValue::PrivateLinkage);
746     ReferencedNames.push_back(Name);
747     NC->dropAllReferences();
748   }
749   CoverageNamesVar->eraseFromParent();
750 }
751 
752 /// Get the name of a profiling variable for a particular function.
753 static std::string getVarName(InstrProfIncrementInst *Inc, StringRef Prefix) {
754   StringRef NamePrefix = getInstrProfNameVarPrefix();
755   StringRef Name = Inc->getName()->getName().substr(NamePrefix.size());
756   Function *F = Inc->getParent()->getParent();
757   Module *M = F->getParent();
758   if (!DoHashBasedCounterSplit || !isIRPGOFlagSet(M) ||
759       !canRenameComdatFunc(*F))
760     return (Prefix + Name).str();
761   uint64_t FuncHash = Inc->getHash()->getZExtValue();
762   SmallVector<char, 24> HashPostfix;
763   if (Name.endswith((Twine(".") + Twine(FuncHash)).toStringRef(HashPostfix)))
764     return (Prefix + Name).str();
765   return (Prefix + Name + "." + Twine(FuncHash)).str();
766 }
767 
768 static inline bool shouldRecordFunctionAddr(Function *F) {
769   // Check the linkage
770   bool HasAvailableExternallyLinkage = F->hasAvailableExternallyLinkage();
771   if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage() &&
772       !HasAvailableExternallyLinkage)
773     return true;
774 
775   // A function marked 'alwaysinline' with available_externally linkage can't
776   // have its address taken. Doing so would create an undefined external ref to
777   // the function, which would fail to link.
778   if (HasAvailableExternallyLinkage &&
779       F->hasFnAttribute(Attribute::AlwaysInline))
780     return false;
781 
782   // Prohibit function address recording if the function is both internal and
783   // COMDAT. This avoids the profile data variable referencing internal symbols
784   // in COMDAT.
785   if (F->hasLocalLinkage() && F->hasComdat())
786     return false;
787 
788   // Check uses of this function for other than direct calls or invokes to it.
  // Inline virtual functions have linkonce_odr linkage. When a key method
  // exists, the vtable will only be emitted in the TU where the key method
  // is defined. In a TU where the vtable is not available, the function won't
  // be 'addresstaken'. If its address is not recorded here, the profile data
  // with the missing address may be picked by the linker, leading to missing
  // indirect call target info.
795   return F->hasAddressTaken() || F->hasLinkOnceLinkage();
796 }
797 
798 static bool needsRuntimeRegistrationOfSectionRange(const Triple &TT) {
  // Don't do this for Darwin; compiler-rt uses linker magic.
800   if (TT.isOSDarwin())
801     return false;
802   // Use linker script magic to get data/cnts/name start/end.
803   if (TT.isOSLinux() || TT.isOSFreeBSD() || TT.isOSNetBSD() ||
804       TT.isOSSolaris() || TT.isOSFuchsia() || TT.isPS4CPU() ||
805       TT.isOSWindows())
806     return false;
807 
808   return true;
809 }
810 
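// Create (or return the cached) counter array for the function named by Inc,
// along with its static value-profiling storage and per-function data
// variable.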
811 GlobalVariable *
812 InstrProfiling::getOrCreateRegionCounters(InstrProfIncrementInst *Inc) {
813   GlobalVariable *NamePtr = Inc->getName();
814   auto It = ProfileDataMap.find(NamePtr);
815   PerFunctionProfileData PD;
816   if (It != ProfileDataMap.end()) {
817     if (It->second.RegionCounters)
818       return It->second.RegionCounters;
819     PD = It->second;
820   }
821 
822   // Match the linkage and visibility of the name global. COFF supports using
823   // comdats with internal symbols, so do that if we can.
824   Function *Fn = Inc->getParent()->getParent();
825   GlobalValue::LinkageTypes Linkage = NamePtr->getLinkage();
826   GlobalValue::VisibilityTypes Visibility = NamePtr->getVisibility();
827   if (TT.isOSBinFormatCOFF()) {
828     Linkage = GlobalValue::InternalLinkage;
829     Visibility = GlobalValue::DefaultVisibility;
830   }
831 
832   // Move the name variable to the right section. Place them in a COMDAT group
833   // if the associated function is a COMDAT. This will make sure that only one
834   // copy of counters of the COMDAT function will be emitted after linking. Keep
835   // in mind that this pass may run before the inliner, so we need to create a
836   // new comdat group for the counters and profiling data. If we use the comdat
837   // of the parent function, that will result in relocations against discarded
838   // sections.
839   bool NeedComdat = needsComdatForCounter(*Fn, *M);
840   if (NeedComdat) {
841     if (TT.isOSBinFormatCOFF()) {
842       // For COFF, put the counters, data, and values each into their own
843       // comdats. We can't use a group because the Visual C++ linker will
844       // report duplicate symbol errors if there are multiple external symbols
845       // with the same name marked IMAGE_COMDAT_SELECT_ASSOCIATIVE.
846       Linkage = GlobalValue::LinkOnceODRLinkage;
847       Visibility = GlobalValue::HiddenVisibility;
848     }
849   }
850   auto MaybeSetComdat = [=](GlobalVariable *GV) {
851     if (NeedComdat)
852       GV->setComdat(M->getOrInsertComdat(GV->getName()));
853   };
854 
855   uint64_t NumCounters = Inc->getNumCounters()->getZExtValue();
856   LLVMContext &Ctx = M->getContext();
857   ArrayType *CounterTy = ArrayType::get(Type::getInt64Ty(Ctx), NumCounters);
858 
859   // Create the counters variable.
860   auto *CounterPtr =
861       new GlobalVariable(*M, CounterTy, false, Linkage,
862                          Constant::getNullValue(CounterTy),
863                          getVarName(Inc, getInstrProfCountersVarPrefix()));
864   CounterPtr->setVisibility(Visibility);
865   CounterPtr->setSection(
866       getInstrProfSectionName(IPSK_cnts, TT.getObjectFormat()));
867   CounterPtr->setAlignment(Align(8));
868   MaybeSetComdat(CounterPtr);
869   CounterPtr->setLinkage(Linkage);
870 
871   auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
872   // Allocate statically the array of pointers to value profile nodes for
873   // the current function.
874   Constant *ValuesPtrExpr = ConstantPointerNull::get(Int8PtrTy);
875   if (ValueProfileStaticAlloc && !needsRuntimeRegistrationOfSectionRange(TT)) {
876     uint64_t NS = 0;
877     for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
878       NS += PD.NumValueSites[Kind];
879     if (NS) {
880       ArrayType *ValuesTy = ArrayType::get(Type::getInt64Ty(Ctx), NS);
881 
882       auto *ValuesVar =
883           new GlobalVariable(*M, ValuesTy, false, Linkage,
884                              Constant::getNullValue(ValuesTy),
885                              getVarName(Inc, getInstrProfValuesVarPrefix()));
886       ValuesVar->setVisibility(Visibility);
887       ValuesVar->setSection(
888           getInstrProfSectionName(IPSK_vals, TT.getObjectFormat()));
889       ValuesVar->setAlignment(Align(8));
890       MaybeSetComdat(ValuesVar);
891       ValuesPtrExpr =
892           ConstantExpr::getBitCast(ValuesVar, Type::getInt8PtrTy(Ctx));
893     }
894   }
895 
896   // Create data variable.
897   auto *Int16Ty = Type::getInt16Ty(Ctx);
898   auto *Int16ArrayTy = ArrayType::get(Int16Ty, IPVK_Last + 1);
899   Type *DataTypes[] = {
900 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) LLVMType,
901 #include "llvm/ProfileData/InstrProfData.inc"
902   };
903   auto *DataTy = StructType::get(Ctx, makeArrayRef(DataTypes));
904 
905   Constant *FunctionAddr = shouldRecordFunctionAddr(Fn)
906                                ? ConstantExpr::getBitCast(Fn, Int8PtrTy)
907                                : ConstantPointerNull::get(Int8PtrTy);
908 
909   Constant *Int16ArrayVals[IPVK_Last + 1];
910   for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
911     Int16ArrayVals[Kind] = ConstantInt::get(Int16Ty, PD.NumValueSites[Kind]);
912 
913   Constant *DataVals[] = {
914 #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Init,
915 #include "llvm/ProfileData/InstrProfData.inc"
916   };
917   auto *Data = new GlobalVariable(*M, DataTy, false, Linkage,
918                                   ConstantStruct::get(DataTy, DataVals),
919                                   getVarName(Inc, getInstrProfDataVarPrefix()));
920   Data->setVisibility(Visibility);
921   Data->setSection(getInstrProfSectionName(IPSK_data, TT.getObjectFormat()));
922   Data->setAlignment(Align(INSTR_PROF_DATA_ALIGNMENT));
923   MaybeSetComdat(Data);
924   Data->setLinkage(Linkage);
925 
926   PD.RegionCounters = CounterPtr;
927   PD.DataVar = Data;
928   ProfileDataMap[NamePtr] = PD;
929 
930   // Mark the data variable as used so that it isn't stripped out.
931   UsedVars.push_back(Data);
932   // Now that the linkage set by the FE has been passed to the data and counter
933   // variables, reset Name variable's linkage and visibility to private so that
934   // it can be removed later by the compiler.
935   NamePtr->setLinkage(GlobalValue::PrivateLinkage);
936   // Collect the referenced names to be used by emitNameData.
937   ReferencedNames.push_back(NamePtr);
938 
939   return CounterPtr;
940 }
941 
942 void InstrProfiling::emitVNodes() {
943   if (!ValueProfileStaticAlloc)
944     return;
945 
946   // For now only support this on platforms that do
947   // not require runtime registration to discover
948   // named section start/end.
949   if (needsRuntimeRegistrationOfSectionRange(TT))
950     return;
951 
952   size_t TotalNS = 0;
953   for (auto &PD : ProfileDataMap) {
954     for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
955       TotalNS += PD.second.NumValueSites[Kind];
956   }
957 
958   if (!TotalNS)
959     return;
960 
961   uint64_t NumCounters = TotalNS * NumCountersPerValueSite;
962 // Heuristic for small programs with very few total value sites.
963 // The default value of vp-counters-per-site is chosen based on
964 // the observation that large apps usually have a low percentage
965 // of value sites that actually have any profile data, and thus
966 // the average number of counters per site is low. For small
967 // apps with very few sites, this may not be true. Bump up the
968 // number of counters in this case.
969 #define INSTR_PROF_MIN_VAL_COUNTS 10
970   if (NumCounters < INSTR_PROF_MIN_VAL_COUNTS)
971     NumCounters = std::max(INSTR_PROF_MIN_VAL_COUNTS, (int)NumCounters * 2);
972 
973   auto &Ctx = M->getContext();
974   Type *VNodeTypes[] = {
975 #define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Init) LLVMType,
976 #include "llvm/ProfileData/InstrProfData.inc"
977   };
978   auto *VNodeTy = StructType::get(Ctx, makeArrayRef(VNodeTypes));
979 
980   ArrayType *VNodesTy = ArrayType::get(VNodeTy, NumCounters);
981   auto *VNodesVar = new GlobalVariable(
982       *M, VNodesTy, false, GlobalValue::PrivateLinkage,
983       Constant::getNullValue(VNodesTy), getInstrProfVNodesVarName());
984   VNodesVar->setSection(
985       getInstrProfSectionName(IPSK_vnodes, TT.getObjectFormat()));
986   UsedVars.push_back(VNodesVar);
987 }
988 
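// Concatenate (and optionally compress) all referenced function names into a
// single variable and delete the individual name globals.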
989 void InstrProfiling::emitNameData() {
992   if (ReferencedNames.empty())
993     return;
994 
995   std::string CompressedNameStr;
996   if (Error E = collectPGOFuncNameStrings(ReferencedNames, CompressedNameStr,
997                                           DoInstrProfNameCompression)) {
998     report_fatal_error(toString(std::move(E)), false);
999   }
1000 
1001   auto &Ctx = M->getContext();
1002   auto *NamesVal = ConstantDataArray::getString(
1003       Ctx, StringRef(CompressedNameStr), false);
1004   NamesVar = new GlobalVariable(*M, NamesVal->getType(), true,
1005                                 GlobalValue::PrivateLinkage, NamesVal,
1006                                 getInstrProfNamesVarName());
1007   NamesSize = CompressedNameStr.size();
1008   NamesVar->setSection(
1009       getInstrProfSectionName(IPSK_name, TT.getObjectFormat()));
1010   // On COFF, it's important to reduce the alignment down to 1 to prevent the
1011   // linker from inserting padding before the start of the names section or
1012   // between names entries.
1013   NamesVar->setAlignment(Align(1));
1014   UsedVars.push_back(NamesVar);
1015 
1016   for (auto *NamePtr : ReferencedNames)
1017     NamePtr->eraseFromParent();
1018 }
1019 
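// On targets that cannot locate the profile sections via the linker, emit a
// registration function that passes each profiling variable to the runtime.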
1020 void InstrProfiling::emitRegistration() {
1021   if (!needsRuntimeRegistrationOfSectionRange(TT))
1022     return;
1023 
1024   // Construct the function.
1025   auto *VoidTy = Type::getVoidTy(M->getContext());
1026   auto *VoidPtrTy = Type::getInt8PtrTy(M->getContext());
1027   auto *Int64Ty = Type::getInt64Ty(M->getContext());
1028   auto *RegisterFTy = FunctionType::get(VoidTy, false);
1029   auto *RegisterF = Function::Create(RegisterFTy, GlobalValue::InternalLinkage,
1030                                      getInstrProfRegFuncsName(), M);
1031   RegisterF->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1032   if (Options.NoRedZone)
1033     RegisterF->addFnAttr(Attribute::NoRedZone);
1034 
1035   auto *RuntimeRegisterTy = FunctionType::get(VoidTy, VoidPtrTy, false);
1036   auto *RuntimeRegisterF =
1037       Function::Create(RuntimeRegisterTy, GlobalVariable::ExternalLinkage,
1038                        getInstrProfRegFuncName(), M);
1039 
1040   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", RegisterF));
1041   for (Value *Data : UsedVars)
1042     if (Data != NamesVar && !isa<Function>(Data))
1043       IRB.CreateCall(RuntimeRegisterF, IRB.CreateBitCast(Data, VoidPtrTy));
1044 
1045   if (NamesVar) {
1046     Type *ParamTypes[] = {VoidPtrTy, Int64Ty};
1047     auto *NamesRegisterTy =
1048         FunctionType::get(VoidTy, makeArrayRef(ParamTypes), false);
1049     auto *NamesRegisterF =
1050         Function::Create(NamesRegisterTy, GlobalVariable::ExternalLinkage,
1051                          getInstrProfNamesRegFuncName(), M);
1052     IRB.CreateCall(NamesRegisterF, {IRB.CreateBitCast(NamesVar, VoidPtrTy),
1053                                     IRB.getInt64(NamesSize)});
1054   }
1055 
1056   IRB.CreateRetVoid();
1057 }
1058 
1059 bool InstrProfiling::emitRuntimeHook() {
1060   // We expect the linker to be invoked with -u<hook_var> flag for Linux or
1061   // Fuchsia, in which case there is no need to emit the user function.
1062   if (TT.isOSLinux() || TT.isOSFuchsia())
1063     return false;
1064 
1065   // If the module's provided its own runtime, we don't need to do anything.
1066   if (M->getGlobalVariable(getInstrProfRuntimeHookVarName()))
1067     return false;
1068 
1069   // Declare an external variable that will pull in the runtime initialization.
1070   auto *Int32Ty = Type::getInt32Ty(M->getContext());
1071   auto *Var =
1072       new GlobalVariable(*M, Int32Ty, false, GlobalValue::ExternalLinkage,
1073                          nullptr, getInstrProfRuntimeHookVarName());
1074 
1075   // Make a function that uses it.
1076   auto *User = Function::Create(FunctionType::get(Int32Ty, false),
1077                                 GlobalValue::LinkOnceODRLinkage,
1078                                 getInstrProfRuntimeHookVarUseFuncName(), M);
1079   User->addFnAttr(Attribute::NoInline);
1080   if (Options.NoRedZone)
1081     User->addFnAttr(Attribute::NoRedZone);
1082   User->setVisibility(GlobalValue::HiddenVisibility);
1083   if (TT.supportsCOMDAT())
1084     User->setComdat(M->getOrInsertComdat(User->getName()));
1085 
1086   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", User));
1087   auto *Load = IRB.CreateLoad(Int32Ty, Var);
1088   IRB.CreateRet(Load);
1089 
1090   // Mark the user variable as used so that it isn't stripped out.
1091   UsedVars.push_back(User);
1092   return true;
1093 }
1094 
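// Append the emitted profiling variables to llvm.used so optimizations do not
// strip them.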
1095 void InstrProfiling::emitUses() {
1096   if (!UsedVars.empty())
1097     appendToUsed(*M, UsedVars);
1098 }
1099 
1100 void InstrProfiling::emitInitialization() {
  // Create the ProfileFileName variable. Don't do this for the
  // context-sensitive instrumentation lowering: that lowering runs after
  // LTO/ThinLTO linking, and the PGOInstrumentationGenCreateVar pass should
  // have already created the variable before LTO/ThinLTO linking.
1105   if (!IsCS)
1106     createProfileFileNameVar(*M, Options.InstrProfileOutput);
1107   Function *RegisterF = M->getFunction(getInstrProfRegFuncsName());
1108   if (!RegisterF)
1109     return;
1110 
1111   // Create the initialization function.
1112   auto *VoidTy = Type::getVoidTy(M->getContext());
1113   auto *F = Function::Create(FunctionType::get(VoidTy, false),
1114                              GlobalValue::InternalLinkage,
1115                              getInstrProfInitFuncName(), M);
1116   F->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
1117   F->addFnAttr(Attribute::NoInline);
1118   if (Options.NoRedZone)
1119     F->addFnAttr(Attribute::NoRedZone);
1120 
1121   // Add the basic block and the necessary calls.
1122   IRBuilder<> IRB(BasicBlock::Create(M->getContext(), "", F));
1123   IRB.CreateCall(RegisterF, {});
1124   IRB.CreateRetVoid();
1125 
1126   appendToGlobalCtors(*M, F, 0);
1127 }
1128