1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SampleProfileLoader transformation. This pass
10 // reads a profile file generated by a sampling profiler (e.g. Linux Perf -
11 // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
12 // profile information in the given profile.
13 //
14 // This pass generates branch weight annotations on the IR:
15 //
16 // - prof: Represents branch weights. This annotation is added to branches
17 //      to indicate the weights of each edge coming out of the branch.
18 //      The weight of each edge is the weight of the target block for
19 //      that edge. The weight of a block B is computed as the maximum
20 //      number of samples found in B.
21 //
22 //===----------------------------------------------------------------------===//
23 
24 #include "llvm/Transforms/IPO/SampleProfile.h"
25 #include "llvm/ADT/ArrayRef.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/DenseSet.h"
28 #include "llvm/ADT/None.h"
29 #include "llvm/ADT/PriorityQueue.h"
30 #include "llvm/ADT/SCCIterator.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringMap.h"
36 #include "llvm/ADT/StringRef.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/Analysis/AssumptionCache.h"
39 #include "llvm/Analysis/CallGraph.h"
40 #include "llvm/Analysis/CallGraphSCCPass.h"
41 #include "llvm/Analysis/InlineAdvisor.h"
42 #include "llvm/Analysis/InlineCost.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
45 #include "llvm/Analysis/PostDominators.h"
46 #include "llvm/Analysis/ProfileSummaryInfo.h"
47 #include "llvm/Analysis/ReplayInlineAdvisor.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/Analysis/TargetTransformInfo.h"
50 #include "llvm/IR/BasicBlock.h"
51 #include "llvm/IR/CFG.h"
52 #include "llvm/IR/DebugInfoMetadata.h"
53 #include "llvm/IR/DebugLoc.h"
54 #include "llvm/IR/DiagnosticInfo.h"
55 #include "llvm/IR/Dominators.h"
56 #include "llvm/IR/Function.h"
57 #include "llvm/IR/GlobalValue.h"
58 #include "llvm/IR/InstrTypes.h"
59 #include "llvm/IR/Instruction.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/IR/MDBuilder.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/PassManager.h"
66 #include "llvm/IR/ValueSymbolTable.h"
67 #include "llvm/InitializePasses.h"
68 #include "llvm/Pass.h"
69 #include "llvm/ProfileData/InstrProf.h"
70 #include "llvm/ProfileData/SampleProf.h"
71 #include "llvm/ProfileData/SampleProfReader.h"
72 #include "llvm/Support/Casting.h"
73 #include "llvm/Support/CommandLine.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/ErrorOr.h"
77 #include "llvm/Support/GenericDomTree.h"
78 #include "llvm/Support/raw_ostream.h"
79 #include "llvm/Transforms/IPO.h"
80 #include "llvm/Transforms/IPO/SampleContextTracker.h"
81 #include "llvm/Transforms/IPO/SampleProfileProbe.h"
82 #include "llvm/Transforms/Instrumentation.h"
83 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
84 #include "llvm/Transforms/Utils/Cloning.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <cstdint>
88 #include <functional>
89 #include <limits>
90 #include <map>
91 #include <memory>
92 #include <queue>
93 #include <string>
94 #include <system_error>
95 #include <utility>
96 #include <vector>
97 
98 using namespace llvm;
99 using namespace sampleprof;
100 using ProfileCount = Function::ProfileCount;
101 #define DEBUG_TYPE "sample-profile"
102 #define CSINLINE_DEBUG DEBUG_TYPE "-inline"
103 
// Statistics reported under -stats. They track how well the input profile
// matched the IR and how context-sensitive (CS) FDO inlining behaved.
STATISTIC(NumCSInlined,
          "Number of functions inlined with context sensitive profile");
STATISTIC(NumCSNotInlined,
          "Number of functions not inlined with context sensitive profile");
STATISTIC(NumMismatchedProfile,
          "Number of functions with CFG mismatched profile");
STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile");

STATISTIC(NumCSInlinedHitMinLimit,
          "Number of functions with FDO inline stopped due to min size limit");
STATISTIC(NumCSInlinedHitMaxLimit,
          "Number of functions with FDO inline stopped due to max size limit");
STATISTIC(
    NumCSInlinedHitGrowthLimit,
    "Number of functions with FDO inline stopped due to growth size limit");
119 
120 // Command line option to specify the file to read samples from. This is
121 // mainly used for debugging.
122 static cl::opt<std::string> SampleProfileFile(
123     "sample-profile-file", cl::init(""), cl::value_desc("filename"),
124     cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);
125 
126 // The named file contains a set of transformations that may have been applied
127 // to the symbol names between the program from which the sample data was
128 // collected and the current program's symbols.
129 static cl::opt<std::string> SampleProfileRemappingFile(
130     "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
131     cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);
132 
133 static cl::opt<unsigned> SampleProfileMaxPropagateIterations(
134     "sample-profile-max-propagate-iterations", cl::init(100),
135     cl::desc("Maximum number of iterations to go through when propagating "
136              "sample block/edge weights through the CFG."));
137 
138 static cl::opt<unsigned> SampleProfileRecordCoverage(
139     "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"),
140     cl::desc("Emit a warning if less than N% of records in the input profile "
141              "are matched to the IR."));
142 
143 static cl::opt<unsigned> SampleProfileSampleCoverage(
144     "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"),
145     cl::desc("Emit a warning if less than N% of samples in the input profile "
146              "are matched to the IR."));
147 
148 static cl::opt<bool> NoWarnSampleUnused(
149     "no-warn-sample-unused", cl::init(false), cl::Hidden,
150     cl::desc("Use this option to turn off/on warnings about function with "
151              "samples but without debug information to use those samples. "));
152 
153 static cl::opt<bool> ProfileSampleAccurate(
154     "profile-sample-accurate", cl::Hidden, cl::init(false),
155     cl::desc("If the sample profile is accurate, we will mark all un-sampled "
156              "callsite and function as having 0 samples. Otherwise, treat "
157              "un-sampled callsites and functions conservatively as unknown. "));
158 
159 static cl::opt<bool> ProfileAccurateForSymsInList(
160     "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore,
161     cl::init(true),
162     cl::desc("For symbols in profile symbol list, regard their profiles to "
163              "be accurate. It may be overriden by profile-sample-accurate. "));
164 
165 static cl::opt<bool> ProfileMergeInlinee(
166     "sample-profile-merge-inlinee", cl::Hidden, cl::init(true),
167     cl::desc("Merge past inlinee's profile to outline version if sample "
168              "profile loader decided not to inline a call site. It will "
169              "only be enabled when top-down order of profile loading is "
170              "enabled. "));
171 
172 static cl::opt<bool> ProfileTopDownLoad(
173     "sample-profile-top-down-load", cl::Hidden, cl::init(true),
174     cl::desc("Do profile annotation and inlining for functions in top-down "
175              "order of call graph during sample profile loading. It only "
176              "works for new pass manager. "));
177 
178 static cl::opt<bool> ProfileSizeInline(
179     "sample-profile-inline-size", cl::Hidden, cl::init(false),
180     cl::desc("Inline cold call sites in profile loader if it's beneficial "
181              "for code size."));
182 
183 static cl::opt<int> ProfileInlineGrowthLimit(
184     "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12),
185     cl::desc("The size growth ratio limit for proirity-based sample profile "
186              "loader inlining."));
187 
188 static cl::opt<int> ProfileInlineLimitMin(
189     "sample-profile-inline-limit-min", cl::Hidden, cl::init(100),
190     cl::desc("The lower bound of size growth limit for "
191              "proirity-based sample profile loader inlining."));
192 
193 static cl::opt<int> ProfileInlineLimitMax(
194     "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000),
195     cl::desc("The upper bound of size growth limit for "
196              "proirity-based sample profile loader inlining."));
197 
198 static cl::opt<int> ProfileICPThreshold(
199     "sample-profile-icp-threshold", cl::Hidden, cl::init(5),
200     cl::desc(
201         "Relative hotness threshold for indirect "
202         "call promotion in proirity-based sample profile loader inlining."));
203 
204 static cl::opt<int> SampleHotCallSiteThreshold(
205     "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000),
206     cl::desc("Hot callsite threshold for proirity-based sample profile loader "
207              "inlining."));
208 
209 static cl::opt<bool> CallsitePrioritizedInline(
210     "sample-profile-prioritized-inline", cl::Hidden, cl::ZeroOrMore,
211     cl::init(false),
212     cl::desc("Use call site prioritized inlining for sample profile loader."
213              "Currently only CSSPGO is supported."));
214 
215 static cl::opt<int> SampleColdCallSiteThreshold(
216     "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45),
217     cl::desc("Threshold for inlining cold callsites"));
218 
219 static cl::opt<std::string> ProfileInlineReplayFile(
220     "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"),
221     cl::desc(
222         "Optimization remarks file containing inline remarks to be replayed "
223         "by inlining from sample profile loader."),
224     cl::Hidden);
225 
226 namespace {
227 
// Per-block sample weight, keyed by basic block.
using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>;
// Maps each block to the representative of its equivalence class.
using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>;
// A CFG edge, as a (source, destination) block pair.
using Edge = std::pair<const BasicBlock *, const BasicBlock *>;
using EdgeWeightMap = DenseMap<Edge, uint64_t>;
// Adjacency list: a block's predecessor (or successor) blocks.
using BlockEdgeMap =
    DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>;

class SampleProfileLoader;
236 
/// Tracks which sample records from the profile were actually applied to
/// the IR, so staleness can be reported via the
/// -sample-profile-check-{record,sample}-coverage flags.
class SampleCoverageTracker {
public:
  SampleCoverageTracker(SampleProfileLoader &SPL) : SPLoader(SPL){};

  /// Mark the record at (LineOffset, Discriminator) in \p FS as used;
  /// returns true on first use and accumulates \p Samples then.
  bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset,
                       uint32_t Discriminator, uint64_t Samples);
  /// Percentage (0-100) of \p Used over \p Total.
  unsigned computeCoverage(unsigned Used, unsigned Total) const;
  /// Number of records of \p FS (including hot inlinees) marked used.
  unsigned countUsedRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  /// Total number of body records in \p FS (including hot inlinees).
  unsigned countBodyRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  uint64_t getTotalUsedSamples() const { return TotalUsedSamples; }
  /// Total sample count in the body of \p FS (including hot inlinees).
  uint64_t countBodySamples(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;

  // Reset per-function state; called between functions.
  void clear() {
    SampleCoverage.clear();
    TotalUsedSamples = 0;
  }

private:
  using BodySampleCoverageMap = std::map<LineLocation, unsigned>;
  using FunctionSamplesCoverageMap =
      DenseMap<const FunctionSamples *, BodySampleCoverageMap>;

  /// Coverage map for sampling records.
  ///
  /// This map keeps a record of sampling records that have been matched to
  /// an IR instruction. This is used to detect some form of staleness in
  /// profiles (see flag -sample-profile-check-coverage).
  ///
  /// Each entry in the map corresponds to a FunctionSamples instance.  This is
  /// another map that counts how many times the sample record at the
  /// given location has been used.
  FunctionSamplesCoverageMap SampleCoverage;

  /// Number of samples used from the profile.
  ///
  /// When a sampling record is used for the first time, the samples from
  /// that record are added to this accumulator.  Coverage is later computed
  /// based on the total number of samples available in this function and
  /// its callsites.
  ///
  /// Note that this accumulator tracks samples used from a single function
  /// and all the inlined callsites. Strictly, we should have a map of counters
  /// keyed by FunctionSamples pointers, but these stats are cleared after
  /// every function, so we just need to keep a single counter.
  uint64_t TotalUsedSamples = 0;

  // Owning loader; used to query callsite hotness when recursing into
  // inlined callsites.
  SampleProfileLoader &SPLoader;
};
288 
289 class GUIDToFuncNameMapper {
290 public:
291   GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
292                         DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
293       : CurrentReader(Reader), CurrentModule(M),
294       CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
295     if (!CurrentReader.useMD5())
296       return;
297 
298     for (const auto &F : CurrentModule) {
299       StringRef OrigName = F.getName();
300       CurrentGUIDToFuncNameMap.insert(
301           {Function::getGUID(OrigName), OrigName});
302 
303       // Local to global var promotion used by optimization like thinlto
304       // will rename the var and add suffix like ".llvm.xxx" to the
305       // original local name. In sample profile, the suffixes of function
306       // names are all stripped. Since it is possible that the mapper is
307       // built in post-thin-link phase and var promotion has been done,
308       // we need to add the substring of function name without the suffix
309       // into the GUIDToFuncNameMap.
310       StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
311       if (CanonName != OrigName)
312         CurrentGUIDToFuncNameMap.insert(
313             {Function::getGUID(CanonName), CanonName});
314     }
315 
316     // Update GUIDToFuncNameMap for each function including inlinees.
317     SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
318   }
319 
320   ~GUIDToFuncNameMapper() {
321     if (!CurrentReader.useMD5())
322       return;
323 
324     CurrentGUIDToFuncNameMap.clear();
325 
326     // Reset GUIDToFuncNameMap for of each function as they're no
327     // longer valid at this point.
328     SetGUIDToFuncNameMapForAll(nullptr);
329   }
330 
331 private:
332   void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
333     std::queue<FunctionSamples *> FSToUpdate;
334     for (auto &IFS : CurrentReader.getProfiles()) {
335       FSToUpdate.push(&IFS.second);
336     }
337 
338     while (!FSToUpdate.empty()) {
339       FunctionSamples *FS = FSToUpdate.front();
340       FSToUpdate.pop();
341       FS->GUIDToFuncNameMap = Map;
342       for (const auto &ICS : FS->getCallsiteSamples()) {
343         const FunctionSamplesMap &FSMap = ICS.second;
344         for (auto &IFS : FSMap) {
345           FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
346           FSToUpdate.push(&FS);
347         }
348       }
349     }
350   }
351 
352   SampleProfileReader &CurrentReader;
353   Module &CurrentModule;
354   DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
355 };
356 
// Inline candidate used by iterative callsite prioritized inliner
struct InlineCandidate {
  // The call site being considered for inlining.
  CallBase *CallInstr;
  // Profile samples for the callee at this call site.
  const FunctionSamples *CalleeSamples;
  // Sample count of the call site; used to prioritize candidates.
  uint64_t CallsiteCount;
};
363 
364 // Inline candidate comparer using call site weight
365 struct CandidateComparer {
366   bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) {
367     if (LHS.CallsiteCount != RHS.CallsiteCount)
368       return LHS.CallsiteCount < RHS.CallsiteCount;
369 
370     // Tie breaker using GUID so we have stable/deterministic inlining order
371     assert(LHS.CalleeSamples && RHS.CalleeSamples &&
372            "Expect non-null FunctionSamples");
373     return LHS.CalleeSamples->getGUID(LHS.CalleeSamples->getName()) <
374            RHS.CalleeSamples->getGUID(RHS.CalleeSamples->getName());
375   }
376 };
377 
378 using CandidateQueue =
379     PriorityQueue<InlineCandidate, std::vector<InlineCandidate>,
380                   CandidateComparer>;
381 
/// Sample profile pass.
///
/// This pass reads profile data from the file specified by
/// -sample-profile-file and annotates every affected function with the
/// profile information found in that file.
class SampleProfileLoader {
public:
  /// \p Name / \p RemapName are the profile and remapping file names;
  /// the three callbacks fetch per-function analyses from the pass manager.
  SampleProfileLoader(
      StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase,
      std::function<AssumptionCache &(Function &)> GetAssumptionCache,
      std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
      std::function<const TargetLibraryInfo &(Function &)> GetTLI)
      : GetAC(std::move(GetAssumptionCache)),
        GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
        CoverageTracker(*this), Filename(std::string(Name)),
        RemappingFilename(std::string(RemapName)), LTOPhase(LTOPhase) {}

  // Load the profile; returns false (and leaves ProfileIsValid unset) on
  // failure. FAM is only needed under the new pass manager.
  bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
  // Annotate every function in \p M; returns true if the IR changed.
  bool runOnModule(Module &M, ModuleAnalysisManager *AM,
                   ProfileSummaryInfo *_PSI, CallGraph *CG);

  // Dump the loaded profile (debugging aid).
  void dump() { Reader->dump(); }

protected:
  friend class SampleCoverageTracker;

  bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
  unsigned getFunctionLoc(Function &F);
  bool emitAnnotations(Function &F);
  ErrorOr<uint64_t> getInstWeight(const Instruction &I);
  ErrorOr<uint64_t> getProbeWeight(const Instruction &I);
  ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB);
  const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
  std::vector<const FunctionSamples *>
  findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
  // Cache from debug location to the FunctionSamples it resolves to;
  // presumably filled by findFunctionSamples — verify against its body.
  mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap;
  const FunctionSamples *findFunctionSamples(const Instruction &I) const;
  CallBase *tryPromoteIndirectCall(Function &F, StringRef CalleeName,
                                   uint64_t &Sum, uint64_t Count, CallBase *I,
                                   const char *&Reason);
  bool inlineCallInstruction(CallBase &CB,
                             const FunctionSamples *CalleeSamples);
  bool inlineHotFunctions(Function &F,
                          DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  // Helper functions for the call-site prioritized BFS inliner.
  // Will change the main FDO inliner to be work list based directly in
  // upstream, then merge this change with that and remove the duplication.
  InlineCost shouldInlineCandidate(InlineCandidate &Candidate);
  bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB);
  bool tryInlineCandidate(InlineCandidate &Candidate,
                          SmallVector<CallBase *, 8> &InlinedCallSites);
  bool
  inlineHotFunctionsWithPriority(Function &F,
                                 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  // Inline cold/small functions in addition to hot ones
  bool shouldInlineColdCallee(CallBase &CallInst);
  void emitOptimizationRemarksForInlineCandidates(
      const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
      bool Hot);
  void printEdgeWeight(raw_ostream &OS, Edge E);
  void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const;
  void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB);
  bool computeBlockWeights(Function &F);
  void findEquivalenceClasses(Function &F);
  template <bool IsPostDom>
  void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
                           DominatorTreeBase<BasicBlock, IsPostDom> *DomTree);

  void propagateWeights(Function &F);
  uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
  void buildEdges(Function &F);
  std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
  bool propagateThroughEdges(Function &F, bool UpdateBlockCount);
  void computeDominanceAndLoopInfo(Function &F);
  void clearFunctionData();
  bool callsiteIsHot(const FunctionSamples *CallsiteFS,
                     ProfileSummaryInfo *PSI);

  /// Map basic blocks to their computed weights.
  ///
  /// The weight of a basic block is defined to be the maximum
  /// of all the instruction weights in that block.
  BlockWeightMap BlockWeights;

  /// Map edges to their computed weights.
  ///
  /// Edge weights are computed by propagating basic block weights in
  /// SampleProfile::propagateWeights.
  EdgeWeightMap EdgeWeights;

  /// Set of visited blocks during propagation.
  SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;

  /// Set of visited edges during propagation.
  SmallSet<Edge, 32> VisitedEdges;

  /// Equivalence classes for block weights.
  ///
  /// Two blocks BB1 and BB2 are in the same equivalence class if they
  /// dominate and post-dominate each other, and they are in the same loop
  /// nest. When this happens, the two blocks are guaranteed to execute
  /// the same number of times.
  EquivalenceClassMap EquivalenceClass;

  /// Map from function name to Function *. Used to find the function from
  /// the function name. If the function name contains suffix, additional
  /// entry is added to map from the stripped name to the function if there
  /// is one-to-one mapping.
  StringMap<Function *> SymbolMap;

  /// Dominance, post-dominance and loop information.
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<PostDominatorTree> PDT;
  std::unique_ptr<LoopInfo> LI;

  /// Callbacks to obtain per-function analyses from the pass manager.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;

  /// Predecessors for each basic block in the CFG.
  BlockEdgeMap Predecessors;

  /// Successors for each basic block in the CFG.
  BlockEdgeMap Successors;

  SampleCoverageTracker CoverageTracker;

  /// Profile reader object.
  std::unique_ptr<SampleProfileReader> Reader;

  /// Profile tracker for different context.
  std::unique_ptr<SampleContextTracker> ContextTracker;

  /// Samples collected for the body of this function.
  FunctionSamples *Samples = nullptr;

  /// Name of the profile file to load.
  std::string Filename;

  /// Name of the profile remapping file to load.
  std::string RemappingFilename;

  /// Flag indicating whether the profile input loaded successfully.
  bool ProfileIsValid = false;

  /// Flag indicating whether input profile is context-sensitive
  bool ProfileIsCS = false;

  /// Flag indicating which LTO/ThinLTO phase the pass is invoked in.
  ///
  /// We need to know the LTO phase because for example in ThinLTOPrelink
  /// phase, in annotation, we should not promote indirect calls. Instead,
  /// we will mark GUIDs that needs to be annotated to the function.
  ThinOrFullLTOPhase LTOPhase;

  /// Profile Summary Info computed from sample profile.
  ProfileSummaryInfo *PSI = nullptr;

  /// Profile Symbol list tells whether a function name appears in the binary
  /// used to generate the current profile.
  std::unique_ptr<ProfileSymbolList> PSL;

  /// Total number of samples collected in this profile.
  ///
  /// This is the sum of all the samples collected in all the functions executed
  /// at runtime.
  uint64_t TotalCollectedSamples = 0;

  /// Optimization Remark Emitter used to emit diagnostic remarks.
  OptimizationRemarkEmitter *ORE = nullptr;

  // Information recorded when we declined to inline a call site
  // because we have determined it is too cold is accumulated for
  // each callee function. Initially this is just the entry count.
  struct NotInlinedProfileInfo {
    uint64_t entryCount;
  };
  DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;

  // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  // all the function symbols defined or declared in current module.
  DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;

  // All the Names used in FunctionSamples including outline function
  // names, inline instance names and call target names.
  StringSet<> NamesInProfile;

  // For symbol in profile symbol list, whether to regard their profiles
  // to be accurate. It is mainly decided by existence of profile symbol
  // list and -profile-accurate-for-symsinlist flag, but it can be
  // overridden by -profile-sample-accurate or profile-sample-accurate
  // attribute.
  bool ProfAccForSymsInList;

  // External inline advisor used to replay inline decision from remarks.
  std::unique_ptr<ReplayInlineAdvisor> ExternalInlineAdvisor;

  // A pseudo probe helper to correlate the imported sample counts.
  std::unique_ptr<PseudoProbeManager> ProbeManager;
};
582 
/// Legacy pass manager wrapper around SampleProfileLoader. It forwards
/// module-level callbacks to the loader and supplies per-function analyses
/// via the legacy analysis wrapper passes.
class SampleProfileLoaderLegacyPass : public ModulePass {
public:
  // Class identification, replacement for typeinfo
  static char ID;

  SampleProfileLoaderLegacyPass(
      StringRef Name = SampleProfileFile,
      ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
      : ModulePass(ID), SampleLoader(
                            Name, SampleProfileRemappingFile, LTOPhase,
                            [&](Function &F) -> AssumptionCache & {
                              return ACT->getAssumptionCache(F);
                            },
                            [&](Function &F) -> TargetTransformInfo & {
                              return TTIWP->getTTI(F);
                            },
                            [&](Function &F) -> TargetLibraryInfo & {
                              return TLIWP->getTLI(F);
                            }) {
    initializeSampleProfileLoaderLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  // Dump the loaded profile (debugging aid).
  void dump() { SampleLoader.dump(); }

  bool doInitialization(Module &M) override {
    return SampleLoader.doInitialization(M);
  }

  StringRef getPassName() const override { return "Sample profile pass"; }
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }

private:
  SampleProfileLoader SampleLoader;
  // Wrapper passes cached in runOnModule; the constructor's lambdas read
  // them lazily, after the pass manager has populated them.
  AssumptionCacheTracker *ACT = nullptr;
  TargetTransformInfoWrapperPass *TTIWP = nullptr;
  TargetLibraryInfoWrapperPass *TLIWP = nullptr;
};
628 
629 } // end anonymous namespace
630 
631 /// Return true if the given callsite is hot wrt to hot cutoff threshold.
632 ///
633 /// Functions that were inlined in the original binary will be represented
634 /// in the inline stack in the sample profile. If the profile shows that
635 /// the original inline decision was "good" (i.e., the callsite is executed
636 /// frequently), then we will recreate the inline decision and apply the
637 /// profile from the inlined callsite.
638 ///
639 /// To decide whether an inlined callsite is hot, we compare the callsite
640 /// sample count with the hot cutoff computed by ProfileSummaryInfo, it is
641 /// regarded as hot if the count is above the cutoff value.
642 ///
643 /// When ProfileAccurateForSymsInList is enabled and profile symbol list
644 /// is present, functions in the profile symbol list but without profile will
645 /// be regarded as cold and much less inlining will happen in CGSCC inlining
646 /// pass, so we tend to lower the hot criteria here to allow more early
647 /// inlining to happen for warm callsites and it is helpful for performance.
648 bool SampleProfileLoader::callsiteIsHot(const FunctionSamples *CallsiteFS,
649                                         ProfileSummaryInfo *PSI) {
650   if (!CallsiteFS)
651     return false; // The callsite was not inlined in the original binary.
652 
653   assert(PSI && "PSI is expected to be non null");
654   uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
655   if (ProfAccForSymsInList)
656     return !PSI->isColdCount(CallsiteTotalSamples);
657   else
658     return PSI->isHotCount(CallsiteTotalSamples);
659 }
660 
661 /// Mark as used the sample record for the given function samples at
662 /// (LineOffset, Discriminator).
663 ///
664 /// \returns true if this is the first time we mark the given record.
665 bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
666                                             uint32_t LineOffset,
667                                             uint32_t Discriminator,
668                                             uint64_t Samples) {
669   LineLocation Loc(LineOffset, Discriminator);
670   unsigned &Count = SampleCoverage[FS][Loc];
671   bool FirstTime = (++Count == 1);
672   if (FirstTime)
673     TotalUsedSamples += Samples;
674   return FirstTime;
675 }
676 
677 /// Return the number of sample records that were applied from this profile.
678 ///
679 /// This count does not include records from cold inlined callsites.
680 unsigned
681 SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS,
682                                         ProfileSummaryInfo *PSI) const {
683   auto I = SampleCoverage.find(FS);
684 
685   // The size of the coverage map for FS represents the number of records
686   // that were marked used at least once.
687   unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0;
688 
689   // If there are inlined callsites in this function, count the samples found
690   // in the respective bodies. However, do not bother counting callees with 0
691   // total samples, these are callees that were never invoked at runtime.
692   for (const auto &I : FS->getCallsiteSamples())
693     for (const auto &J : I.second) {
694       const FunctionSamples *CalleeSamples = &J.second;
695       if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
696         Count += countUsedRecords(CalleeSamples, PSI);
697     }
698 
699   return Count;
700 }
701 
702 /// Return the number of sample records in the body of this profile.
703 ///
704 /// This count does not include records from cold inlined callsites.
705 unsigned
706 SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS,
707                                         ProfileSummaryInfo *PSI) const {
708   unsigned Count = FS->getBodySamples().size();
709 
710   // Only count records in hot callsites.
711   for (const auto &I : FS->getCallsiteSamples())
712     for (const auto &J : I.second) {
713       const FunctionSamples *CalleeSamples = &J.second;
714       if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
715         Count += countBodyRecords(CalleeSamples, PSI);
716     }
717 
718   return Count;
719 }
720 
721 /// Return the number of samples collected in the body of this profile.
722 ///
723 /// This count does not include samples from cold inlined callsites.
724 uint64_t
725 SampleCoverageTracker::countBodySamples(const FunctionSamples *FS,
726                                         ProfileSummaryInfo *PSI) const {
727   uint64_t Total = 0;
728   for (const auto &I : FS->getBodySamples())
729     Total += I.second.getSamples();
730 
731   // Only count samples in hot callsites.
732   for (const auto &I : FS->getCallsiteSamples())
733     for (const auto &J : I.second) {
734       const FunctionSamples *CalleeSamples = &J.second;
735       if (SPLoader.callsiteIsHot(CalleeSamples, PSI))
736         Total += countBodySamples(CalleeSamples, PSI);
737     }
738 
739   return Total;
740 }
741 
742 /// Return the fraction of sample records used in this profile.
743 ///
744 /// The returned value is an unsigned integer in the range 0-100 indicating
745 /// the percentage of sample records that were used while applying this
746 /// profile to the associated function.
747 unsigned SampleCoverageTracker::computeCoverage(unsigned Used,
748                                                 unsigned Total) const {
749   assert(Used <= Total &&
750          "number of used records cannot exceed the total number of records");
751   return Total > 0 ? Used * 100 / Total : 100;
752 }
753 
/// Clear all the per-function data used to load samples and propagate weights.
void SampleProfileLoader::clearFunctionData() {
  // Weight-propagation state computed for the previous function.
  BlockWeights.clear();
  EdgeWeights.clear();
  VisitedBlocks.clear();
  VisitedEdges.clear();
  EquivalenceClass.clear();
  // Analysis results are owned by the pass manager; just drop the cached
  // pointers so they are re-fetched for the next function.
  DT = nullptr;
  PDT = nullptr;
  LI = nullptr;
  // CFG adjacency caches and per-function coverage bookkeeping.
  Predecessors.clear();
  Successors.clear();
  CoverageTracker.clear();
}
768 
769 #ifndef NDEBUG
770 /// Print the weight of edge \p E on stream \p OS.
771 ///
772 /// \param OS  Stream to emit the output to.
773 /// \param E  Edge to print.
774 void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) {
775   OS << "weight[" << E.first->getName() << "->" << E.second->getName()
776      << "]: " << EdgeWeights[E] << "\n";
777 }
778 
779 /// Print the equivalence class of block \p BB on stream \p OS.
780 ///
781 /// \param OS  Stream to emit the output to.
782 /// \param BB  Block to print.
783 void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS,
784                                                 const BasicBlock *BB) {
785   const BasicBlock *Equiv = EquivalenceClass[BB];
786   OS << "equivalence[" << BB->getName()
787      << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
788 }
789 
790 /// Print the weight of block \p BB on stream \p OS.
791 ///
792 /// \param OS  Stream to emit the output to.
793 /// \param BB  Block to print.
794 void SampleProfileLoader::printBlockWeight(raw_ostream &OS,
795                                            const BasicBlock *BB) const {
796   const auto &I = BlockWeights.find(BB);
797   uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
798   OS << "weight[" << BB->getName() << "]: " << W << "\n";
799 }
800 #endif
801 
/// Get the weight for an instruction.
///
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
/// function. We use HeaderLineno to compute the offset. We then
/// look up the samples collected for \p Inst using BodySamples.
///
/// \param Inst Instruction to query.
///
/// \returns the weight of \p Inst.
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
  // Probe-based profiles key samples by pseudo-probe id rather than by
  // debug line location; delegate to the probe-based lookup.
  if (FunctionSamples::ProfileIsProbeBased)
    return getProbeWeight(Inst);

  const DebugLoc &DLoc = Inst.getDebugLoc();
  if (!DLoc)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  // Ignore all intrinsics, phi nodes and branch instructions.
  // Branch and phi node instructions usually contain debug info from sources
  // outside of the residing basic block, thus we ignore them during
  // annotation.
  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
    return std::error_code();

  // If a direct call/invoke instruction is inlined in profile
  // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
  // it means that the inlined callsite has no sample, thus the call
  // instruction should have 0 count.
  if (!ProfileIsCS)
    if (const auto *CB = dyn_cast<CallBase>(&Inst))
      if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
        return 0;

  // Look up the record for this instruction's (line offset, discriminator)
  // pair within the enclosing function's profile.
  const DILocation *DIL = DLoc;
  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator = DIL->getBaseDiscriminator();
  ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
  if (R) {
    bool FirstMark =
        CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
    // Emit the remark only the first time this record is applied, so each
    // sample record produces at most one "AppliedSamples" remark.
    if (FirstMark) {
      ORE->emit([&]() {
        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", *R);
        Remark << " samples from profile (offset: ";
        Remark << ore::NV("LineOffset", LineOffset);
        if (Discriminator) {
          Remark << ".";
          Remark << ore::NV("Discriminator", Discriminator);
        }
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG(dbgs() << "    " << DLoc.getLine() << "."
                      << DIL->getBaseDiscriminator() << ":" << Inst
                      << " (line offset: " << LineOffset << "."
                      << DIL->getBaseDiscriminator() << " - weight: " << R.get()
                      << ")\n");
  }
  return R;
}
869 
/// Get the weight for an instruction using a pseudo-probe based profile.
///
/// The weight is looked up in the enclosing function's profile by the id of
/// the pseudo probe attached to \p Inst, rather than by debug line location.
///
/// \param Inst Instruction to query.
///
/// \returns the number of samples collected on \p Inst, or an error code if
/// no probe or no profile is available for it.
ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) {
  assert(FunctionSamples::ProfileIsProbeBased &&
         "Profile is not pseudo probe based");
  Optional<PseudoProbe> Probe = extractProbe(Inst);
  if (!Probe)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  // If a direct call/invoke instruction is inlined in profile
  // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
  // it means that the inlined callsite has no sample, thus the call
  // instruction should have 0 count.
  if (const auto *CB = dyn_cast<CallBase>(&Inst))
    if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
      return 0;

  // Probe-based records use the probe id as the "line offset" key and a
  // discriminator of 0.
  const ErrorOr<uint64_t> &R = FS->findSamplesAt(Probe->Id, 0);
  if (R) {
    uint64_t Samples = R.get();
    bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples);
    // Emit the remark only the first time this record is applied.
    if (FirstMark) {
      ORE->emit([&]() {
        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", Samples);
        Remark << " samples from profile (ProbeId=";
        Remark << ore::NV("ProbeId", Probe->Id);
        Remark << ")";
        return Remark;
      });
    }

    LLVM_DEBUG(dbgs() << "    " << Probe->Id << ":" << Inst
                      << " - weight: " << R.get() << ")\n");
    return Samples;
  }
  return R;
}
910 
911 /// Compute the weight of a basic block.
912 ///
913 /// The weight of basic block \p BB is the maximum weight of all the
914 /// instructions in BB.
915 ///
916 /// \param BB The basic block to query.
917 ///
918 /// \returns the weight for \p BB.
919 ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) {
920   uint64_t Max = 0;
921   bool HasWeight = false;
922   for (auto &I : BB->getInstList()) {
923     const ErrorOr<uint64_t> &R = getInstWeight(I);
924     if (R) {
925       Max = std::max(Max, R.get());
926       HasWeight = true;
927     }
928   }
929   return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code();
930 }
931 
932 /// Compute and store the weights of every basic block.
933 ///
934 /// This populates the BlockWeights map by computing
935 /// the weights of every basic block in the CFG.
936 ///
937 /// \param F The function to query.
938 bool SampleProfileLoader::computeBlockWeights(Function &F) {
939   bool Changed = false;
940   LLVM_DEBUG(dbgs() << "Block weights\n");
941   for (const auto &BB : F) {
942     ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
943     if (Weight) {
944       BlockWeights[&BB] = Weight.get();
945       VisitedBlocks.insert(&BB);
946       Changed = true;
947     }
948     LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
949   }
950 
951   return Changed;
952 }
953 
954 /// Get the FunctionSamples for a call instruction.
955 ///
956 /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
957 /// instance in which that call instruction is calling to. It contains
958 /// all samples that resides in the inlined instance. We first find the
959 /// inlined instance in which the call instruction is from, then we
960 /// traverse its children to find the callsite with the matching
961 /// location.
962 ///
963 /// \param Inst Call/Invoke instruction to query.
964 ///
965 /// \returns The FunctionSamples pointer to the inlined instance.
966 const FunctionSamples *
967 SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
968   const DILocation *DIL = Inst.getDebugLoc();
969   if (!DIL) {
970     return nullptr;
971   }
972 
973   StringRef CalleeName;
974   if (Function *Callee = Inst.getCalledFunction())
975     CalleeName = FunctionSamples::getCanonicalFnName(*Callee);
976 
977   if (ProfileIsCS)
978     return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName);
979 
980   const FunctionSamples *FS = findFunctionSamples(Inst);
981   if (FS == nullptr)
982     return nullptr;
983 
984   return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL),
985                                    CalleeName, Reader->getRemapper());
986 }
987 
/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted by the total number of samples. Stores
/// the total call count of the indirect call in \p Sum.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
    const Instruction &Inst, uint64_t &Sum) const {
  const DILocation *DIL = Inst.getDebugLoc();
  std::vector<const FunctionSamples *> R;

  if (!DIL) {
    return R;
  }

  // Order targets by descending entry samples; break ties deterministically
  // by GUID so the result is stable across runs.
  auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
    assert(L && R && "Expect non-null FunctionSamples");
    if (L->getEntrySamples() != R->getEntrySamples())
      return L->getEntrySamples() > R->getEntrySamples();
    return FunctionSamples::getGUID(L->getName()) <
           FunctionSamples::getGUID(R->getName());
  };

  if (ProfileIsCS) {
    auto CalleeSamples =
        ContextTracker->getIndirectCalleeContextSamplesFor(DIL);
    if (CalleeSamples.empty())
      return R;

    // For CSSPGO, we only use target context profile's entry count
    // as that already includes both inlined callee and non-inlined ones..
    Sum = 0;
    for (const auto *const FS : CalleeSamples) {
      Sum += FS->getEntrySamples();
      R.push_back(FS);
    }
    llvm::sort(R, FSCompare);
    return R;
  }

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return R;

  // Non-CS path: Sum starts from the call target map (non-inlined targets)
  // and then also accumulates entry samples of inlined callee profiles.
  auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
  auto T = FS->findCallTargetMapAt(CallSite);
  Sum = 0;
  if (T)
    for (const auto &T_C : T.get())
      Sum += T_C.second;
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
    if (M->empty())
      return R;
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getEntrySamples();
      R.push_back(&NameFS.second);
    }
    llvm::sort(R, FSCompare);
  }
  return R;
}
1047 
1048 /// Get the FunctionSamples for an instruction.
1049 ///
1050 /// The FunctionSamples of an instruction \p Inst is the inlined instance
1051 /// in which that instruction is coming from. We traverse the inline stack
1052 /// of that instruction, and match it with the tree nodes in the profile.
1053 ///
1054 /// \param Inst Instruction to query.
1055 ///
1056 /// \returns the FunctionSamples pointer to the inlined instance.
1057 const FunctionSamples *
1058 SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
1059   if (FunctionSamples::ProfileIsProbeBased) {
1060     Optional<PseudoProbe> Probe = extractProbe(Inst);
1061     if (!Probe)
1062       return nullptr;
1063   }
1064 
1065   const DILocation *DIL = Inst.getDebugLoc();
1066   if (!DIL)
1067     return Samples;
1068 
1069   auto it = DILocation2SampleMap.try_emplace(DIL,nullptr);
1070   if (it.second) {
1071     if (ProfileIsCS)
1072       it.first->second = ContextTracker->getContextSamplesFor(DIL);
1073     else
1074       it.first->second =
1075           Samples->findFunctionSamples(DIL, Reader->getRemapper());
1076   }
1077   return it.first->second;
1078 }
1079 
/// Try to promote indirect call \p I in \p F to a direct call to the symbol
/// named \p CalleeName.
///
/// \param F The caller function (used to reject self-recursive promotion).
/// \param CalleeName Name of the promotion target.
/// \param Sum Total indirect call count; decremented by \p Count on success.
/// \param Count Call count attributed to the promoted target.
/// \param I The indirect call to promote.
/// \param Reason Set to a human-readable failure reason when returning null.
///
/// \returns the promoted direct call on success, nullptr otherwise.
CallBase *
SampleProfileLoader::tryPromoteIndirectCall(Function &F, StringRef CalleeName,
                                            uint64_t &Sum, uint64_t Count,
                                            CallBase *I, const char *&Reason) {
  Reason = "Callee function not available";
  // R->getValue() != &F is to prevent promoting a recursive call.
  // If it is a recursive call, we do not inline it as it could bloat
  // the code exponentially. There are ways to better handle this, e.g.
  // clone the caller first, and inline the cloned caller if it is
  // recursive. As llvm does not inline recursive calls, we will
  // simply ignore it instead of handling it explicitly.
  auto R = SymbolMap.find(CalleeName);
  if (R != SymbolMap.end() && R->getValue() &&
      !R->getValue()->isDeclaration() && R->getValue()->getSubprogram() &&
      R->getValue()->hasFnAttribute("use-sample-profile") &&
      R->getValue() != &F && isLegalToPromote(*I, R->getValue(), &Reason)) {
    auto *DI =
        &pgo::promoteIndirectCall(*I, R->getValue(), Count, Sum, false, ORE);
    // The promoted target's count has now been accounted for directly.
    Sum -= Count;
    return DI;
  }
  return nullptr;
}
1103 
/// Inline the call site \p CB if legal and (when replaying) recommended.
///
/// \param CB The call site to inline.
/// \param CalleeSamples Profile of the callee, used to mark context samples
///        as inlined for CS profiles. May be null.
///
/// \returns true if the call site was inlined.
bool SampleProfileLoader::inlineCallInstruction(
    CallBase &CB, const FunctionSamples *CalleeSamples) {
  // When replaying a previous inline decision log, defer to the advisor.
  if (ExternalInlineAdvisor) {
    auto Advice = ExternalInlineAdvisor->getAdvice(CB);
    if (!Advice->isInliningRecommended()) {
      Advice->recordUnattemptedInlining();
      return false;
    }
    // Dummy record, we don't use it for replay.
    Advice->recordInlining();
  }

  Function *CalledFunction = CB.getCalledFunction();
  assert(CalledFunction);
  DebugLoc DLoc = CB.getDebugLoc();
  BasicBlock *BB = CB.getParent();
  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Checks if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. Need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when cost exceeds threshold without checking all IRs in the callee.
  // The actual cost does not matter because we only check isNever() to
  // see if it is legal to inline the callsite.
  InlineCost Cost =
      getInlineCost(CB, Params, GetTTI(*CalledFunction), GetAC, GetTLI);
  if (Cost.isNever()) {
    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
              << "incompatible inlining");
    return false;
  }
  InlineFunctionInfo IFI(nullptr, GetAC);
  if (InlineFunction(CB, IFI).isSuccess()) {
    // The call to InlineFunction erases I, so we can't pass it here.
    emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
                    true, CSINLINE_DEBUG);
    if (ProfileIsCS)
      ContextTracker->markContextSamplesInlined(CalleeSamples);
    ++NumCSInlined;
    return true;
  }
  return false;
}
1147 
1148 bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
1149   if (!ProfileSizeInline)
1150     return false;
1151 
1152   Function *Callee = CallInst.getCalledFunction();
1153   if (Callee == nullptr)
1154     return false;
1155 
1156   InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
1157                                   GetAC, GetTLI);
1158 
1159   if (Cost.isNever())
1160     return false;
1161 
1162   if (Cost.isAlways())
1163     return true;
1164 
1165   return Cost.getCost() <= SampleColdCallSiteThreshold;
1166 }
1167 
1168 void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
1169     const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
1170     bool Hot) {
1171   for (auto I : Candidates) {
1172     Function *CalledFunction = I->getCalledFunction();
1173     if (CalledFunction) {
1174       ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
1175                                            I->getDebugLoc(), I->getParent())
1176                 << "previous inlining reattempted for "
1177                 << (Hot ? "hotness: '" : "size: '")
1178                 << ore::NV("Callee", CalledFunction) << "' into '"
1179                 << ore::NV("Caller", &F) << "'");
1180     }
1181   }
1182 }
1183 
/// Iteratively inline hot callsites of a function.
///
/// Iteratively traverse all callsites of the function \p F, and find if
/// the corresponding inlined instance exists and is hot in profile. If
/// it is hot enough, inline the callsites and adds new callsites of the
/// callee into the caller. If the call is an indirect call, first promote
/// it to direct call. Each indirect call is limited with a single target.
///
/// \param F function to perform iterative inlining.
/// \param InlinedGUIDs a set to be updated to include all GUIDs that are
///     inlined in the profiled binary.
///
/// \returns True if there is any inline happened.
bool SampleProfileLoader::inlineHotFunctions(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  // Tracks indirect calls already promoted so they are not promoted twice.
  DenseSet<Instruction *> PromotedInsns;

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  // Call sites that had an inlined profile instance but were not inlined
  // here; used afterwards to merge or account for their samples.
  DenseMap<CallBase *, const FunctionSamples *> localNotInlinedCallSites;
  bool Changed = false;
  // Iterate until a pass over the function performs no inlining, since each
  // inline can expose new candidate call sites in the caller.
  while (true) {
    bool LocalChanged = false;
    SmallVector<CallBase *, 10> CIS;
    for (auto &BB : F) {
      bool Hot = false;
      SmallVector<CallBase *, 10> AllCandidates;
      SmallVector<CallBase *, 10> ColdCandidates;
      for (auto &I : BB.getInstList()) {
        const FunctionSamples *FS = nullptr;
        if (auto *CB = dyn_cast<CallBase>(&I)) {
          if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) {
            assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
                   "GUIDToFuncNameMap has to be populated");
            AllCandidates.push_back(CB);
            if (FS->getEntrySamples() > 0 || ProfileIsCS)
              localNotInlinedCallSites.try_emplace(CB, FS);
            if (callsiteIsHot(FS, PSI))
              Hot = true;
            else if (shouldInlineColdCallee(*CB))
              ColdCandidates.push_back(CB);
          }
        }
      }
      // A block with any hot callsite (or replay advisor) contributes all of
      // its candidates; otherwise only the size-based cold candidates.
      if (Hot || ExternalInlineAdvisor) {
        CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
        emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
      } else {
        CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
        emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
      }
    }
    for (CallBase *I : CIS) {
      Function *CalledFunction = I->getCalledFunction();
      // Do not inline recursive calls.
      if (CalledFunction == &F)
        continue;
      if (I->isIndirectCall()) {
        if (PromotedInsns.count(I))
          continue;
        uint64_t Sum;
        for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
          // In ThinLTO prelink, just record the inlined GUIDs so the
          // backend's importer can see them; actual ICP happens later.
          if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
            FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                     PSI->getOrCompHotCountThreshold());
            continue;
          }
          if (!callsiteIsHot(FS, PSI))
            continue;

          const char *Reason = nullptr;
          auto CalleeFunctionName = FS->getFuncName();
          if (CallBase *DI =
                  tryPromoteIndirectCall(F, CalleeFunctionName, Sum,
                                         FS->getEntrySamples(), I, Reason)) {
            PromotedInsns.insert(I);
            // If profile mismatches, we should not attempt to inline DI.
            if ((isa<CallInst>(DI) || isa<InvokeInst>(DI)) &&
                inlineCallInstruction(cast<CallBase>(*DI), FS)) {
              localNotInlinedCallSites.erase(I);
              LocalChanged = true;
            }
          } else {
            LLVM_DEBUG(dbgs()
                       << "\nFailed to promote indirect call to "
                       << CalleeFunctionName << " because " << Reason << "\n");
          }
        }
      } else if (CalledFunction && CalledFunction->getSubprogram() &&
                 !CalledFunction->isDeclaration()) {
        if (inlineCallInstruction(*I, localNotInlinedCallSites.count(I)
                                          ? localNotInlinedCallSites[I]
                                          : nullptr)) {
          localNotInlinedCallSites.erase(I);
          LocalChanged = true;
        }
      } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
        findCalleeFunctionSamples(*I)->findInlinedFunctions(
            InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
      }
    }
    if (LocalChanged) {
      Changed = true;
    } else {
      break;
    }
  }

  // For CS profile, profile for not inlined context will be merged when
  // base profile is being retrieved.
  if (ProfileIsCS)
    return Changed;

  // Accumulate not inlined callsite information into notInlinedSamples
  for (const auto &Pair : localNotInlinedCallSites) {
    CallBase *I = Pair.getFirst();
    Function *Callee = I->getCalledFunction();
    if (!Callee || Callee->isDeclaration())
      continue;

    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline",
                                         I->getDebugLoc(), I->getParent())
              << "previous inlining not repeated: '"
              << ore::NV("Callee", Callee) << "' into '"
              << ore::NV("Caller", &F) << "'");

    ++NumCSNotInlined;
    const FunctionSamples *FS = Pair.getSecond();
    if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
      continue;
    }

    if (ProfileMergeInlinee) {
      // A function call can be replicated by optimizations like callsite
      // splitting or jump threading and the replicates end up sharing the
      // sample nested callee profile instead of slicing the original inlinee's
      // profile. We want to do merge exactly once by filtering out callee
      // profiles with a non-zero head sample count.
      if (FS->getHeadSamples() == 0) {
        // Use entry samples as head samples during the merge, as inlinees
        // don't have head samples.
        const_cast<FunctionSamples *>(FS)->addHeadSamples(
            FS->getEntrySamples());

        // Note that we have to do the merge right after processing function.
        // This allows OutlineFS's profile to be used for annotation during
        // top-down processing of functions' annotation.
        FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
        OutlineFS->merge(*FS);
      }
    } else {
      auto pair =
          notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
      pair.first->second.entryCount += FS->getEntrySamples();
    }
  }
  return Changed;
}
1348 
/// Attempt to inline \p Candidate after passing the cost check.
///
/// \param Candidate The inline candidate; its call instruction must be a
///        direct call with a definition.
/// \param InlinedCallSites Output list, replaced with the call sites newly
///        exposed inside the inlined body.
///
/// \returns true if the candidate was inlined.
bool SampleProfileLoader::tryInlineCandidate(
    InlineCandidate &Candidate, SmallVector<CallBase *, 8> &InlinedCallSites) {

  CallBase &CB = *Candidate.CallInstr;
  Function *CalledFunction = CB.getCalledFunction();
  assert(CalledFunction && "Expect a callee with definition");
  DebugLoc DLoc = CB.getDebugLoc();
  BasicBlock *BB = CB.getParent();

  InlineCost Cost = shouldInlineCandidate(Candidate);
  // Never-inline verdicts indicate the inline would be illegal here.
  if (Cost.isNever()) {
    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
              << "incompatible inlining");
    return false;
  }

  // Legal but not profitable under the current threshold.
  if (!Cost)
    return false;

  InlineFunctionInfo IFI(nullptr, GetAC);
  if (InlineFunction(CB, IFI).isSuccess()) {
    // The call to InlineFunction erases I, so we can't pass it here.
    emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
                    true, CSINLINE_DEBUG);

    // Now populate the list of newly exposed call sites.
    InlinedCallSites.clear();
    for (auto &I : IFI.InlinedCallSites)
      InlinedCallSites.push_back(I);

    if (ProfileIsCS)
      ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples);
    ++NumCSInlined;
    return true;
  }
  return false;
}
1386 
1387 bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
1388                                              CallBase *CB) {
1389   assert(CB && "Expect non-null call instruction");
1390 
1391   if (isa<IntrinsicInst>(CB))
1392     return false;
1393 
1394   // Find the callee's profile. For indirect call, find hottest target profile.
1395   const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
1396   if (!CalleeSamples)
1397     return false;
1398 
1399   uint64_t CallsiteCount = 0;
1400   ErrorOr<uint64_t> Weight = getBlockWeight(CB->getParent());
1401   if (Weight)
1402     CallsiteCount = Weight.get();
1403   if (CalleeSamples)
1404     CallsiteCount = std::max(CallsiteCount, CalleeSamples->getEntrySamples());
1405 
1406   *NewCandidate = {CB, CalleeSamples, CallsiteCount};
1407   return true;
1408 }
1409 
/// Compute the inline cost verdict for \p Candidate.
///
/// \param Candidate The candidate to evaluate.
///
/// \returns a Never cost if inlining is illegal or the callsite is cold, an
/// Always cost if mandated (replay advisor or old FDO inliner), or otherwise
/// the analyzer's cost with a sample-PGO-adjusted threshold.
InlineCost
SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
  assert(ProfileIsCS && "Priority based inliner only works with CSSPGO now");

  // When replaying a previous inline decision log, the advisor's verdict is
  // final; no cost analysis is performed.
  std::unique_ptr<InlineAdvice> Advice = nullptr;
  if (ExternalInlineAdvisor) {
    Advice = ExternalInlineAdvisor->getAdvice(*Candidate.CallInstr);
    if (!Advice->isInliningRecommended()) {
      Advice->recordUnattemptedInlining();
      return InlineCost::getNever("not previously inlined");
    }
    Advice->recordInlining();
    return InlineCost::getAlways("previously inlined");
  }

  // Adjust threshold based on call site hotness, only do this for callsite
  // prioritized inliner because otherwise cost-benefit check is done earlier.
  int SampleThreshold = SampleColdCallSiteThreshold;
  if (CallsitePrioritizedInline) {
    if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
      SampleThreshold = SampleHotCallSiteThreshold;
    else if (!ProfileSizeInline)
      return InlineCost::getNever("cold callsite");
  }

  Function *Callee = Candidate.CallInstr->getCalledFunction();
  assert(Callee && "Expect a definition for inline candidate of direct call");

  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Checks if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. Need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when cost exceeds threshold without checking all IRs in the callee.
  // The actual cost does not matter because we only check isNever() to
  // see if it is legal to inline the callsite.
  InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
                                  GetTTI(*Callee), GetAC, GetTLI);

  // For old FDO inliner, we inline the call site as long as cost is not
  // "Never". The cost-benefit check is done earlier.
  if (!CallsitePrioritizedInline) {
    if (Cost.isNever())
      return Cost;
    return InlineCost::getAlways("hot callsite previously inlined");
  }

  // Honor always inline and never inline from call analyzer
  if (Cost.isNever() || Cost.isAlways())
    return Cost;

  // Otherwise only use the cost from call analyzer, but overwrite threshold
  // with Sample PGO threshold.
  return InlineCost::get(Cost.getCost(), SampleThreshold);
}
1465 
/// Inline hot call sites in \p F using a priority queue of candidates.
///
/// Unlike the legacy FDO inliner, which walks call sites in source order,
/// this CSSPGO inliner performs an iterative BFS over call sites ordered by
/// candidate priority (see CandidateQueue), so the most profitable
/// candidates are considered first within a profile-guided size budget.
///
/// \param F  Caller function to process.
/// \param InlinedGUIDs  Output set of GUIDs for functions that were inlined
///                      in the profiled binary but are not inlined here;
///                      populated during the ThinLTO pre-link phase so the
///                      thin link can keep those definitions alive.
///
/// \returns true if any call site in \p F was inlined (including via
///          indirect call promotion followed by inlining).
bool SampleProfileLoader::inlineHotFunctionsWithPriority(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  // Call instructions already promoted from indirect to direct calls; used
  // to avoid promoting the same instruction more than once.
  DenseSet<Instruction *> PromotedInsns;
  assert(ProfileIsCS && "Prioritiy based inliner only works with CSSPGO now");

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  // Populating worklist with initial call sites from root inliner, along
  // with call site weights.
  CandidateQueue CQueue;
  InlineCandidate NewCandidate;
  for (auto &BB : F) {
    for (auto &I : BB.getInstList()) {
      auto *CB = dyn_cast<CallBase>(&I);
      if (!CB)
        continue;
      if (getInlineCandidate(&NewCandidate, CB))
        CQueue.push(NewCandidate);
    }
  }

  // Cap the size growth from profile guided inlining. This is needed even
  // though cost of each inline candidate already accounts for callee size,
  // because with top-down inlining, we can grow inliner size significantly
  // with large number of smaller inlinees each passing the cost check.
  assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
         "Max inline size limit should not be smaller than min inline size "
         "limit.");
  unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
  SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
  SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
  // With an external (e.g. replay) advisor in effect, inlining decisions are
  // delegated to it, so disable the profile-driven size cap entirely.
  if (ExternalInlineAdvisor)
    SizeLimit = std::numeric_limits<unsigned>::max();

  // Perform iterative BFS call site prioritized inlining
  bool Changed = false;
  while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
    InlineCandidate Candidate = CQueue.top();
    CQueue.pop();
    CallBase *I = Candidate.CallInstr;
    Function *CalledFunction = I->getCalledFunction();

    // Never inline a function into itself.
    if (CalledFunction == &F)
      continue;
    if (I->isIndirectCall()) {
      if (PromotedInsns.count(I))
        continue;
      uint64_t Sum;
      auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
      uint64_t SumOrigin = Sum;
      for (const auto *FS : CalleeSamples) {
        // TODO: Consider disable pre-LTO ICP for MonoLTO as well
        if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
          FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                   PSI->getOrCompHotCountThreshold());
          continue;
        }
        uint64_t EntryCountDistributed = FS->getEntrySamples();
        // In addition to regular inline cost check, we also need to make sure
        // ICP isn't introducing excessive speculative checks even if individual
        // target looks beneficial to promote and inline. That means we should
        // only do ICP when there's a small number of dominant targets.
        if (EntryCountDistributed < SumOrigin / ProfileICPThreshold)
          break;
        // TODO: Fix CallAnalyzer to handle all indirect calls.
        // For indirect call, we don't run CallAnalyzer to get InlineCost
        // before actual inlining. This is because we could see two different
        // types from the same definition, which makes CallAnalyzer choke as
        // it's expecting matching parameter type on both caller and callee
        // side. See example from PR18962 for the triggering cases (the bug was
        // fixed, but we generate different types).
        if (!PSI->isHotCount(EntryCountDistributed))
          break;
        const char *Reason = nullptr;
        auto CalleeFunctionName = FS->getFuncName();
        if (CallBase *DI = tryPromoteIndirectCall(
                F, CalleeFunctionName, Sum, EntryCountDistributed, I, Reason)) {
          // Attach function profile for promoted indirect callee, and update
          // call site count for the promoted inline candidate too.
          Candidate = {DI, FS, EntryCountDistributed};
          PromotedInsns.insert(I);
          SmallVector<CallBase *, 8> InlinedCallSites;
          // If profile mismatches, we should not attempt to inline DI.
          if ((isa<CallInst>(DI) || isa<InvokeInst>(DI)) &&
              tryInlineCandidate(Candidate, InlinedCallSites)) {
            // Call sites exposed by inlining go back into the queue so they
            // compete on priority like the initial candidates.
            for (auto *CB : InlinedCallSites) {
              if (getInlineCandidate(&NewCandidate, CB))
                CQueue.emplace(NewCandidate);
            }
            Changed = true;
          }
        } else {
          LLVM_DEBUG(dbgs()
                     << "\nFailed to promote indirect call to "
                     << CalleeFunctionName << " because " << Reason << "\n");
        }
      }
    } else if (CalledFunction && CalledFunction->getSubprogram() &&
               !CalledFunction->isDeclaration()) {
      // Direct call to a definition with debug info: try to inline it and
      // re-queue any call sites that inlining exposes.
      SmallVector<CallBase *, 8> InlinedCallSites;
      if (tryInlineCandidate(Candidate, InlinedCallSites)) {
        for (auto *CB : InlinedCallSites) {
          if (getInlineCandidate(&NewCandidate, CB))
            CQueue.emplace(NewCandidate);
        }
        Changed = true;
      }
    } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
      // Not inlinable here; record the callees that were inlined in the
      // profiled binary so the thin link can import them for a later
      // inlining opportunity.
      findCalleeFunctionSamples(*I)->findInlinedFunctions(
          InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
    }
  }

  // Record which limit stopped the queue from draining, for statistics.
  if (!CQueue.empty()) {
    if (SizeLimit == (unsigned)ProfileInlineLimitMax)
      ++NumCSInlinedHitMaxLimit;
    else if (SizeLimit == (unsigned)ProfileInlineLimitMin)
      ++NumCSInlinedHitMinLimit;
    else
      ++NumCSInlinedHitGrowthLimit;
  }

  return Changed;
}
1596 
1597 /// Find equivalence classes for the given block.
1598 ///
1599 /// This finds all the blocks that are guaranteed to execute the same
1600 /// number of times as \p BB1. To do this, it traverses all the
1601 /// descendants of \p BB1 in the dominator or post-dominator tree.
1602 ///
1603 /// A block BB2 will be in the same equivalence class as \p BB1 if
1604 /// the following holds:
1605 ///
1606 /// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
1607 ///    is a descendant of \p BB1 in the dominator tree, then BB2 should
1608 ///    dominate BB1 in the post-dominator tree.
1609 ///
1610 /// 2- Both BB2 and \p BB1 must be in the same loop.
1611 ///
1612 /// For every block BB2 that meets those two requirements, we set BB2's
1613 /// equivalence class to \p BB1.
1614 ///
1615 /// \param BB1  Block to check.
1616 /// \param Descendants  Descendants of \p BB1 in either the dom or pdom tree.
1617 /// \param DomTree  Opposite dominator tree. If \p Descendants is filled
1618 ///                 with blocks from \p BB1's dominator tree, then
1619 ///                 this is the post-dominator tree, and vice versa.
1620 template <bool IsPostDom>
1621 void SampleProfileLoader::findEquivalencesFor(
1622     BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
1623     DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) {
1624   const BasicBlock *EC = EquivalenceClass[BB1];
1625   uint64_t Weight = BlockWeights[EC];
1626   for (const auto *BB2 : Descendants) {
1627     bool IsDomParent = DomTree->dominates(BB2, BB1);
1628     bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
1629     if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
1630       EquivalenceClass[BB2] = EC;
1631       // If BB2 is visited, then the entire EC should be marked as visited.
1632       if (VisitedBlocks.count(BB2)) {
1633         VisitedBlocks.insert(EC);
1634       }
1635 
1636       // If BB2 is heavier than BB1, make BB2 have the same weight
1637       // as BB1.
1638       //
1639       // Note that we don't worry about the opposite situation here
1640       // (when BB2 is lighter than BB1). We will deal with this
1641       // during the propagation phase. Right now, we just want to
1642       // make sure that BB1 has the largest weight of all the
1643       // members of its equivalence set.
1644       Weight = std::max(Weight, BlockWeights[BB2]);
1645     }
1646   }
1647   if (EC == &EC->getParent()->getEntryBlock()) {
1648     BlockWeights[EC] = Samples->getHeadSamples() + 1;
1649   } else {
1650     BlockWeights[EC] = Weight;
1651   }
1652 }
1653 
1654 /// Find equivalence classes.
1655 ///
1656 /// Since samples may be missing from blocks, we can fill in the gaps by setting
1657 /// the weights of all the blocks in the same equivalence class to the same
1658 /// weight. To compute the concept of equivalence, we use dominance and loop
1659 /// information. Two blocks B1 and B2 are in the same equivalence class if B1
1660 /// dominates B2, B2 post-dominates B1 and both are in the same loop.
1661 ///
1662 /// \param F The function to query.
1663 void SampleProfileLoader::findEquivalenceClasses(Function &F) {
1664   SmallVector<BasicBlock *, 8> DominatedBBs;
1665   LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
1666   // Find equivalence sets based on dominance and post-dominance information.
1667   for (auto &BB : F) {
1668     BasicBlock *BB1 = &BB;
1669 
1670     // Compute BB1's equivalence class once.
1671     if (EquivalenceClass.count(BB1)) {
1672       LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
1673       continue;
1674     }
1675 
1676     // By default, blocks are in their own equivalence class.
1677     EquivalenceClass[BB1] = BB1;
1678 
1679     // Traverse all the blocks dominated by BB1. We are looking for
1680     // every basic block BB2 such that:
1681     //
1682     // 1- BB1 dominates BB2.
1683     // 2- BB2 post-dominates BB1.
1684     // 3- BB1 and BB2 are in the same loop nest.
1685     //
1686     // If all those conditions hold, it means that BB2 is executed
1687     // as many times as BB1, so they are placed in the same equivalence
1688     // class by making BB2's equivalence class be BB1.
1689     DominatedBBs.clear();
1690     DT->getDescendants(BB1, DominatedBBs);
1691     findEquivalencesFor(BB1, DominatedBBs, PDT.get());
1692 
1693     LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
1694   }
1695 
1696   // Assign weights to equivalence classes.
1697   //
1698   // All the basic blocks in the same equivalence class will execute
1699   // the same number of times. Since we know that the head block in
1700   // each equivalence class has the largest weight, assign that weight
1701   // to all the blocks in that equivalence class.
1702   LLVM_DEBUG(
1703       dbgs() << "\nAssign the same weight to all blocks in the same class\n");
1704   for (auto &BI : F) {
1705     const BasicBlock *BB = &BI;
1706     const BasicBlock *EquivBB = EquivalenceClass[BB];
1707     if (BB != EquivBB)
1708       BlockWeights[BB] = BlockWeights[EquivBB];
1709     LLVM_DEBUG(printBlockWeight(dbgs(), BB));
1710   }
1711 }
1712 
1713 /// Visit the given edge to decide if it has a valid weight.
1714 ///
1715 /// If \p E has not been visited before, we copy to \p UnknownEdge
1716 /// and increment the count of unknown edges.
1717 ///
1718 /// \param E  Edge to visit.
1719 /// \param NumUnknownEdges  Current number of unknown edges.
1720 /// \param UnknownEdge  Set if E has not been visited before.
1721 ///
1722 /// \returns E's weight, if known. Otherwise, return 0.
1723 uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges,
1724                                         Edge *UnknownEdge) {
1725   if (!VisitedEdges.count(E)) {
1726     (*NumUnknownEdges)++;
1727     *UnknownEdge = E;
1728     return 0;
1729   }
1730 
1731   return EdgeWeights[E];
1732 }
1733 
/// Propagate weights through incoming/outgoing edges.
///
/// If the weight of a basic block is known, and there is only one edge
/// with an unknown weight, we can calculate the weight of that edge.
///
/// Similarly, if all the edges have a known count, we can calculate the
/// count of the basic block, if needed.
///
/// Note that block weights are read and written at the granularity of
/// equivalence classes: all accesses go through EquivalenceClass[BB].
///
/// \param F  Function to process.
/// \param UpdateBlockCount  Whether we should update basic block counts that
///                          has already been annotated.
///
/// \returns  True if new weights were assigned to edges or blocks.
bool SampleProfileLoader::propagateThroughEdges(Function &F,
                                                bool UpdateBlockCount) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
  for (const auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EC = EquivalenceClass[BB];

    // Visit all the predecessor and successor edges to determine
    // which ones have a weight assigned already. Note that it doesn't
    // matter that we only keep track of a single unknown edge. The
    // only case we are interested in handling is when only a single
    // edge is unknown (see setEdgeOrBlockWeight).
    // Round i == 0 examines predecessor edges; i == 1 examines successors.
    for (unsigned i = 0; i < 2; i++) {
      uint64_t TotalWeight = 0;
      unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
      Edge UnknownEdge, SelfReferentialEdge, SingleEdge;

      if (i == 0) {
        // First, visit all predecessor edges.
        NumTotalEdges = Predecessors[BB].size();
        for (auto *Pred : Predecessors[BB]) {
          Edge E = std::make_pair(Pred, BB);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
          if (E.first == E.second)
            SelfReferentialEdge = E;
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(Predecessors[BB][0], BB);
        }
      } else {
        // On the second round, visit all successor edges.
        NumTotalEdges = Successors[BB].size();
        for (auto *Succ : Successors[BB]) {
          Edge E = std::make_pair(BB, Succ);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(BB, Successors[BB][0]);
        }
      }

      // After visiting all the edges, there are three cases that we
      // can handle immediately:
      //
      // - All the edge weights are known (i.e., NumUnknownEdges == 0).
      //   In this case, we simply check that the sum of all the edges
      //   is the same as BB's weight. If not, we change BB's weight
      //   to match. Additionally, if BB had not been visited before,
      //   we mark it visited.
      //
      // - Only one edge is unknown and BB has already been visited.
      //   In this case, we can compute the weight of the edge by
      //   subtracting the total block weight from all the known
      //   edge weights. If the known edges weigh more than BB, then the
      //   weight of the last remaining edge is set to zero.
      //
      // - There exists a self-referential edge and the weight of BB is
      //   known. In this case, this edge can be based on BB's weight.
      //   We add up all the other known edges and set the weight on
      //   the self-referential edge as we did in the previous case.
      //
      // In any other case, we must continue iterating. Eventually,
      // all edges will get a weight, or iteration will stop when
      // it reaches SampleProfileMaxPropagateIterations.
      if (NumUnknownEdges <= 1) {
        uint64_t &BBWeight = BlockWeights[EC];
        if (NumUnknownEdges == 0) {
          if (!VisitedBlocks.count(EC)) {
            // If we already know the weight of all edges, the weight of the
            // basic block can be computed. It should be no larger than the sum
            // of all edge weights.
            if (TotalWeight > BBWeight) {
              BBWeight = TotalWeight;
              Changed = true;
              LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
                                << " known. Set weight for block: ";
                         printBlockWeight(dbgs(), BB););
            }
          } else if (NumTotalEdges == 1 &&
                     EdgeWeights[SingleEdge] < BlockWeights[EC]) {
            // If there is only one edge for the visited basic block, use the
            // block weight to adjust edge weight if edge weight is smaller.
            EdgeWeights[SingleEdge] = BlockWeights[EC];
            Changed = true;
          }
        } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
          // If there is a single unknown edge and the block has been
          // visited, then we can compute E's weight.
          if (BBWeight >= TotalWeight)
            EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
          else
            EdgeWeights[UnknownEdge] = 0;
          // Clamp against the weight of the class on the other end of the
          // unknown edge, if that class has been visited.
          const BasicBlock *OtherEC;
          if (i == 0)
            OtherEC = EquivalenceClass[UnknownEdge.first];
          else
            OtherEC = EquivalenceClass[UnknownEdge.second];
          // Edge weights should never exceed the BB weights it connects.
          if (VisitedBlocks.count(OtherEC) &&
              EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
            EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
          VisitedEdges.insert(UnknownEdge);
          Changed = true;
          LLVM_DEBUG(dbgs() << "Set weight for edge: ";
                     printEdgeWeight(dbgs(), UnknownEdge));
        }
      } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
        // If a block weighs 0, all its in/out edges should weigh 0.
        if (i == 0) {
          for (auto *Pred : Predecessors[BB]) {
            Edge E = std::make_pair(Pred, BB);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        } else {
          for (auto *Succ : Successors[BB]) {
            Edge E = std::make_pair(BB, Succ);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        }
      } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
        uint64_t &BBWeight = BlockWeights[BB];
        // We have a self-referential edge and the weight of BB is known.
        if (BBWeight >= TotalWeight)
          EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
        else
          EdgeWeights[SelfReferentialEdge] = 0;
        VisitedEdges.insert(SelfReferentialEdge);
        Changed = true;
        LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
                   printEdgeWeight(dbgs(), SelfReferentialEdge));
      }
      // Only on the final pass (UpdateBlockCount): a block whose count was
      // never established can take the sum of its known edge weights.
      if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
        BlockWeights[EC] = TotalWeight;
        VisitedBlocks.insert(EC);
        Changed = true;
      }
    }
  }

  return Changed;
}
1891 
1892 /// Build in/out edge lists for each basic block in the CFG.
1893 ///
1894 /// We are interested in unique edges. If a block B1 has multiple
1895 /// edges to another block B2, we only add a single B1->B2 edge.
1896 void SampleProfileLoader::buildEdges(Function &F) {
1897   for (auto &BI : F) {
1898     BasicBlock *B1 = &BI;
1899 
1900     // Add predecessors for B1.
1901     SmallPtrSet<BasicBlock *, 16> Visited;
1902     if (!Predecessors[B1].empty())
1903       llvm_unreachable("Found a stale predecessors list in a basic block.");
1904     for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
1905       BasicBlock *B2 = *PI;
1906       if (Visited.insert(B2).second)
1907         Predecessors[B1].push_back(B2);
1908     }
1909 
1910     // Add successors for B1.
1911     Visited.clear();
1912     if (!Successors[B1].empty())
1913       llvm_unreachable("Found a stale successors list in a basic block.");
1914     for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
1915       BasicBlock *B2 = *SI;
1916       if (Visited.insert(B2).second)
1917         Successors[B1].push_back(B2);
1918     }
1919   }
1920 }
1921 
1922 /// Returns the sorted CallTargetMap \p M by count in descending order.
1923 static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
1924     const SampleRecord::CallTargetMap & M) {
1925   SmallVector<InstrProfValueData, 2> R;
1926   for (const auto &I : SampleRecord::SortCallTargets(M)) {
1927     R.emplace_back(InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
1928   }
1929   return R;
1930 }
1931 
/// Propagate weights into edges
///
/// The following rules are applied to every block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
///   of that edge is the weight of the block.
///
/// - If all incoming or outgoing edges are known except one, and the
///   weight of the block is already known, the weight of the unknown
///   edge will be the weight of the block minus the sum of all the known
///   edges. If the sum of all the known edges is larger than BB's weight,
///   we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
///   known, the weight for that edge is set to the weight of the block
///   minus the weight of the other incoming edges to that block (if
///   known).
///
/// After propagation converges (or the iteration budget runs out), MD_prof
/// metadata is attached to call instructions and branch/switch terminators
/// from the computed block and edge weights.
void SampleProfileLoader::propagateWeights(Function &F) {
  bool Changed = true;
  unsigned I = 0;

  // If BB weight is larger than its corresponding loop's header BB weight,
  // use the BB weight to replace the loop header BB weight.
  for (auto &BI : F) {
    BasicBlock *BB = &BI;
    Loop *L = LI->getLoopFor(BB);
    if (!L) {
      continue;
    }
    BasicBlock *Header = L->getHeader();
    if (Header && BlockWeights[BB] > BlockWeights[Header]) {
      BlockWeights[Header] = BlockWeights[BB];
    }
  }

  // Before propagation starts, build, for each block, a list of
  // unique predecessors and successors. This is necessary to handle
  // identical edges in multiway branches. Since we visit all blocks and all
  // edges of the CFG, it is cleaner to build these lists once at the start
  // of the pass.
  buildEdges(F);

  // Propagate until we converge or we go past the iteration limit.
  // Note that all three passes below share the same counter I, so the
  // limit bounds the combined number of iterations.
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The first propagation propagates BB counts from annotated BBs to unknown
  // BBs. The 2nd propagation pass resets edges weights, and use all BB weights
  // to propagate edge weights.
  VisitedEdges.clear();
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The 3rd propagation pass allows adjusting annotated BB weights that are
  // obviously wrong.
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, true);
  }

  // Generate MD_prof metadata for every branch instruction using the
  // edge weights computed during propagation.
  LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
  LLVMContext &Ctx = F.getContext();
  MDBuilder MDB(Ctx);
  for (auto &BI : F) {
    BasicBlock *BB = &BI;

    if (BlockWeights[BB]) {
      for (auto &I : BB->getInstList()) {
        if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
          continue;
        if (!cast<CallBase>(I).getCalledFunction()) {
          // Indirect call: attach a value profile listing the sorted call
          // targets so later passes can perform indirect call promotion.
          const DebugLoc &DLoc = I.getDebugLoc();
          if (!DLoc)
            continue;
          const DILocation *DIL = DLoc;
          const FunctionSamples *FS = findFunctionSamples(I);
          if (!FS)
            continue;
          auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
          auto T = FS->findCallTargetMapAt(CallSite);
          if (!T || T.get().empty())
            continue;
          SmallVector<InstrProfValueData, 2> SortedCallTargets =
              GetSortedValueDataFromCallTargets(T.get());
          uint64_t Sum;
          findIndirectCallFunctionSamples(I, Sum);
          annotateValueSite(*I.getParent()->getParent()->getParent(), I,
                            SortedCallTargets, Sum, IPVK_IndirectCallTarget,
                            SortedCallTargets.size());
        } else if (!isa<IntrinsicInst>(&I)) {
          // Direct (non-intrinsic) call: record the enclosing block's weight
          // as the call's branch weight.
          I.setMetadata(LLVMContext::MD_prof,
                        MDB.createBranchWeights(
                            {static_cast<uint32_t>(BlockWeights[BB])}));
        }
      }
    }
    // Only multi-successor branch/switch terminators get branch weights.
    Instruction *TI = BB->getTerminator();
    if (TI->getNumSuccessors() == 1)
      continue;
    if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
      continue;

    DebugLoc BranchLoc = TI->getDebugLoc();
    LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
                      << ((BranchLoc) ? Twine(BranchLoc.getLine())
                                      : Twine("<UNKNOWN LOCATION>"))
                      << ".\n");
    SmallVector<uint32_t, 4> Weights;
    uint32_t MaxWeight = 0;
    // Only assigned and read when MaxWeight > 0 (see the guard below).
    Instruction *MaxDestInst;
    for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
      BasicBlock *Succ = TI->getSuccessor(I);
      Edge E = std::make_pair(BB, Succ);
      uint64_t Weight = EdgeWeights[E];
      LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
      // Use uint32_t saturated arithmetic to adjust the incoming weights,
      // if needed. Sample counts in profiles are 64-bit unsigned values,
      // but internally branch weights are expressed as 32-bit values.
      if (Weight > std::numeric_limits<uint32_t>::max()) {
        LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
        Weight = std::numeric_limits<uint32_t>::max();
      }
      // Weight is added by one to avoid propagation errors introduced by
      // 0 weights.
      Weights.push_back(static_cast<uint32_t>(Weight + 1));
      if (Weight != 0) {
        if (Weight > MaxWeight) {
          MaxWeight = Weight;
          MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
        }
      }
    }

    uint64_t TempWeight;
    // Only set weights if there is at least one non-zero weight.
    // In any other case, let the analyzer set weights.
    // Do not set weights if the weights are present. In ThinLTO, the profile
    // annotation is done twice. If the first annotation already set the
    // weights, the second pass does not need to set it.
    if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
      LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
      TI->setMetadata(LLVMContext::MD_prof,
                      MDB.createBranchWeights(Weights));
      ORE->emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
               << "most popular destination for conditional branches at "
               << ore::NV("CondBranchesLoc", BranchLoc);
      });
    } else {
      LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
    }
  }
}
2090 
2091 /// Get the line number for the function header.
2092 ///
2093 /// This looks up function \p F in the current compilation unit and
2094 /// retrieves the line number where the function is defined. This is
2095 /// line 0 for all the samples read from the profile file. Every line
2096 /// number is relative to this line.
2097 ///
2098 /// \param F  Function object to query.
2099 ///
2100 /// \returns the line number where \p F is defined. If it returns 0,
2101 ///          it means that there is no debug information available for \p F.
2102 unsigned SampleProfileLoader::getFunctionLoc(Function &F) {
2103   if (DISubprogram *S = F.getSubprogram())
2104     return S->getLine();
2105 
2106   if (NoWarnSampleUnused)
2107     return 0;
2108 
2109   // If the start of \p F is missing, emit a diagnostic to inform the user
2110   // about the missed opportunity.
2111   F.getContext().diagnose(DiagnosticInfoSampleProfile(
2112       "No debug information found in function " + F.getName() +
2113           ": Function profile not used",
2114       DS_Warning));
2115   return 0;
2116 }
2117 
/// (Re)compute the dominator tree, post-dominator tree and loop info for
/// \p F. These analyses are consumed by findEquivalenceClasses and
/// propagateWeights.
void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) {
  DT.reset(new DominatorTree);
  DT->recalculate(F);

  // PostDominatorTree computes itself at construction.
  PDT.reset(new PostDominatorTree(F));

  // LoopInfo is derived from the freshly recomputed dominator tree.
  LI.reset(new LoopInfo);
  LI->analyze(*DT);
}
2127 
2128 /// Generate branch weight metadata for all branches in \p F.
2129 ///
2130 /// Branch weights are computed out of instruction samples using a
2131 /// propagation heuristic. Propagation proceeds in 3 phases:
2132 ///
2133 /// 1- Assignment of block weights. All the basic blocks in the function
///    are initially assigned the same weight as their most frequently
2135 ///    executed instruction.
2136 ///
2137 /// 2- Creation of equivalence classes. Since samples may be missing from
2138 ///    blocks, we can fill in the gaps by setting the weights of all the
2139 ///    blocks in the same equivalence class to the same weight. To compute
2140 ///    the concept of equivalence, we use dominance and loop information.
2141 ///    Two blocks B1 and B2 are in the same equivalence class if B1
2142 ///    dominates B2, B2 post-dominates B1 and both are in the same loop.
2143 ///
2144 /// 3- Propagation of block weights into edges. This uses a simple
2145 ///    propagation heuristic. The following rules are applied to every
2146 ///    block BB in the CFG:
2147 ///
2148 ///    - If BB has a single predecessor/successor, then the weight
2149 ///      of that edge is the weight of the block.
2150 ///
2151 ///    - If all the edges are known except one, and the weight of the
2152 ///      block is already known, the weight of the unknown edge will
2153 ///      be the weight of the block minus the sum of all the known
2154 ///      edges. If the sum of all the known edges is larger than BB's weight,
2155 ///      we set the unknown edge weight to zero.
2156 ///
2157 ///    - If there is a self-referential edge, and the weight of the block is
2158 ///      known, the weight for that edge is set to the weight of the block
2159 ///      minus the weight of the other incoming edges to that block (if
2160 ///      known).
2161 ///
2162 /// Since this propagation is not guaranteed to finalize for every CFG, we
2163 /// only allow it to proceed for a limited number of iterations (controlled
2164 /// by -sample-profile-max-propagate-iterations).
2165 ///
2166 /// FIXME: Try to replace this propagation heuristic with a scheme
2167 /// that is guaranteed to finalize. A work-list approach similar to
2168 /// the standard value propagation algorithm used by SSA-CCP might
2169 /// work here.
2170 ///
2171 /// Once all the branch weights are computed, we emit the MD_prof
2172 /// metadata on BB using the computed values for each of its branches.
2173 ///
2174 /// \param F The function to query.
2175 ///
2176 /// \returns true if \p F was modified. Returns false, otherwise.
2177 bool SampleProfileLoader::emitAnnotations(Function &F) {
2178   bool Changed = false;
2179 
2180   if (FunctionSamples::ProfileIsProbeBased) {
2181     if (!ProbeManager->profileIsValid(F, *Samples)) {
2182       LLVM_DEBUG(
2183           dbgs() << "Profile is invalid due to CFG mismatch for Function "
2184                  << F.getName());
2185       ++NumMismatchedProfile;
2186       return false;
2187     }
2188     ++NumMatchedProfile;
2189   } else {
2190     if (getFunctionLoc(F) == 0)
2191       return false;
2192 
2193     LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
2194                       << F.getName() << ": " << getFunctionLoc(F) << "\n");
2195   }
2196 
2197   DenseSet<GlobalValue::GUID> InlinedGUIDs;
2198   if (ProfileIsCS && CallsitePrioritizedInline)
2199     Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs);
2200   else
2201     Changed |= inlineHotFunctions(F, InlinedGUIDs);
2202 
2203   // Compute basic block weights.
2204   Changed |= computeBlockWeights(F);
2205 
2206   if (Changed) {
2207     // Add an entry count to the function using the samples gathered at the
2208     // function entry.
2209     // Sets the GUIDs that are inlined in the profiled binary. This is used
2210     // for ThinLink to make correct liveness analysis, and also make the IR
2211     // match the profiled binary before annotation.
2212     F.setEntryCount(
2213         ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
2214         &InlinedGUIDs);
2215 
2216     // Compute dominance and loop info needed for propagation.
2217     computeDominanceAndLoopInfo(F);
2218 
2219     // Find equivalence classes.
2220     findEquivalenceClasses(F);
2221 
2222     // Propagate weights to all edges.
2223     propagateWeights(F);
2224   }
2225 
2226   // If coverage checking was requested, compute it now.
2227   if (SampleProfileRecordCoverage) {
2228     unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
2229     unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
2230     unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2231     if (Coverage < SampleProfileRecordCoverage) {
2232       F.getContext().diagnose(DiagnosticInfoSampleProfile(
2233           F.getSubprogram()->getFilename(), getFunctionLoc(F),
2234           Twine(Used) + " of " + Twine(Total) + " available profile records (" +
2235               Twine(Coverage) + "%) were applied",
2236           DS_Warning));
2237     }
2238   }
2239 
2240   if (SampleProfileSampleCoverage) {
2241     uint64_t Used = CoverageTracker.getTotalUsedSamples();
2242     uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
2243     unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2244     if (Coverage < SampleProfileSampleCoverage) {
2245       F.getContext().diagnose(DiagnosticInfoSampleProfile(
2246           F.getSubprogram()->getFilename(), getFunctionLoc(F),
2247           Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
2248               Twine(Coverage) + "%) were applied",
2249           DS_Warning));
2250     }
2251   }
2252   return Changed;
2253 }
2254 
// Unique pass identity for the legacy pass manager.
char SampleProfileLoaderLegacyPass::ID = 0;

// Register the legacy pass and the analyses it depends on (assumption
// cache, TTI, TLI and the profile summary).
INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
                      "Sample Profile loader", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
                    "Sample Profile loader", false, false)
2265 
2266 std::vector<Function *>
2267 SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
2268   std::vector<Function *> FunctionOrderList;
2269   FunctionOrderList.reserve(M.size());
2270 
2271   if (!ProfileTopDownLoad || CG == nullptr) {
2272     if (ProfileMergeInlinee) {
2273       // Disable ProfileMergeInlinee if profile is not loaded in top down order,
2274       // because the profile for a function may be used for the profile
2275       // annotation of its outline copy before the profile merging of its
2276       // non-inlined inline instances, and that is not the way how
2277       // ProfileMergeInlinee is supposed to work.
2278       ProfileMergeInlinee = false;
2279     }
2280 
2281     for (Function &F : M)
2282       if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
2283         FunctionOrderList.push_back(&F);
2284     return FunctionOrderList;
2285   }
2286 
2287   assert(&CG->getModule() == &M);
2288   scc_iterator<CallGraph *> CGI = scc_begin(CG);
2289   while (!CGI.isAtEnd()) {
2290     for (CallGraphNode *node : *CGI) {
2291       auto F = node->getFunction();
2292       if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
2293         FunctionOrderList.push_back(F);
2294     }
2295     ++CGI;
2296   }
2297 
2298   std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
2299   return FunctionOrderList;
2300 }
2301 
// Set up everything needed before per-function annotation: create the
// profile reader, read the profile, populate the profile symbol list,
// optionally create a replay inline advisor, and initialize CSSPGO /
// pseudo-probe support when the profile requires them.
//
// Returns false (after emitting a diagnostic) if the profile cannot be
// opened or read, or if a probe-based profile is used on a module that was
// not instrumented with pseudo probes.
bool SampleProfileLoader::doInitialization(Module &M,
                                           FunctionAnalysisManager *FAM) {
  auto &Ctx = M.getContext();

  auto ReaderOrErr =
      SampleProfileReader::create(Filename, Ctx, RemappingFilename);
  if (std::error_code EC = ReaderOrErr.getError()) {
    std::string Msg = "Could not open profile: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
    return false;
  }
  Reader = std::move(ReaderOrErr.get());
  // Skip flat profiles in the ThinLTO post-link phase. Configure the reader
  // before read(): both the skip flag and the function filter below affect
  // what gets loaded.
  Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink);
  // Restrict profile reading to functions present in this module.
  Reader->collectFuncsFrom(M);
  if (std::error_code EC = Reader->read()) {
    std::string Msg = "profile reading failed: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
    return false;
  }

  PSL = Reader->getProfileSymbolList();

  // While profile-sample-accurate is on, ignore symbol list.
  ProfAccForSymsInList =
      ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
  if (ProfAccForSymsInList) {
    // Cache the profile's name table so runOnFunction can quickly test
    // whether a symbol shows up anywhere in the profile.
    NamesInProfile.clear();
    if (auto NameTable = Reader->getNameTable())
      NamesInProfile.insert(NameTable->begin(), NameTable->end());
  }

  if (FAM && !ProfileInlineReplayFile.empty()) {
    // Replay inline decisions from a remarks file instead of making fresh
    // ones; drop the advisor if its remarks failed to load.
    ExternalInlineAdvisor = std::make_unique<ReplayInlineAdvisor>(
        M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr, ProfileInlineReplayFile,
        /*EmitRemarks=*/false);
    if (!ExternalInlineAdvisor->areReplayRemarksLoaded())
      ExternalInlineAdvisor.reset();
  }

  // Apply tweaks if context-sensitive profile is available.
  if (Reader->profileIsCS()) {
    ProfileIsCS = true;
    FunctionSamples::ProfileIsCS = true;

    // Enable priority-base inliner and size inline by default for CSSPGO.
    // Explicit command-line settings (getNumOccurrences() != 0) win.
    if (!ProfileSizeInline.getNumOccurrences())
      ProfileSizeInline = true;
    if (!CallsitePrioritizedInline.getNumOccurrences())
      CallsitePrioritizedInline = true;

    // Tracker for profiles under different context
    ContextTracker =
        std::make_unique<SampleContextTracker>(Reader->getProfiles());
  }

  // Load pseudo probe descriptors for probe-based function samples.
  if (Reader->profileIsProbeBased()) {
    ProbeManager = std::make_unique<PseudoProbeManager>(M);
    if (!ProbeManager->moduleIsProbed(M)) {
      const char *Msg =
          "Pseudo-probe-based profile requires SampleProfileProbePass";
      Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
      return false;
    }
  }

  return true;
}
2370 
// Factory for the legacy pass, using the default profile file name taken
// from the command line.
ModulePass *llvm::createSampleProfileLoaderPass() {
  return new SampleProfileLoaderLegacyPass();
}
2374 
// Factory for the legacy pass with an explicit profile file name \p Name,
// overriding the command-line default.
ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
  return new SampleProfileLoaderLegacyPass(Name);
}
2378 
2379 bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
2380                                       ProfileSummaryInfo *_PSI, CallGraph *CG) {
2381   GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
2382 
2383   PSI = _PSI;
2384   if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
2385     M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
2386                         ProfileSummary::PSK_Sample);
2387     PSI->refresh();
2388   }
2389   // Compute the total number of samples collected in this profile.
2390   for (const auto &I : Reader->getProfiles())
2391     TotalCollectedSamples += I.second.getTotalSamples();
2392 
2393   auto Remapper = Reader->getRemapper();
2394   // Populate the symbol map.
2395   for (const auto &N_F : M.getValueSymbolTable()) {
2396     StringRef OrigName = N_F.getKey();
2397     Function *F = dyn_cast<Function>(N_F.getValue());
2398     if (F == nullptr)
2399       continue;
2400     SymbolMap[OrigName] = F;
2401     auto pos = OrigName.find('.');
2402     if (pos != StringRef::npos) {
2403       StringRef NewName = OrigName.substr(0, pos);
2404       auto r = SymbolMap.insert(std::make_pair(NewName, F));
2405       // Failiing to insert means there is already an entry in SymbolMap,
2406       // thus there are multiple functions that are mapped to the same
2407       // stripped name. In this case of name conflicting, set the value
2408       // to nullptr to avoid confusion.
2409       if (!r.second)
2410         r.first->second = nullptr;
2411       OrigName = NewName;
2412     }
2413     // Insert the remapped names into SymbolMap.
2414     if (Remapper) {
2415       if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
2416         if (*MapName == OrigName)
2417           continue;
2418         SymbolMap.insert(std::make_pair(*MapName, F));
2419       }
2420     }
2421   }
2422 
2423   bool retval = false;
2424   for (auto F : buildFunctionOrder(M, CG)) {
2425     assert(!F->isDeclaration());
2426     clearFunctionData();
2427     retval |= runOnFunction(*F, AM);
2428   }
2429 
2430   // Account for cold calls not inlined....
2431   if (!ProfileIsCS)
2432     for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
2433          notInlinedCallInfo)
2434       updateProfileCallee(pair.first, pair.second.entryCount);
2435 
2436   return retval;
2437 }
2438 
2439 bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
2440   ACT = &getAnalysis<AssumptionCacheTracker>();
2441   TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
2442   TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
2443   ProfileSummaryInfo *PSI =
2444       &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2445   return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
2446 }
2447 
2448 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
2449   DILocation2SampleMap.clear();
2450   // By default the entry count is initialized to -1, which will be treated
2451   // conservatively by getEntryCount as the same as unknown (None). This is
2452   // to avoid newly added code to be treated as cold. If we have samples
2453   // this will be overwritten in emitAnnotations.
2454   uint64_t initialEntryCount = -1;
2455 
2456   ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
2457   if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
2458     // initialize all the function entry counts to 0. It means all the
2459     // functions without profile will be regarded as cold.
2460     initialEntryCount = 0;
2461     // profile-sample-accurate is a user assertion which has a higher precedence
2462     // than symbol list. When profile-sample-accurate is on, ignore symbol list.
2463     ProfAccForSymsInList = false;
2464   }
2465 
2466   // PSL -- profile symbol list include all the symbols in sampled binary.
2467   // If ProfileAccurateForSymsInList is enabled, PSL is used to treat
2468   // old functions without samples being cold, without having to worry
2469   // about new and hot functions being mistakenly treated as cold.
2470   if (ProfAccForSymsInList) {
2471     // Initialize the entry count to 0 for functions in the list.
2472     if (PSL->contains(F.getName()))
2473       initialEntryCount = 0;
2474 
2475     // Function in the symbol list but without sample will be regarded as
2476     // cold. To minimize the potential negative performance impact it could
2477     // have, we want to be a little conservative here saying if a function
2478     // shows up in the profile, no matter as outline function, inline instance
2479     // or call targets, treat the function as not being cold. This will handle
2480     // the cases such as most callsites of a function are inlined in sampled
2481     // binary but not inlined in current build (because of source code drift,
2482     // imprecise debug information, or the callsites are all cold individually
2483     // but not cold accumulatively...), so the outline function showing up as
2484     // cold in sampled binary will actually not be cold after current build.
2485     StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
2486     if (NamesInProfile.count(CanonName))
2487       initialEntryCount = -1;
2488   }
2489 
2490   // Initialize entry count when the function has no existing entry
2491   // count value.
2492   if (!F.getEntryCount().hasValue())
2493     F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
2494   std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
2495   if (AM) {
2496     auto &FAM =
2497         AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
2498             .getManager();
2499     ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2500   } else {
2501     OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2502     ORE = OwnedORE.get();
2503   }
2504 
2505   if (ProfileIsCS)
2506     Samples = ContextTracker->getBaseSamplesFor(F);
2507   else
2508     Samples = Reader->getSamplesFor(F);
2509 
2510   if (Samples && !Samples->empty())
2511     return emitAnnotations(F);
2512   return false;
2513 }
2514 
2515 PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
2516                                                ModuleAnalysisManager &AM) {
2517   FunctionAnalysisManager &FAM =
2518       AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2519 
2520   auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
2521     return FAM.getResult<AssumptionAnalysis>(F);
2522   };
2523   auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
2524     return FAM.getResult<TargetIRAnalysis>(F);
2525   };
2526   auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
2527     return FAM.getResult<TargetLibraryAnalysis>(F);
2528   };
2529 
2530   SampleProfileLoader SampleLoader(
2531       ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
2532       ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
2533                                        : ProfileRemappingFileName,
2534       LTOPhase, GetAssumptionCache, GetTTI, GetTLI);
2535 
2536   if (!SampleLoader.doInitialization(M, &FAM))
2537     return PreservedAnalyses::all();
2538 
2539   ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
2540   CallGraph &CG = AM.getResult<CallGraphAnalysis>(M);
2541   if (!SampleLoader.runOnModule(M, &AM, PSI, &CG))
2542     return PreservedAnalyses::all();
2543 
2544   return PreservedAnalyses::none();
2545 }
2546