1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SampleProfileLoader transformation. This pass
// reads a profile file generated by a sampling profiler (e.g. Linux Perf -
// http://perf.wiki.kernel.org/) and generates IR metadata reflecting the
// profile information found in that file.
13 //
14 // This pass generates branch weight annotations on the IR:
15 //
16 // - prof: Represents branch weights. This annotation is added to branches
//      to indicate the weight of each edge coming out of the branch.
18 //      The weight of each edge is the weight of the target block for
19 //      that edge. The weight of a block B is computed as the maximum
20 //      number of samples found in B.
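//
//      For example (illustrative), a conditional branch annotated by this
//      pass might look like:
//
//        br i1 %cmp, label %if.then, label %if.else, !prof !1
//        !1 = !{!"branch_weights", i32 80, i32 20}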
21 //
22 //===----------------------------------------------------------------------===//
23 
24 #include "llvm/Transforms/IPO/SampleProfile.h"
25 #include "llvm/ADT/ArrayRef.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/DenseSet.h"
28 #include "llvm/ADT/None.h"
29 #include "llvm/ADT/PriorityQueue.h"
30 #include "llvm/ADT/SCCIterator.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
37 #include "llvm/ADT/Twine.h"
38 #include "llvm/Analysis/AssumptionCache.h"
39 #include "llvm/Analysis/CallGraph.h"
40 #include "llvm/Analysis/CallGraphSCCPass.h"
41 #include "llvm/Analysis/InlineAdvisor.h"
42 #include "llvm/Analysis/InlineCost.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
45 #include "llvm/Analysis/PostDominators.h"
46 #include "llvm/Analysis/ProfileSummaryInfo.h"
47 #include "llvm/Analysis/ReplayInlineAdvisor.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/Analysis/TargetTransformInfo.h"
50 #include "llvm/IR/BasicBlock.h"
51 #include "llvm/IR/CFG.h"
52 #include "llvm/IR/DebugInfoMetadata.h"
53 #include "llvm/IR/DebugLoc.h"
54 #include "llvm/IR/DiagnosticInfo.h"
55 #include "llvm/IR/Dominators.h"
56 #include "llvm/IR/Function.h"
57 #include "llvm/IR/GlobalValue.h"
58 #include "llvm/IR/InstrTypes.h"
59 #include "llvm/IR/Instruction.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/IR/MDBuilder.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/PassManager.h"
66 #include "llvm/IR/ValueSymbolTable.h"
67 #include "llvm/InitializePasses.h"
68 #include "llvm/Pass.h"
69 #include "llvm/ProfileData/InstrProf.h"
70 #include "llvm/ProfileData/SampleProf.h"
71 #include "llvm/ProfileData/SampleProfReader.h"
72 #include "llvm/Support/Casting.h"
73 #include "llvm/Support/CommandLine.h"
74 #include "llvm/Support/Debug.h"
75 #include "llvm/Support/ErrorHandling.h"
76 #include "llvm/Support/ErrorOr.h"
77 #include "llvm/Support/GenericDomTree.h"
78 #include "llvm/Support/raw_ostream.h"
79 #include "llvm/Transforms/IPO.h"
80 #include "llvm/Transforms/IPO/SampleContextTracker.h"
81 #include "llvm/Transforms/IPO/SampleProfileProbe.h"
82 #include "llvm/Transforms/Instrumentation.h"
83 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
84 #include "llvm/Transforms/Utils/Cloning.h"
85 #include <algorithm>
86 #include <cassert>
87 #include <cstdint>
88 #include <functional>
89 #include <limits>
90 #include <map>
91 #include <memory>
92 #include <queue>
93 #include <string>
94 #include <system_error>
95 #include <utility>
96 #include <vector>
97 
98 using namespace llvm;
99 using namespace sampleprof;
100 using ProfileCount = Function::ProfileCount;
101 #define DEBUG_TYPE "sample-profile"
102 #define CSINLINE_DEBUG DEBUG_TYPE "-inline"
103 
104 STATISTIC(NumCSInlined,
105           "Number of functions inlined with context sensitive profile");
106 STATISTIC(NumCSNotInlined,
107           "Number of functions not inlined with context sensitive profile");
108 STATISTIC(NumMismatchedProfile,
109           "Number of functions with CFG mismatched profile");
110 STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile");
111 STATISTIC(NumDuplicatedInlinesite,
112           "Number of inlined callsites with a partial distribution factor");
113 
114 STATISTIC(NumCSInlinedHitMinLimit,
115           "Number of functions with FDO inline stopped due to min size limit");
116 STATISTIC(NumCSInlinedHitMaxLimit,
117           "Number of functions with FDO inline stopped due to max size limit");
118 STATISTIC(
119     NumCSInlinedHitGrowthLimit,
120     "Number of functions with FDO inline stopped due to growth size limit");
121 
122 // Command line option to specify the file to read samples from. This is
123 // mainly used for debugging.
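// A typical invocation (illustrative) is:
//   opt -passes=sample-profile -sample-profile-file=perf.prof foo.bc -o foo.opt.bc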
124 static cl::opt<std::string> SampleProfileFile(
125     "sample-profile-file", cl::init(""), cl::value_desc("filename"),
126     cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);
127 
128 // The named file contains a set of transformations that may have been applied
129 // to the symbol names between the program from which the sample data was
130 // collected and the current program's symbols.
131 static cl::opt<std::string> SampleProfileRemappingFile(
132     "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
133     cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);
134 
135 static cl::opt<unsigned> SampleProfileMaxPropagateIterations(
136     "sample-profile-max-propagate-iterations", cl::init(100),
137     cl::desc("Maximum number of iterations to go through when propagating "
138              "sample block/edge weights through the CFG."));
139 
140 static cl::opt<unsigned> SampleProfileRecordCoverage(
141     "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"),
142     cl::desc("Emit a warning if less than N% of records in the input profile "
143              "are matched to the IR."));
144 
145 static cl::opt<unsigned> SampleProfileSampleCoverage(
146     "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"),
147     cl::desc("Emit a warning if less than N% of samples in the input profile "
148              "are matched to the IR."));
149 
150 static cl::opt<bool> NoWarnSampleUnused(
151     "no-warn-sample-unused", cl::init(false), cl::Hidden,
    cl::desc("Use this option to turn off/on warnings about functions that "
             "have samples but lack the debug information needed to use "
             "those samples."));
154 
155 static cl::opt<bool> ProfileSampleAccurate(
156     "profile-sample-accurate", cl::Hidden, cl::init(false),
157     cl::desc("If the sample profile is accurate, we will mark all un-sampled "
             "callsites and functions as having 0 samples. Otherwise, treat "
159              "un-sampled callsites and functions conservatively as unknown. "));
160 
161 static cl::opt<bool> ProfileAccurateForSymsInList(
162     "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore,
163     cl::init(true),
    cl::desc("For symbols in the profile symbol list, regard their profiles "
             "as accurate. This may be overridden by profile-sample-accurate."));
166 
167 static cl::opt<bool> ProfileMergeInlinee(
168     "sample-profile-merge-inlinee", cl::Hidden, cl::init(true),
    cl::desc("Merge past inlinee's profile into the outline version if the "
             "sample profile loader decides not to inline a call site. This "
             "only takes effect when top-down order of profile loading is "
             "enabled."));
173 
174 static cl::opt<bool> ProfileTopDownLoad(
175     "sample-profile-top-down-load", cl::Hidden, cl::init(true),
    cl::desc("Do profile annotation and inlining for functions in top-down "
             "order of the call graph during sample profile loading. It only "
             "works with the new pass manager."));
179 
180 static cl::opt<bool> UseProfileIndirectCallEdges(
181     "use-profile-indirect-call-edges", cl::init(true), cl::Hidden,
    cl::desc("Consider indirect call samples from the profile when processing "
             "functions top-down. Only CSSPGO is supported."));
184 
185 static cl::opt<bool> UseProfileTopDownOrder(
186     "use-profile-top-down-order", cl::init(false), cl::Hidden,
187     cl::desc("Process functions in one SCC in a top-down order "
188              "based on the input profile."));
189 
190 static cl::opt<bool> ProfileSizeInline(
191     "sample-profile-inline-size", cl::Hidden, cl::init(false),
192     cl::desc("Inline cold call sites in profile loader if it's beneficial "
193              "for code size."));
194 
195 static cl::opt<int> ProfileInlineGrowthLimit(
196     "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12),
    cl::desc("The size growth ratio limit for priority-based sample profile "
198              "loader inlining."));
199 
200 static cl::opt<int> ProfileInlineLimitMin(
201     "sample-profile-inline-limit-min", cl::Hidden, cl::init(100),
202     cl::desc("The lower bound of size growth limit for "
             "priority-based sample profile loader inlining."));
204 
205 static cl::opt<int> ProfileInlineLimitMax(
206     "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000),
207     cl::desc("The upper bound of size growth limit for "
             "priority-based sample profile loader inlining."));
209 
210 static cl::opt<int> ProfileICPThreshold(
211     "sample-profile-icp-threshold", cl::Hidden, cl::init(5),
212     cl::desc(
213         "Relative hotness threshold for indirect "
        "call promotion in priority-based sample profile loader inlining."));
215 
216 static cl::opt<int> SampleHotCallSiteThreshold(
217     "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000),
    cl::desc("Hot callsite threshold for priority-based sample profile loader "
219              "inlining."));
220 
221 static cl::opt<bool> CallsitePrioritizedInline(
222     "sample-profile-prioritized-inline", cl::Hidden, cl::ZeroOrMore,
223     cl::init(false),
    cl::desc("Use call site prioritized inlining for sample profile loader. "
             "Currently only CSSPGO is supported."));
226 
227 static cl::opt<int> SampleColdCallSiteThreshold(
228     "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45),
229     cl::desc("Threshold for inlining cold callsites"));
230 
231 static cl::opt<std::string> ProfileInlineReplayFile(
232     "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"),
233     cl::desc(
234         "Optimization remarks file containing inline remarks to be replayed "
235         "by inlining from sample profile loader."),
236     cl::Hidden);
237 
238 namespace {
239 
240 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>;
241 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>;
242 using Edge = std::pair<const BasicBlock *, const BasicBlock *>;
243 using EdgeWeightMap = DenseMap<Edge, uint64_t>;
244 using BlockEdgeMap =
245     DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>;
246 
247 class SampleCoverageTracker {
248 public:
249   bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset,
250                        uint32_t Discriminator, uint64_t Samples);
251   unsigned computeCoverage(unsigned Used, unsigned Total) const;
252   unsigned countUsedRecords(const FunctionSamples *FS,
253                             ProfileSummaryInfo *PSI) const;
254   unsigned countBodyRecords(const FunctionSamples *FS,
255                             ProfileSummaryInfo *PSI) const;
256   uint64_t getTotalUsedSamples() const { return TotalUsedSamples; }
257   uint64_t countBodySamples(const FunctionSamples *FS,
258                             ProfileSummaryInfo *PSI) const;
259 
260   void clear() {
261     SampleCoverage.clear();
262     TotalUsedSamples = 0;
263   }
264   inline void setProfAccForSymsInList(bool V) { ProfAccForSymsInList = V; }
265 
266 private:
267   using BodySampleCoverageMap = std::map<LineLocation, unsigned>;
268   using FunctionSamplesCoverageMap =
269       DenseMap<const FunctionSamples *, BodySampleCoverageMap>;
270 
271   /// Coverage map for sampling records.
272   ///
273   /// This map keeps a record of sampling records that have been matched to
274   /// an IR instruction. This is used to detect some form of staleness in
275   /// profiles (see flag -sample-profile-check-coverage).
276   ///
277   /// Each entry in the map corresponds to a FunctionSamples instance.  This is
278   /// another map that counts how many times the sample record at the
279   /// given location has been used.
280   FunctionSamplesCoverageMap SampleCoverage;
281 
282   /// Number of samples used from the profile.
283   ///
284   /// When a sampling record is used for the first time, the samples from
285   /// that record are added to this accumulator.  Coverage is later computed
286   /// based on the total number of samples available in this function and
287   /// its callsites.
288   ///
289   /// Note that this accumulator tracks samples used from a single function
290   /// and all the inlined callsites. Strictly, we should have a map of counters
291   /// keyed by FunctionSamples pointers, but these stats are cleared after
292   /// every function, so we just need to keep a single counter.
293   uint64_t TotalUsedSamples = 0;
294 
  // For symbols in the profile symbol list, whether to regard their profiles
  // as accurate. This is passed from the SampleLoader instance.
297   bool ProfAccForSymsInList = false;
298 };
299 
300 class GUIDToFuncNameMapper {
301 public:
  GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
                       DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
      : CurrentReader(Reader), CurrentModule(M),
        CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
306     if (!CurrentReader.useMD5())
307       return;
308 
309     for (const auto &F : CurrentModule) {
310       StringRef OrigName = F.getName();
311       CurrentGUIDToFuncNameMap.insert(
312           {Function::getGUID(OrigName), OrigName});
313 
      // Local to global var promotion used by optimizations like ThinLTO
      // will rename the var and add a suffix like ".llvm.xxx" to the
      // original local name. In the sample profile, the suffixes of function
      // names are all stripped. Since it is possible that the mapper is
      // built in the post-thin-link phase and var promotion has been done,
      // we need to add the substring of the function name without the
      // suffix into the GUIDToFuncNameMap.
321       StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
322       if (CanonName != OrigName)
323         CurrentGUIDToFuncNameMap.insert(
324             {Function::getGUID(CanonName), CanonName});
325     }
326 
327     // Update GUIDToFuncNameMap for each function including inlinees.
328     SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
329   }
330 
331   ~GUIDToFuncNameMapper() {
332     if (!CurrentReader.useMD5())
333       return;
334 
335     CurrentGUIDToFuncNameMap.clear();
336 
    // Reset GUIDToFuncNameMap for each function as the mappings are no
    // longer valid at this point.
339     SetGUIDToFuncNameMapForAll(nullptr);
340   }
341 
342 private:
343   void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
344     std::queue<FunctionSamples *> FSToUpdate;
345     for (auto &IFS : CurrentReader.getProfiles()) {
346       FSToUpdate.push(&IFS.second);
347     }
348 
349     while (!FSToUpdate.empty()) {
350       FunctionSamples *FS = FSToUpdate.front();
351       FSToUpdate.pop();
352       FS->GUIDToFuncNameMap = Map;
353       for (const auto &ICS : FS->getCallsiteSamples()) {
354         const FunctionSamplesMap &FSMap = ICS.second;
355         for (auto &IFS : FSMap) {
356           FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
357           FSToUpdate.push(&FS);
358         }
359       }
360     }
361   }
362 
363   SampleProfileReader &CurrentReader;
364   Module &CurrentModule;
365   DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
366 };
367 
368 // Inline candidate used by iterative callsite prioritized inliner
369 struct InlineCandidate {
370   CallBase *CallInstr;
371   const FunctionSamples *CalleeSamples;
372   // Prorated callsite count, which will be used to guide inlining. For example,
373   // if a callsite is duplicated in LTO prelink, then in LTO postlink the two
374   // copies will get their own distribution factors and their prorated counts
375   // will be used to decide if they should be inlined independently.
376   uint64_t CallsiteCount;
377   // Call site distribution factor to prorate the profile samples for a
378   // duplicated callsite. Default value is 1.0.
379   float CallsiteDistribution;
380 };
381 
382 // Inline candidate comparer using call site weight
383 struct CandidateComparer {
384   bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) {
385     if (LHS.CallsiteCount != RHS.CallsiteCount)
386       return LHS.CallsiteCount < RHS.CallsiteCount;
387 
388     // Tie breaker using GUID so we have stable/deterministic inlining order
389     assert(LHS.CalleeSamples && RHS.CalleeSamples &&
390            "Expect non-null FunctionSamples");
391     return LHS.CalleeSamples->getGUID(LHS.CalleeSamples->getName()) <
392            RHS.CalleeSamples->getGUID(RHS.CalleeSamples->getName());
393   }
394 };
395 
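// Max-heap of inline candidates: the candidate with the largest prorated
// callsite count is popped first, with ties broken by callee GUID so the
// inlining order is deterministic.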
396 using CandidateQueue =
397     PriorityQueue<InlineCandidate, std::vector<InlineCandidate>,
398                   CandidateComparer>;
399 
400 class SampleProfileLoaderBaseImpl {
401 public:
402   SampleProfileLoaderBaseImpl(std::string Name) : Filename(Name) {}
403   void dump() { Reader->dump(); }
404 
405 protected:
406   friend class SampleCoverageTracker;
407 
408   ~SampleProfileLoaderBaseImpl() = default;
409 
410   unsigned getFunctionLoc(Function &F);
411   virtual ErrorOr<uint64_t> getInstWeight(const Instruction &Inst);
412   ErrorOr<uint64_t> getInstWeightImpl(const Instruction &Inst);
413   ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB);
414   mutable DenseMap<const DILocation *, const FunctionSamples *>
415       DILocation2SampleMap;
416   virtual const FunctionSamples *
417   findFunctionSamples(const Instruction &I) const;
418   void printEdgeWeight(raw_ostream &OS, Edge E);
419   void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const;
420   void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB);
421   bool computeBlockWeights(Function &F);
422   void findEquivalenceClasses(Function &F);
423   template <bool IsPostDom>
424   void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
425                            DominatorTreeBase<BasicBlock, IsPostDom> *DomTree);
426 
427   void propagateWeights(Function &F);
428   uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
429   void buildEdges(Function &F);
430   bool propagateThroughEdges(Function &F, bool UpdateBlockCount);
431   void clearFunctionData();
432   void computeDominanceAndLoopInfo(Function &F);
433   bool
434   computeAndPropagateWeights(Function &F,
435                              const DenseSet<GlobalValue::GUID> &InlinedGUIDs);
436   void emitCoverageRemarks(Function &F);
437 
438   /// Map basic blocks to their computed weights.
439   ///
440   /// The weight of a basic block is defined to be the maximum
441   /// of all the instruction weights in that block.
442   BlockWeightMap BlockWeights;
443 
444   /// Map edges to their computed weights.
445   ///
446   /// Edge weights are computed by propagating basic block weights in
447   /// SampleProfile::propagateWeights.
448   EdgeWeightMap EdgeWeights;
449 
450   /// Set of visited blocks during propagation.
451   SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;
452 
453   /// Set of visited edges during propagation.
454   SmallSet<Edge, 32> VisitedEdges;
455 
456   /// Equivalence classes for block weights.
457   ///
458   /// Two blocks BB1 and BB2 are in the same equivalence class if they
459   /// dominate and post-dominate each other, and they are in the same loop
460   /// nest. When this happens, the two blocks are guaranteed to execute
461   /// the same number of times.
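  ///
  /// For example, in a simple if-then-else diamond, the entry block and the
  /// join block dominate and post-dominate each other and typically end up
  /// in the same equivalence class.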
462   EquivalenceClassMap EquivalenceClass;
463 
464   /// Dominance, post-dominance and loop information.
465   std::unique_ptr<DominatorTree> DT;
466   std::unique_ptr<PostDominatorTree> PDT;
467   std::unique_ptr<LoopInfo> LI;
468 
469   /// Predecessors for each basic block in the CFG.
470   BlockEdgeMap Predecessors;
471 
472   /// Successors for each basic block in the CFG.
473   BlockEdgeMap Successors;
474 
475   SampleCoverageTracker CoverageTracker;
476 
477   /// Profile reader object.
478   std::unique_ptr<SampleProfileReader> Reader;
479 
480   /// Samples collected for the body of this function.
481   FunctionSamples *Samples = nullptr;
482 
483   /// Name of the profile file to load.
484   std::string Filename;
485 
486   /// Profile Summary Info computed from sample profile.
487   ProfileSummaryInfo *PSI = nullptr;
488 
489   /// Optimization Remark Emitter used to emit diagnostic remarks.
490   OptimizationRemarkEmitter *ORE = nullptr;
491 };
492 
493 /// Sample profile pass.
494 ///
495 /// This pass reads profile data from the file specified by
496 /// -sample-profile-file and annotates every affected function with the
497 /// profile information found in that file.
498 class SampleProfileLoader final : public SampleProfileLoaderBaseImpl {
499 public:
500   SampleProfileLoader(
501       StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase,
502       std::function<AssumptionCache &(Function &)> GetAssumptionCache,
503       std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
504       std::function<const TargetLibraryInfo &(Function &)> GetTLI)
505       : SampleProfileLoaderBaseImpl(std::string(Name)),
506         GetAC(std::move(GetAssumptionCache)),
507         GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
508         RemappingFilename(std::string(RemapName)), LTOPhase(LTOPhase) {}
509 
510   bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
511   bool runOnModule(Module &M, ModuleAnalysisManager *AM,
512                    ProfileSummaryInfo *_PSI, CallGraph *CG);
513 
514 protected:
515   bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
516   bool emitAnnotations(Function &F);
517   ErrorOr<uint64_t> getInstWeight(const Instruction &I) override;
518   ErrorOr<uint64_t> getProbeWeight(const Instruction &I);
519   const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
520   const FunctionSamples *
521   findFunctionSamples(const Instruction &I) const override;
522   std::vector<const FunctionSamples *>
523   findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
524   // Attempt to promote indirect call and also inline the promoted call
525   bool tryPromoteAndInlineCandidate(
526       Function &F, InlineCandidate &Candidate, uint64_t SumOrigin,
527       uint64_t &Sum, DenseSet<Instruction *> &PromotedInsns,
528       SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
529   bool inlineHotFunctions(Function &F,
530                           DenseSet<GlobalValue::GUID> &InlinedGUIDs);
531   InlineCost shouldInlineCandidate(InlineCandidate &Candidate);
532   bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB);
533   bool
534   tryInlineCandidate(InlineCandidate &Candidate,
535                      SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
536   bool
537   inlineHotFunctionsWithPriority(Function &F,
538                                  DenseSet<GlobalValue::GUID> &InlinedGUIDs);
539   // Inline cold/small functions in addition to hot ones
540   bool shouldInlineColdCallee(CallBase &CallInst);
541   void emitOptimizationRemarksForInlineCandidates(
542       const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
543       bool Hot);
544   std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
545   void addCallGraphEdges(CallGraph &CG, const FunctionSamples &Samples);
546   void replaceCallGraphEdges(CallGraph &CG, StringMap<Function *> &SymbolMap);
547   void generateMDProfMetadata(Function &F);
548 
549   /// Map from function name to Function *. Used to find the function from
  /// the function name. If the function name contains a suffix, an additional
  /// entry is added to map from the stripped name to the function if there
  /// is a one-to-one mapping.
553   StringMap<Function *> SymbolMap;
554 
555   std::function<AssumptionCache &(Function &)> GetAC;
556   std::function<TargetTransformInfo &(Function &)> GetTTI;
557   std::function<const TargetLibraryInfo &(Function &)> GetTLI;
558 
559   /// Profile tracker for different context.
560   std::unique_ptr<SampleContextTracker> ContextTracker;
561 
562   /// Name of the profile remapping file to load.
563   std::string RemappingFilename;
564 
565   /// Flag indicating whether the profile input loaded successfully.
566   bool ProfileIsValid = false;
567 
568   /// Flag indicating whether input profile is context-sensitive
569   bool ProfileIsCS = false;
570 
571   /// Flag indicating which LTO/ThinLTO phase the pass is invoked in.
572   ///
  /// We need to know the LTO phase because, for example, in the ThinLTO
  /// pre-link phase we should not promote indirect calls during annotation.
  /// Instead, we mark the GUIDs that need to be annotated on the function.
576   ThinOrFullLTOPhase LTOPhase;
577 
  /// Profile symbol list tells whether a function name appears in the binary
579   /// used to generate the current profile.
580   std::unique_ptr<ProfileSymbolList> PSL;
581 
582   /// Total number of samples collected in this profile.
583   ///
  /// This is the sum of all the samples collected in all the functions
  /// executed at runtime.
586   uint64_t TotalCollectedSamples = 0;
587 
588   // Information recorded when we declined to inline a call site
589   // because we have determined it is too cold is accumulated for
590   // each callee function. Initially this is just the entry count.
591   struct NotInlinedProfileInfo {
592     uint64_t entryCount;
593   };
594   DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;
595 
  // GUIDToFuncNameMap saves the mapping from GUID to symbol name for
  // all the function symbols defined or declared in the current module.
598   DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;
599 
  // All the names used in FunctionSamples, including outline function
  // names, inline instance names and call target names.
602   StringSet<> NamesInProfile;
603 
  // For symbols in the profile symbol list, whether to regard their profiles
  // as accurate. This is mainly decided by the existence of the profile
  // symbol list and the -profile-accurate-for-symsinlist flag, but it can
  // be overridden by -profile-sample-accurate or the profile-sample-accurate
  // attribute.
609   bool ProfAccForSymsInList;
610 
611   // External inline advisor used to replay inline decision from remarks.
612   std::unique_ptr<ReplayInlineAdvisor> ExternalInlineAdvisor;
613 
614   // A pseudo probe helper to correlate the imported sample counts.
615   std::unique_ptr<PseudoProbeManager> ProbeManager;
616 };
617 
618 class SampleProfileLoaderLegacyPass : public ModulePass {
619 public:
620   // Class identification, replacement for typeinfo
621   static char ID;
622 
623   SampleProfileLoaderLegacyPass(
624       StringRef Name = SampleProfileFile,
625       ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
626       : ModulePass(ID), SampleLoader(
627                             Name, SampleProfileRemappingFile, LTOPhase,
628                             [&](Function &F) -> AssumptionCache & {
629                               return ACT->getAssumptionCache(F);
630                             },
631                             [&](Function &F) -> TargetTransformInfo & {
632                               return TTIWP->getTTI(F);
633                             },
634                             [&](Function &F) -> TargetLibraryInfo & {
635                               return TLIWP->getTLI(F);
636                             }) {
637     initializeSampleProfileLoaderLegacyPassPass(
638         *PassRegistry::getPassRegistry());
639   }
640 
641   void dump() { SampleLoader.dump(); }
642 
643   bool doInitialization(Module &M) override {
644     return SampleLoader.doInitialization(M);
645   }
646 
647   StringRef getPassName() const override { return "Sample profile pass"; }
648   bool runOnModule(Module &M) override;
649 
650   void getAnalysisUsage(AnalysisUsage &AU) const override {
651     AU.addRequired<AssumptionCacheTracker>();
652     AU.addRequired<TargetTransformInfoWrapperPass>();
653     AU.addRequired<TargetLibraryInfoWrapperPass>();
654     AU.addRequired<ProfileSummaryInfoWrapperPass>();
655   }
656 
657 private:
658   SampleProfileLoader SampleLoader;
659   AssumptionCacheTracker *ACT = nullptr;
660   TargetTransformInfoWrapperPass *TTIWP = nullptr;
661   TargetLibraryInfoWrapperPass *TLIWP = nullptr;
662 };
663 
664 } // end anonymous namespace
665 
/// Return true if the given callsite is hot with respect to the hot cutoff
/// threshold.
667 ///
668 /// Functions that were inlined in the original binary will be represented
669 /// in the inline stack in the sample profile. If the profile shows that
670 /// the original inline decision was "good" (i.e., the callsite is executed
671 /// frequently), then we will recreate the inline decision and apply the
672 /// profile from the inlined callsite.
673 ///
/// To decide whether an inlined callsite is hot, we compare the callsite
/// sample count with the hot cutoff computed by ProfileSummaryInfo; the
/// callsite is regarded as hot if its count is above the cutoff value.
677 ///
/// When ProfileAccurateForSymsInList is enabled and a profile symbol list
/// is present, functions that are in the profile symbol list but have no
/// profile are regarded as cold, so much less inlining happens in the CGSCC
/// inlining pass. We therefore lower the hot criteria here to allow more
/// early inlining for warm callsites, which helps performance.
683 static bool callsiteIsHot(const FunctionSamples *CallsiteFS,
684                           ProfileSummaryInfo *PSI, bool ProfAccForSymsInList) {
685   if (!CallsiteFS)
686     return false; // The callsite was not inlined in the original binary.
687 
688   assert(PSI && "PSI is expected to be non null");
689   uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
690   if (ProfAccForSymsInList)
691     return !PSI->isColdCount(CallsiteTotalSamples);
692   else
693     return PSI->isHotCount(CallsiteTotalSamples);
694 }
695 
696 /// Mark as used the sample record for the given function samples at
697 /// (LineOffset, Discriminator).
698 ///
699 /// \returns true if this is the first time we mark the given record.
700 bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
701                                             uint32_t LineOffset,
702                                             uint32_t Discriminator,
703                                             uint64_t Samples) {
704   LineLocation Loc(LineOffset, Discriminator);
705   unsigned &Count = SampleCoverage[FS][Loc];
706   bool FirstTime = (++Count == 1);
707   if (FirstTime)
708     TotalUsedSamples += Samples;
709   return FirstTime;
710 }
711 
712 /// Return the number of sample records that were applied from this profile.
713 ///
714 /// This count does not include records from cold inlined callsites.
715 unsigned
716 SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS,
717                                         ProfileSummaryInfo *PSI) const {
718   auto I = SampleCoverage.find(FS);
719 
720   // The size of the coverage map for FS represents the number of records
721   // that were marked used at least once.
722   unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0;
723 
  // If there are inlined callsites in this function, count the records found
  // in the respective callee bodies. However, do not bother counting callees
  // with 0 total samples; these are callees that were never invoked at
  // runtime.
727   for (const auto &I : FS->getCallsiteSamples())
728     for (const auto &J : I.second) {
729       const FunctionSamples *CalleeSamples = &J.second;
730       if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
731         Count += countUsedRecords(CalleeSamples, PSI);
732     }
733 
734   return Count;
735 }
736 
737 /// Return the number of sample records in the body of this profile.
738 ///
739 /// This count does not include records from cold inlined callsites.
740 unsigned
741 SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS,
742                                         ProfileSummaryInfo *PSI) const {
743   unsigned Count = FS->getBodySamples().size();
744 
745   // Only count records in hot callsites.
746   for (const auto &I : FS->getCallsiteSamples())
747     for (const auto &J : I.second) {
748       const FunctionSamples *CalleeSamples = &J.second;
749       if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
750         Count += countBodyRecords(CalleeSamples, PSI);
751     }
752 
753   return Count;
754 }
755 
756 /// Return the number of samples collected in the body of this profile.
757 ///
758 /// This count does not include samples from cold inlined callsites.
759 uint64_t
760 SampleCoverageTracker::countBodySamples(const FunctionSamples *FS,
761                                         ProfileSummaryInfo *PSI) const {
762   uint64_t Total = 0;
763   for (const auto &I : FS->getBodySamples())
764     Total += I.second.getSamples();
765 
766   // Only count samples in hot callsites.
767   for (const auto &I : FS->getCallsiteSamples())
768     for (const auto &J : I.second) {
769       const FunctionSamples *CalleeSamples = &J.second;
770       if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
771         Total += countBodySamples(CalleeSamples, PSI);
772     }
773 
774   return Total;
775 }
776 
777 /// Return the fraction of sample records used in this profile.
778 ///
779 /// The returned value is an unsigned integer in the range 0-100 indicating
780 /// the percentage of sample records that were used while applying this
781 /// profile to the associated function.
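///
/// For example, if 45 of 60 records were used, the reported coverage is
/// 45 * 100 / 60 = 75.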
782 unsigned SampleCoverageTracker::computeCoverage(unsigned Used,
783                                                 unsigned Total) const {
784   assert(Used <= Total &&
785          "number of used records cannot exceed the total number of records");
786   return Total > 0 ? Used * 100 / Total : 100;
787 }
788 
789 /// Clear all the per-function data used to load samples and propagate weights.
790 void SampleProfileLoaderBaseImpl::clearFunctionData() {
791   BlockWeights.clear();
792   EdgeWeights.clear();
793   VisitedBlocks.clear();
794   VisitedEdges.clear();
795   EquivalenceClass.clear();
796   DT = nullptr;
797   PDT = nullptr;
798   LI = nullptr;
799   Predecessors.clear();
800   Successors.clear();
801   CoverageTracker.clear();
802 }
803 
804 #ifndef NDEBUG
805 /// Print the weight of edge \p E on stream \p OS.
806 ///
807 /// \param OS  Stream to emit the output to.
808 /// \param E  Edge to print.
809 void SampleProfileLoaderBaseImpl::printEdgeWeight(raw_ostream &OS, Edge E) {
810   OS << "weight[" << E.first->getName() << "->" << E.second->getName()
811      << "]: " << EdgeWeights[E] << "\n";
812 }
813 
814 /// Print the equivalence class of block \p BB on stream \p OS.
815 ///
816 /// \param OS  Stream to emit the output to.
817 /// \param BB  Block to print.
818 void SampleProfileLoaderBaseImpl::printBlockEquivalence(raw_ostream &OS,
819                                                         const BasicBlock *BB) {
820   const BasicBlock *Equiv = EquivalenceClass[BB];
821   OS << "equivalence[" << BB->getName()
822      << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
823 }
824 
825 /// Print the weight of block \p BB on stream \p OS.
826 ///
827 /// \param OS  Stream to emit the output to.
828 /// \param BB  Block to print.
829 void SampleProfileLoaderBaseImpl::printBlockWeight(raw_ostream &OS,
830                                                    const BasicBlock *BB) const {
831   const auto &I = BlockWeights.find(BB);
832   uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
833   OS << "weight[" << BB->getName() << "]: " << W << "\n";
834 }
835 #endif
836 
837 /// Get the weight for an instruction.
838 ///
839 /// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// compute the line offset of \p Inst relative to the start of its
/// function, then look up the samples collected for \p Inst at that
/// offset in the function's body samples.
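///
/// For example (illustrative), if the function's header is at line 100 and
/// \p Inst is at line 105 with discriminator 2, the sample record looked up
/// is the one keyed by (line offset 5, discriminator 2).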
844 ///
845 /// \param Inst Instruction to query.
846 ///
847 /// \returns the weight of \p Inst.
848 ErrorOr<uint64_t>
849 SampleProfileLoaderBaseImpl::getInstWeight(const Instruction &Inst) {
850   return getInstWeightImpl(Inst);
851 }
852 
853 ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
854   if (FunctionSamples::ProfileIsProbeBased)
855     return getProbeWeight(Inst);
856 
857   const DebugLoc &DLoc = Inst.getDebugLoc();
858   if (!DLoc)
859     return std::error_code();
860 
  // Ignore all intrinsics, phi nodes and branch instructions.
  // Branch and phi-node instructions usually contain debug info from
  // sources outside of their residing basic block, so we ignore them
  // during annotation.
864   if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
865     return std::error_code();
866 
867   // If a direct call/invoke instruction is inlined in profile
868   // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
869   // it means that the inlined callsite has no sample, thus the call
870   // instruction should have 0 count.
871   if (!ProfileIsCS)
872     if (const auto *CB = dyn_cast<CallBase>(&Inst))
873       if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
874         return 0;
875 
876   return getInstWeightImpl(Inst);
877 }
878 
879 ErrorOr<uint64_t>
880 SampleProfileLoaderBaseImpl::getInstWeightImpl(const Instruction &Inst) {
881   const FunctionSamples *FS = findFunctionSamples(Inst);
882   if (!FS)
883     return std::error_code();
884 
885   const DebugLoc &DLoc = Inst.getDebugLoc();
886   if (!DLoc)
887     return std::error_code();
888 
889   const DILocation *DIL = DLoc;
890   uint32_t LineOffset = FunctionSamples::getOffset(DIL);
891   uint32_t Discriminator = DIL->getBaseDiscriminator();
892   ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
893   if (R) {
894     bool FirstMark =
895         CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
896     if (FirstMark) {
897       ORE->emit([&]() {
898         OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
899         Remark << "Applied " << ore::NV("NumSamples", *R);
900         Remark << " samples from profile (offset: ";
901         Remark << ore::NV("LineOffset", LineOffset);
902         if (Discriminator) {
903           Remark << ".";
904           Remark << ore::NV("Discriminator", Discriminator);
905         }
906         Remark << ")";
907         return Remark;
908       });
909     }
910     LLVM_DEBUG(dbgs() << "    " << DLoc.getLine() << "."
911                       << DIL->getBaseDiscriminator() << ":" << Inst
912                       << " (line offset: " << LineOffset << "."
913                       << DIL->getBaseDiscriminator() << " - weight: " << R.get()
914                       << ")\n");
915   }
916   return R;
917 }
918 
919 ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) {
920   assert(FunctionSamples::ProfileIsProbeBased &&
921          "Profile is not pseudo probe based");
922   Optional<PseudoProbe> Probe = extractProbe(Inst);
923   if (!Probe)
924     return std::error_code();
925 
926   const FunctionSamples *FS = findFunctionSamples(Inst);
927   if (!FS)
928     return std::error_code();
929 
930   // If a direct call/invoke instruction is inlined in profile
931   // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
932   // it means that the inlined callsite has no sample, thus the call
933   // instruction should have 0 count.
934   if (const auto *CB = dyn_cast<CallBase>(&Inst))
935     if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
936       return 0;
937 
938   const ErrorOr<uint64_t> &R = FS->findSamplesAt(Probe->Id, 0);
939   if (R) {
940     uint64_t Samples = R.get() * Probe->Factor;
941     bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples);
942     if (FirstMark) {
943       ORE->emit([&]() {
944         OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
945         Remark << "Applied " << ore::NV("NumSamples", Samples);
946         Remark << " samples from profile (ProbeId=";
947         Remark << ore::NV("ProbeId", Probe->Id);
948         Remark << ", Factor=";
949         Remark << ore::NV("Factor", Probe->Factor);
950         Remark << ", OriginalSamples=";
951         Remark << ore::NV("OriginalSamples", R.get());
952         Remark << ")";
953         return Remark;
954       });
955     }
956     LLVM_DEBUG(dbgs() << "    " << Probe->Id << ":" << Inst
957                       << " - weight: " << R.get() << " - factor: "
958                       << format("%0.2f", Probe->Factor) << ")\n");
959     return Samples;
960   }
961   return R;
962 }
963 
964 /// Compute the weight of a basic block.
965 ///
966 /// The weight of basic block \p BB is the maximum weight of all the
967 /// instructions in BB.
968 ///
969 /// \param BB The basic block to query.
970 ///
971 /// \returns the weight for \p BB.
972 ErrorOr<uint64_t>
973 SampleProfileLoaderBaseImpl::getBlockWeight(const BasicBlock *BB) {
974   uint64_t Max = 0;
975   bool HasWeight = false;
976   for (auto &I : BB->getInstList()) {
977     const ErrorOr<uint64_t> &R = getInstWeight(I);
978     if (R) {
979       Max = std::max(Max, R.get());
980       HasWeight = true;
981     }
982   }
983   return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code();
984 }
985 
986 /// Compute and store the weights of every basic block.
987 ///
988 /// This populates the BlockWeights map by computing
989 /// the weights of every basic block in the CFG.
990 ///
991 /// \param F The function to query.
992 bool SampleProfileLoaderBaseImpl::computeBlockWeights(Function &F) {
993   bool Changed = false;
994   LLVM_DEBUG(dbgs() << "Block weights\n");
995   for (const auto &BB : F) {
996     ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
997     if (Weight) {
998       BlockWeights[&BB] = Weight.get();
999       VisitedBlocks.insert(&BB);
1000       Changed = true;
1001     }
1002     LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
1003   }
1004 
1005   return Changed;
1006 }
1007 
1008 /// Get the FunctionSamples for a call instruction.
1009 ///
1010 /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
/// instance that the call instruction calls into. It contains
/// all samples that reside in that inlined instance. We first find the
/// inlined instance that the call instruction comes from, then we
1014 /// traverse its children to find the callsite with the matching
1015 /// location.
1016 ///
1017 /// \param Inst Call/Invoke instruction to query.
1018 ///
1019 /// \returns The FunctionSamples pointer to the inlined instance.
1020 const FunctionSamples *
1021 SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
1022   const DILocation *DIL = Inst.getDebugLoc();
1023   if (!DIL) {
1024     return nullptr;
1025   }
1026 
1027   StringRef CalleeName;
1028   if (Function *Callee = Inst.getCalledFunction())
1029     CalleeName = FunctionSamples::getCanonicalFnName(*Callee);
1030 
1031   if (ProfileIsCS)
1032     return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName);
1033 
1034   const FunctionSamples *FS = findFunctionSamples(Inst);
1035   if (FS == nullptr)
1036     return nullptr;
1037 
1038   return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL),
1039                                    CalleeName, Reader->getRemapper());
1040 }
1041 
/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted in descending order of entry sample
/// count. Stores the total call count of the indirect call in \p Sum.
1045 std::vector<const FunctionSamples *>
1046 SampleProfileLoader::findIndirectCallFunctionSamples(
1047     const Instruction &Inst, uint64_t &Sum) const {
1048   const DILocation *DIL = Inst.getDebugLoc();
1049   std::vector<const FunctionSamples *> R;
1050 
1051   if (!DIL) {
1052     return R;
1053   }
1054 
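  // Sort candidate targets by entry samples in descending order, breaking
  // ties by GUID to keep the output deterministic.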
1055   auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
1056     assert(L && R && "Expect non-null FunctionSamples");
1057     if (L->getEntrySamples() != R->getEntrySamples())
1058       return L->getEntrySamples() > R->getEntrySamples();
1059     return FunctionSamples::getGUID(L->getName()) <
1060            FunctionSamples::getGUID(R->getName());
1061   };
1062 
1063   if (ProfileIsCS) {
1064     auto CalleeSamples =
1065         ContextTracker->getIndirectCalleeContextSamplesFor(DIL);
1066     if (CalleeSamples.empty())
1067       return R;
1068 
    // For CSSPGO, we only use the target context profile's entry count
    // as that already includes both inlined callees and non-inlined ones.
1071     Sum = 0;
1072     for (const auto *const FS : CalleeSamples) {
1073       Sum += FS->getEntrySamples();
1074       R.push_back(FS);
1075     }
1076     llvm::sort(R, FSCompare);
1077     return R;
1078   }
1079 
1080   const FunctionSamples *FS = findFunctionSamples(Inst);
1081   if (FS == nullptr)
1082     return R;
1083 
1084   auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
1085   auto T = FS->findCallTargetMapAt(CallSite);
1086   Sum = 0;
1087   if (T)
1088     for (const auto &T_C : T.get())
1089       Sum += T_C.second;
1090   if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
1091     if (M->empty())
1092       return R;
1093     for (const auto &NameFS : *M) {
1094       Sum += NameFS.second.getEntrySamples();
1095       R.push_back(&NameFS.second);
1096     }
1097     llvm::sort(R, FSCompare);
1098   }
1099   return R;
1100 }
1101 
1102 /// Get the FunctionSamples for an instruction.
1103 ///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// that the instruction comes from. We traverse the inline stack
1106 /// of that instruction, and match it with the tree nodes in the profile.
1107 ///
1108 /// \param Inst Instruction to query.
1109 ///
1110 /// \returns the FunctionSamples pointer to the inlined instance.
1111 const FunctionSamples *SampleProfileLoaderBaseImpl::findFunctionSamples(
1112     const Instruction &Inst) const {
1113   const DILocation *DIL = Inst.getDebugLoc();
1114   if (!DIL)
1115     return Samples;
1116 
1117   auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
1118   if (it.second) {
1119     it.first->second = Samples->findFunctionSamples(DIL, Reader->getRemapper());
1120   }
1121   return it.first->second;
1122 }
1123 
1124 const FunctionSamples *
1125 SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
1126   if (FunctionSamples::ProfileIsProbeBased) {
1127     Optional<PseudoProbe> Probe = extractProbe(Inst);
1128     if (!Probe)
1129       return nullptr;
1130   }
1131 
1132   const DILocation *DIL = Inst.getDebugLoc();
1133   if (!DIL)
1134     return Samples;
1135 
  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
1137   if (it.second) {
1138     if (ProfileIsCS)
1139       it.first->second = ContextTracker->getContextSamplesFor(DIL);
1140     else
1141       it.first->second =
1142           Samples->findFunctionSamples(DIL, Reader->getRemapper());
1143   }
1144   return it.first->second;
1145 }
1146 
1147 /// Attempt to promote indirect call and also inline the promoted call.
1148 ///
1149 /// \param F  Caller function.
1150 /// \param Candidate  ICP and inline candidate.
1151 /// \param Sum  Sum of target counts for indirect call.
/// \param PromotedInsns  Set used to keep track of indirect calls that have
/// already been promoted.
1153 /// \param InlinedCallSite  Output vector for new call sites exposed after
1154 /// inlining.
1155 bool SampleProfileLoader::tryPromoteAndInlineCandidate(
1156     Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, uint64_t &Sum,
1157     DenseSet<Instruction *> &PromotedInsns,
1158     SmallVector<CallBase *, 8> *InlinedCallSite) {
1159   const char *Reason = "Callee function not available";
  // R->getValue() != &F is to prevent promoting a recursive call.
  // If it is a recursive call, we do not inline it as it could bloat
  // the code exponentially. There are ways to handle this better, e.g.
  // clone the caller first, and inline the cloned caller if it is
  // recursive. Since LLVM does not inline recursive calls, we simply
  // ignore such calls instead of handling them explicitly.
1166   auto R = SymbolMap.find(Candidate.CalleeSamples->getFuncName());
1167   if (R != SymbolMap.end() && R->getValue() &&
1168       !R->getValue()->isDeclaration() && R->getValue()->getSubprogram() &&
1169       R->getValue()->hasFnAttribute("use-sample-profile") &&
1170       R->getValue() != &F &&
1171       isLegalToPromote(*Candidate.CallInstr, R->getValue(), &Reason)) {
1172     auto *DI =
1173         &pgo::promoteIndirectCall(*Candidate.CallInstr, R->getValue(),
1174                                   Candidate.CallsiteCount, Sum, false, ORE);
1175     if (DI) {
1176       Sum -= Candidate.CallsiteCount;
1177       // Prorate the indirect callsite distribution.
1178       // Do not update the promoted direct callsite distribution at this
1179       // point since the original distribution combined with the callee
1180       // profile will be used to prorate callsites from the callee if
      // inlined. Once not inlined, the direct callsite distribution should
      // be prorated so that it reflects the real callsite counts.
1183       setProbeDistributionFactor(*Candidate.CallInstr,
1184                                  Candidate.CallsiteDistribution * Sum /
1185                                      SumOrigin);
1186       PromotedInsns.insert(Candidate.CallInstr);
1187       Candidate.CallInstr = DI;
1188       if (isa<CallInst>(DI) || isa<InvokeInst>(DI)) {
1189         bool Inlined = tryInlineCandidate(Candidate, InlinedCallSite);
1190         if (!Inlined) {
1191           // Prorate the direct callsite distribution so that it reflects real
1192           // callsite counts.
1193           setProbeDistributionFactor(*DI, Candidate.CallsiteDistribution *
1194                                               Candidate.CallsiteCount /
1195                                               SumOrigin);
1196         }
1197         return Inlined;
1198       }
1199     }
1200   } else {
1201     LLVM_DEBUG(dbgs() << "\nFailed to promote indirect call to "
1202                       << Candidate.CalleeSamples->getFuncName() << " because "
1203                       << Reason << "\n");
1204   }
1205   return false;
1206 }
1207 
1208 bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
1209   if (!ProfileSizeInline)
1210     return false;
1211 
1212   Function *Callee = CallInst.getCalledFunction();
1213   if (Callee == nullptr)
1214     return false;
1215 
1216   InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
1217                                   GetAC, GetTLI);
1218 
1219   if (Cost.isNever())
1220     return false;
1221 
1222   if (Cost.isAlways())
1223     return true;
1224 
1225   return Cost.getCost() <= SampleColdCallSiteThreshold;
1226 }
1227 
1228 void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
1229     const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
1230     bool Hot) {
1231   for (auto I : Candidates) {
1232     Function *CalledFunction = I->getCalledFunction();
1233     if (CalledFunction) {
1234       ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
1235                                            I->getDebugLoc(), I->getParent())
1236                 << "previous inlining reattempted for "
1237                 << (Hot ? "hotness: '" : "size: '")
1238                 << ore::NV("Callee", CalledFunction) << "' into '"
1239                 << ore::NV("Caller", &F) << "'");
1240     }
1241   }
1242 }
1243 
1244 /// Iteratively inline hot callsites of a function.
1245 ///
/// Iteratively traverse all callsites of the function \p F, and find if
/// the corresponding inlined instance exists and is hot in the profile. If
/// it is hot enough, inline the callsite and add the new callsites of the
/// callee into the caller. If the call is an indirect call, first promote
/// it to a direct call. Each indirect call is limited to a single target.
1251 ///
1252 /// \param F function to perform iterative inlining.
1253 /// \param InlinedGUIDs a set to be updated to include all GUIDs that are
1254 ///     inlined in the profiled binary.
1255 ///
/// \returns True if any inlining happened.
1257 bool SampleProfileLoader::inlineHotFunctions(
1258     Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1259   DenseSet<Instruction *> PromotedInsns;
1260 
  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // the profile symbol list is ignored when profile-sample-accurate is on.
1263   assert((!ProfAccForSymsInList ||
1264           (!ProfileSampleAccurate &&
1265            !F.hasFnAttribute("profile-sample-accurate"))) &&
1266          "ProfAccForSymsInList should be false when profile-sample-accurate "
1267          "is enabled");
1268 
1269   DenseMap<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
1270   bool Changed = false;
1271   bool LocalChanged = true;
1272   while (LocalChanged) {
1273     LocalChanged = false;
1274     SmallVector<CallBase *, 10> CIS;
1275     for (auto &BB : F) {
1276       bool Hot = false;
1277       SmallVector<CallBase *, 10> AllCandidates;
1278       SmallVector<CallBase *, 10> ColdCandidates;
1279       for (auto &I : BB.getInstList()) {
1280         const FunctionSamples *FS = nullptr;
1281         if (auto *CB = dyn_cast<CallBase>(&I)) {
1282           if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) {
1283             assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
1284                    "GUIDToFuncNameMap has to be populated");
1285             AllCandidates.push_back(CB);
1286             if (FS->getEntrySamples() > 0 || ProfileIsCS)
1287               LocalNotInlinedCallSites.try_emplace(CB, FS);
1288             if (callsiteIsHot(FS, PSI, ProfAccForSymsInList))
1289               Hot = true;
1290             else if (shouldInlineColdCallee(*CB))
1291               ColdCandidates.push_back(CB);
1292           }
1293         }
1294       }
1295       if (Hot || ExternalInlineAdvisor) {
1296         CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
1297         emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
1298       } else {
1299         CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
1300         emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
1301       }
1302     }
1303     for (CallBase *I : CIS) {
1304       Function *CalledFunction = I->getCalledFunction();
1305       InlineCandidate Candidate = {
1306           I,
1307           LocalNotInlinedCallSites.count(I) ? LocalNotInlinedCallSites[I]
1308                                             : nullptr,
1309           0 /* dummy count */, 1.0 /* dummy distribution factor */};
1310       // Do not inline recursive calls.
1311       if (CalledFunction == &F)
1312         continue;
1313       if (I->isIndirectCall()) {
1314         if (PromotedInsns.count(I))
1315           continue;
1316         uint64_t Sum;
1317         for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
1318           uint64_t SumOrigin = Sum;
1319           if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1320             FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
1321                                      PSI->getOrCompHotCountThreshold());
1322             continue;
1323           }
1324           if (!callsiteIsHot(FS, PSI, ProfAccForSymsInList))
1325             continue;
1326 
1327           Candidate = {I, FS, FS->getEntrySamples(), 1.0};
1328           if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
1329                                            PromotedInsns)) {
1330             LocalNotInlinedCallSites.erase(I);
1331             LocalChanged = true;
1332           }
1333         }
1334       } else if (CalledFunction && CalledFunction->getSubprogram() &&
1335                  !CalledFunction->isDeclaration()) {
1336         if (tryInlineCandidate(Candidate)) {
1337           LocalNotInlinedCallSites.erase(I);
1338           LocalChanged = true;
1339         }
1340       } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1341         findCalleeFunctionSamples(*I)->findInlinedFunctions(
1342             InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
1343       }
1344     }
1345     Changed |= LocalChanged;
1346   }
1347 
  // For CS profile, the profile for a not-inlined context will be merged when
  // the base profile is retrieved.
1350   if (ProfileIsCS)
1351     return Changed;
1352 
  // Accumulate the information for callsites that were not inlined into
  // notInlinedCallInfo (or merge their profiles back into the outline copies
  // when ProfileMergeInlinee is enabled).
1354   for (const auto &Pair : LocalNotInlinedCallSites) {
1355     CallBase *I = Pair.getFirst();
1356     Function *Callee = I->getCalledFunction();
1357     if (!Callee || Callee->isDeclaration())
1358       continue;
1359 
1360     ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline",
1361                                          I->getDebugLoc(), I->getParent())
1362               << "previous inlining not repeated: '"
1363               << ore::NV("Callee", Callee) << "' into '"
1364               << ore::NV("Caller", &F) << "'");
1365 
1366     ++NumCSNotInlined;
1367     const FunctionSamples *FS = Pair.getSecond();
1368     if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
1369       continue;
1370     }
1371 
1372     if (ProfileMergeInlinee) {
1373       // A function call can be replicated by optimizations like callsite
1374       // splitting or jump threading and the replicates end up sharing the
1375       // sample nested callee profile instead of slicing the original inlinee's
      // profile. We want to do the merge exactly once by filtering out callee
1377       // profiles with a non-zero head sample count.
1378       if (FS->getHeadSamples() == 0) {
1379         // Use entry samples as head samples during the merge, as inlinees
1380         // don't have head samples.
1381         const_cast<FunctionSamples *>(FS)->addHeadSamples(
1382             FS->getEntrySamples());
1383 
        // Note that we have to do the merge right after processing the
        // function. This allows OutlineFS's profile to be used for annotation
        // during top-down processing of functions.
1387         FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
1388         OutlineFS->merge(*FS);
1389       }
1390     } else {
1391       auto pair =
1392           notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
1393       pair.first->second.entryCount += FS->getEntrySamples();
1394     }
1395   }
1396   return Changed;
1397 }
1398 
1399 bool SampleProfileLoader::tryInlineCandidate(
1400     InlineCandidate &Candidate, SmallVector<CallBase *, 8> *InlinedCallSites) {
1401 
1402   CallBase &CB = *Candidate.CallInstr;
1403   Function *CalledFunction = CB.getCalledFunction();
1404   assert(CalledFunction && "Expect a callee with definition");
1405   DebugLoc DLoc = CB.getDebugLoc();
1406   BasicBlock *BB = CB.getParent();
1407 
1408   InlineCost Cost = shouldInlineCandidate(Candidate);
1409   if (Cost.isNever()) {
1410     ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
1411               << "incompatible inlining");
1412     return false;
1413   }
1414 
1415   if (!Cost)
1416     return false;
1417 
1418   InlineFunctionInfo IFI(nullptr, GetAC);
1419   if (InlineFunction(CB, IFI).isSuccess()) {
    // The call to InlineFunction erases the call instruction, so we can't
    // pass it here.
1421     emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
1422                     true, CSINLINE_DEBUG);
1423 
1424     // Now populate the list of newly exposed call sites.
1425     if (InlinedCallSites) {
1426       InlinedCallSites->clear();
1427       for (auto &I : IFI.InlinedCallSites)
1428         InlinedCallSites->push_back(I);
1429     }
1430 
1431     if (ProfileIsCS)
1432       ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples);
1433     ++NumCSInlined;
1434 
1435     // Prorate inlined probes for a duplicated inlining callsite which probably
1436     // has a distribution less than 100%. Samples for an inlinee should be
1437     // distributed among the copies of the original callsite based on each
1438     // callsite's distribution factor for counts accuracy. Note that an inlined
1439     // probe may come with its own distribution factor if it has been duplicated
    // in the inlinee body. The two factors are multiplied to reflect the
1441     // aggregation of duplication.
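    // For illustration (hypothetical factors): a callsite duplicated with a
    // distribution factor of 0.5 whose inlined probe already carries a
    // factor of 0.8 ends up with a combined factor of 0.4.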
1442     if (Candidate.CallsiteDistribution < 1) {
1443       for (auto &I : IFI.InlinedCallSites) {
1444         if (Optional<PseudoProbe> Probe = extractProbe(*I))
1445           setProbeDistributionFactor(*I, Probe->Factor *
1446                                              Candidate.CallsiteDistribution);
1447       }
1448       NumDuplicatedInlinesite++;
1449     }
1450 
1451     return true;
1452   }
1453   return false;
1454 }
1455 
1456 bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
1457                                              CallBase *CB) {
1458   assert(CB && "Expect non-null call instruction");
1459 
1460   if (isa<IntrinsicInst>(CB))
1461     return false;
1462 
1463   // Find the callee's profile. For indirect call, find hottest target profile.
1464   const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
1465   if (!CalleeSamples)
1466     return false;
1467 
1468   float Factor = 1.0;
1469   if (Optional<PseudoProbe> Probe = extractProbe(*CB))
1470     Factor = Probe->Factor;
1471 
1472   uint64_t CallsiteCount = 0;
1473   ErrorOr<uint64_t> Weight = getBlockWeight(CB->getParent());
1474   if (Weight)
1475     CallsiteCount = Weight.get();
1476   if (CalleeSamples)
1477     CallsiteCount = std::max(
1478         CallsiteCount, uint64_t(CalleeSamples->getEntrySamples() * Factor));
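  // The candidate count is therefore the larger of the callsite's block
  // weight and the callee's entry samples scaled by the probe distribution
  // factor.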
1479 
1480   *NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
1481   return true;
1482 }
1483 
1484 InlineCost
1485 SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
1486   std::unique_ptr<InlineAdvice> Advice = nullptr;
1487   if (ExternalInlineAdvisor) {
1488     Advice = ExternalInlineAdvisor->getAdvice(*Candidate.CallInstr);
1489     if (!Advice->isInliningRecommended()) {
1490       Advice->recordUnattemptedInlining();
1491       return InlineCost::getNever("not previously inlined");
1492     }
1493     Advice->recordInlining();
1494     return InlineCost::getAlways("previously inlined");
1495   }
1496 
  // Adjust the threshold based on call site hotness. Only do this for the
  // callsite-prioritized inliner, because otherwise the cost-benefit check
  // is done earlier.
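  // For example (hypothetical counts): under the callsite-prioritized
  // inliner, a callsite whose count exceeds the profile summary's hot-count
  // threshold is checked against SampleHotCallSiteThreshold; a colder
  // callsite is only considered at all when ProfileSizeInline is enabled.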
1499   int SampleThreshold = SampleColdCallSiteThreshold;
1500   if (CallsitePrioritizedInline) {
1501     if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
1502       SampleThreshold = SampleHotCallSiteThreshold;
1503     else if (!ProfileSizeInline)
1504       return InlineCost::getNever("cold callsite");
1505   }
1506 
1507   Function *Callee = Candidate.CallInstr->getCalledFunction();
1508   assert(Callee && "Expect a definition for inline candidate of direct call");
1509 
1510   InlineParams Params = getInlineParams();
1511   Params.ComputeFullInlineCost = true;
  // Check if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. We need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when the cost exceeds the threshold without checking all the IR in the
  // callee. The actual cost does not matter because we only check isNever()
  // to see if it is legal to inline the callsite.
1518   InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
1519                                   GetTTI(*Callee), GetAC, GetTLI);
1520 
1521   // Honor always inline and never inline from call analyzer
1522   if (Cost.isNever() || Cost.isAlways())
1523     return Cost;
1524 
  // For the old FDO inliner, we inline the call site as long as the cost is
  // not "Never". The cost-benefit check is done earlier.
1527   if (!CallsitePrioritizedInline) {
1528     return InlineCost::get(Cost.getCost(), INT_MAX);
1529   }
1530 
  // Otherwise, only use the cost from the call analyzer, but overwrite the
  // threshold with the Sample PGO threshold.
1533   return InlineCost::get(Cost.getCost(), SampleThreshold);
1534 }
1535 
1536 bool SampleProfileLoader::inlineHotFunctionsWithPriority(
1537     Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1538   DenseSet<Instruction *> PromotedInsns;
  assert(ProfileIsCS && "Priority-based inliner only works with CSSPGO now");
1540 
1541   // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // the profile symbol list is ignored when profile-sample-accurate is on.
1543   assert((!ProfAccForSymsInList ||
1544           (!ProfileSampleAccurate &&
1545            !F.hasFnAttribute("profile-sample-accurate"))) &&
1546          "ProfAccForSymsInList should be false when profile-sample-accurate "
1547          "is enabled");
1548 
  // Populate the worklist with the initial call sites from the root inliner,
  // along with the call site weights.
1551   CandidateQueue CQueue;
1552   InlineCandidate NewCandidate;
1553   for (auto &BB : F) {
1554     for (auto &I : BB.getInstList()) {
1555       auto *CB = dyn_cast<CallBase>(&I);
1556       if (!CB)
1557         continue;
1558       if (getInlineCandidate(&NewCandidate, CB))
1559         CQueue.push(NewCandidate);
1560     }
1561   }
1562 
  // Cap the size growth from profile-guided inlining. This is needed even
  // though the cost of each inline candidate already accounts for callee
  // size, because with top-down inlining we can significantly grow the
  // inlined size with a large number of smaller inlinees that each pass the
  // cost check.
1567   assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
1568          "Max inline size limit should not be smaller than min inline size "
1569          "limit.");
1570   unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
1571   SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
1572   SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
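  // For example (hypothetical numbers): a 100-instruction function with a
  // growth limit of 12 starts from a raw limit of 1200 instructions, which
  // is then clamped into [ProfileInlineLimitMin, ProfileInlineLimitMax].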
1573   if (ExternalInlineAdvisor)
1574     SizeLimit = std::numeric_limits<unsigned>::max();
1575 
1576   // Perform iterative BFS call site prioritized inlining
1577   bool Changed = false;
1578   while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
1579     InlineCandidate Candidate = CQueue.top();
1580     CQueue.pop();
1581     CallBase *I = Candidate.CallInstr;
1582     Function *CalledFunction = I->getCalledFunction();
1583 
1584     if (CalledFunction == &F)
1585       continue;
1586     if (I->isIndirectCall()) {
1587       if (PromotedInsns.count(I))
1588         continue;
1589       uint64_t Sum;
1590       auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
1591       uint64_t SumOrigin = Sum;
1592       Sum *= Candidate.CallsiteDistribution;
1593       for (const auto *FS : CalleeSamples) {
        // TODO: Consider disabling pre-LTO ICP for MonoLTO as well.
1595         if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1596           FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
1597                                    PSI->getOrCompHotCountThreshold());
1598           continue;
1599         }
1600         uint64_t EntryCountDistributed =
1601             FS->getEntrySamples() * Candidate.CallsiteDistribution;
        // In addition to the regular inline cost check, we also need to make
        // sure ICP isn't introducing excessive speculative checks even if an
        // individual target looks beneficial to promote and inline. That
        // means we should only do ICP when there is a small number of
        // dominant targets.
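        // For example (hypothetical numbers): with a ProfileICPThreshold of
        // 25, a target must carry at least 1/25 of the original indirect-call
        // sample total to be promoted.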
1606         if (EntryCountDistributed < SumOrigin / ProfileICPThreshold)
1607           break;
1608         // TODO: Fix CallAnalyzer to handle all indirect calls.
1609         // For indirect call, we don't run CallAnalyzer to get InlineCost
1610         // before actual inlining. This is because we could see two different
1611         // types from the same definition, which makes CallAnalyzer choke as
1612         // it's expecting matching parameter type on both caller and callee
1613         // side. See example from PR18962 for the triggering cases (the bug was
1614         // fixed, but we generate different types).
1615         if (!PSI->isHotCount(EntryCountDistributed))
1616           break;
1617         SmallVector<CallBase *, 8> InlinedCallSites;
1618         // Attach function profile for promoted indirect callee, and update
1619         // call site count for the promoted inline candidate too.
1620         Candidate = {I, FS, EntryCountDistributed,
1621                      Candidate.CallsiteDistribution};
1622         if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
1623                                          PromotedInsns, &InlinedCallSites)) {
1624           for (auto *CB : InlinedCallSites) {
1625             if (getInlineCandidate(&NewCandidate, CB))
1626               CQueue.emplace(NewCandidate);
1627           }
1628           Changed = true;
1629         }
1630       }
1631     } else if (CalledFunction && CalledFunction->getSubprogram() &&
1632                !CalledFunction->isDeclaration()) {
1633       SmallVector<CallBase *, 8> InlinedCallSites;
1634       if (tryInlineCandidate(Candidate, &InlinedCallSites)) {
1635         for (auto *CB : InlinedCallSites) {
1636           if (getInlineCandidate(&NewCandidate, CB))
1637             CQueue.emplace(NewCandidate);
1638         }
1639         Changed = true;
1640       }
1641     } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1642       findCalleeFunctionSamples(*I)->findInlinedFunctions(
1643           InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
1644     }
1645   }
1646 
1647   if (!CQueue.empty()) {
1648     if (SizeLimit == (unsigned)ProfileInlineLimitMax)
1649       ++NumCSInlinedHitMaxLimit;
1650     else if (SizeLimit == (unsigned)ProfileInlineLimitMin)
1651       ++NumCSInlinedHitMinLimit;
1652     else
1653       ++NumCSInlinedHitGrowthLimit;
1654   }
1655 
1656   return Changed;
1657 }
1658 
1659 /// Find equivalence classes for the given block.
1660 ///
1661 /// This finds all the blocks that are guaranteed to execute the same
1662 /// number of times as \p BB1. To do this, it traverses all the
1663 /// descendants of \p BB1 in the dominator or post-dominator tree.
1664 ///
1665 /// A block BB2 will be in the same equivalence class as \p BB1 if
1666 /// the following holds:
1667 ///
1668 /// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
1669 ///    is a descendant of \p BB1 in the dominator tree, then BB2 should
1670 ///    dominate BB1 in the post-dominator tree.
1671 ///
1672 /// 2- Both BB2 and \p BB1 must be in the same loop.
1673 ///
1674 /// For every block BB2 that meets those two requirements, we set BB2's
1675 /// equivalence class to \p BB1.
1676 ///
1677 /// \param BB1  Block to check.
1678 /// \param Descendants  Descendants of \p BB1 in either the dom or pdom tree.
1679 /// \param DomTree  Opposite dominator tree. If \p Descendants is filled
1680 ///                 with blocks from \p BB1's dominator tree, then
1681 ///                 this is the post-dominator tree, and vice versa.
1682 template <bool IsPostDom>
1683 void SampleProfileLoaderBaseImpl::findEquivalencesFor(
1684     BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
1685     DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) {
1686   const BasicBlock *EC = EquivalenceClass[BB1];
1687   uint64_t Weight = BlockWeights[EC];
1688   for (const auto *BB2 : Descendants) {
1689     bool IsDomParent = DomTree->dominates(BB2, BB1);
1690     bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
1691     if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
1692       EquivalenceClass[BB2] = EC;
1693       // If BB2 is visited, then the entire EC should be marked as visited.
1694       if (VisitedBlocks.count(BB2)) {
1695         VisitedBlocks.insert(EC);
1696       }
1697 
1698       // If BB2 is heavier than BB1, make BB2 have the same weight
1699       // as BB1.
1700       //
1701       // Note that we don't worry about the opposite situation here
1702       // (when BB2 is lighter than BB1). We will deal with this
1703       // during the propagation phase. Right now, we just want to
1704       // make sure that BB1 has the largest weight of all the
1705       // members of its equivalence set.
1706       Weight = std::max(Weight, BlockWeights[BB2]);
1707     }
1708   }
1709   if (EC == &EC->getParent()->getEntryBlock()) {
1710     BlockWeights[EC] = Samples->getHeadSamples() + 1;
1711   } else {
1712     BlockWeights[EC] = Weight;
1713   }
1714 }
1715 
1716 /// Find equivalence classes.
1717 ///
1718 /// Since samples may be missing from blocks, we can fill in the gaps by setting
1719 /// the weights of all the blocks in the same equivalence class to the same
1720 /// weight. To compute the concept of equivalence, we use dominance and loop
1721 /// information. Two blocks B1 and B2 are in the same equivalence class if B1
1722 /// dominates B2, B2 post-dominates B1 and both are in the same loop.
1723 ///
1724 /// \param F The function to query.
1725 void SampleProfileLoaderBaseImpl::findEquivalenceClasses(Function &F) {
1726   SmallVector<BasicBlock *, 8> DominatedBBs;
1727   LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
1728   // Find equivalence sets based on dominance and post-dominance information.
1729   for (auto &BB : F) {
1730     BasicBlock *BB1 = &BB;
1731 
1732     // Compute BB1's equivalence class once.
1733     if (EquivalenceClass.count(BB1)) {
1734       LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
1735       continue;
1736     }
1737 
1738     // By default, blocks are in their own equivalence class.
1739     EquivalenceClass[BB1] = BB1;
1740 
1741     // Traverse all the blocks dominated by BB1. We are looking for
1742     // every basic block BB2 such that:
1743     //
1744     // 1- BB1 dominates BB2.
1745     // 2- BB2 post-dominates BB1.
1746     // 3- BB1 and BB2 are in the same loop nest.
1747     //
1748     // If all those conditions hold, it means that BB2 is executed
1749     // as many times as BB1, so they are placed in the same equivalence
1750     // class by making BB2's equivalence class be BB1.
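    // For example, in a simple diamond CFG (Entry -> {Then, Else} -> Merge),
    // Entry dominates Merge and Merge post-dominates Entry, so Merge joins
    // Entry's equivalence class, while Then and Else each keep their own.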
1751     DominatedBBs.clear();
1752     DT->getDescendants(BB1, DominatedBBs);
1753     findEquivalencesFor(BB1, DominatedBBs, PDT.get());
1754 
1755     LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
1756   }
1757 
1758   // Assign weights to equivalence classes.
1759   //
1760   // All the basic blocks in the same equivalence class will execute
1761   // the same number of times. Since we know that the head block in
1762   // each equivalence class has the largest weight, assign that weight
1763   // to all the blocks in that equivalence class.
1764   LLVM_DEBUG(
1765       dbgs() << "\nAssign the same weight to all blocks in the same class\n");
1766   for (auto &BI : F) {
1767     const BasicBlock *BB = &BI;
1768     const BasicBlock *EquivBB = EquivalenceClass[BB];
1769     if (BB != EquivBB)
1770       BlockWeights[BB] = BlockWeights[EquivBB];
1771     LLVM_DEBUG(printBlockWeight(dbgs(), BB));
1772   }
1773 }
1774 
1775 /// Visit the given edge to decide if it has a valid weight.
1776 ///
1777 /// If \p E has not been visited before, we copy to \p UnknownEdge
1778 /// and increment the count of unknown edges.
1779 ///
1780 /// \param E  Edge to visit.
1781 /// \param NumUnknownEdges  Current number of unknown edges.
1782 /// \param UnknownEdge  Set if E has not been visited before.
1783 ///
1784 /// \returns E's weight, if known. Otherwise, return 0.
1785 uint64_t SampleProfileLoaderBaseImpl::visitEdge(Edge E,
1786                                                 unsigned *NumUnknownEdges,
1787                                                 Edge *UnknownEdge) {
1788   if (!VisitedEdges.count(E)) {
1789     (*NumUnknownEdges)++;
1790     *UnknownEdge = E;
1791     return 0;
1792   }
1793 
1794   return EdgeWeights[E];
1795 }
1796 
1797 /// Propagate weights through incoming/outgoing edges.
1798 ///
1799 /// If the weight of a basic block is known, and there is only one edge
1800 /// with an unknown weight, we can calculate the weight of that edge.
1801 ///
1802 /// Similarly, if all the edges have a known count, we can calculate the
1803 /// count of the basic block, if needed.
1804 ///
1805 /// \param F  Function to process.
1806 /// \param UpdateBlockCount  Whether we should update basic block counts that
///                          have already been annotated.
1808 ///
1809 /// \returns  True if new weights were assigned to edges or blocks.
1810 bool SampleProfileLoaderBaseImpl::propagateThroughEdges(Function &F,
1811                                                         bool UpdateBlockCount) {
1812   bool Changed = false;
1813   LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
1814   for (const auto &BI : F) {
1815     const BasicBlock *BB = &BI;
1816     const BasicBlock *EC = EquivalenceClass[BB];
1817 
1818     // Visit all the predecessor and successor edges to determine
1819     // which ones have a weight assigned already. Note that it doesn't
1820     // matter that we only keep track of a single unknown edge. The
1821     // only case we are interested in handling is when only a single
1822     // edge is unknown (see setEdgeOrBlockWeight).
1823     for (unsigned i = 0; i < 2; i++) {
1824       uint64_t TotalWeight = 0;
1825       unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
1826       Edge UnknownEdge, SelfReferentialEdge, SingleEdge;
1827 
1828       if (i == 0) {
1829         // First, visit all predecessor edges.
1830         NumTotalEdges = Predecessors[BB].size();
1831         for (auto *Pred : Predecessors[BB]) {
1832           Edge E = std::make_pair(Pred, BB);
1833           TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1834           if (E.first == E.second)
1835             SelfReferentialEdge = E;
1836         }
1837         if (NumTotalEdges == 1) {
1838           SingleEdge = std::make_pair(Predecessors[BB][0], BB);
1839         }
1840       } else {
1841         // On the second round, visit all successor edges.
1842         NumTotalEdges = Successors[BB].size();
1843         for (auto *Succ : Successors[BB]) {
1844           Edge E = std::make_pair(BB, Succ);
1845           TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1846         }
1847         if (NumTotalEdges == 1) {
1848           SingleEdge = std::make_pair(BB, Successors[BB][0]);
1849         }
1850       }
1851 
1852       // After visiting all the edges, there are three cases that we
1853       // can handle immediately:
1854       //
1855       // - All the edge weights are known (i.e., NumUnknownEdges == 0).
1856       //   In this case, we simply check that the sum of all the edges
1857       //   is the same as BB's weight. If not, we change BB's weight
1858       //   to match. Additionally, if BB had not been visited before,
1859       //   we mark it visited.
1860       //
1861       // - Only one edge is unknown and BB has already been visited.
      //   In this case, we can compute the weight of the edge by
      //   subtracting the sum of all the known edge weights from the
      //   block's total weight. If the known edges weigh more than BB,
      //   then the weight of the last remaining edge is set to zero.
1866       //
1867       // - There exists a self-referential edge and the weight of BB is
1868       //   known. In this case, this edge can be based on BB's weight.
1869       //   We add up all the other known edges and set the weight on
1870       //   the self-referential edge as we did in the previous case.
1871       //
1872       // In any other case, we must continue iterating. Eventually,
1873       // all edges will get a weight, or iteration will stop when
1874       // it reaches SampleProfileMaxPropagateIterations.
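      // For instance (hypothetical weights): if BB weighs 100 and has two
      // successor edges, one known to weigh 60 and one unknown, the unknown
      // edge is assigned 100 - 60 = 40; if the known edge weighed 120, the
      // unknown edge would be assigned 0.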
1875       if (NumUnknownEdges <= 1) {
1876         uint64_t &BBWeight = BlockWeights[EC];
1877         if (NumUnknownEdges == 0) {
1878           if (!VisitedBlocks.count(EC)) {
1879             // If we already know the weight of all edges, the weight of the
1880             // basic block can be computed. It should be no larger than the sum
1881             // of all edge weights.
1882             if (TotalWeight > BBWeight) {
1883               BBWeight = TotalWeight;
1884               Changed = true;
1885               LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
1886                                 << " known. Set weight for block: ";
1887                          printBlockWeight(dbgs(), BB););
1888             }
1889           } else if (NumTotalEdges == 1 &&
1890                      EdgeWeights[SingleEdge] < BlockWeights[EC]) {
1891             // If there is only one edge for the visited basic block, use the
1892             // block weight to adjust edge weight if edge weight is smaller.
1893             EdgeWeights[SingleEdge] = BlockWeights[EC];
1894             Changed = true;
1895           }
1896         } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
1897           // If there is a single unknown edge and the block has been
1898           // visited, then we can compute E's weight.
1899           if (BBWeight >= TotalWeight)
1900             EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
1901           else
1902             EdgeWeights[UnknownEdge] = 0;
1903           const BasicBlock *OtherEC;
1904           if (i == 0)
1905             OtherEC = EquivalenceClass[UnknownEdge.first];
1906           else
1907             OtherEC = EquivalenceClass[UnknownEdge.second];
          // Edge weights should never exceed the weights of the BBs they
          // connect.
1909           if (VisitedBlocks.count(OtherEC) &&
1910               EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
1911             EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
1912           VisitedEdges.insert(UnknownEdge);
1913           Changed = true;
1914           LLVM_DEBUG(dbgs() << "Set weight for edge: ";
1915                      printEdgeWeight(dbgs(), UnknownEdge));
1916         }
1917       } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
        // If a block weighs 0, all its in/out edges should weigh 0.
1919         if (i == 0) {
1920           for (auto *Pred : Predecessors[BB]) {
1921             Edge E = std::make_pair(Pred, BB);
1922             EdgeWeights[E] = 0;
1923             VisitedEdges.insert(E);
1924           }
1925         } else {
1926           for (auto *Succ : Successors[BB]) {
1927             Edge E = std::make_pair(BB, Succ);
1928             EdgeWeights[E] = 0;
1929             VisitedEdges.insert(E);
1930           }
1931         }
1932       } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
1933         uint64_t &BBWeight = BlockWeights[BB];
1934         // We have a self-referential edge and the weight of BB is known.
1935         if (BBWeight >= TotalWeight)
1936           EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
1937         else
1938           EdgeWeights[SelfReferentialEdge] = 0;
1939         VisitedEdges.insert(SelfReferentialEdge);
1940         Changed = true;
1941         LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
1942                    printEdgeWeight(dbgs(), SelfReferentialEdge));
1943       }
1944       if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
1945         BlockWeights[EC] = TotalWeight;
1946         VisitedBlocks.insert(EC);
1947         Changed = true;
1948       }
1949     }
1950   }
1951 
1952   return Changed;
1953 }
1954 
1955 /// Build in/out edge lists for each basic block in the CFG.
1956 ///
1957 /// We are interested in unique edges. If a block B1 has multiple
1958 /// edges to another block B2, we only add a single B1->B2 edge.
1959 void SampleProfileLoaderBaseImpl::buildEdges(Function &F) {
1960   for (auto &BI : F) {
1961     BasicBlock *B1 = &BI;
1962 
1963     // Add predecessors for B1.
1964     SmallPtrSet<BasicBlock *, 16> Visited;
1965     if (!Predecessors[B1].empty())
1966       llvm_unreachable("Found a stale predecessors list in a basic block.");
1967     for (BasicBlock *B2 : predecessors(B1))
1968       if (Visited.insert(B2).second)
1969         Predecessors[B1].push_back(B2);
1970 
1971     // Add successors for B1.
1972     Visited.clear();
1973     if (!Successors[B1].empty())
1974       llvm_unreachable("Found a stale successors list in a basic block.");
1975     for (BasicBlock *B2 : successors(B1))
1976       if (Visited.insert(B2).second)
1977         Successors[B1].push_back(B2);
1978   }
1979 }
1980 
1981 /// Returns the sorted CallTargetMap \p M by count in descending order.
1982 static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
    const SampleRecord::CallTargetMap &M) {
1984   SmallVector<InstrProfValueData, 2> R;
1985   for (const auto &I : SampleRecord::SortCallTargets(M)) {
    R.emplace_back(
        InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
1987   }
1988   return R;
1989 }
1990 
1991 /// Propagate weights into edges
1992 ///
1993 /// The following rules are applied to every block BB in the CFG:
1994 ///
1995 /// - If BB has a single predecessor/successor, then the weight
1996 ///   of that edge is the weight of the block.
1997 ///
1998 /// - If all incoming or outgoing edges are known except one, and the
1999 ///   weight of the block is already known, the weight of the unknown
2000 ///   edge will be the weight of the block minus the sum of all the known
2001 ///   edges. If the sum of all the known edges is larger than BB's weight,
2002 ///   we set the unknown edge weight to zero.
2003 ///
2004 /// - If there is a self-referential edge, and the weight of the block is
2005 ///   known, the weight for that edge is set to the weight of the block
2006 ///   minus the weight of the other incoming edges to that block (if
2007 ///   known).
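///
/// For illustration (hypothetical weights): a block of weight 50 with a
/// single successor gives that edge weight 50; a block of weight 50 whose
/// known outgoing edges sum to 30 gives its one unknown edge 50 - 30 = 20.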
2008 void SampleProfileLoaderBaseImpl::propagateWeights(Function &F) {
2009   bool Changed = true;
2010   unsigned I = 0;
2011 
2012   // If BB weight is larger than its corresponding loop's header BB weight,
2013   // use the BB weight to replace the loop header BB weight.
2014   for (auto &BI : F) {
2015     BasicBlock *BB = &BI;
2016     Loop *L = LI->getLoopFor(BB);
2017     if (!L) {
2018       continue;
2019     }
2020     BasicBlock *Header = L->getHeader();
2021     if (Header && BlockWeights[BB] > BlockWeights[Header]) {
2022       BlockWeights[Header] = BlockWeights[BB];
2023     }
2024   }
2025 
2026   // Before propagation starts, build, for each block, a list of
2027   // unique predecessors and successors. This is necessary to handle
2028   // identical edges in multiway branches. Since we visit all blocks and all
2029   // edges of the CFG, it is cleaner to build these lists once at the start
2030   // of the pass.
2031   buildEdges(F);
2032 
2033   // Propagate until we converge or we go past the iteration limit.
2034   while (Changed && I++ < SampleProfileMaxPropagateIterations) {
2035     Changed = propagateThroughEdges(F, false);
2036   }
2037 
  // The first propagation pass propagates BB counts from annotated BBs to
  // unknown BBs. The 2nd propagation pass resets edge weights and uses all BB
  // weights to propagate edge weights.
2041   VisitedEdges.clear();
2042   Changed = true;
2043   while (Changed && I++ < SampleProfileMaxPropagateIterations) {
2044     Changed = propagateThroughEdges(F, false);
2045   }
2046 
  // The 3rd propagation pass allows adjusting annotated BB weights that are
  // obviously wrong.
2049   Changed = true;
2050   while (Changed && I++ < SampleProfileMaxPropagateIterations) {
2051     Changed = propagateThroughEdges(F, true);
2052   }
2053 }
2054 
2055 /// Generate branch weight metadata for all branches in \p F.
2056 ///
2057 /// Branch weights are computed out of instruction samples using a
2058 /// propagation heuristic. Propagation proceeds in 3 phases:
2059 ///
2060 /// 1- Assignment of block weights. All the basic blocks in the function
///    are initially assigned the same weight as their most frequently
2062 ///    executed instruction.
2063 ///
2064 /// 2- Creation of equivalence classes. Since samples may be missing from
2065 ///    blocks, we can fill in the gaps by setting the weights of all the
2066 ///    blocks in the same equivalence class to the same weight. To compute
2067 ///    the concept of equivalence, we use dominance and loop information.
2068 ///    Two blocks B1 and B2 are in the same equivalence class if B1
2069 ///    dominates B2, B2 post-dominates B1 and both are in the same loop.
2070 ///
2071 /// 3- Propagation of block weights into edges. This uses a simple
2072 ///    propagation heuristic. The following rules are applied to every
2073 ///    block BB in the CFG:
2074 ///
2075 ///    - If BB has a single predecessor/successor, then the weight
2076 ///      of that edge is the weight of the block.
2077 ///
2078 ///    - If all the edges are known except one, and the weight of the
2079 ///      block is already known, the weight of the unknown edge will
2080 ///      be the weight of the block minus the sum of all the known
2081 ///      edges. If the sum of all the known edges is larger than BB's weight,
2082 ///      we set the unknown edge weight to zero.
2083 ///
2084 ///    - If there is a self-referential edge, and the weight of the block is
2085 ///      known, the weight for that edge is set to the weight of the block
2086 ///      minus the weight of the other incoming edges to that block (if
2087 ///      known).
2088 ///
2089 /// Since this propagation is not guaranteed to finalize for every CFG, we
2090 /// only allow it to proceed for a limited number of iterations (controlled
2091 /// by -sample-profile-max-propagate-iterations).
2092 ///
2093 /// FIXME: Try to replace this propagation heuristic with a scheme
2094 /// that is guaranteed to finalize. A work-list approach similar to
2095 /// the standard value propagation algorithm used by SSA-CCP might
2096 /// work here.
2097 ///
2098 /// \param F The function to query.
2099 ///
2100 /// \returns true if \p F was modified. Returns false, otherwise.
2101 bool SampleProfileLoaderBaseImpl::computeAndPropagateWeights(
2102     Function &F, const DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
2103   bool Changed = (InlinedGUIDs.size() != 0);
2104 
2105   // Compute basic block weights.
2106   Changed |= computeBlockWeights(F);
2107 
2108   if (Changed) {
2109     // Add an entry count to the function using the samples gathered at the
2110     // function entry.
2111     // Sets the GUIDs that are inlined in the profiled binary. This is used
    // by ThinLink to perform correct liveness analysis, and also to make the
    // IR match the profiled binary before annotation.
2114     F.setEntryCount(
2115         ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
2116         &InlinedGUIDs);
2117 
2118     // Compute dominance and loop info needed for propagation.
2119     computeDominanceAndLoopInfo(F);
2120 
2121     // Find equivalence classes.
2122     findEquivalenceClasses(F);
2123 
2124     // Propagate weights to all edges.
2125     propagateWeights(F);
2126   }
2127 
2128   return Changed;
2129 }
2130 
2131 void SampleProfileLoaderBaseImpl::emitCoverageRemarks(Function &F) {
2132   // If coverage checking was requested, compute it now.
2133   if (SampleProfileRecordCoverage) {
2134     unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
2135     unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
2136     unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2137     if (Coverage < SampleProfileRecordCoverage) {
2138       F.getContext().diagnose(DiagnosticInfoSampleProfile(
2139           F.getSubprogram()->getFilename(), getFunctionLoc(F),
2140           Twine(Used) + " of " + Twine(Total) + " available profile records (" +
2141               Twine(Coverage) + "%) were applied",
2142           DS_Warning));
2143     }
2144   }
2145 
2146   if (SampleProfileSampleCoverage) {
2147     uint64_t Used = CoverageTracker.getTotalUsedSamples();
2148     uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
2149     unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2150     if (Coverage < SampleProfileSampleCoverage) {
2151       F.getContext().diagnose(DiagnosticInfoSampleProfile(
2152           F.getSubprogram()->getFilename(), getFunctionLoc(F),
2153           Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
2154               Twine(Coverage) + "%) were applied",
2155           DS_Warning));
2156     }
2157   }
2158 }
2159 
2160 // Generate MD_prof metadata for every branch instruction using the
2161 // edge weights computed during propagation.
2162 void SampleProfileLoader::generateMDProfMetadata(Function &F) {
2165   LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
2166   LLVMContext &Ctx = F.getContext();
2167   MDBuilder MDB(Ctx);
2168   for (auto &BI : F) {
2169     BasicBlock *BB = &BI;
2170 
2171     if (BlockWeights[BB]) {
2172       for (auto &I : BB->getInstList()) {
2173         if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
2174           continue;
2175         if (!cast<CallBase>(I).getCalledFunction()) {
2176           const DebugLoc &DLoc = I.getDebugLoc();
2177           if (!DLoc)
2178             continue;
2179           const DILocation *DIL = DLoc;
2180           const FunctionSamples *FS = findFunctionSamples(I);
2181           if (!FS)
2182             continue;
2183           auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
2184           auto T = FS->findCallTargetMapAt(CallSite);
2185           if (!T || T.get().empty())
2186             continue;
2187           // Prorate the callsite counts to reflect what is already done to the
          // callsite, such as ICP or callsite cloning.
2189           if (FunctionSamples::ProfileIsProbeBased) {
2190             if (Optional<PseudoProbe> Probe = extractProbe(I)) {
2191               if (Probe->Factor < 1)
2192                 T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor);
2193             }
2194           }
2195           SmallVector<InstrProfValueData, 2> SortedCallTargets =
2196               GetSortedValueDataFromCallTargets(T.get());
2197           uint64_t Sum;
2198           findIndirectCallFunctionSamples(I, Sum);
2199           annotateValueSite(*I.getParent()->getParent()->getParent(), I,
2200                             SortedCallTargets, Sum, IPVK_IndirectCallTarget,
2201                             SortedCallTargets.size());
2202         } else if (!isa<IntrinsicInst>(&I)) {
2203           I.setMetadata(LLVMContext::MD_prof,
2204                         MDB.createBranchWeights(
2205                             {static_cast<uint32_t>(BlockWeights[BB])}));
2206         }
2207       }
2208     }
2209     Instruction *TI = BB->getTerminator();
2210     if (TI->getNumSuccessors() == 1)
2211       continue;
2212     if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
2213       continue;
2214 
2215     DebugLoc BranchLoc = TI->getDebugLoc();
2216     LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
2217                       << ((BranchLoc) ? Twine(BranchLoc.getLine())
2218                                       : Twine("<UNKNOWN LOCATION>"))
2219                       << ".\n");
2220     SmallVector<uint32_t, 4> Weights;
2221     uint32_t MaxWeight = 0;
2222     Instruction *MaxDestInst;
2223     for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
2224       BasicBlock *Succ = TI->getSuccessor(I);
2225       Edge E = std::make_pair(BB, Succ);
2226       uint64_t Weight = EdgeWeights[E];
2227       LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
2228       // Use uint32_t saturated arithmetic to adjust the incoming weights,
2229       // if needed. Sample counts in profiles are 64-bit unsigned values,
2230       // but internally branch weights are expressed as 32-bit values.
2231       if (Weight > std::numeric_limits<uint32_t>::max()) {
2232         LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
2233         Weight = std::numeric_limits<uint32_t>::max();
2234       }
2235       // Weight is added by one to avoid propagation errors introduced by
2236       // 0 weights.
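      // For example (hypothetical weights): raw edge weights {0, 299} are
      // recorded in the branch_weights metadata as {1, 300}.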
2237       Weights.push_back(static_cast<uint32_t>(Weight + 1));
2238       if (Weight != 0) {
2239         if (Weight > MaxWeight) {
2240           MaxWeight = Weight;
2241           MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
2242         }
2243       }
2244     }
2245 
2246     uint64_t TempWeight;
2247     // Only set weights if there is at least one non-zero weight.
2248     // In any other case, let the analyzer set weights.
2249     // Do not set weights if the weights are present. In ThinLTO, the profile
2250     // annotation is done twice. If the first annotation already set the
2251     // weights, the second pass does not need to set it.
2252     if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
2253       LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
2254       TI->setMetadata(LLVMContext::MD_prof,
2255                       MDB.createBranchWeights(Weights));
2256       ORE->emit([&]() {
2257         return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
2258                << "most popular destination for conditional branches at "
2259                << ore::NV("CondBranchesLoc", BranchLoc);
2260       });
2261     } else {
2262       LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
2263     }
2264   }
2265 }
2266 
2267 /// Get the line number for the function header.
2268 ///
2269 /// This looks up function \p F in the current compilation unit and
2270 /// retrieves the line number where the function is defined. This is
2271 /// line 0 for all the samples read from the profile file. Every line
2272 /// number is relative to this line.
2273 ///
2274 /// \param F  Function object to query.
2275 ///
2276 /// \returns the line number where \p F is defined. If it returns 0,
2277 ///          it means that there is no debug information available for \p F.
2278 unsigned SampleProfileLoaderBaseImpl::getFunctionLoc(Function &F) {
2279   if (DISubprogram *S = F.getSubprogram())
2280     return S->getLine();
2281 
2282   if (NoWarnSampleUnused)
2283     return 0;
2284 
2285   // If the start of \p F is missing, emit a diagnostic to inform the user
2286   // about the missed opportunity.
2287   F.getContext().diagnose(DiagnosticInfoSampleProfile(
2288       "No debug information found in function " + F.getName() +
2289           ": Function profile not used",
2290       DS_Warning));
2291   return 0;
2292 }
2293 
2294 void SampleProfileLoaderBaseImpl::computeDominanceAndLoopInfo(Function &F) {
2295   DT.reset(new DominatorTree);
2296   DT->recalculate(F);
2297 
2298   PDT.reset(new PostDominatorTree(F));
2299 
2300   LI.reset(new LoopInfo);
2301   LI->analyze(*DT);
2302 }
2303 
2304 /// Once all the branch weights are computed, we emit the MD_prof
2305 /// metadata on BB using the computed values for each of its branches.
2306 ///
2307 /// \param F The function to query.
2308 ///
2309 /// \returns true if \p F was modified. Returns false, otherwise.
2310 bool SampleProfileLoader::emitAnnotations(Function &F) {
2311   bool Changed = false;
2312 
2313   if (FunctionSamples::ProfileIsProbeBased) {
2314     if (!ProbeManager->profileIsValid(F, *Samples)) {
2315       LLVM_DEBUG(
2316           dbgs() << "Profile is invalid due to CFG mismatch for Function "
2317                  << F.getName());
2318       ++NumMismatchedProfile;
2319       return false;
2320     }
2321     ++NumMatchedProfile;
2322   } else {
2323     if (getFunctionLoc(F) == 0)
2324       return false;
2325 
2326     LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
2327                       << F.getName() << ": " << getFunctionLoc(F) << "\n");
2328   }
2329 
2330   DenseSet<GlobalValue::GUID> InlinedGUIDs;
2331   if (ProfileIsCS && CallsitePrioritizedInline)
2332     Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs);
2333   else
2334     Changed |= inlineHotFunctions(F, InlinedGUIDs);
2335 
2336   Changed |= computeAndPropagateWeights(F, InlinedGUIDs);
2337 
2338   if (Changed)
2339     generateMDProfMetadata(F);
2340 
2341   emitCoverageRemarks(F);
2342   return Changed;
2343 }
2344 
2345 char SampleProfileLoaderLegacyPass::ID = 0;
2346 
2347 INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
2348                       "Sample Profile loader", false, false)
2349 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2350 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
2351 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2352 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
2353 INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
2354                     "Sample Profile loader", false, false)
2355 
2356 // Add inlined profile call edges to the call graph.
2357 void SampleProfileLoader::addCallGraphEdges(CallGraph &CG,
2358                                             const FunctionSamples &Samples) {
2359   Function *Caller = SymbolMap.lookup(Samples.getFuncName());
2360   if (!Caller || Caller->isDeclaration())
2361     return;
2362 
  // Skip non-inlined call edges, which are not important here, since top-down
  // inlining for non-CS profiles is meant to get more precise profile
  // matching, not to enable more inlining.
2366 
2367   for (const auto &CallsiteSamples : Samples.getCallsiteSamples()) {
2368     for (const auto &InlinedSamples : CallsiteSamples.second) {
2369       Function *Callee = SymbolMap.lookup(InlinedSamples.first);
2370       if (Callee && !Callee->isDeclaration())
2371         CG[Caller]->addCalledFunction(nullptr, CG[Callee]);
2372       addCallGraphEdges(CG, InlinedSamples.second);
2373     }
2374   }
2375 }
2376 
2377 // Replace call graph edges with dynamic call edges from the profile.
2378 void SampleProfileLoader::replaceCallGraphEdges(
2379     CallGraph &CG, StringMap<Function *> &SymbolMap) {
2380   // Remove static call edges from the call graph except for the ones from the
2381   // root which make the call graph connected.
2382   for (const auto &Node : CG)
2383     if (Node.second.get() != CG.getExternalCallingNode())
2384       Node.second->removeAllCalledFunctions();
2385 
2386   // Add profile call edges to the call graph.
2387   if (ProfileIsCS) {
2388     ContextTracker->addCallGraphEdges(CG, SymbolMap);
2389   } else {
2390     for (const auto &Samples : Reader->getProfiles())
2391       addCallGraphEdges(CG, Samples.second);
2392   }
2393 }
2394 
2395 std::vector<Function *>
2396 SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
2397   std::vector<Function *> FunctionOrderList;
2398   FunctionOrderList.reserve(M.size());
2399 
2400   if (!ProfileTopDownLoad || CG == nullptr) {
2401     if (ProfileMergeInlinee) {
      // Disable ProfileMergeInlinee if the profile is not loaded in top-down
      // order, because the profile for a function may be used for the profile
      // annotation of its outline copy before the profile merging of its
      // non-inlined inline instances, which is not how ProfileMergeInlinee is
      // supposed to work.
2407       ProfileMergeInlinee = false;
2408     }
2409 
2410     for (Function &F : M)
2411       if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
2412         FunctionOrderList.push_back(&F);
2413     return FunctionOrderList;
2414   }
2415 
2416   assert(&CG->getModule() == &M);
2417 
2418   // Add indirect call edges from profile to augment the static call graph.
2419   // Functions will be processed in a top-down order defined by the static call
2420   // graph. Adjusting the order by considering indirect call edges from the
2421   // profile (which don't exist in the static call graph) can enable the
2422   // inlining of indirect call targets by processing the caller before them.
2423   // TODO: enable this for non-CS profile and fix the counts returning logic to
2424   // have a full support for indirect calls.
2425   if (UseProfileIndirectCallEdges && ProfileIsCS) {
2426     for (auto &Entry : *CG) {
2427       const auto *F = Entry.first;
2428       if (!F || F->isDeclaration() || !F->hasFnAttribute("use-sample-profile"))
2429         continue;
2430       auto &AllContexts = ContextTracker->getAllContextSamplesFor(F->getName());
2431       if (AllContexts.empty())
2432         continue;
2433 
2434       for (const auto &BB : *F) {
2435         for (const auto &I : BB.getInstList()) {
2436           const auto *CB = dyn_cast<CallBase>(&I);
2437           if (!CB || !CB->isIndirectCall())
2438             continue;
2439           const DebugLoc &DLoc = I.getDebugLoc();
2440           if (!DLoc)
2441             continue;
2442           auto CallSite = FunctionSamples::getCallSiteIdentifier(DLoc);
2443           for (FunctionSamples *Samples : AllContexts) {
2444             if (auto CallTargets = Samples->findCallTargetMapAt(CallSite)) {
2445               for (const auto &Target : CallTargets.get()) {
2446                 Function *Callee = SymbolMap.lookup(Target.first());
2447                 if (Callee && !Callee->isDeclaration())
2448                   Entry.second->addCalledFunction(nullptr, (*CG)[Callee]);
2449               }
2450             }
2451           }
2452         }
2453       }
2454     }
2455   }
2456 
  // Compute a top-down order from the profile, which is later used to sort
  // functions within one SCC. The static processing order computed for an SCC
  // may not reflect the call contexts in the context-sensitive profile, and
  // thus may cause potential inlining to be overlooked. The function order
  // within one SCC is therefore adjusted to a top-down order based on the
  // profile to favor more inlining.
2462   DenseMap<Function *, uint64_t> ProfileOrderMap;
2463   if (UseProfileTopDownOrder ||
2464       (ProfileIsCS && !UseProfileTopDownOrder.getNumOccurrences())) {
2465     // Create a static call graph. The call edges are not important since they
2466     // will be replaced by dynamic edges from the profile.
2467     CallGraph ProfileCG(M);
2468     replaceCallGraphEdges(ProfileCG, SymbolMap);
2469     scc_iterator<CallGraph *> CGI = scc_begin(&ProfileCG);
2470     uint64_t I = 0;
2471     while (!CGI.isAtEnd()) {
2472       for (CallGraphNode *Node : *CGI) {
2473         if (auto *F = Node->getFunction())
2474           ProfileOrderMap[F] = ++I;
2475       }
2476       ++CGI;
2477     }
2478   }
2479 
2480   scc_iterator<CallGraph *> CGI = scc_begin(CG);
2481   while (!CGI.isAtEnd()) {
2482     uint64_t Start = FunctionOrderList.size();
2483     for (CallGraphNode *Node : *CGI) {
2484       auto *F = Node->getFunction();
2485       if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
2486         FunctionOrderList.push_back(F);
2487     }
2488 
2489     // Sort nodes in SCC based on the profile top-down order.
2490     if (!ProfileOrderMap.empty()) {
2491       std::stable_sort(FunctionOrderList.begin() + Start,
2492                        FunctionOrderList.end(),
2493                        [&ProfileOrderMap](Function *Left, Function *Right) {
2494                          return ProfileOrderMap[Left] < ProfileOrderMap[Right];
2495                        });
2496     }
2497 
2498     ++CGI;
2499   }
2500 
2501   LLVM_DEBUG({
2502     dbgs() << "Function processing order:\n";
2503     for (auto F : reverse(FunctionOrderList)) {
2504       dbgs() << F->getName() << "\n";
2505     }
2506   });
2507 
2508   std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
2509   return FunctionOrderList;
2510 }
2511 
2512 bool SampleProfileLoader::doInitialization(Module &M,
2513                                            FunctionAnalysisManager *FAM) {
2514   auto &Ctx = M.getContext();
2515 
2516   auto ReaderOrErr =
2517       SampleProfileReader::create(Filename, Ctx, RemappingFilename);
2518   if (std::error_code EC = ReaderOrErr.getError()) {
2519     std::string Msg = "Could not open profile: " + EC.message();
2520     Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2521     return false;
2522   }
2523   Reader = std::move(ReaderOrErr.get());
2524   Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink);
2525   Reader->collectFuncsFrom(M);
2526   if (std::error_code EC = Reader->read()) {
2527     std::string Msg = "profile reading failed: " + EC.message();
2528     Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2529     return false;
2530   }
2531 
2532   PSL = Reader->getProfileSymbolList();
2533 
  // When profile-sample-accurate is on, ignore the symbol list.
2535   ProfAccForSymsInList =
2536       ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
2537   if (ProfAccForSymsInList) {
2538     NamesInProfile.clear();
2539     if (auto NameTable = Reader->getNameTable())
2540       NamesInProfile.insert(NameTable->begin(), NameTable->end());
2541     CoverageTracker.setProfAccForSymsInList(true);
2542   }
2543 
  if (FAM && !ProfileInlineReplayFile.empty()) {
    ExternalInlineAdvisor = std::make_unique<ReplayInlineAdvisor>(
        M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr, ProfileInlineReplayFile,
        /*EmitRemarks=*/false);
    if (!ExternalInlineAdvisor->areReplayRemarksLoaded())
      ExternalInlineAdvisor.reset();
  }

  // Apply tweaks if a context-sensitive profile is available.
  if (Reader->profileIsCS()) {
    ProfileIsCS = true;
    FunctionSamples::ProfileIsCS = true;

    // Enable the priority-based inliner and size-based inlining by default
    // for CSSPGO.
    if (!ProfileSizeInline.getNumOccurrences())
      ProfileSizeInline = true;
    if (!CallsitePrioritizedInline.getNumOccurrences())
      CallsitePrioritizedInline = true;

    // Tracker for profiles under different contexts.
    ContextTracker =
        std::make_unique<SampleContextTracker>(Reader->getProfiles());
  }

  // Load pseudo probe descriptors for probe-based function samples.
  if (Reader->profileIsProbeBased()) {
    ProbeManager = std::make_unique<PseudoProbeManager>(M);
    if (!ProbeManager->moduleIsProbed(M)) {
      const char *Msg =
          "Pseudo-probe-based profile requires SampleProfileProbePass";
      Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
      return false;
    }
  }

  return true;
}

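// Factory functions for the legacy pass manager.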
ModulePass *llvm::createSampleProfileLoaderPass() {
  return new SampleProfileLoaderLegacyPass();
}

ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
  return new SampleProfileLoaderLegacyPass(Name);
}

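// Annotate every function in the module with its sample profile: attach the
// module-level profile summary, build the symbol map used to resolve profile
// names to functions, and then run the per-function annotation in the order
// computed by buildFunctionOrder.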
bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
                                      ProfileSummaryInfo *_PSI, CallGraph *CG) {
  GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);

  PSI = _PSI;
  if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
    M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
                        ProfileSummary::PSK_Sample);
    PSI->refresh();
  }
  // Compute the total number of samples collected in this profile.
  for (const auto &I : Reader->getProfiles())
    TotalCollectedSamples += I.second.getTotalSamples();

  auto Remapper = Reader->getRemapper();
  // Populate the symbol map.
  for (const auto &N_F : M.getValueSymbolTable()) {
    StringRef OrigName = N_F.getKey();
    Function *F = dyn_cast<Function>(N_F.getValue());
    if (F == nullptr)
      continue;
    SymbolMap[OrigName] = F;
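    // Also register the name with everything after the first '.' stripped
    // (e.g. "foo.llvm.1234" is additionally mapped to "foo").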
    auto pos = OrigName.find('.');
    if (pos != StringRef::npos) {
      StringRef NewName = OrigName.substr(0, pos);
      auto r = SymbolMap.insert(std::make_pair(NewName, F));
      // Failing to insert means there is already an entry in SymbolMap,
      // thus there are multiple functions that are mapped to the same
      // stripped name. In the case of such a name conflict, set the value
      // to nullptr to avoid confusion.
      if (!r.second)
        r.first->second = nullptr;
      OrigName = NewName;
    }
    // Insert the remapped names into SymbolMap.
    if (Remapper) {
      if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
        if (*MapName == OrigName)
          continue;
        SymbolMap.insert(std::make_pair(*MapName, F));
      }
    }
  }

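  // Process each function in the computed order, clearing per-function state
  // between iterations.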
  bool retval = false;
  for (auto F : buildFunctionOrder(M, CG)) {
    assert(!F->isDeclaration());
    clearFunctionData();
    retval |= runOnFunction(*F, AM);
  }

  // Account for cold calls that were not inlined.
  if (!ProfileIsCS)
    for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
         notInlinedCallInfo)
      updateProfileCallee(pair.first, pair.second.entryCount);

  return retval;
}

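// Legacy pass manager entry point: gather the required analyses and forward
// to the shared SampleProfileLoader implementation.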
bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
  ACT = &getAnalysis<AssumptionCacheTracker>();
  TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
  TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
}

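// Annotate a single function: pick an initial entry count based on the
// accuracy-related options, fetch the function's samples (context-sensitive
// or flat), and emit the branch weight annotations if samples are present.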
bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
  LLVM_DEBUG(dbgs() << "\n\nProcessing Function " << F.getName() << "\n");
  DILocation2SampleMap.clear();
  // By default the entry count is initialized to -1, which will be treated
  // conservatively by getEntryCount as the same as unknown (None). This is
  // to avoid newly added code being treated as cold. If we have samples,
  // this will be overwritten in emitAnnotations.
  uint64_t initialEntryCount = -1;

  ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
  if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
    // Initialize all the function entry counts to 0. This means all
    // functions without a profile will be regarded as cold.
    initialEntryCount = 0;
    // profile-sample-accurate is a user assertion which takes precedence
    // over the symbol list. When it is on, ignore the symbol list.
    ProfAccForSymsInList = false;
  }
  CoverageTracker.setProfAccForSymsInList(ProfAccForSymsInList);

  // PSL -- the profile symbol list includes all symbols in the sampled
  // binary. If ProfileAccurateForSymsInList is enabled, PSL is used to treat
  // old functions without samples as cold, without having to worry about new
  // and hot functions being mistakenly treated as cold.
  if (ProfAccForSymsInList) {
    // Initialize the entry count to 0 for functions in the list.
    if (PSL->contains(F.getName()))
      initialEntryCount = 0;

    // A function that is in the symbol list but has no samples will be
    // regarded as cold. To minimize the potential negative performance
    // impact this could have, we want to be a little conservative here: if
    // a function shows up in the profile at all -- as an outline function,
    // an inline instance, or a call target -- treat it as not cold. This
    // handles cases where most callsites of a function are inlined in the
    // sampled binary but not in the current build (because of source drift,
    // imprecise debug information, or callsites that are individually cold
    // but not cold in aggregate), so an outline function that looks cold in
    // the sampled binary may actually not be cold in the current build.
    StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
    if (NamesInProfile.count(CanonName))
      initialEntryCount = -1;
  }

  // Initialize the entry count when the function has no existing entry
  // count value.
  if (!F.getEntryCount().hasValue())
    F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
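  // Obtain an OptimizationRemarkEmitter: from the analysis manager under the
  // new pass manager, or a locally owned one under the legacy pass manager.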
  std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
  if (AM) {
    auto &FAM =
        AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
            .getManager();
    ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  } else {
    OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
    ORE = OwnedORE.get();
  }

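  // For CSSPGO, fetch the base profile for this function from the context
  // tracker; otherwise look the samples up directly in the reader.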
  if (ProfileIsCS)
    Samples = ContextTracker->getBaseSamplesFor(F);
  else
    Samples = Reader->getSamplesFor(F);

  if (Samples && !Samples->empty())
    return emitAnnotations(F);
  return false;
}

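// New pass manager entry point: wire up per-function analysis getters,
// initialize the loader, and run it over the module.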
PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

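  // Thin wrappers that let the loader query per-function analyses through
  // the FunctionAnalysisManager.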
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

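  // File names passed to the pass constructor take precedence; fall back to
  // the command-line options when they are empty.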
  SampleProfileLoader SampleLoader(
      ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
      ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
                                       : ProfileRemappingFileName,
      LTOPhase, GetAssumptionCache, GetTTI, GetTLI);

  if (!SampleLoader.doInitialization(M, &FAM))
    return PreservedAnalyses::all();

  ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
  CallGraph &CG = AM.getResult<CallGraphAnalysis>(M);
  if (!SampleLoader.runOnModule(M, &AM, PSI, &CG))
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}
