1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the SampleProfileLoader transformation. This pass
10 // reads a profile file generated by a sampling profiler (e.g. Linux Perf -
11 // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
12 // profile information in the given profile.
13 //
14 // This pass generates branch weight annotations on the IR:
15 //
16 // - prof: Represents branch weights. This annotation is added to branches
17 // to indicate the weights of each edge coming out of the branch.
18 // The weight of each edge is the weight of the target block for
19 // that edge. The weight of a block B is computed as the maximum
20 // number of samples found in B.
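//              As an illustration (the counts below are hypothetical), a
//              conditional branch annotated by this pass could look like:
//
//                br i1 %cmp, label %if.then, label %if.else, !prof !0
//                !0 = !{!"branch_weights", i32 90, i32 10}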
21 //
22 //===----------------------------------------------------------------------===//
23
24 #include "llvm/Transforms/IPO/SampleProfile.h"
25 #include "llvm/ADT/ArrayRef.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/DenseSet.h"
28 #include "llvm/ADT/PriorityQueue.h"
29 #include "llvm/ADT/SCCIterator.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/StringMap.h"
33 #include "llvm/ADT/StringRef.h"
34 #include "llvm/ADT/Twine.h"
35 #include "llvm/Analysis/AssumptionCache.h"
36 #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
37 #include "llvm/Analysis/CallGraph.h"
38 #include "llvm/Analysis/InlineAdvisor.h"
39 #include "llvm/Analysis/InlineCost.h"
40 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
41 #include "llvm/Analysis/ProfileSummaryInfo.h"
42 #include "llvm/Analysis/ReplayInlineAdvisor.h"
43 #include "llvm/Analysis/TargetLibraryInfo.h"
44 #include "llvm/Analysis/TargetTransformInfo.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/DebugLoc.h"
47 #include "llvm/IR/DiagnosticInfo.h"
48 #include "llvm/IR/Function.h"
49 #include "llvm/IR/GlobalValue.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/LLVMContext.h"
55 #include "llvm/IR/MDBuilder.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/PassManager.h"
58 #include "llvm/IR/PseudoProbe.h"
59 #include "llvm/IR/ValueSymbolTable.h"
60 #include "llvm/InitializePasses.h"
61 #include "llvm/Pass.h"
62 #include "llvm/ProfileData/InstrProf.h"
63 #include "llvm/ProfileData/SampleProf.h"
64 #include "llvm/ProfileData/SampleProfReader.h"
65 #include "llvm/Support/Casting.h"
66 #include "llvm/Support/CommandLine.h"
67 #include "llvm/Support/Debug.h"
68 #include "llvm/Support/ErrorOr.h"
69 #include "llvm/Support/raw_ostream.h"
70 #include "llvm/Transforms/IPO.h"
71 #include "llvm/Transforms/IPO/ProfiledCallGraph.h"
72 #include "llvm/Transforms/IPO/SampleContextTracker.h"
73 #include "llvm/Transforms/IPO/SampleProfileProbe.h"
74 #include "llvm/Transforms/Instrumentation.h"
75 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
76 #include "llvm/Transforms/Utils/Cloning.h"
77 #include "llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h"
78 #include "llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h"
79 #include <algorithm>
80 #include <cassert>
81 #include <cstdint>
82 #include <functional>
83 #include <limits>
84 #include <map>
85 #include <memory>
86 #include <queue>
87 #include <string>
88 #include <system_error>
89 #include <utility>
90 #include <vector>
91
92 using namespace llvm;
93 using namespace sampleprof;
94 using namespace llvm::sampleprofutil;
95 using ProfileCount = Function::ProfileCount;
96 #define DEBUG_TYPE "sample-profile"
97 #define CSINLINE_DEBUG DEBUG_TYPE "-inline"
98
99 STATISTIC(NumCSInlined,
100 "Number of functions inlined with context sensitive profile");
101 STATISTIC(NumCSNotInlined,
102 "Number of functions not inlined with context sensitive profile");
103 STATISTIC(NumMismatchedProfile,
104 "Number of functions with CFG mismatched profile");
105 STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile");
106 STATISTIC(NumDuplicatedInlinesite,
107 "Number of inlined callsites with a partial distribution factor");
108
109 STATISTIC(NumCSInlinedHitMinLimit,
110 "Number of functions with FDO inline stopped due to min size limit");
111 STATISTIC(NumCSInlinedHitMaxLimit,
112 "Number of functions with FDO inline stopped due to max size limit");
113 STATISTIC(
114 NumCSInlinedHitGrowthLimit,
115 "Number of functions with FDO inline stopped due to growth size limit");
116
117 // Command line option to specify the file to read samples from. This is
118 // mainly used for debugging.
119 static cl::opt<std::string> SampleProfileFile(
120 "sample-profile-file", cl::init(""), cl::value_desc("filename"),
121 cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);
122
123 // The named file contains a set of transformations that may have been applied
124 // to the symbol names between the program from which the sample data was
125 // collected and the current program's symbols.
126 static cl::opt<std::string> SampleProfileRemappingFile(
127 "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
128 cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);
129
130 static cl::opt<bool> ProfileSampleAccurate(
131 "profile-sample-accurate", cl::Hidden, cl::init(false),
132 cl::desc("If the sample profile is accurate, we will mark all un-sampled "
133 "callsite and function as having 0 samples. Otherwise, treat "
134 "un-sampled callsites and functions conservatively as unknown. "));
135
136 static cl::opt<bool> ProfileSampleBlockAccurate(
137 "profile-sample-block-accurate", cl::Hidden, cl::init(false),
138 cl::desc("If the sample profile is accurate, we will mark all un-sampled "
139 "branches and calls as having 0 samples. Otherwise, treat "
140 "them conservatively as unknown. "));
141
142 static cl::opt<bool> ProfileAccurateForSymsInList(
143 "profile-accurate-for-symsinlist", cl::Hidden, cl::init(true),
144 cl::desc("For symbols in profile symbol list, regard their profiles to "
145 "be accurate. It may be overriden by profile-sample-accurate. "));
146
147 static cl::opt<bool> ProfileMergeInlinee(
148 "sample-profile-merge-inlinee", cl::Hidden, cl::init(true),
149 cl::desc("Merge past inlinee's profile to outline version if sample "
150 "profile loader decided not to inline a call site. It will "
151 "only be enabled when top-down order of profile loading is "
152 "enabled. "));
153
154 static cl::opt<bool> ProfileTopDownLoad(
155 "sample-profile-top-down-load", cl::Hidden, cl::init(true),
156 cl::desc("Do profile annotation and inlining for functions in top-down "
157 "order of call graph during sample profile loading. It only "
158 "works for new pass manager. "));
159
160 static cl::opt<bool>
161 UseProfiledCallGraph("use-profiled-call-graph", cl::init(true), cl::Hidden,
162 cl::desc("Process functions in a top-down order "
163 "defined by the profiled call graph when "
164 "-sample-profile-top-down-load is on."));
165 cl::opt<bool>
166 SortProfiledSCC("sort-profiled-scc-member", cl::init(true), cl::Hidden,
167 cl::desc("Sort profiled recursion by edge weights."));
168
169 static cl::opt<bool> ProfileSizeInline(
170 "sample-profile-inline-size", cl::Hidden, cl::init(false),
171 cl::desc("Inline cold call sites in profile loader if it's beneficial "
172 "for code size."));
173
174 // Since profiles are consumed by many passes, turning on this option has
175 // side effects. For instance, pre-link SCC inliner would see merged profiles
176 // and inline the hot functions (that are skipped in this pass).
177 static cl::opt<bool> DisableSampleLoaderInlining(
178 "disable-sample-loader-inlining", cl::Hidden, cl::init(false),
179 cl::desc("If true, artifically skip inline transformation in sample-loader "
180 "pass, and merge (or scale) profiles (as configured by "
181 "--sample-profile-merge-inlinee)."));
182
183 cl::opt<int> ProfileInlineGrowthLimit(
184 "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12),
185 cl::desc("The size growth ratio limit for proirity-based sample profile "
186 "loader inlining."));
187
188 cl::opt<int> ProfileInlineLimitMin(
189 "sample-profile-inline-limit-min", cl::Hidden, cl::init(100),
190 cl::desc("The lower bound of size growth limit for "
191 "proirity-based sample profile loader inlining."));
192
193 cl::opt<int> ProfileInlineLimitMax(
194 "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000),
195 cl::desc("The upper bound of size growth limit for "
196 "proirity-based sample profile loader inlining."));
197
198 cl::opt<int> SampleHotCallSiteThreshold(
199 "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000),
200 cl::desc("Hot callsite threshold for proirity-based sample profile loader "
201 "inlining."));
202
203 cl::opt<int> SampleColdCallSiteThreshold(
204 "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45),
205 cl::desc("Threshold for inlining cold callsites"));
206
207 static cl::opt<unsigned> ProfileICPRelativeHotness(
208 "sample-profile-icp-relative-hotness", cl::Hidden, cl::init(25),
209 cl::desc(
210 "Relative hotness percentage threshold for indirect "
211 "call promotion in proirity-based sample profile loader inlining."));
212
213 static cl::opt<unsigned> ProfileICPRelativeHotnessSkip(
214 "sample-profile-icp-relative-hotness-skip", cl::Hidden, cl::init(1),
215 cl::desc(
216 "Skip relative hotness check for ICP up to given number of targets."));
217
218 static cl::opt<bool> CallsitePrioritizedInline(
219 "sample-profile-prioritized-inline", cl::Hidden,
220
221 cl::desc("Use call site prioritized inlining for sample profile loader."
222 "Currently only CSSPGO is supported."));
223
224 static cl::opt<bool> UsePreInlinerDecision(
225 "sample-profile-use-preinliner", cl::Hidden,
226
227 cl::desc("Use the preinliner decisions stored in profile context."));
228
229 static cl::opt<bool> AllowRecursiveInline(
230 "sample-profile-recursive-inline", cl::Hidden,
231
232 cl::desc("Allow sample loader inliner to inline recursive calls."));
233
234 static cl::opt<std::string> ProfileInlineReplayFile(
235 "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"),
236 cl::desc(
237 "Optimization remarks file containing inline remarks to be replayed "
238 "by inlining from sample profile loader."),
239 cl::Hidden);
240
241 static cl::opt<ReplayInlinerSettings::Scope> ProfileInlineReplayScope(
242 "sample-profile-inline-replay-scope",
243 cl::init(ReplayInlinerSettings::Scope::Function),
244 cl::values(clEnumValN(ReplayInlinerSettings::Scope::Function, "Function",
245 "Replay on functions that have remarks associated "
246 "with them (default)"),
247 clEnumValN(ReplayInlinerSettings::Scope::Module, "Module",
248 "Replay on the entire module")),
249 cl::desc("Whether inline replay should be applied to the entire "
250 "Module or just the Functions (default) that are present as "
251 "callers in remarks during sample profile inlining."),
252 cl::Hidden);
253
254 static cl::opt<ReplayInlinerSettings::Fallback> ProfileInlineReplayFallback(
255 "sample-profile-inline-replay-fallback",
256 cl::init(ReplayInlinerSettings::Fallback::Original),
257 cl::values(
258 clEnumValN(
259 ReplayInlinerSettings::Fallback::Original, "Original",
260 "All decisions not in replay send to original advisor (default)"),
261 clEnumValN(ReplayInlinerSettings::Fallback::AlwaysInline,
262 "AlwaysInline", "All decisions not in replay are inlined"),
263 clEnumValN(ReplayInlinerSettings::Fallback::NeverInline, "NeverInline",
264 "All decisions not in replay are not inlined")),
265 cl::desc("How sample profile inline replay treats sites that don't come "
266 "from the replay. Original: defers to original advisor, "
267 "AlwaysInline: inline all sites not in replay, NeverInline: "
268 "inline no sites not in replay"),
269 cl::Hidden);
270
271 static cl::opt<CallSiteFormat::Format> ProfileInlineReplayFormat(
272 "sample-profile-inline-replay-format",
273 cl::init(CallSiteFormat::Format::LineColumnDiscriminator),
274 cl::values(
275 clEnumValN(CallSiteFormat::Format::Line, "Line", "<Line Number>"),
276 clEnumValN(CallSiteFormat::Format::LineColumn, "LineColumn",
277 "<Line Number>:<Column Number>"),
278 clEnumValN(CallSiteFormat::Format::LineDiscriminator,
279 "LineDiscriminator", "<Line Number>.<Discriminator>"),
280 clEnumValN(CallSiteFormat::Format::LineColumnDiscriminator,
281 "LineColumnDiscriminator",
282 "<Line Number>:<Column Number>.<Discriminator> (default)")),
283 cl::desc("How sample profile inline replay file is formatted"), cl::Hidden);
284
285 static cl::opt<unsigned>
286 MaxNumPromotions("sample-profile-icp-max-prom", cl::init(3), cl::Hidden,
287 cl::desc("Max number of promotions for a single indirect "
288 "call callsite in sample profile loader"));
289
290 static cl::opt<bool> OverwriteExistingWeights(
291 "overwrite-existing-weights", cl::Hidden, cl::init(false),
292 cl::desc("Ignore existing branch weights on IR and always overwrite."));
293
294 static cl::opt<bool> AnnotateSampleProfileInlinePhase(
295 "annotate-sample-profile-inline-phase", cl::Hidden, cl::init(false),
296 cl::desc("Annotate LTO phase (prelink / postlink), or main (no LTO) for "
297 "sample-profile inline pass name."));
298
299 extern cl::opt<bool> EnableExtTspBlockPlacement;
300
301 namespace {
302
303 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>;
304 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>;
305 using Edge = std::pair<const BasicBlock *, const BasicBlock *>;
306 using EdgeWeightMap = DenseMap<Edge, uint64_t>;
307 using BlockEdgeMap =
308 DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>;
309
310 class GUIDToFuncNameMapper {
311 public:
GUIDToFuncNameMapper(Module & M,SampleProfileReader & Reader,DenseMap<uint64_t,StringRef> & GUIDToFuncNameMap)312 GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
313 DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
314 : CurrentReader(Reader), CurrentModule(M),
315 CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
316 if (!CurrentReader.useMD5())
317 return;
318
319 for (const auto &F : CurrentModule) {
320 StringRef OrigName = F.getName();
321 CurrentGUIDToFuncNameMap.insert(
322 {Function::getGUID(OrigName), OrigName});
323
324 // Local-to-global variable promotion used by optimizations like ThinLTO
325 // will rename the variable and add a suffix like ".llvm.xxx" to the
326 // original local name. In the sample profile, the suffixes of function
327 // names are all stripped. Since it is possible that the mapper is
328 // built in the post-thin-link phase after variable promotion has been
329 // done, we need to add the substring of the function name without the
330 // suffix into the GUIDToFuncNameMap.
331 StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
332 if (CanonName != OrigName)
333 CurrentGUIDToFuncNameMap.insert(
334 {Function::getGUID(CanonName), CanonName});
335 }
336
337 // Update GUIDToFuncNameMap for each function including inlinees.
338 SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
339 }
340
341 ~GUIDToFuncNameMapper() {
342 if (!CurrentReader.useMD5())
343 return;
344
345 CurrentGUIDToFuncNameMap.clear();
346
347 // Reset GUIDToFuncNameMap for each function as the entries are no
348 // longer valid at this point.
349 SetGUIDToFuncNameMapForAll(nullptr);
350 }
351
352 private:
SetGUIDToFuncNameMapForAll(DenseMap<uint64_t,StringRef> * Map)353 void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
354 std::queue<FunctionSamples *> FSToUpdate;
355 for (auto &IFS : CurrentReader.getProfiles()) {
356 FSToUpdate.push(&IFS.second);
357 }
358
359 while (!FSToUpdate.empty()) {
360 FunctionSamples *FS = FSToUpdate.front();
361 FSToUpdate.pop();
362 FS->GUIDToFuncNameMap = Map;
363 for (const auto &ICS : FS->getCallsiteSamples()) {
364 const FunctionSamplesMap &FSMap = ICS.second;
365 for (auto &IFS : FSMap) {
366 FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
367 FSToUpdate.push(&FS);
368 }
369 }
370 }
371 }
372
373 SampleProfileReader &CurrentReader;
374 Module &CurrentModule;
375 DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
376 };
377
378 // Inline candidate used by iterative callsite prioritized inliner
379 struct InlineCandidate {
380 CallBase *CallInstr;
381 const FunctionSamples *CalleeSamples;
382 // Prorated callsite count, which will be used to guide inlining. For example,
383 // if a callsite is duplicated in LTO prelink, then in LTO postlink the two
384 // copies will get their own distribution factors and their prorated counts
385 // will be used to decide if they should be inlined independently.
386 uint64_t CallsiteCount;
387 // Call site distribution factor to prorate the profile samples for a
388 // duplicated callsite. Default value is 1.0.
389 float CallsiteDistribution;
390 };
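// As a hypothetical example of how the two fields above interact: a callsite
// that originally had 100 samples but was split into two copies with
// distribution factors 0.7 and 0.3 would carry prorated CallsiteCounts of
// roughly 70 and 30, and each copy is then considered for inlining
// independently.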
391
392 // Inline candidate comparer using call site weight
393 struct CandidateComparer {
394 bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) {
395 if (LHS.CallsiteCount != RHS.CallsiteCount)
396 return LHS.CallsiteCount < RHS.CallsiteCount;
397
398 const FunctionSamples *LCS = LHS.CalleeSamples;
399 const FunctionSamples *RCS = RHS.CalleeSamples;
400 assert(LCS && RCS && "Expect non-null FunctionSamples");
401
402 // Tie breaker using number of samples; try to favor smaller functions first.
403 if (LCS->getBodySamples().size() != RCS->getBodySamples().size())
404 return LCS->getBodySamples().size() > RCS->getBodySamples().size();
405
406 // Tie breaker using GUID so we have stable/deterministic inlining order
407 return LCS->getGUID(LCS->getName()) < RCS->getGUID(RCS->getName());
408 }
409 };
410
411 using CandidateQueue =
412 PriorityQueue<InlineCandidate, std::vector<InlineCandidate>,
413 CandidateComparer>;
414
415 /// Sample profile pass.
416 ///
417 /// This pass reads profile data from the file specified by
418 /// -sample-profile-file and annotates every affected function with the
419 /// profile information found in that file.
420 class SampleProfileLoader final
421 : public SampleProfileLoaderBaseImpl<BasicBlock> {
422 public:
423 SampleProfileLoader(
424 StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase,
425 std::function<AssumptionCache &(Function &)> GetAssumptionCache,
426 std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
427 std::function<const TargetLibraryInfo &(Function &)> GetTLI)
428 : SampleProfileLoaderBaseImpl(std::string(Name), std::string(RemapName)),
429 GetAC(std::move(GetAssumptionCache)),
430 GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
431 LTOPhase(LTOPhase),
432 AnnotatedPassName(AnnotateSampleProfileInlinePhase
433 ? llvm::AnnotateInlinePassName(InlineContext{
434 LTOPhase, InlinePass::SampleProfileInliner})
435 : CSINLINE_DEBUG) {}
436
437 bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
438 bool runOnModule(Module &M, ModuleAnalysisManager *AM,
439 ProfileSummaryInfo *_PSI, CallGraph *CG);
440
441 protected:
442 bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
443 bool emitAnnotations(Function &F);
444 ErrorOr<uint64_t> getInstWeight(const Instruction &I) override;
445 ErrorOr<uint64_t> getProbeWeight(const Instruction &I);
446 const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
447 const FunctionSamples *
448 findFunctionSamples(const Instruction &I) const override;
449 std::vector<const FunctionSamples *>
450 findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
451 void findExternalInlineCandidate(CallBase *CB, const FunctionSamples *Samples,
452 DenseSet<GlobalValue::GUID> &InlinedGUIDs,
453 const StringMap<Function *> &SymbolMap,
454 uint64_t Threshold);
455 // Attempt to promote indirect call and also inline the promoted call
456 bool tryPromoteAndInlineCandidate(
457 Function &F, InlineCandidate &Candidate, uint64_t SumOrigin,
458 uint64_t &Sum, SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
459
460 bool inlineHotFunctions(Function &F,
461 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
462 Optional<InlineCost> getExternalInlineAdvisorCost(CallBase &CB);
463 bool getExternalInlineAdvisorShouldInline(CallBase &CB);
464 InlineCost shouldInlineCandidate(InlineCandidate &Candidate);
465 bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB);
466 bool
467 tryInlineCandidate(InlineCandidate &Candidate,
468 SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
469 bool
470 inlineHotFunctionsWithPriority(Function &F,
471 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
472 // Inline cold/small functions in addition to hot ones
473 bool shouldInlineColdCallee(CallBase &CallInst);
474 void emitOptimizationRemarksForInlineCandidates(
475 const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
476 bool Hot);
477 void promoteMergeNotInlinedContextSamples(
478 DenseMap<CallBase *, const FunctionSamples *> NonInlinedCallSites,
479 const Function &F);
480 std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
481 std::unique_ptr<ProfiledCallGraph> buildProfiledCallGraph(CallGraph &CG);
482 void generateMDProfMetadata(Function &F);
483
484 /// Map from function name to Function *. Used to find the function from
485 /// the function name. If the function name contains a suffix, an
486 /// additional entry is added to map from the stripped name to the
487 /// function if there is a one-to-one mapping.
488 StringMap<Function *> SymbolMap;
489
490 std::function<AssumptionCache &(Function &)> GetAC;
491 std::function<TargetTransformInfo &(Function &)> GetTTI;
492 std::function<const TargetLibraryInfo &(Function &)> GetTLI;
493
494 /// Profile tracker for different context.
495 std::unique_ptr<SampleContextTracker> ContextTracker;
496
497 /// Flag indicating which LTO/ThinLTO phase the pass is invoked in.
498 ///
499 /// We need to know the LTO phase because, for example, in the ThinLTO
500 /// prelink phase we should not promote indirect calls during annotation.
501 /// Instead, we will mark the GUIDs that need to be annotated to the function.
502 const ThinOrFullLTOPhase LTOPhase;
503 const std::string AnnotatedPassName;
504
505 /// Profile symbol list tells whether a function name appears in the binary
506 /// used to generate the current profile.
507 std::unique_ptr<ProfileSymbolList> PSL;
508
509 /// Total number of samples collected in this profile.
510 ///
511 /// This is the sum of all the samples collected in all the functions executed
512 /// at runtime.
513 uint64_t TotalCollectedSamples = 0;
514
515 // Information recorded when we declined to inline a call site
516 // because we have determined it is too cold is accumulated for
517 // each callee function. Initially this is just the entry count.
518 struct NotInlinedProfileInfo {
519 uint64_t entryCount;
520 };
521 DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;
522
523 // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
524 // all the function symbols defined or declared in current module.
525 DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;
526
527 // All the Names used in FunctionSamples including outline function
528 // names, inline instance names and call target names.
529 StringSet<> NamesInProfile;
530
531 // For symbols in the profile symbol list, whether to regard their profiles
532 // as accurate. It is mainly decided by the existence of the profile symbol
533 // list and the -profile-accurate-for-symsinlist flag, but it can be
534 // overridden by -profile-sample-accurate or the profile-sample-accurate
535 // attribute.
536 bool ProfAccForSymsInList;
537
538 // External inline advisor used to replay inline decision from remarks.
539 std::unique_ptr<InlineAdvisor> ExternalInlineAdvisor;
540
541 // A pseudo probe helper to correlate the imported sample counts.
542 std::unique_ptr<PseudoProbeManager> ProbeManager;
543
544 private:
545 const char *getAnnotatedRemarkPassName() const {
546 return AnnotatedPassName.c_str();
547 }
548 };
549 } // end anonymous namespace
550
getInstWeight(const Instruction & Inst)551 ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
552 if (FunctionSamples::ProfileIsProbeBased)
553 return getProbeWeight(Inst);
554
555 const DebugLoc &DLoc = Inst.getDebugLoc();
556 if (!DLoc)
557 return std::error_code();
558
559 // Ignore all intrinsics, phi nodes and branch instructions.
560 // Branch and phi node instructions usually contain debug info from sources
561 // outside of the residing basic block, thus we ignore them during annotation.
562 if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
563 return std::error_code();
564
565 // For non-CS profile, if a direct call/invoke instruction is inlined in
566 // profile (findCalleeFunctionSamples returns non-empty result), but not
567 // inlined here, it means that the inlined callsite has no sample, thus the
568 // call instruction should have 0 count.
569 // For CS profile, the callsite count of previously inlined callees is
570 // populated with the entry count of the callees.
571 if (!FunctionSamples::ProfileIsCS)
572 if (const auto *CB = dyn_cast<CallBase>(&Inst))
573 if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
574 return 0;
575
576 return getInstWeightImpl(Inst);
577 }
578
579 // Here an error_code is used to represent: 1) a dangling probe, and 2) a
580 // non-probe instruction whose weight is ignored. So if all instructions of
581 // the BB give error_code, tell the inference algorithm to infer the BB weight.
getProbeWeight(const Instruction & Inst)582 ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) {
583 assert(FunctionSamples::ProfileIsProbeBased &&
584 "Profile is not pseudo probe based");
585 Optional<PseudoProbe> Probe = extractProbe(Inst);
586 // Ignore non-probe instructions. If none of the instructions in the BB is a
587 // probe, we choose to infer the BB's weight.
588 if (!Probe)
589 return std::error_code();
590
591 const FunctionSamples *FS = findFunctionSamples(Inst);
592 // If none of the instructions has FunctionSamples, we choose to return a
593 // zero-value sample to indicate the BB is cold. This could happen when the
594 // instruction is from an inlinee and no profile data is found.
595 // FIXME: This should not be affected by the source drift issue as 1) if the
596 // newly added function is top-level inliner, it won't match the CFG checksum
597 // in the function profile or 2) if it's the inlinee, the inlinee should have
598 // a profile, otherwise it wouldn't be inlined. For non-probe based profile,
599 // we can improve it by adding a switch for profile-sample-block-accurate for
600 // block level counts in the future.
601 if (!FS)
602 return 0;
603
604 // For non-CS profile, If a direct call/invoke instruction is inlined in
605 // profile (findCalleeFunctionSamples returns non-empty result), but not
606 // inlined here, it means that the inlined callsite has no sample, thus the
607 // call instruction should have 0 count.
608 // For CS profile, the callsite count of previously inlined callees is
609 // populated with the entry count of the callees.
610 if (!FunctionSamples::ProfileIsCS)
611 if (const auto *CB = dyn_cast<CallBase>(&Inst))
612 if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
613 return 0;
614
615 const ErrorOr<uint64_t> &R = FS->findSamplesAt(Probe->Id, 0);
616 if (R) {
617 uint64_t Samples = R.get() * Probe->Factor;
618 bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples);
619 if (FirstMark) {
620 ORE->emit([&]() {
621 OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
622 Remark << "Applied " << ore::NV("NumSamples", Samples);
623 Remark << " samples from profile (ProbeId=";
624 Remark << ore::NV("ProbeId", Probe->Id);
625 Remark << ", Factor=";
626 Remark << ore::NV("Factor", Probe->Factor);
627 Remark << ", OriginalSamples=";
628 Remark << ore::NV("OriginalSamples", R.get());
629 Remark << ")";
630 return Remark;
631 });
632 }
633 LLVM_DEBUG(dbgs() << " " << Probe->Id << ":" << Inst
634 << " - weight: " << R.get() << " - factor: "
635 << format("%0.2f", Probe->Factor) << ")\n");
636 return Samples;
637 }
638 return R;
639 }
640
641 /// Get the FunctionSamples for a call instruction.
642 ///
643 /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
644 /// instance that the call instruction is calling into. It contains
645 /// all samples that reside in the inlined instance. We first find the
646 /// inlined instance that the call instruction comes from, then we
647 /// traverse its children to find the callsite with the matching
648 /// location.
649 ///
650 /// \param Inst Call/Invoke instruction to query.
651 ///
652 /// \returns The FunctionSamples pointer to the inlined instance.
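/// As a simplified, illustrative example, given a text profile such as
///
/// \code
///   main:1000:0
///    3: 100
///    4: foo:600
///     1: 600
/// \endcode
///
/// a call to foo at line offset 4 inside main resolves to the nested "foo"
/// inline instance shown above.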
653 const FunctionSamples *
findCalleeFunctionSamples(const CallBase & Inst) const654 SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
655 const DILocation *DIL = Inst.getDebugLoc();
656 if (!DIL) {
657 return nullptr;
658 }
659
660 StringRef CalleeName;
661 if (Function *Callee = Inst.getCalledFunction())
662 CalleeName = Callee->getName();
663
664 if (FunctionSamples::ProfileIsCS)
665 return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName);
666
667 const FunctionSamples *FS = findFunctionSamples(Inst);
668 if (FS == nullptr)
669 return nullptr;
670
671 return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL),
672 CalleeName, Reader->getRemapper());
673 }
674
675 /// Returns a vector of FunctionSamples that are the indirect call targets
676 /// of \p Inst. The vector is sorted by the total number of samples. Stores
677 /// the total call count of the indirect call in \p Sum.
678 std::vector<const FunctionSamples *>
679 SampleProfileLoader::findIndirectCallFunctionSamples(
680 const Instruction &Inst, uint64_t &Sum) const {
681 const DILocation *DIL = Inst.getDebugLoc();
682 std::vector<const FunctionSamples *> R;
683
684 if (!DIL) {
685 return R;
686 }
687
688 auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
689 assert(L && R && "Expect non-null FunctionSamples");
690 if (L->getHeadSamplesEstimate() != R->getHeadSamplesEstimate())
691 return L->getHeadSamplesEstimate() > R->getHeadSamplesEstimate();
692 return FunctionSamples::getGUID(L->getName()) <
693 FunctionSamples::getGUID(R->getName());
694 };
695
696 if (FunctionSamples::ProfileIsCS) {
697 auto CalleeSamples =
698 ContextTracker->getIndirectCalleeContextSamplesFor(DIL);
699 if (CalleeSamples.empty())
700 return R;
701
702 // For CSSPGO, we only use the target context profile's entry count
703 // as that already includes both inlined callees and non-inlined ones.
704 Sum = 0;
705 for (const auto *const FS : CalleeSamples) {
706 Sum += FS->getHeadSamplesEstimate();
707 R.push_back(FS);
708 }
709 llvm::sort(R, FSCompare);
710 return R;
711 }
712
713 const FunctionSamples *FS = findFunctionSamples(Inst);
714 if (FS == nullptr)
715 return R;
716
717 auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
718 auto T = FS->findCallTargetMapAt(CallSite);
719 Sum = 0;
720 if (T)
721 for (const auto &T_C : T.get())
722 Sum += T_C.second;
723 if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
724 if (M->empty())
725 return R;
726 for (const auto &NameFS : *M) {
727 Sum += NameFS.second.getHeadSamplesEstimate();
728 R.push_back(&NameFS.second);
729 }
730 llvm::sort(R, FSCompare);
731 }
732 return R;
733 }
734
735 const FunctionSamples *
findFunctionSamples(const Instruction & Inst) const736 SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
737 if (FunctionSamples::ProfileIsProbeBased) {
738 Optional<PseudoProbe> Probe = extractProbe(Inst);
739 if (!Probe)
740 return nullptr;
741 }
742
743 const DILocation *DIL = Inst.getDebugLoc();
744 if (!DIL)
745 return Samples;
746
747 auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
748 if (it.second) {
749 if (FunctionSamples::ProfileIsCS)
750 it.first->second = ContextTracker->getContextSamplesFor(DIL);
751 else
752 it.first->second =
753 Samples->findFunctionSamples(DIL, Reader->getRemapper());
754 }
755 return it.first->second;
756 }
757
758 /// Check whether the indirect call promotion history of \p Inst allows
759 /// the promotion for \p Candidate.
760 /// If the profile count for the promotion candidate \p Candidate is
761 /// NOMORE_ICP_MAGICNUM, it means \p Candidate has already been promoted
762 /// for \p Inst. If we already have at least MaxNumPromotions
763 /// NOMORE_ICP_MAGICNUM count values in the value profile of \p Inst, we
764 /// cannot promote for \p Inst anymore.
doesHistoryAllowICP(const Instruction & Inst,StringRef Candidate)765 static bool doesHistoryAllowICP(const Instruction &Inst, StringRef Candidate) {
766 uint32_t NumVals = 0;
767 uint64_t TotalCount = 0;
768 std::unique_ptr<InstrProfValueData[]> ValueData =
769 std::make_unique<InstrProfValueData[]>(MaxNumPromotions);
770 bool Valid =
771 getValueProfDataFromInst(Inst, IPVK_IndirectCallTarget, MaxNumPromotions,
772 ValueData.get(), NumVals, TotalCount, true);
773 // No valid value profile so no promoted targets have been recorded
774 // before. Ok to do ICP.
775 if (!Valid)
776 return true;
777
778 unsigned NumPromoted = 0;
779 for (uint32_t I = 0; I < NumVals; I++) {
780 if (ValueData[I].Count != NOMORE_ICP_MAGICNUM)
781 continue;
782
783 // If the promotion candidate has NOMORE_ICP_MAGICNUM count in the
784 // metadata, it means the candidate has been promoted for this
785 // indirect call.
786 if (ValueData[I].Value == Function::getGUID(Candidate))
787 return false;
788 NumPromoted++;
789 // If already have MaxNumPromotions promotion, don't do it anymore.
790 if (NumPromoted == MaxNumPromotions)
791 return false;
792 }
793 return true;
794 }
795
796 /// Update indirect call target profile metadata for \p Inst.
797 /// Usually \p Sum is the sum of counts of all the targets for \p Inst.
798 /// If it is 0, it means updateIDTMetaData is used to mark a
799 /// certain target as already promoted. If it is not zero,
800 /// we expect to use it to update the total count in the value profile.
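/// As an illustration (counts are hypothetical), the value profile metadata
/// attached to the call has the form
///   !{!"VP", i32 0, i64 <Sum>, i64 <TargetGUID0>, i64 <Count0>, ...}
/// where i32 0 is the indirect-call-target value kind.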
801 static void
updateIDTMetaData(Instruction & Inst,const SmallVectorImpl<InstrProfValueData> & CallTargets,uint64_t Sum)802 updateIDTMetaData(Instruction &Inst,
803 const SmallVectorImpl<InstrProfValueData> &CallTargets,
804 uint64_t Sum) {
805 // Bail out early if MaxNumPromotions is zero.
806 // This prevents allocating an array of zero length below.
807 //
808 // Note `updateIDTMetaData` is called in two places so check
809 // `MaxNumPromotions` inside it.
810 if (MaxNumPromotions == 0)
811 return;
812 uint32_t NumVals = 0;
813 // OldSum is the existing total count in the value profile data.
814 uint64_t OldSum = 0;
815 std::unique_ptr<InstrProfValueData[]> ValueData =
816 std::make_unique<InstrProfValueData[]>(MaxNumPromotions);
817 bool Valid =
818 getValueProfDataFromInst(Inst, IPVK_IndirectCallTarget, MaxNumPromotions,
819 ValueData.get(), NumVals, OldSum, true);
820
821 DenseMap<uint64_t, uint64_t> ValueCountMap;
822 if (Sum == 0) {
823 assert((CallTargets.size() == 1 &&
824 CallTargets[0].Count == NOMORE_ICP_MAGICNUM) &&
825 "If sum is 0, assume only one element in CallTargets "
826 "with count being NOMORE_ICP_MAGICNUM");
827 // Initialize ValueCountMap with existing value profile data.
828 if (Valid) {
829 for (uint32_t I = 0; I < NumVals; I++)
830 ValueCountMap[ValueData[I].Value] = ValueData[I].Count;
831 }
832 auto Pair =
833 ValueCountMap.try_emplace(CallTargets[0].Value, CallTargets[0].Count);
834 // If the target already exists in value profile, decrease the total
835 // count OldSum and reset the target's count to NOMORE_ICP_MAGICNUM.
836 if (!Pair.second) {
837 OldSum -= Pair.first->second;
838 Pair.first->second = NOMORE_ICP_MAGICNUM;
839 }
840 Sum = OldSum;
841 } else {
842 // Initialize ValueCountMap with existing NOMORE_ICP_MAGICNUM
843 // counts in the value profile.
844 if (Valid) {
845 for (uint32_t I = 0; I < NumVals; I++) {
846 if (ValueData[I].Count == NOMORE_ICP_MAGICNUM)
847 ValueCountMap[ValueData[I].Value] = ValueData[I].Count;
848 }
849 }
850
851 for (const auto &Data : CallTargets) {
852 auto Pair = ValueCountMap.try_emplace(Data.Value, Data.Count);
853 if (Pair.second)
854 continue;
855 // The target represented by Data.Value has already been promoted.
856 // Keep the count as NOMORE_ICP_MAGICNUM in the profile and decrease
857 // Sum by Data.Count.
858 assert(Sum >= Data.Count && "Sum should never be less than Data.Count");
859 Sum -= Data.Count;
860 }
861 }
862
863 SmallVector<InstrProfValueData, 8> NewCallTargets;
864 for (const auto &ValueCount : ValueCountMap) {
865 NewCallTargets.emplace_back(
866 InstrProfValueData{ValueCount.first, ValueCount.second});
867 }
868
869 llvm::sort(NewCallTargets,
870 [](const InstrProfValueData &L, const InstrProfValueData &R) {
871 if (L.Count != R.Count)
872 return L.Count > R.Count;
873 return L.Value > R.Value;
874 });
875
876 uint32_t MaxMDCount =
877 std::min(NewCallTargets.size(), static_cast<size_t>(MaxNumPromotions));
878 annotateValueSite(*Inst.getParent()->getParent()->getParent(), Inst,
879 NewCallTargets, Sum, IPVK_IndirectCallTarget, MaxMDCount);
880 }
881
882 /// Attempt to promote indirect call and also inline the promoted call.
883 ///
884 /// \param F Caller function.
885 /// \param Candidate ICP and inline candidate.
886 /// \param SumOrigin Original sum of target counts for indirect call before
887 /// promoting given candidate.
888 /// \param Sum Prorated sum of remaining target counts for indirect call
889 /// after promoting given candidate.
890 /// \param InlinedCallSite Output vector for new call sites exposed after
891 /// inlining.
tryPromoteAndInlineCandidate(Function & F,InlineCandidate & Candidate,uint64_t SumOrigin,uint64_t & Sum,SmallVector<CallBase *,8> * InlinedCallSite)892 bool SampleProfileLoader::tryPromoteAndInlineCandidate(
893 Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, uint64_t &Sum,
894 SmallVector<CallBase *, 8> *InlinedCallSite) {
895 // Bail out early if sample-loader inliner is disabled.
896 if (DisableSampleLoaderInlining)
897 return false;
898
899 // Bail out early if MaxNumPromotions is zero.
900 // This prevents allocating an array of zero length in callees below.
901 if (MaxNumPromotions == 0)
902 return false;
903 auto CalleeFunctionName = Candidate.CalleeSamples->getFuncName();
904 auto R = SymbolMap.find(CalleeFunctionName);
905 if (R == SymbolMap.end() || !R->getValue())
906 return false;
907
908 auto &CI = *Candidate.CallInstr;
909 if (!doesHistoryAllowICP(CI, R->getValue()->getName()))
910 return false;
911
912 const char *Reason = "Callee function not available";
913 // R->getValue() != &F is to prevent promoting a recursive call.
914 // If it is a recursive call, we do not inline it as it could bloat
915 // the code exponentially. There is a way to handle this better, e.g.
916 // clone the caller first, and inline the cloned caller if it is
917 // recursive. As llvm does not inline recursive calls, we will
918 // simply ignore it instead of handling it explicitly.
919 if (!R->getValue()->isDeclaration() && R->getValue()->getSubprogram() &&
920 R->getValue()->hasFnAttribute("use-sample-profile") &&
921 R->getValue() != &F && isLegalToPromote(CI, R->getValue(), &Reason)) {
922 // For promoted target, set its value with NOMORE_ICP_MAGICNUM count
923 // in the value profile metadata so the target won't be promoted again.
924 SmallVector<InstrProfValueData, 1> SortedCallTargets = {InstrProfValueData{
925 Function::getGUID(R->getValue()->getName()), NOMORE_ICP_MAGICNUM}};
926 updateIDTMetaData(CI, SortedCallTargets, 0);
927
928 auto *DI = &pgo::promoteIndirectCall(
929 CI, R->getValue(), Candidate.CallsiteCount, Sum, false, ORE);
930 if (DI) {
931 Sum -= Candidate.CallsiteCount;
932 // Do not prorate the indirect callsite distribution since the original
933 // distribution will be used to scale down non-promoted profile target
934 // counts later. By doing this we lose track of the real callsite count
935 // for the leftover indirect callsite as a trade off for accurate call
936 // target counts.
937 // TODO: Ideally we would have two separate factors, one for call site
938 // counts and one is used to prorate call target counts.
939 // Do not update the promoted direct callsite distribution at this
940 // point since the original distribution combined with the callee profile
941 // will be used to prorate callsites from the callee if inlined. Once not
942 // inlined, the direct callsite distribution should be prorated so that
943 // it will reflect the real callsite counts.
944 Candidate.CallInstr = DI;
945 if (isa<CallInst>(DI) || isa<InvokeInst>(DI)) {
946 bool Inlined = tryInlineCandidate(Candidate, InlinedCallSite);
947 if (!Inlined) {
948 // Prorate the direct callsite distribution so that it reflects real
949 // callsite counts.
950 setProbeDistributionFactor(
951 *DI, static_cast<float>(Candidate.CallsiteCount) / SumOrigin);
952 }
953 return Inlined;
954 }
955 }
956 } else {
957 LLVM_DEBUG(dbgs() << "\nFailed to promote indirect call to "
958 << Candidate.CalleeSamples->getFuncName() << " because "
959 << Reason << "\n");
960 }
961 return false;
962 }
963
shouldInlineColdCallee(CallBase & CallInst)964 bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
965 if (!ProfileSizeInline)
966 return false;
967
968 Function *Callee = CallInst.getCalledFunction();
969 if (Callee == nullptr)
970 return false;
971
972 InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
973 GetAC, GetTLI);
974
975 if (Cost.isNever())
976 return false;
977
978 if (Cost.isAlways())
979 return true;
980
981 return Cost.getCost() <= SampleColdCallSiteThreshold;
982 }
983
984 void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
985 const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
986 bool Hot) {
987 for (auto I : Candidates) {
988 Function *CalledFunction = I->getCalledFunction();
989 if (CalledFunction) {
990 ORE->emit(OptimizationRemarkAnalysis(getAnnotatedRemarkPassName(),
991 "InlineAttempt", I->getDebugLoc(),
992 I->getParent())
993 << "previous inlining reattempted for "
994 << (Hot ? "hotness: '" : "size: '")
995 << ore::NV("Callee", CalledFunction) << "' into '"
996 << ore::NV("Caller", &F) << "'");
997 }
998 }
999 }
1000
1001 void SampleProfileLoader::findExternalInlineCandidate(
1002 CallBase *CB, const FunctionSamples *Samples,
1003 DenseSet<GlobalValue::GUID> &InlinedGUIDs,
1004 const StringMap<Function *> &SymbolMap, uint64_t Threshold) {
1005
1006 // If ExternalInlineAdvisor wants to inline an external function,
1007 // make sure it's imported.
1008 if (CB && getExternalInlineAdvisorShouldInline(*CB)) {
1009 // Samples may not exist for a replayed function; if so,
1010 // just add the direct GUID and move on.
1011 if (!Samples) {
1012 InlinedGUIDs.insert(
1013 FunctionSamples::getGUID(CB->getCalledFunction()->getName()));
1014 return;
1015 }
1016 // Otherwise, drop the threshold to import everything that we can
1017 Threshold = 0;
1018 }
1019
1020 assert(Samples && "expect non-null caller profile");
1021
1022 // For AutoFDO profile, retrieve candidate profiles by walking over
1023 // the nested inlinee profiles.
1024 if (!FunctionSamples::ProfileIsCS) {
1025 Samples->findInlinedFunctions(InlinedGUIDs, SymbolMap, Threshold);
1026 return;
1027 }
1028
1029 ContextTrieNode *Caller = ContextTracker->getContextNodeForProfile(Samples);
1030 std::queue<ContextTrieNode *> CalleeList;
1031 CalleeList.push(Caller);
1032 while (!CalleeList.empty()) {
1033 ContextTrieNode *Node = CalleeList.front();
1034 CalleeList.pop();
1035 FunctionSamples *CalleeSample = Node->getFunctionSamples();
1036 // For CSSPGO profile, retrieve candidate profiles by walking over the
1037 // trie built for the context profile. Note that we also take call targets
1038 // even if the callee doesn't have a corresponding context profile.
1039 if (!CalleeSample)
1040 continue;
1041
1042 // If pre-inliner decision is used, honor that for importing as well.
1043 bool PreInline =
1044 UsePreInlinerDecision &&
1045 CalleeSample->getContext().hasAttribute(ContextShouldBeInlined);
1046 if (!PreInline && CalleeSample->getHeadSamplesEstimate() < Threshold)
1047 continue;
1048
1049 StringRef Name = CalleeSample->getFuncName();
1050 Function *Func = SymbolMap.lookup(Name);
1051 // Add to the import list only when it's defined out of module.
1052 if (!Func || Func->isDeclaration())
1053 InlinedGUIDs.insert(FunctionSamples::getGUID(CalleeSample->getName()));
1054
1055 // Import hot CallTargets, which may not be available in IR because full
1056 // profile annotation cannot be done until backend compilation in ThinLTO.
1057 for (const auto &BS : CalleeSample->getBodySamples())
1058 for (const auto &TS : BS.second.getCallTargets())
1059 if (TS.getValue() > Threshold) {
1060 StringRef CalleeName = CalleeSample->getFuncName(TS.getKey());
1061 const Function *Callee = SymbolMap.lookup(CalleeName);
1062 if (!Callee || Callee->isDeclaration())
1063 InlinedGUIDs.insert(FunctionSamples::getGUID(TS.getKey()));
1064 }
1065
1066 // Import hot child context profiles associated with callees. Note that this
1067 // may have some overlap with the call target loop above, but doing this
1068 // again based on child context profiles effectively allows us to use the max
1069 // of the entry count and call target count to determine importing.
1070 for (auto &Child : Node->getAllChildContext()) {
1071 ContextTrieNode *CalleeNode = &Child.second;
1072 CalleeList.push(CalleeNode);
1073 }
1074 }
1075 }
1076
1077 /// Iteratively inline hot callsites of a function.
1078 ///
1079 /// Iteratively traverse all callsites of the function \p F, so as to
1080 /// find out callsites with corresponding inline instances.
1081 ///
1082 /// For such callsites,
1083 /// - If it is hot enough, inline the callsite and add callsites of the callee
1084 /// into the caller. If the call is an indirect call, first promote
1085 /// it to a direct call. Each indirect call is limited to a single target.
1086 ///
1087 /// - If a callsite is not inlined, merge its profile into the outline
1088 /// version (if --sample-profile-merge-inlinee is true), or scale the
1089 /// counters of standalone function based on the profile of inlined
1090 /// instances (if --sample-profile-merge-inlinee is false).
1091 ///
1092 /// Later passes may consume the updated profiles.
1093 ///
1094 /// \param F function to perform iterative inlining.
1095 /// \param InlinedGUIDs a set to be updated to include all GUIDs that are
1096 /// inlined in the profiled binary.
1097 ///
1098 /// \returns True if any inlining happened.
inlineHotFunctions(Function & F,DenseSet<GlobalValue::GUID> & InlinedGUIDs)1099 bool SampleProfileLoader::inlineHotFunctions(
1100 Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1101 // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
1102 // the profile symbol list is ignored when profile-sample-accurate is on.
1103 assert((!ProfAccForSymsInList ||
1104 (!ProfileSampleAccurate &&
1105 !F.hasFnAttribute("profile-sample-accurate"))) &&
1106 "ProfAccForSymsInList should be false when profile-sample-accurate "
1107 "is enabled");
1108
1109 DenseMap<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
1110 bool Changed = false;
1111 bool LocalChanged = true;
1112 while (LocalChanged) {
1113 LocalChanged = false;
1114 SmallVector<CallBase *, 10> CIS;
1115 for (auto &BB : F) {
1116 bool Hot = false;
1117 SmallVector<CallBase *, 10> AllCandidates;
1118 SmallVector<CallBase *, 10> ColdCandidates;
1119 for (auto &I : BB.getInstList()) {
1120 const FunctionSamples *FS = nullptr;
1121 if (auto *CB = dyn_cast<CallBase>(&I)) {
1122 if (!isa<IntrinsicInst>(I)) {
1123 if ((FS = findCalleeFunctionSamples(*CB))) {
1124 assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
1125 "GUIDToFuncNameMap has to be populated");
1126 AllCandidates.push_back(CB);
1127 if (FS->getHeadSamplesEstimate() > 0 ||
1128 FunctionSamples::ProfileIsCS)
1129 LocalNotInlinedCallSites.try_emplace(CB, FS);
1130 if (callsiteIsHot(FS, PSI, ProfAccForSymsInList))
1131 Hot = true;
1132 else if (shouldInlineColdCallee(*CB))
1133 ColdCandidates.push_back(CB);
1134 } else if (getExternalInlineAdvisorShouldInline(*CB)) {
1135 AllCandidates.push_back(CB);
1136 }
1137 }
1138 }
1139 }
1140 if (Hot || ExternalInlineAdvisor) {
1141 CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
1142 emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
1143 } else {
1144 CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
1145 emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
1146 }
1147 }
1148 for (CallBase *I : CIS) {
1149 Function *CalledFunction = I->getCalledFunction();
1150 InlineCandidate Candidate = {I, LocalNotInlinedCallSites.lookup(I),
1151 0 /* dummy count */,
1152 1.0 /* dummy distribution factor */};
1153 // Do not inline recursive calls.
1154 if (CalledFunction == &F)
1155 continue;
1156 if (I->isIndirectCall()) {
1157 uint64_t Sum;
1158 for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
1159 uint64_t SumOrigin = Sum;
1160 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1161 findExternalInlineCandidate(I, FS, InlinedGUIDs, SymbolMap,
1162 PSI->getOrCompHotCountThreshold());
1163 continue;
1164 }
1165 if (!callsiteIsHot(FS, PSI, ProfAccForSymsInList))
1166 continue;
1167
1168 Candidate = {I, FS, FS->getHeadSamplesEstimate(), 1.0};
1169 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum)) {
1170 LocalNotInlinedCallSites.erase(I);
1171 LocalChanged = true;
1172 }
1173 }
1174 } else if (CalledFunction && CalledFunction->getSubprogram() &&
1175 !CalledFunction->isDeclaration()) {
1176 if (tryInlineCandidate(Candidate)) {
1177 LocalNotInlinedCallSites.erase(I);
1178 LocalChanged = true;
1179 }
1180 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1181 findExternalInlineCandidate(I, findCalleeFunctionSamples(*I),
1182 InlinedGUIDs, SymbolMap,
1183 PSI->getOrCompHotCountThreshold());
1184 }
1185 }
1186 Changed |= LocalChanged;
1187 }
1188
1189 // For CS profile, profile for not inlined context will be merged when
1190 // base profile is being retrieved.
1191 if (!FunctionSamples::ProfileIsCS)
1192 promoteMergeNotInlinedContextSamples(LocalNotInlinedCallSites, F);
1193 return Changed;
1194 }
1195
tryInlineCandidate(InlineCandidate & Candidate,SmallVector<CallBase *,8> * InlinedCallSites)1196 bool SampleProfileLoader::tryInlineCandidate(
1197 InlineCandidate &Candidate, SmallVector<CallBase *, 8> *InlinedCallSites) {
1198 // Do not attempt to inline a candidate if
1199 // --disable-sample-loader-inlining is true.
1200 if (DisableSampleLoaderInlining)
1201 return false;
1202
1203 CallBase &CB = *Candidate.CallInstr;
1204 Function *CalledFunction = CB.getCalledFunction();
1205 assert(CalledFunction && "Expect a callee with definition");
1206 DebugLoc DLoc = CB.getDebugLoc();
1207 BasicBlock *BB = CB.getParent();
1208
1209 InlineCost Cost = shouldInlineCandidate(Candidate);
1210 if (Cost.isNever()) {
1211 ORE->emit(OptimizationRemarkAnalysis(getAnnotatedRemarkPassName(),
1212 "InlineFail", DLoc, BB)
1213 << "incompatible inlining");
1214 return false;
1215 }
1216
1217 if (!Cost)
1218 return false;
1219
1220 InlineFunctionInfo IFI(nullptr, GetAC);
1221 IFI.UpdateProfile = false;
1222 if (!InlineFunction(CB, IFI).isSuccess())
1223 return false;
1224
1225 // Merge the attributes based on the inlining.
1226 AttributeFuncs::mergeAttributesForInlining(*BB->getParent(),
1227 *CalledFunction);
1228
1229 // The call to InlineFunction erases I, so we can't pass it here.
1230 emitInlinedIntoBasedOnCost(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(),
1231 Cost, true, getAnnotatedRemarkPassName());
1232
1233 // Now populate the list of newly exposed call sites.
1234 if (InlinedCallSites) {
1235 InlinedCallSites->clear();
1236 for (auto &I : IFI.InlinedCallSites)
1237 InlinedCallSites->push_back(I);
1238 }
1239
1240 if (FunctionSamples::ProfileIsCS)
1241 ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples);
1242 ++NumCSInlined;
1243
1244 // Prorate inlined probes for a duplicated inlining callsite which probably
1245 // has a distribution less than 100%. Samples for an inlinee should be
1246 // distributed among the copies of the original callsite based on each
1247 // callsite's distribution factor for counts accuracy. Note that an inlined
1248 // probe may come with its own distribution factor if it has been duplicated
1249 // in the inlinee body. The two factors are multiplied to reflect the
1250 // aggregation of duplication.
1251 if (Candidate.CallsiteDistribution < 1) {
1252 for (auto &I : IFI.InlinedCallSites) {
1253 if (Optional<PseudoProbe> Probe = extractProbe(*I))
1254 setProbeDistributionFactor(*I, Probe->Factor *
1255 Candidate.CallsiteDistribution);
1256 }
1257 NumDuplicatedInlinesite++;
1258 }
1259
1260 return true;
1261 }
1262
1263 bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
1264 CallBase *CB) {
1265 assert(CB && "Expect non-null call instruction");
1266
1267 if (isa<IntrinsicInst>(CB))
1268 return false;
1269
1270 // Find the callee's profile. For indirect call, find hottest target profile.
1271 const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
1272 // If ExternalInlineAdvisor wants to inline this site, do so even
1273 // if Samples are not present.
1274 if (!CalleeSamples && !getExternalInlineAdvisorShouldInline(*CB))
1275 return false;
1276
1277 float Factor = 1.0;
1278 if (Optional<PseudoProbe> Probe = extractProbe(*CB))
1279 Factor = Probe->Factor;
1280
1281 uint64_t CallsiteCount =
1282 CalleeSamples ? CalleeSamples->getHeadSamplesEstimate() * Factor : 0;
1283 *NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
1284 return true;
1285 }
1286
1287 Optional<InlineCost>
getExternalInlineAdvisorCost(CallBase & CB)1288 SampleProfileLoader::getExternalInlineAdvisorCost(CallBase &CB) {
1289 std::unique_ptr<InlineAdvice> Advice = nullptr;
1290 if (ExternalInlineAdvisor) {
1291 Advice = ExternalInlineAdvisor->getAdvice(CB);
1292 if (Advice) {
1293 if (!Advice->isInliningRecommended()) {
1294 Advice->recordUnattemptedInlining();
1295 return InlineCost::getNever("not previously inlined");
1296 }
1297 Advice->recordInlining();
1298 return InlineCost::getAlways("previously inlined");
1299 }
1300 }
1301
1302 return {};
1303 }
1304
getExternalInlineAdvisorShouldInline(CallBase & CB)1305 bool SampleProfileLoader::getExternalInlineAdvisorShouldInline(CallBase &CB) {
1306 Optional<InlineCost> Cost = getExternalInlineAdvisorCost(CB);
1307 return Cost ? !!Cost.value() : false;
1308 }
1309
1310 InlineCost
shouldInlineCandidate(InlineCandidate & Candidate)1311 SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
1312 if (Optional<InlineCost> ReplayCost =
1313 getExternalInlineAdvisorCost(*Candidate.CallInstr))
1314 return ReplayCost.value();
1315 // Adjust threshold based on call site hotness, only do this for callsite
1316 // prioritized inliner because otherwise cost-benefit check is done earlier.
1317 int SampleThreshold = SampleColdCallSiteThreshold;
1318 if (CallsitePrioritizedInline) {
1319 if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
1320 SampleThreshold = SampleHotCallSiteThreshold;
1321 else if (!ProfileSizeInline)
1322 return InlineCost::getNever("cold callsite");
1323 }
1324
1325 Function *Callee = Candidate.CallInstr->getCalledFunction();
1326 assert(Callee && "Expect a definition for inline candidate of direct call");
1327
1328 InlineParams Params = getInlineParams();
1329 // We will ignore the threshold from inline cost, so always get full cost.
1330 Params.ComputeFullInlineCost = true;
1331 Params.AllowRecursiveCall = AllowRecursiveInline;
1332   // Check if there is anything in the reachable portion of the callee at
1333   // this callsite that makes this inlining potentially illegal. We need to
1334   // set ComputeFullInlineCost, otherwise getInlineCost may return early
1335   // when the cost exceeds the threshold without checking all the IR in the
1336   // callee. The actual cost does not matter because we only check isNever()
1337   // to see if it is legal to inline the callsite.
1338 InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
1339 GetTTI(*Callee), GetAC, GetTLI);
1340
1341 // Honor always inline and never inline from call analyzer
1342 if (Cost.isNever() || Cost.isAlways())
1343 return Cost;
1344
1345 // With CSSPGO, the preinliner in llvm-profgen can estimate global inline
1346   // decisions based on hotness as well as accurate function byte sizes for
1347   // a given context, using function/inlinee sizes from a previous build. It
1348   // stores the decisions in the profile and also adjusts/merges context
1349   // profiles, aiming at better context-sensitive post-inline profile quality,
1350   // assuming all inline decision estimates will be honored by the compiler.
1351   // Here we replay that inline decision under `sample-profile-use-preinliner`.
1352   // Note that we don't need to handle negative decisions from the preinliner,
1353   // as context profiles for not-inlined calls are already merged by it.
1354 if (UsePreInlinerDecision && Candidate.CalleeSamples) {
1355     // Once two nodes are merged due to promotion, we lose some context, so
1356     // the original context-sensitive preinliner decision should be ignored
1357 // for SyntheticContext.
1358 SampleContext &Context = Candidate.CalleeSamples->getContext();
1359 if (!Context.hasState(SyntheticContext) &&
1360 Context.hasAttribute(ContextShouldBeInlined))
1361 return InlineCost::getAlways("preinliner");
1362 }
1363
1364   // For the old FDO inliner, we inline the call site as long as the cost is
1365   // not "Never". The cost-benefit check is done earlier.
1366 if (!CallsitePrioritizedInline) {
1367 return InlineCost::get(Cost.getCost(), INT_MAX);
1368 }
1369
1370   // Otherwise only use the cost from the call analyzer, but overwrite the
1371   // threshold with the Sample PGO threshold.
1372 return InlineCost::get(Cost.getCost(), SampleThreshold);
1373 }
1374
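// Inline hot call sites of \p F in priority order (hotter callsites first),
// pushing call sites exposed by each successful inline back onto the queue
// until the queue drains or the size budget below is exhausted.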
1375 bool SampleProfileLoader::inlineHotFunctionsWithPriority(
1376 Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1377 // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
1378   // the profile symbol list is ignored when profile-sample-accurate is on.
1379 assert((!ProfAccForSymsInList ||
1380 (!ProfileSampleAccurate &&
1381 !F.hasFnAttribute("profile-sample-accurate"))) &&
1382 "ProfAccForSymsInList should be false when profile-sample-accurate "
1383 "is enabled");
1384
1385   // Populate the worklist with initial call sites from the root inliner,
1386   // along with call site weights.
1387 CandidateQueue CQueue;
1388 InlineCandidate NewCandidate;
1389 for (auto &BB : F) {
1390 for (auto &I : BB.getInstList()) {
1391 auto *CB = dyn_cast<CallBase>(&I);
1392 if (!CB)
1393 continue;
1394 if (getInlineCandidate(&NewCandidate, CB))
1395 CQueue.push(NewCandidate);
1396 }
1397 }
1398
1399   // Cap the size growth from profile-guided inlining. This is needed even
1400   // though the cost of each inline candidate already accounts for callee size,
1401   // because with top-down inlining we can grow the inliner size significantly
1402   // with a large number of smaller inlinees that each pass the cost check.
1403 assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
1404 "Max inline size limit should not be smaller than min inline size "
1405 "limit.");
1406 unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
1407 SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
1408 SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
1409 if (ExternalInlineAdvisor)
1410 SizeLimit = std::numeric_limits<unsigned>::max();
1411
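  // Remember callsites that were considered but not inlined so that their
  // callee profiles can be merged back or accounted for after inlining.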
1412 DenseMap<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
1413
1414 // Perform iterative BFS call site prioritized inlining
1415 bool Changed = false;
1416 while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
1417 InlineCandidate Candidate = CQueue.top();
1418 CQueue.pop();
1419 CallBase *I = Candidate.CallInstr;
1420 Function *CalledFunction = I->getCalledFunction();
1421
1422 if (CalledFunction == &F)
1423 continue;
1424 if (I->isIndirectCall()) {
1425 uint64_t Sum = 0;
1426 auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
1427 uint64_t SumOrigin = Sum;
1428 Sum *= Candidate.CallsiteDistribution;
1429 unsigned ICPCount = 0;
1430 for (const auto *FS : CalleeSamples) {
1431         // TODO: Consider disabling pre-LTO ICP for MonoLTO as well.
1432 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1433 findExternalInlineCandidate(I, FS, InlinedGUIDs, SymbolMap,
1434 PSI->getOrCompHotCountThreshold());
1435 continue;
1436 }
1437 uint64_t EntryCountDistributed =
1438 FS->getHeadSamplesEstimate() * Candidate.CallsiteDistribution;
1439         // In addition to the regular inline cost check, we also need to make
1440         // sure ICP isn't introducing excessive speculative checks, even if an
1441         // individual target looks beneficial to promote and inline. That means
1442         // we should only do ICP when there is a small number of dominant targets.
1443 if (ICPCount >= ProfileICPRelativeHotnessSkip &&
1444 EntryCountDistributed * 100 < SumOrigin * ProfileICPRelativeHotness)
1445 break;
1446 // TODO: Fix CallAnalyzer to handle all indirect calls.
1447         // For indirect calls, we don't run CallAnalyzer to get an InlineCost
1448         // before actual inlining. This is because we could see two different
1449         // types from the same definition, which makes CallAnalyzer choke as
1450         // it expects matching parameter types on both the caller and callee
1451         // sides. See the example from PR18962 for the triggering cases (the
1452         // bug was fixed, but we generate different types).
1453 if (!PSI->isHotCount(EntryCountDistributed))
1454 break;
1455 SmallVector<CallBase *, 8> InlinedCallSites;
1456         // Attach the function profile for the promoted indirect callee, and
1457         // update the call site count for the promoted inline candidate too.
1458 Candidate = {I, FS, EntryCountDistributed,
1459 Candidate.CallsiteDistribution};
1460 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
1461 &InlinedCallSites)) {
1462 for (auto *CB : InlinedCallSites) {
1463 if (getInlineCandidate(&NewCandidate, CB))
1464 CQueue.emplace(NewCandidate);
1465 }
1466 ICPCount++;
1467 Changed = true;
1468 } else if (!ContextTracker) {
1469 LocalNotInlinedCallSites.try_emplace(I, FS);
1470 }
1471 }
1472 } else if (CalledFunction && CalledFunction->getSubprogram() &&
1473 !CalledFunction->isDeclaration()) {
1474 SmallVector<CallBase *, 8> InlinedCallSites;
1475 if (tryInlineCandidate(Candidate, &InlinedCallSites)) {
1476 for (auto *CB : InlinedCallSites) {
1477 if (getInlineCandidate(&NewCandidate, CB))
1478 CQueue.emplace(NewCandidate);
1479 }
1480 Changed = true;
1481 } else if (!ContextTracker) {
1482 LocalNotInlinedCallSites.try_emplace(I, Candidate.CalleeSamples);
1483 }
1484 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1485 findExternalInlineCandidate(I, findCalleeFunctionSamples(*I),
1486 InlinedGUIDs, SymbolMap,
1487 PSI->getOrCompHotCountThreshold());
1488 }
1489 }
1490
1491 if (!CQueue.empty()) {
1492 if (SizeLimit == (unsigned)ProfileInlineLimitMax)
1493 ++NumCSInlinedHitMaxLimit;
1494 else if (SizeLimit == (unsigned)ProfileInlineLimitMin)
1495 ++NumCSInlinedHitMinLimit;
1496 else
1497 ++NumCSInlinedHitGrowthLimit;
1498 }
1499
1500   // For CS profile, profiles for not-inlined contexts will be merged when
1501   // the base profile is retrieved.
1502 if (!FunctionSamples::ProfileIsCS)
1503 promoteMergeNotInlinedContextSamples(LocalNotInlinedCallSites, F);
1504 return Changed;
1505 }
1506
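// For callsites that were not inlined, either merge the nested inlinee profile
// into the outlined callee's profile (under ProfileMergeInlinee) or record the
// inlinee's entry count so the callee's profile can be updated afterwards.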
1507 void SampleProfileLoader::promoteMergeNotInlinedContextSamples(
1508 DenseMap<CallBase *, const FunctionSamples *> NonInlinedCallSites,
1509 const Function &F) {
1510 // Accumulate not inlined callsite information into notInlinedSamples
1511 for (const auto &Pair : NonInlinedCallSites) {
1512 CallBase *I = Pair.getFirst();
1513 Function *Callee = I->getCalledFunction();
1514 if (!Callee || Callee->isDeclaration())
1515 continue;
1516
1517 ORE->emit(
1518 OptimizationRemarkAnalysis(getAnnotatedRemarkPassName(), "NotInline",
1519 I->getDebugLoc(), I->getParent())
1520 << "previous inlining not repeated: '" << ore::NV("Callee", Callee)
1521 << "' into '" << ore::NV("Caller", &F) << "'");
1522
1523 ++NumCSNotInlined;
1524 const FunctionSamples *FS = Pair.getSecond();
1525 if (FS->getTotalSamples() == 0 && FS->getHeadSamplesEstimate() == 0) {
1526 continue;
1527 }
1528
1529 // Do not merge a context that is already duplicated into the base profile.
1530 if (FS->getContext().hasAttribute(sampleprof::ContextDuplicatedIntoBase))
1531 continue;
1532
1533 if (ProfileMergeInlinee) {
1534       // A function call can be replicated by optimizations like callsite
1535       // splitting or jump threading, and the replicas end up sharing the
1536       // sample nested callee profile instead of slicing the original
1537       // inlinee's profile. We want to do the merge exactly once by filtering
1538       // out callee profiles with a non-zero head sample count.
1539 if (FS->getHeadSamples() == 0) {
1540 // Use entry samples as head samples during the merge, as inlinees
1541 // don't have head samples.
1542 const_cast<FunctionSamples *>(FS)->addHeadSamples(
1543 FS->getHeadSamplesEstimate());
1544
1545         // Note that we have to do the merge right after processing the
1546         // function. This allows OutlineFS's profile to be used for annotation
1547         // during top-down processing of functions.
1548 FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
1549 OutlineFS->merge(*FS, 1);
1550 // Set outlined profile to be synthetic to not bias the inliner.
1551 OutlineFS->SetContextSynthetic();
1552 }
1553 } else {
1554 auto pair =
1555 notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
1556 pair.first->second.entryCount += FS->getHeadSamplesEstimate();
1557 }
1558 }
1559 }
1560
1561 /// Returns the CallTargetMap \p M sorted by count in descending order.
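/// For example (hypothetical names and counts), a map {"foo": 400, "bar": 100}
/// yields [{getGUID("foo"), 400}, {getGUID("bar"), 100}].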
1562 static SmallVector<InstrProfValueData, 2>
1563 GetSortedValueDataFromCallTargets(const SampleRecord::CallTargetMap &M) {
1564 SmallVector<InstrProfValueData, 2> R;
1565 for (const auto &I : SampleRecord::SortCallTargets(M)) {
1566 R.emplace_back(
1567 InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
1568 }
1569 return R;
1570 }
1571
1574 void SampleProfileLoader::generateMDProfMetadata(Function &F) {
1575 // Generate MD_prof metadata for every branch instruction using the
1576 // edge weights computed during propagation.
1577 LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
1578 LLVMContext &Ctx = F.getContext();
1579 MDBuilder MDB(Ctx);
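  // Branch weights are attached as !prof metadata of the form
  // !{!"branch_weights", i32 W1, ..., i32 Wn}; indirect call targets get
  // value-profile metadata via updateIDTMetaData below.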
1580 for (auto &BI : F) {
1581 BasicBlock *BB = &BI;
1582
1583 if (BlockWeights[BB]) {
1584 for (auto &I : BB->getInstList()) {
1585 if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
1586 continue;
1587 if (!cast<CallBase>(I).getCalledFunction()) {
1588 const DebugLoc &DLoc = I.getDebugLoc();
1589 if (!DLoc)
1590 continue;
1591 const DILocation *DIL = DLoc;
1592 const FunctionSamples *FS = findFunctionSamples(I);
1593 if (!FS)
1594 continue;
1595 auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
1596 auto T = FS->findCallTargetMapAt(CallSite);
1597 if (!T || T.get().empty())
1598 continue;
1599 if (FunctionSamples::ProfileIsProbeBased) {
1600 // Prorate the callsite counts based on the pre-ICP distribution
1601 // factor to reflect what is already done to the callsite before
1602             // ICP, such as callsite cloning.
1603 if (Optional<PseudoProbe> Probe = extractProbe(I)) {
1604 if (Probe->Factor < 1)
1605 T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor);
1606 }
1607 }
1608 SmallVector<InstrProfValueData, 2> SortedCallTargets =
1609 GetSortedValueDataFromCallTargets(T.get());
1610 uint64_t Sum = 0;
1611 for (const auto &C : T.get())
1612 Sum += C.second;
1613             // With CSSPGO all indirect call targets are counted towards the
1614 // original indirect call site in the profile, including both
1615 // inlined and non-inlined targets.
1616 if (!FunctionSamples::ProfileIsCS) {
1617 if (const FunctionSamplesMap *M =
1618 FS->findFunctionSamplesMapAt(CallSite)) {
1619 for (const auto &NameFS : *M)
1620 Sum += NameFS.second.getHeadSamplesEstimate();
1621 }
1622 }
1623 if (Sum)
1624 updateIDTMetaData(I, SortedCallTargets, Sum);
1625 else if (OverwriteExistingWeights)
1626 I.setMetadata(LLVMContext::MD_prof, nullptr);
1627 } else if (!isa<IntrinsicInst>(&I)) {
1628 I.setMetadata(LLVMContext::MD_prof,
1629 MDB.createBranchWeights(
1630 {static_cast<uint32_t>(BlockWeights[BB])}));
1631 }
1632 }
1633 } else if (OverwriteExistingWeights || ProfileSampleBlockAccurate) {
1634 // Set profile metadata (possibly annotated by LTO prelink) to zero or
1635 // clear it for cold code.
1636 for (auto &I : BB->getInstList()) {
1637 if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1638 if (cast<CallBase>(I).isIndirectCall())
1639 I.setMetadata(LLVMContext::MD_prof, nullptr);
1640 else
1641 I.setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(0));
1642 }
1643 }
1644 }
1645
1646 Instruction *TI = BB->getTerminator();
1647 if (TI->getNumSuccessors() == 1)
1648 continue;
1649 if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI) &&
1650 !isa<IndirectBrInst>(TI))
1651 continue;
1652
1653 DebugLoc BranchLoc = TI->getDebugLoc();
1654 LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
1655 << ((BranchLoc) ? Twine(BranchLoc.getLine())
1656 : Twine("<UNKNOWN LOCATION>"))
1657 << ".\n");
1658 SmallVector<uint32_t, 4> Weights;
1659 uint32_t MaxWeight = 0;
1660 Instruction *MaxDestInst;
1661 // Since profi treats multiple edges (multiway branches) as a single edge,
1662 // we need to distribute the computed weight among the branches. We do
1663 // this by evenly splitting the edge weight among destinations.
1664 DenseMap<const BasicBlock *, uint64_t> EdgeMultiplicity;
1665 std::vector<uint64_t> EdgeIndex;
1666 if (SampleProfileUseProfi) {
1667 EdgeIndex.resize(TI->getNumSuccessors());
1668 for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
1669 const BasicBlock *Succ = TI->getSuccessor(I);
1670 EdgeIndex[I] = EdgeMultiplicity[Succ];
1671 EdgeMultiplicity[Succ]++;
1672 }
1673 }
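    // For example, if two switch cases jump to the same successor and that
    // edge's weight is 9, the two branches get weights 5 and 4 respectively.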
1674 for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
1675 BasicBlock *Succ = TI->getSuccessor(I);
1676 Edge E = std::make_pair(BB, Succ);
1677 uint64_t Weight = EdgeWeights[E];
1678 LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
1679 // Use uint32_t saturated arithmetic to adjust the incoming weights,
1680 // if needed. Sample counts in profiles are 64-bit unsigned values,
1681 // but internally branch weights are expressed as 32-bit values.
1682 if (Weight > std::numeric_limits<uint32_t>::max()) {
1683 LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
1684 Weight = std::numeric_limits<uint32_t>::max();
1685 }
1686 if (!SampleProfileUseProfi) {
1687         // The weight is incremented by one to avoid propagation errors
1688         // introduced by zero weights.
1689 Weights.push_back(static_cast<uint32_t>(Weight + 1));
1690 } else {
1691 // Profi creates proper weights that do not require "+1" adjustments but
1692 // we evenly split the weight among branches with the same destination.
1693 uint64_t W = Weight / EdgeMultiplicity[Succ];
1694 // Rounding up, if needed, so that first branches are hotter.
1695 if (EdgeIndex[I] < Weight % EdgeMultiplicity[Succ])
1696 W++;
1697 Weights.push_back(static_cast<uint32_t>(W));
1698 }
1699 if (Weight != 0) {
1700 if (Weight > MaxWeight) {
1701 MaxWeight = Weight;
1702 MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
1703 }
1704 }
1705 }
1706
1707 // FIXME: Re-enable for sample profiling after investigating why the sum
1708 // of branch weights can be 0
1709 //
1710 // misexpect::checkExpectAnnotations(*TI, Weights, /*IsFrontend=*/false);
1711
1712 uint64_t TempWeight;
1713 // Only set weights if there is at least one non-zero weight.
1714 // In any other case, let the analyzer set weights.
1715 // Do not set weights if the weights are present unless under
1716 // OverwriteExistingWeights. In ThinLTO, the profile annotation is done
1717 // twice. If the first annotation already set the weights, the second pass
1718 // does not need to set it. With OverwriteExistingWeights, Blocks with zero
1719 // weight should have their existing metadata (possibly annotated by LTO
1720 // prelink) cleared.
1721 if (MaxWeight > 0 &&
1722 (!TI->extractProfTotalWeight(TempWeight) || OverwriteExistingWeights)) {
1723 LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
1724 TI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
1725 ORE->emit([&]() {
1726 return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
1727 << "most popular destination for conditional branches at "
1728 << ore::NV("CondBranchesLoc", BranchLoc);
1729 });
1730 } else {
1731 if (OverwriteExistingWeights) {
1732 TI->setMetadata(LLVMContext::MD_prof, nullptr);
1733 LLVM_DEBUG(dbgs() << "CLEARED. All branch weights are zero.\n");
1734 } else {
1735 LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
1736 }
1737 }
1738 }
1739 }
1740
1741 /// Once all the branch weights are computed, we emit the MD_prof
1742 /// metadata on BB using the computed values for each of its branches.
1743 ///
1744 /// \param F The function to query.
1745 ///
1746 /// \returns true if \p F was modified. Returns false, otherwise.
1747 bool SampleProfileLoader::emitAnnotations(Function &F) {
1748 bool Changed = false;
1749
1750 if (FunctionSamples::ProfileIsProbeBased) {
1751 if (!ProbeManager->profileIsValid(F, *Samples)) {
1752 LLVM_DEBUG(
1753 dbgs() << "Profile is invalid due to CFG mismatch for Function "
1754 << F.getName());
1755 ++NumMismatchedProfile;
1756 return false;
1757 }
1758 ++NumMatchedProfile;
1759 } else {
1760 if (getFunctionLoc(F) == 0)
1761 return false;
1762
1763 LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
1764 << F.getName() << ": " << getFunctionLoc(F) << "\n");
1765 }
1766
1767 DenseSet<GlobalValue::GUID> InlinedGUIDs;
1768 if (CallsitePrioritizedInline)
1769 Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs);
1770 else
1771 Changed |= inlineHotFunctions(F, InlinedGUIDs);
1772
1773 Changed |= computeAndPropagateWeights(F, InlinedGUIDs);
1774
1775 if (Changed)
1776 generateMDProfMetadata(F);
1777
1778 emitCoverageRemarks(F);
1779 return Changed;
1780 }
1781
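// Build a call graph from profiled call edges (taken from the context tracker
// for CS profiles, or from the flat profiles otherwise); it is used to compute
// a profile-guided top-down function processing order.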
1782 std::unique_ptr<ProfiledCallGraph>
1783 SampleProfileLoader::buildProfiledCallGraph(CallGraph &CG) {
1784 std::unique_ptr<ProfiledCallGraph> ProfiledCG;
1785 if (FunctionSamples::ProfileIsCS)
1786 ProfiledCG = std::make_unique<ProfiledCallGraph>(*ContextTracker);
1787 else
1788 ProfiledCG = std::make_unique<ProfiledCallGraph>(Reader->getProfiles());
1789
1790 // Add all functions into the profiled call graph even if they are not in
1791 // the profile. This makes sure functions missing from the profile still
1792   // get a chance to be processed.
1793 for (auto &Node : CG) {
1794 const auto *F = Node.first;
1795 if (!F || F->isDeclaration() || !F->hasFnAttribute("use-sample-profile"))
1796 continue;
1797 ProfiledCG->addProfiledFunction(FunctionSamples::getCanonicalFnName(*F));
1798 }
1799
1800 return ProfiledCG;
1801 }
1802
1803 std::vector<Function *>
1804 SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
1805 std::vector<Function *> FunctionOrderList;
1806 FunctionOrderList.reserve(M.size());
1807
1808 if (!ProfileTopDownLoad && UseProfiledCallGraph)
1809 errs() << "WARNING: -use-profiled-call-graph ignored, should be used "
1810 "together with -sample-profile-top-down-load.\n";
1811
1812 if (!ProfileTopDownLoad || CG == nullptr) {
1813 if (ProfileMergeInlinee) {
1814 // Disable ProfileMergeInlinee if profile is not loaded in top down order,
1815 // because the profile for a function may be used for the profile
1816 // annotation of its outline copy before the profile merging of its
1817       // non-inlined inline instances, and that is not how ProfileMergeInlinee
1818       // is supposed to work.
1819 ProfileMergeInlinee = false;
1820 }
1821
1822 for (Function &F : M)
1823 if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
1824 FunctionOrderList.push_back(&F);
1825 return FunctionOrderList;
1826 }
1827
1828 assert(&CG->getModule() == &M);
1829
1830 if (UseProfiledCallGraph || (FunctionSamples::ProfileIsCS &&
1831 !UseProfiledCallGraph.getNumOccurrences())) {
1832     // Use profiled call edges to augment the top-down order. There are cases
1833     // where the top-down order computed based on the static call graph doesn't
1834     // reflect the real execution order. For example:
1835 //
1836 // 1. Incomplete static call graph due to unknown indirect call targets.
1837 // Adjusting the order by considering indirect call edges from the
1838 // profile can enable the inlining of indirect call targets by allowing
1839     //    the caller to be processed before them.
1840 // 2. Mutual call edges in an SCC. The static processing order computed for
1841 // an SCC may not reflect the call contexts in the context-sensitive
1842 // profile, thus may cause potential inlining to be overlooked. The
1843 // function order in one SCC is being adjusted to a top-down order based
1844 // on the profile to favor more inlining. This is only a problem with CS
1845 // profile.
1846 // 3. Transitive indirect call edges due to inlining. When a callee function
1847     //    (say B) is inlined into a caller function (say A) in LTO prelink,
1848 // every call edge originated from the callee B will be transferred to
1849 // the caller A. If any transferred edge (say A->C) is indirect, the
1850 // original profiled indirect edge B->C, even if considered, would not
1851 // enforce a top-down order from the caller A to the potential indirect
1852 // call target C in LTO postlink since the inlined callee B is gone from
1853 // the static call graph.
1854 // 4. #3 can happen even for direct call targets, due to functions defined
1855 // in header files. A header function (say A), when included into source
1856 // files, is defined multiple times but only one definition survives due
1857 // to ODR. Therefore, the LTO prelink inlining done on those dropped
1858 // definitions can be useless based on a local file scope. More
1859 // importantly, the inlinee (say B), once fully inlined to a
1860 // to-be-dropped A, will have no profile to consume when its outlined
1861 // version is compiled. This can lead to a profile-less prelink
1862 // compilation for the outlined version of B which may be called from
1863     //    external modules. While this isn't easy to fix, we rely on the
1864     //    postlink AutoFDO pipeline to optimize B. Since the surviving copy of
1865     //    A can be inlined in its local scope in prelink, it may not exist
1866 // in the merged IR in postlink, and we'll need the profiled call edges
1867 // to enforce a top-down order for the rest of the functions.
1868 //
1869 // Considering those cases, a profiled call graph completely independent of
1870 // the static call graph is constructed based on profile data, where
1871     // function objects are not even needed to handle case #3 and case #4.
1872 //
1873     // Note that static callgraph edges are completely ignored since they can
1874     // conflict with profiled edges for cyclic SCCs and may result in an SCC
1875     // order incompatible with the profile-defined one. Using a strictly
1876     // profile-based order ensures a maximum inlining experience. On the other hand,
1877 // static call edges are not so important when they don't correspond to a
1878 // context in the profile.
1879
1880 std::unique_ptr<ProfiledCallGraph> ProfiledCG = buildProfiledCallGraph(*CG);
1881 scc_iterator<ProfiledCallGraph *> CGI = scc_begin(ProfiledCG.get());
1882 while (!CGI.isAtEnd()) {
1883 auto Range = *CGI;
1884 if (SortProfiledSCC) {
1885 // Sort nodes in one SCC based on callsite hotness.
1886 scc_member_iterator<ProfiledCallGraph *> SI(*CGI);
1887 Range = *SI;
1888 }
1889 for (auto *Node : Range) {
1890 Function *F = SymbolMap.lookup(Node->Name);
1891 if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
1892 FunctionOrderList.push_back(F);
1893 }
1894 ++CGI;
1895 }
1896 } else {
1897 scc_iterator<CallGraph *> CGI = scc_begin(CG);
1898 while (!CGI.isAtEnd()) {
1899 for (CallGraphNode *Node : *CGI) {
1900 auto *F = Node->getFunction();
1901 if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
1902 FunctionOrderList.push_back(F);
1903 }
1904 ++CGI;
1905 }
1906 }
1907
1908 LLVM_DEBUG({
1909 dbgs() << "Function processing order:\n";
1910 for (auto F : reverse(FunctionOrderList)) {
1911 dbgs() << F->getName() << "\n";
1912 }
1913 });
1914
1915 std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
1916 return FunctionOrderList;
1917 }
1918
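// Create and run the profile reader, then set up auxiliary state: the profile
// symbol list, an optional replay inline advisor, CSSPGO/probe-related flag
// defaults, the context tracker, and the pseudo probe manager.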
1919 bool SampleProfileLoader::doInitialization(Module &M,
1920 FunctionAnalysisManager *FAM) {
1921 auto &Ctx = M.getContext();
1922
1923 auto ReaderOrErr = SampleProfileReader::create(
1924 Filename, Ctx, FSDiscriminatorPass::Base, RemappingFilename);
1925 if (std::error_code EC = ReaderOrErr.getError()) {
1926 std::string Msg = "Could not open profile: " + EC.message();
1927 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
1928 return false;
1929 }
1930 Reader = std::move(ReaderOrErr.get());
1931 Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink);
1932   // Set the module before reading the profile so the reader may be able to
1933   // only read the function profiles that are used by the current module.
1934 Reader->setModule(&M);
1935 if (std::error_code EC = Reader->read()) {
1936 std::string Msg = "profile reading failed: " + EC.message();
1937 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
1938 return false;
1939 }
1940
1941 PSL = Reader->getProfileSymbolList();
1942
1943   // When profile-sample-accurate is on, ignore the symbol list.
1944 ProfAccForSymsInList =
1945 ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
1946 if (ProfAccForSymsInList) {
1947 NamesInProfile.clear();
1948 if (auto NameTable = Reader->getNameTable())
1949 NamesInProfile.insert(NameTable->begin(), NameTable->end());
1950 CoverageTracker.setProfAccForSymsInList(true);
1951 }
1952
1953 if (FAM && !ProfileInlineReplayFile.empty()) {
1954 ExternalInlineAdvisor = getReplayInlineAdvisor(
1955 M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr,
1956 ReplayInlinerSettings{ProfileInlineReplayFile,
1957 ProfileInlineReplayScope,
1958 ProfileInlineReplayFallback,
1959 {ProfileInlineReplayFormat}},
1960 /*EmitRemarks=*/false, InlineContext{LTOPhase, InlinePass::ReplaySampleProfileInliner});
1961 }
1962
1963 // Apply tweaks if context-sensitive or probe-based profile is available.
1964 if (Reader->profileIsCS() || Reader->profileIsPreInlined() ||
1965 Reader->profileIsProbeBased()) {
1966 if (!UseIterativeBFIInference.getNumOccurrences())
1967 UseIterativeBFIInference = true;
1968 if (!SampleProfileUseProfi.getNumOccurrences())
1969 SampleProfileUseProfi = true;
1970 if (!EnableExtTspBlockPlacement.getNumOccurrences())
1971 EnableExtTspBlockPlacement = true;
1972     // Enable the priority-based inliner and size inlining by default for CSSPGO.
1973 if (!ProfileSizeInline.getNumOccurrences())
1974 ProfileSizeInline = true;
1975 if (!CallsitePrioritizedInline.getNumOccurrences())
1976 CallsitePrioritizedInline = true;
1977     // For CSSPGO, we also allow recursive inlining to best use context profiles.
1978 if (!AllowRecursiveInline.getNumOccurrences())
1979 AllowRecursiveInline = true;
1980
1981 if (Reader->profileIsPreInlined()) {
1982 if (!UsePreInlinerDecision.getNumOccurrences())
1983 UsePreInlinerDecision = true;
1984 }
1985
1986 if (!Reader->profileIsCS()) {
1987 // Non-CS profile should be fine without a function size budget for the
1988 // inliner since the contexts in the profile are either all from inlining
1989       // in the previous build or pre-computed by the preinliner with a size
1990 // cap, thus they are bounded.
1991 if (!ProfileInlineLimitMin.getNumOccurrences())
1992 ProfileInlineLimitMin = std::numeric_limits<unsigned>::max();
1993 if (!ProfileInlineLimitMax.getNumOccurrences())
1994 ProfileInlineLimitMax = std::numeric_limits<unsigned>::max();
1995 }
1996 }
1997
1998 if (Reader->profileIsCS()) {
1999     // Tracker for profiles under different contexts.
2000 ContextTracker = std::make_unique<SampleContextTracker>(
2001 Reader->getProfiles(), &GUIDToFuncNameMap);
2002 }
2003
2004 // Load pseudo probe descriptors for probe-based function samples.
2005 if (Reader->profileIsProbeBased()) {
2006 ProbeManager = std::make_unique<PseudoProbeManager>(M);
2007 if (!ProbeManager->moduleIsProbed(M)) {
2008 const char *Msg =
2009 "Pseudo-probe-based profile requires SampleProfileProbePass";
2010 Ctx.diagnose(DiagnosticInfoSampleProfile(M.getModuleIdentifier(), Msg,
2011 DS_Warning));
2012 return false;
2013 }
2014 }
2015
2016 return true;
2017 }
2018
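// Annotate all functions carrying the "use-sample-profile" attribute in a
// profile-guided top-down order, then update entry counts of callees whose
// callsites were not inlined.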
2019 bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
2020 ProfileSummaryInfo *_PSI, CallGraph *CG) {
2021 GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
2022
2023 PSI = _PSI;
2024 if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
2025 M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
2026 ProfileSummary::PSK_Sample);
2027 PSI->refresh();
2028 }
2029 // Compute the total number of samples collected in this profile.
2030 for (const auto &I : Reader->getProfiles())
2031 TotalCollectedSamples += I.second.getTotalSamples();
2032
2033 auto Remapper = Reader->getRemapper();
2034 // Populate the symbol map.
2035 for (const auto &N_F : M.getValueSymbolTable()) {
2036 StringRef OrigName = N_F.getKey();
2037 Function *F = dyn_cast<Function>(N_F.getValue());
2038 if (F == nullptr || OrigName.empty())
2039 continue;
2040 SymbolMap[OrigName] = F;
2041 StringRef NewName = FunctionSamples::getCanonicalFnName(*F);
2042 if (OrigName != NewName && !NewName.empty()) {
2043 auto r = SymbolMap.insert(std::make_pair(NewName, F));
2044       // Failing to insert means there is already an entry in SymbolMap,
2045       // thus there are multiple functions that are mapped to the same
2046       // stripped name. In the case of such a name conflict, set the value
2047       // to nullptr to avoid confusion.
2048 if (!r.second)
2049 r.first->second = nullptr;
2050 OrigName = NewName;
2051 }
2052 // Insert the remapped names into SymbolMap.
2053 if (Remapper) {
2054 if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
2055 if (*MapName != OrigName && !MapName->empty())
2056 SymbolMap.insert(std::make_pair(*MapName, F));
2057 }
2058 }
2059 }
2060 assert(SymbolMap.count(StringRef()) == 0 &&
2061 "No empty StringRef should be added in SymbolMap");
2062
2063 bool retval = false;
2064 for (auto F : buildFunctionOrder(M, CG)) {
2065 assert(!F->isDeclaration());
2066 clearFunctionData();
2067 retval |= runOnFunction(*F, AM);
2068 }
2069
2070   // Account for cold calls that were not inlined.
2071 if (!FunctionSamples::ProfileIsCS)
2072 for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
2073 notInlinedCallInfo)
2074 updateProfileCallee(pair.first, pair.second.entryCount);
2075
2076 return retval;
2077 }
2078
2079 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
2080 LLVM_DEBUG(dbgs() << "\n\nProcessing Function " << F.getName() << "\n");
2081 DILocation2SampleMap.clear();
2082 // By default the entry count is initialized to -1, which will be treated
2083 // conservatively by getEntryCount as the same as unknown (None). This is
2084   // to avoid newly added code being treated as cold. If we have samples,
2085 // this will be overwritten in emitAnnotations.
2086 uint64_t initialEntryCount = -1;
2087
2088 ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
2089 if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
2090     // Initialize all the function entry counts to 0. It means all the
2091     // functions without a profile will be regarded as cold.
2092 initialEntryCount = 0;
2093 // profile-sample-accurate is a user assertion which has a higher precedence
2094     // than the symbol list. When profile-sample-accurate is on, ignore it.
2095 ProfAccForSymsInList = false;
2096 }
2097 CoverageTracker.setProfAccForSymsInList(ProfAccForSymsInList);
2098
2099   // PSL -- the profile symbol list includes all the symbols in the sampled
2100   // binary. If ProfileAccurateForSymsInList is enabled, PSL is used to treat
2101   // old functions without samples as cold, without having to worry
2102   // about new and hot functions being mistakenly treated as cold.
2103 if (ProfAccForSymsInList) {
2104 // Initialize the entry count to 0 for functions in the list.
2105 if (PSL->contains(F.getName()))
2106 initialEntryCount = 0;
2107
2108     // Functions in the symbol list but without samples will be regarded as
2109     // cold. To minimize the potential negative performance impact this could
2110     // have, we want to be a little conservative here: if a function shows up
2111     // in the profile, whether as an outline function, inline instance, or
2112     // call target, treat the function as not being cold. This will handle
2113     // the cases where most callsites of a function are inlined in the sampled
2114     // binary but not inlined in the current build (because of source code drift,
2115     // imprecise debug information, or callsites that are all cold individually
2116     // but not cold accumulatively...), so the outline function showing up as
2117     // cold in the sampled binary will actually not be cold after the current build.
2118 StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
2119 if (NamesInProfile.count(CanonName))
2120 initialEntryCount = -1;
2121 }
2122
2123 // Initialize entry count when the function has no existing entry
2124 // count value.
2125 if (!F.getEntryCount())
2126 F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
2127 std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
2128 if (AM) {
2129 auto &FAM =
2130 AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
2131 .getManager();
2132 ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2133 } else {
2134 OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2135 ORE = OwnedORE.get();
2136 }
2137
2138 if (FunctionSamples::ProfileIsCS)
2139 Samples = ContextTracker->getBaseSamplesFor(F);
2140 else
2141 Samples = Reader->getSamplesFor(F);
2142
2143 if (Samples && !Samples->empty())
2144 return emitAnnotations(F);
2145 return false;
2146 }
2147
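// New pass manager entry point: wire up the per-function analyses, initialize
// the loader, and run it over the module.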
2148 PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
2149 ModuleAnalysisManager &AM) {
2150 FunctionAnalysisManager &FAM =
2151 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2152
2153 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
2154 return FAM.getResult<AssumptionAnalysis>(F);
2155 };
2156 auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
2157 return FAM.getResult<TargetIRAnalysis>(F);
2158 };
2159 auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
2160 return FAM.getResult<TargetLibraryAnalysis>(F);
2161 };
2162
2163 SampleProfileLoader SampleLoader(
2164 ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
2165 ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
2166 : ProfileRemappingFileName,
2167 LTOPhase, GetAssumptionCache, GetTTI, GetTLI);
2168
2169 if (!SampleLoader.doInitialization(M, &FAM))
2170 return PreservedAnalyses::all();
2171
2172 ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
2173 CallGraph &CG = AM.getResult<CallGraphAnalysis>(M);
2174 if (!SampleLoader.runOnModule(M, &AM, PSI, &CG))
2175 return PreservedAnalyses::all();
2176
2177 return PreservedAnalyses::none();
2178 }
2179