//===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SampleProfileLoader transformation. This pass
// reads a profile file generated by a sampling profiler (e.g. Linux Perf -
// http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
// profile information in the given profile.
//
// This pass generates branch weight annotations on the IR:
//
// - prof: Represents branch weights. This annotation is added to branches
//         to indicate the weights of each edge coming out of the branch.
//         The weight of each edge is the weight of the target block for
//         that edge. The weight of a block B is computed as the maximum
//         number of samples found in B.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/SampleProfile.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ReplayInlineAdvisor.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/SampleContextTracker.h"
#include "llvm/Transforms/IPO/SampleProfileProbe.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h" 85 #include <algorithm> 86 #include <cassert> 87 #include <cstdint> 88 #include <functional> 89 #include <limits> 90 #include <map> 91 #include <memory> 92 #include <queue> 93 #include <string> 94 #include <system_error> 95 #include <utility> 96 #include <vector> 97 98 using namespace llvm; 99 using namespace sampleprof; 100 using ProfileCount = Function::ProfileCount; 101 #define DEBUG_TYPE "sample-profile" 102 #define CSINLINE_DEBUG DEBUG_TYPE "-inline" 103 104 STATISTIC(NumCSInlined, 105 "Number of functions inlined with context sensitive profile"); 106 STATISTIC(NumCSNotInlined, 107 "Number of functions not inlined with context sensitive profile"); 108 STATISTIC(NumMismatchedProfile, 109 "Number of functions with CFG mismatched profile"); 110 STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile"); 111 STATISTIC(NumDuplicatedInlinesite, 112 "Number of inlined callsites with a partial distribution factor"); 113 114 STATISTIC(NumCSInlinedHitMinLimit, 115 "Number of functions with FDO inline stopped due to min size limit"); 116 STATISTIC(NumCSInlinedHitMaxLimit, 117 "Number of functions with FDO inline stopped due to max size limit"); 118 STATISTIC( 119 NumCSInlinedHitGrowthLimit, 120 "Number of functions with FDO inline stopped due to growth size limit"); 121 122 // Command line option to specify the file to read samples from. This is 123 // mainly used for debugging. 124 static cl::opt<std::string> SampleProfileFile( 125 "sample-profile-file", cl::init(""), cl::value_desc("filename"), 126 cl::desc("Profile file loaded by -sample-profile"), cl::Hidden); 127 128 // The named file contains a set of transformations that may have been applied 129 // to the symbol names between the program from which the sample data was 130 // collected and the current program's symbols. 131 static cl::opt<std::string> SampleProfileRemappingFile( 132 "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"), 133 cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden); 134 135 static cl::opt<unsigned> SampleProfileMaxPropagateIterations( 136 "sample-profile-max-propagate-iterations", cl::init(100), 137 cl::desc("Maximum number of iterations to go through when propagating " 138 "sample block/edge weights through the CFG.")); 139 140 static cl::opt<unsigned> SampleProfileRecordCoverage( 141 "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"), 142 cl::desc("Emit a warning if less than N% of records in the input profile " 143 "are matched to the IR.")); 144 145 static cl::opt<unsigned> SampleProfileSampleCoverage( 146 "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"), 147 cl::desc("Emit a warning if less than N% of samples in the input profile " 148 "are matched to the IR.")); 149 150 static cl::opt<bool> NoWarnSampleUnused( 151 "no-warn-sample-unused", cl::init(false), cl::Hidden, 152 cl::desc("Use this option to turn off/on warnings about function with " 153 "samples but without debug information to use those samples. ")); 154 155 static cl::opt<bool> ProfileSampleAccurate( 156 "profile-sample-accurate", cl::Hidden, cl::init(false), 157 cl::desc("If the sample profile is accurate, we will mark all un-sampled " 158 "callsite and function as having 0 samples. Otherwise, treat " 159 "un-sampled callsites and functions conservatively as unknown. 
")); 160 161 static cl::opt<bool> ProfileAccurateForSymsInList( 162 "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore, 163 cl::init(true), 164 cl::desc("For symbols in profile symbol list, regard their profiles to " 165 "be accurate. It may be overriden by profile-sample-accurate. ")); 166 167 static cl::opt<bool> ProfileMergeInlinee( 168 "sample-profile-merge-inlinee", cl::Hidden, cl::init(true), 169 cl::desc("Merge past inlinee's profile to outline version if sample " 170 "profile loader decided not to inline a call site. It will " 171 "only be enabled when top-down order of profile loading is " 172 "enabled. ")); 173 174 static cl::opt<bool> ProfileTopDownLoad( 175 "sample-profile-top-down-load", cl::Hidden, cl::init(true), 176 cl::desc("Do profile annotation and inlining for functions in top-down " 177 "order of call graph during sample profile loading. It only " 178 "works for new pass manager. ")); 179 180 static cl::opt<bool> ProfileSizeInline( 181 "sample-profile-inline-size", cl::Hidden, cl::init(false), 182 cl::desc("Inline cold call sites in profile loader if it's beneficial " 183 "for code size.")); 184 185 static cl::opt<int> ProfileInlineGrowthLimit( 186 "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12), 187 cl::desc("The size growth ratio limit for proirity-based sample profile " 188 "loader inlining.")); 189 190 static cl::opt<int> ProfileInlineLimitMin( 191 "sample-profile-inline-limit-min", cl::Hidden, cl::init(100), 192 cl::desc("The lower bound of size growth limit for " 193 "proirity-based sample profile loader inlining.")); 194 195 static cl::opt<int> ProfileInlineLimitMax( 196 "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000), 197 cl::desc("The upper bound of size growth limit for " 198 "proirity-based sample profile loader inlining.")); 199 200 static cl::opt<int> ProfileICPThreshold( 201 "sample-profile-icp-threshold", cl::Hidden, cl::init(5), 202 cl::desc( 203 "Relative hotness threshold for indirect " 204 "call promotion in proirity-based sample profile loader inlining.")); 205 206 static cl::opt<int> SampleHotCallSiteThreshold( 207 "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000), 208 cl::desc("Hot callsite threshold for proirity-based sample profile loader " 209 "inlining.")); 210 211 static cl::opt<bool> CallsitePrioritizedInline( 212 "sample-profile-prioritized-inline", cl::Hidden, cl::ZeroOrMore, 213 cl::init(false), 214 cl::desc("Use call site prioritized inlining for sample profile loader." 
215 "Currently only CSSPGO is supported.")); 216 217 static cl::opt<int> SampleColdCallSiteThreshold( 218 "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45), 219 cl::desc("Threshold for inlining cold callsites")); 220 221 static cl::opt<std::string> ProfileInlineReplayFile( 222 "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"), 223 cl::desc( 224 "Optimization remarks file containing inline remarks to be replayed " 225 "by inlining from sample profile loader."), 226 cl::Hidden); 227 228 namespace { 229 230 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>; 231 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>; 232 using Edge = std::pair<const BasicBlock *, const BasicBlock *>; 233 using EdgeWeightMap = DenseMap<Edge, uint64_t>; 234 using BlockEdgeMap = 235 DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>; 236 237 class SampleProfileLoader; 238 239 class SampleCoverageTracker { 240 public: 241 bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset, 242 uint32_t Discriminator, uint64_t Samples); 243 unsigned computeCoverage(unsigned Used, unsigned Total) const; 244 unsigned countUsedRecords(const FunctionSamples *FS, 245 ProfileSummaryInfo *PSI) const; 246 unsigned countBodyRecords(const FunctionSamples *FS, 247 ProfileSummaryInfo *PSI) const; 248 uint64_t getTotalUsedSamples() const { return TotalUsedSamples; } 249 uint64_t countBodySamples(const FunctionSamples *FS, 250 ProfileSummaryInfo *PSI) const; 251 252 void clear() { 253 SampleCoverage.clear(); 254 TotalUsedSamples = 0; 255 } 256 inline void setProfAccForSymsInList(bool V) { ProfAccForSymsInList = V; } 257 258 private: 259 using BodySampleCoverageMap = std::map<LineLocation, unsigned>; 260 using FunctionSamplesCoverageMap = 261 DenseMap<const FunctionSamples *, BodySampleCoverageMap>; 262 263 /// Coverage map for sampling records. 264 /// 265 /// This map keeps a record of sampling records that have been matched to 266 /// an IR instruction. This is used to detect some form of staleness in 267 /// profiles (see flag -sample-profile-check-coverage). 268 /// 269 /// Each entry in the map corresponds to a FunctionSamples instance. This is 270 /// another map that counts how many times the sample record at the 271 /// given location has been used. 272 FunctionSamplesCoverageMap SampleCoverage; 273 274 /// Number of samples used from the profile. 275 /// 276 /// When a sampling record is used for the first time, the samples from 277 /// that record are added to this accumulator. Coverage is later computed 278 /// based on the total number of samples available in this function and 279 /// its callsites. 280 /// 281 /// Note that this accumulator tracks samples used from a single function 282 /// and all the inlined callsites. Strictly, we should have a map of counters 283 /// keyed by FunctionSamples pointers, but these stats are cleared after 284 /// every function, so we just need to keep a single counter. 285 uint64_t TotalUsedSamples = 0; 286 287 // For symbol in profile symbol list, whether to regard their profiles 288 // to be accurate. This is passed from the SampleLoader instance. 
289 bool ProfAccForSymsInList = false; 290 }; 291 292 class GUIDToFuncNameMapper { 293 public: 294 GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader, 295 DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap) 296 : CurrentReader(Reader), CurrentModule(M), 297 CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) { 298 if (!CurrentReader.useMD5()) 299 return; 300 301 for (const auto &F : CurrentModule) { 302 StringRef OrigName = F.getName(); 303 CurrentGUIDToFuncNameMap.insert( 304 {Function::getGUID(OrigName), OrigName}); 305 306 // Local to global var promotion used by optimization like thinlto 307 // will rename the var and add suffix like ".llvm.xxx" to the 308 // original local name. In sample profile, the suffixes of function 309 // names are all stripped. Since it is possible that the mapper is 310 // built in post-thin-link phase and var promotion has been done, 311 // we need to add the substring of function name without the suffix 312 // into the GUIDToFuncNameMap. 313 StringRef CanonName = FunctionSamples::getCanonicalFnName(F); 314 if (CanonName != OrigName) 315 CurrentGUIDToFuncNameMap.insert( 316 {Function::getGUID(CanonName), CanonName}); 317 } 318 319 // Update GUIDToFuncNameMap for each function including inlinees. 320 SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap); 321 } 322 323 ~GUIDToFuncNameMapper() { 324 if (!CurrentReader.useMD5()) 325 return; 326 327 CurrentGUIDToFuncNameMap.clear(); 328 329 // Reset GUIDToFuncNameMap for of each function as they're no 330 // longer valid at this point. 331 SetGUIDToFuncNameMapForAll(nullptr); 332 } 333 334 private: 335 void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) { 336 std::queue<FunctionSamples *> FSToUpdate; 337 for (auto &IFS : CurrentReader.getProfiles()) { 338 FSToUpdate.push(&IFS.second); 339 } 340 341 while (!FSToUpdate.empty()) { 342 FunctionSamples *FS = FSToUpdate.front(); 343 FSToUpdate.pop(); 344 FS->GUIDToFuncNameMap = Map; 345 for (const auto &ICS : FS->getCallsiteSamples()) { 346 const FunctionSamplesMap &FSMap = ICS.second; 347 for (auto &IFS : FSMap) { 348 FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second); 349 FSToUpdate.push(&FS); 350 } 351 } 352 } 353 } 354 355 SampleProfileReader &CurrentReader; 356 Module &CurrentModule; 357 DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap; 358 }; 359 360 // Inline candidate used by iterative callsite prioritized inliner 361 struct InlineCandidate { 362 CallBase *CallInstr; 363 const FunctionSamples *CalleeSamples; 364 // Prorated callsite count, which will be used to guide inlining. For example, 365 // if a callsite is duplicated in LTO prelink, then in LTO postlink the two 366 // copies will get their own distribution factors and their prorated counts 367 // will be used to decide if they should be inlined independently. 368 uint64_t CallsiteCount; 369 // Call site distribution factor to prorate the profile samples for a 370 // duplicated callsite. Default value is 1.0. 
371 float CallsiteDistribution; 372 }; 373 374 // Inline candidate comparer using call site weight 375 struct CandidateComparer { 376 bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) { 377 if (LHS.CallsiteCount != RHS.CallsiteCount) 378 return LHS.CallsiteCount < RHS.CallsiteCount; 379 380 // Tie breaker using GUID so we have stable/deterministic inlining order 381 assert(LHS.CalleeSamples && RHS.CalleeSamples && 382 "Expect non-null FunctionSamples"); 383 return LHS.CalleeSamples->getGUID(LHS.CalleeSamples->getName()) < 384 RHS.CalleeSamples->getGUID(RHS.CalleeSamples->getName()); 385 } 386 }; 387 388 using CandidateQueue = 389 PriorityQueue<InlineCandidate, std::vector<InlineCandidate>, 390 CandidateComparer>; 391 392 /// Sample profile pass. 393 /// 394 /// This pass reads profile data from the file specified by 395 /// -sample-profile-file and annotates every affected function with the 396 /// profile information found in that file. 397 class SampleProfileLoader { 398 public: 399 SampleProfileLoader( 400 StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase, 401 std::function<AssumptionCache &(Function &)> GetAssumptionCache, 402 std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo, 403 std::function<const TargetLibraryInfo &(Function &)> GetTLI) 404 : GetAC(std::move(GetAssumptionCache)), 405 GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)), 406 Filename(std::string(Name)), RemappingFilename(std::string(RemapName)), 407 LTOPhase(LTOPhase) {} 408 409 bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr); 410 bool runOnModule(Module &M, ModuleAnalysisManager *AM, 411 ProfileSummaryInfo *_PSI, CallGraph *CG); 412 413 void dump() { Reader->dump(); } 414 415 protected: 416 friend class SampleCoverageTracker; 417 418 bool runOnFunction(Function &F, ModuleAnalysisManager *AM); 419 unsigned getFunctionLoc(Function &F); 420 bool emitAnnotations(Function &F); 421 ErrorOr<uint64_t> getInstWeight(const Instruction &I); 422 ErrorOr<uint64_t> getProbeWeight(const Instruction &I); 423 ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB); 424 const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const; 425 std::vector<const FunctionSamples *> 426 findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const; 427 mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap; 428 const FunctionSamples *findFunctionSamples(const Instruction &I) const; 429 // Attempt to promote indirect call and also inline the promoted call 430 bool tryPromoteAndInlineCandidate( 431 Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, 432 uint64_t &Sum, DenseSet<Instruction *> &PromotedInsns, 433 SmallVector<CallBase *, 8> *InlinedCallSites = nullptr); 434 bool inlineHotFunctions(Function &F, 435 DenseSet<GlobalValue::GUID> &InlinedGUIDs); 436 InlineCost shouldInlineCandidate(InlineCandidate &Candidate); 437 bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB); 438 bool 439 tryInlineCandidate(InlineCandidate &Candidate, 440 SmallVector<CallBase *, 8> *InlinedCallSites = nullptr); 441 bool 442 inlineHotFunctionsWithPriority(Function &F, 443 DenseSet<GlobalValue::GUID> &InlinedGUIDs); 444 // Inline cold/small functions in addition to hot ones 445 bool shouldInlineColdCallee(CallBase &CallInst); 446 void emitOptimizationRemarksForInlineCandidates( 447 const SmallVectorImpl<CallBase *> &Candidates, const Function &F, 448 bool Hot); 449 void 
printEdgeWeight(raw_ostream &OS, Edge E); 450 void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const; 451 void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB); 452 bool computeBlockWeights(Function &F); 453 void findEquivalenceClasses(Function &F); 454 template <bool IsPostDom> 455 void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants, 456 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree); 457 458 void propagateWeights(Function &F); 459 uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge); 460 void buildEdges(Function &F); 461 std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG); 462 bool propagateThroughEdges(Function &F, bool UpdateBlockCount); 463 void computeDominanceAndLoopInfo(Function &F); 464 void clearFunctionData(); 465 466 /// Map basic blocks to their computed weights. 467 /// 468 /// The weight of a basic block is defined to be the maximum 469 /// of all the instruction weights in that block. 470 BlockWeightMap BlockWeights; 471 472 /// Map edges to their computed weights. 473 /// 474 /// Edge weights are computed by propagating basic block weights in 475 /// SampleProfile::propagateWeights. 476 EdgeWeightMap EdgeWeights; 477 478 /// Set of visited blocks during propagation. 479 SmallPtrSet<const BasicBlock *, 32> VisitedBlocks; 480 481 /// Set of visited edges during propagation. 482 SmallSet<Edge, 32> VisitedEdges; 483 484 /// Equivalence classes for block weights. 485 /// 486 /// Two blocks BB1 and BB2 are in the same equivalence class if they 487 /// dominate and post-dominate each other, and they are in the same loop 488 /// nest. When this happens, the two blocks are guaranteed to execute 489 /// the same number of times. 490 EquivalenceClassMap EquivalenceClass; 491 492 /// Map from function name to Function *. Used to find the function from 493 /// the function name. If the function name contains suffix, additional 494 /// entry is added to map from the stripped name to the function if there 495 /// is one-to-one mapping. 496 StringMap<Function *> SymbolMap; 497 498 /// Dominance, post-dominance and loop information. 499 std::unique_ptr<DominatorTree> DT; 500 std::unique_ptr<PostDominatorTree> PDT; 501 std::unique_ptr<LoopInfo> LI; 502 503 std::function<AssumptionCache &(Function &)> GetAC; 504 std::function<TargetTransformInfo &(Function &)> GetTTI; 505 std::function<const TargetLibraryInfo &(Function &)> GetTLI; 506 507 /// Predecessors for each basic block in the CFG. 508 BlockEdgeMap Predecessors; 509 510 /// Successors for each basic block in the CFG. 511 BlockEdgeMap Successors; 512 513 SampleCoverageTracker CoverageTracker; 514 515 /// Profile reader object. 516 std::unique_ptr<SampleProfileReader> Reader; 517 518 /// Profile tracker for different context. 519 std::unique_ptr<SampleContextTracker> ContextTracker; 520 521 /// Samples collected for the body of this function. 522 FunctionSamples *Samples = nullptr; 523 524 /// Name of the profile file to load. 525 std::string Filename; 526 527 /// Name of the profile remapping file to load. 528 std::string RemappingFilename; 529 530 /// Flag indicating whether the profile input loaded successfully. 531 bool ProfileIsValid = false; 532 533 /// Flag indicating whether input profile is context-sensitive 534 bool ProfileIsCS = false; 535 536 /// Flag indicating which LTO/ThinLTO phase the pass is invoked in. 
  ///
  /// We need to know the LTO phase because, for example, in the ThinLTO
  /// prelink phase we should not promote indirect calls during annotation.
  /// Instead, we will mark the GUIDs that need to be annotated on the
  /// function.
  ThinOrFullLTOPhase LTOPhase;

  /// Profile Summary Info computed from sample profile.
  ProfileSummaryInfo *PSI = nullptr;

  /// Profile symbol list tells whether a function name appears in the binary
  /// used to generate the current profile.
  std::unique_ptr<ProfileSymbolList> PSL;

  /// Total number of samples collected in this profile.
  ///
  /// This is the sum of all the samples collected in all the functions
  /// executed at runtime.
  uint64_t TotalCollectedSamples = 0;

  /// Optimization Remark Emitter used to emit diagnostic remarks.
  OptimizationRemarkEmitter *ORE = nullptr;

  // Information recorded when we declined to inline a call site because we
  // determined it to be too cold. It is accumulated for each callee function.
  // Initially this is just the entry count.
  struct NotInlinedProfileInfo {
    uint64_t entryCount;
  };
  DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;

  // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  // all the function symbols defined or declared in the current module.
  DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;

  // All the names used in FunctionSamples, including outline function
  // names, inline instance names and call target names.
  StringSet<> NamesInProfile;

  // For symbols in the profile symbol list, whether to regard their profiles
  // as accurate. It is mainly decided by the existence of a profile symbol
  // list and the -profile-accurate-for-symsinlist flag, but it can be
  // overridden by -profile-sample-accurate or the profile-sample-accurate
  // attribute.
  bool ProfAccForSymsInList;

  // External inline advisor used to replay inline decisions from remarks.
  std::unique_ptr<ReplayInlineAdvisor> ExternalInlineAdvisor;

  // A pseudo probe helper to correlate the imported sample counts.
  std::unique_ptr<PseudoProbeManager> ProbeManager;
};

class SampleProfileLoaderLegacyPass : public ModulePass {
public:
  // Class identification, replacement for typeinfo
  static char ID;

  SampleProfileLoaderLegacyPass(
      StringRef Name = SampleProfileFile,
      ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
      : ModulePass(ID), SampleLoader(
                            Name, SampleProfileRemappingFile, LTOPhase,
                            [&](Function &F) -> AssumptionCache & {
                              return ACT->getAssumptionCache(F);
                            },
                            [&](Function &F) -> TargetTransformInfo & {
                              return TTIWP->getTTI(F);
                            },
                            [&](Function &F) -> TargetLibraryInfo & {
                              return TLIWP->getTLI(F);
                            }) {
    initializeSampleProfileLoaderLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void dump() { SampleLoader.dump(); }

  bool doInitialization(Module &M) override {
    return SampleLoader.doInitialization(M);
  }

  StringRef getPassName() const override { return "Sample profile pass"; }
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }

private:
  SampleProfileLoader SampleLoader;
  AssumptionCacheTracker *ACT = nullptr;
  TargetTransformInfoWrapperPass *TTIWP = nullptr;
  TargetLibraryInfoWrapperPass *TLIWP = nullptr;
};

} // end anonymous namespace

/// Return true if the given callsite is hot with respect to the hot cutoff
/// threshold.
///
/// Functions that were inlined in the original binary will be represented
/// in the inline stack in the sample profile. If the profile shows that
/// the original inline decision was "good" (i.e., the callsite is executed
/// frequently), then we will recreate the inline decision and apply the
/// profile from the inlined callsite.
///
/// To decide whether an inlined callsite is hot, we compare the callsite
/// sample count with the hot cutoff computed by ProfileSummaryInfo; the
/// callsite is regarded as hot if the count is above the cutoff value.
///
/// When ProfileAccurateForSymsInList is enabled and a profile symbol list
/// is present, functions in the profile symbol list but without a profile
/// will be regarded as cold, and much less inlining will happen in the CGSCC
/// inlining pass. We therefore lower the hot criteria here to allow more
/// early inlining for warm callsites, which is helpful for performance.
static bool callsiteIsHot(const FunctionSamples *CallsiteFS,
                          ProfileSummaryInfo *PSI, bool ProfAccForSymsInList) {
  if (!CallsiteFS)
    return false; // The callsite was not inlined in the original binary.

  assert(PSI && "PSI is expected to be non null");
  uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
  if (ProfAccForSymsInList)
    return !PSI->isColdCount(CallsiteTotalSamples);
  else
    return PSI->isHotCount(CallsiteTotalSamples);
}

/// Mark as used the sample record for the given function samples at
/// (LineOffset, Discriminator).
///
/// \returns true if this is the first time we mark the given record.
671 bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS, 672 uint32_t LineOffset, 673 uint32_t Discriminator, 674 uint64_t Samples) { 675 LineLocation Loc(LineOffset, Discriminator); 676 unsigned &Count = SampleCoverage[FS][Loc]; 677 bool FirstTime = (++Count == 1); 678 if (FirstTime) 679 TotalUsedSamples += Samples; 680 return FirstTime; 681 } 682 683 /// Return the number of sample records that were applied from this profile. 684 /// 685 /// This count does not include records from cold inlined callsites. 686 unsigned 687 SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS, 688 ProfileSummaryInfo *PSI) const { 689 auto I = SampleCoverage.find(FS); 690 691 // The size of the coverage map for FS represents the number of records 692 // that were marked used at least once. 693 unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0; 694 695 // If there are inlined callsites in this function, count the samples found 696 // in the respective bodies. However, do not bother counting callees with 0 697 // total samples, these are callees that were never invoked at runtime. 698 for (const auto &I : FS->getCallsiteSamples()) 699 for (const auto &J : I.second) { 700 const FunctionSamples *CalleeSamples = &J.second; 701 if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList)) 702 Count += countUsedRecords(CalleeSamples, PSI); 703 } 704 705 return Count; 706 } 707 708 /// Return the number of sample records in the body of this profile. 709 /// 710 /// This count does not include records from cold inlined callsites. 711 unsigned 712 SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS, 713 ProfileSummaryInfo *PSI) const { 714 unsigned Count = FS->getBodySamples().size(); 715 716 // Only count records in hot callsites. 717 for (const auto &I : FS->getCallsiteSamples()) 718 for (const auto &J : I.second) { 719 const FunctionSamples *CalleeSamples = &J.second; 720 if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList)) 721 Count += countBodyRecords(CalleeSamples, PSI); 722 } 723 724 return Count; 725 } 726 727 /// Return the number of samples collected in the body of this profile. 728 /// 729 /// This count does not include samples from cold inlined callsites. 730 uint64_t 731 SampleCoverageTracker::countBodySamples(const FunctionSamples *FS, 732 ProfileSummaryInfo *PSI) const { 733 uint64_t Total = 0; 734 for (const auto &I : FS->getBodySamples()) 735 Total += I.second.getSamples(); 736 737 // Only count samples in hot callsites. 738 for (const auto &I : FS->getCallsiteSamples()) 739 for (const auto &J : I.second) { 740 const FunctionSamples *CalleeSamples = &J.second; 741 if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList)) 742 Total += countBodySamples(CalleeSamples, PSI); 743 } 744 745 return Total; 746 } 747 748 /// Return the fraction of sample records used in this profile. 749 /// 750 /// The returned value is an unsigned integer in the range 0-100 indicating 751 /// the percentage of sample records that were used while applying this 752 /// profile to the associated function. 753 unsigned SampleCoverageTracker::computeCoverage(unsigned Used, 754 unsigned Total) const { 755 assert(Used <= Total && 756 "number of used records cannot exceed the total number of records"); 757 return Total > 0 ? Used * 100 / Total : 100; 758 } 759 760 /// Clear all the per-function data used to load samples and propagate weights. 
void SampleProfileLoader::clearFunctionData() {
  BlockWeights.clear();
  EdgeWeights.clear();
  VisitedBlocks.clear();
  VisitedEdges.clear();
  EquivalenceClass.clear();
  DT = nullptr;
  PDT = nullptr;
  LI = nullptr;
  Predecessors.clear();
  Successors.clear();
  CoverageTracker.clear();
}

#ifndef NDEBUG
/// Print the weight of edge \p E on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param E Edge to print.
void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) {
  OS << "weight[" << E.first->getName() << "->" << E.second->getName()
     << "]: " << EdgeWeights[E] << "\n";
}

/// Print the equivalence class of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS,
                                                const BasicBlock *BB) {
  const BasicBlock *Equiv = EquivalenceClass[BB];
  OS << "equivalence[" << BB->getName()
     << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
}

/// Print the weight of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoader::printBlockWeight(raw_ostream &OS,
                                           const BasicBlock *BB) const {
  const auto &I = BlockWeights.find(BB);
  uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
  OS << "weight[" << BB->getName() << "]: " << W << "\n";
}
#endif

/// Get the weight for an instruction.
///
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
/// function. We use HeaderLineno to compute the offset. We then
/// look up the samples collected for \p Inst using BodySamples.
///
/// \param Inst Instruction to query.
///
/// \returns the weight of \p Inst.
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
  if (FunctionSamples::ProfileIsProbeBased)
    return getProbeWeight(Inst);

  const DebugLoc &DLoc = Inst.getDebugLoc();
  if (!DLoc)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  // Ignore all intrinsics, phi nodes and branch instructions.
  // Branch and phi node instructions usually contain debug info from sources
  // outside of their residing basic block, thus we ignore them during
  // annotation.
  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
    return std::error_code();

  // If a direct call/invoke instruction is inlined in the profile
  // (findCalleeFunctionSamples returns a non-empty result), but is not inlined
  // here, it means that the inlined callsite has no samples, thus the call
  // instruction should have a count of 0.
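  // For example (hypothetical callee), if bar() appears in this function's
  // profile as an inlined callsite, its samples live under that inlined
  // instance; a remaining direct call to bar() that was not re-inlined here is
  // therefore weighted 0 rather than inheriting bar()'s entry count. This
  // shortcut is only applied to non-CS profiles (see the check below).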
841 if (!ProfileIsCS) 842 if (const auto *CB = dyn_cast<CallBase>(&Inst)) 843 if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB)) 844 return 0; 845 846 const DILocation *DIL = DLoc; 847 uint32_t LineOffset = FunctionSamples::getOffset(DIL); 848 uint32_t Discriminator = DIL->getBaseDiscriminator(); 849 ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator); 850 if (R) { 851 bool FirstMark = 852 CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get()); 853 if (FirstMark) { 854 ORE->emit([&]() { 855 OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst); 856 Remark << "Applied " << ore::NV("NumSamples", *R); 857 Remark << " samples from profile (offset: "; 858 Remark << ore::NV("LineOffset", LineOffset); 859 if (Discriminator) { 860 Remark << "."; 861 Remark << ore::NV("Discriminator", Discriminator); 862 } 863 Remark << ")"; 864 return Remark; 865 }); 866 } 867 LLVM_DEBUG(dbgs() << " " << DLoc.getLine() << "." 868 << DIL->getBaseDiscriminator() << ":" << Inst 869 << " (line offset: " << LineOffset << "." 870 << DIL->getBaseDiscriminator() << " - weight: " << R.get() 871 << ")\n"); 872 } 873 return R; 874 } 875 876 ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) { 877 assert(FunctionSamples::ProfileIsProbeBased && 878 "Profile is not pseudo probe based"); 879 Optional<PseudoProbe> Probe = extractProbe(Inst); 880 if (!Probe) 881 return std::error_code(); 882 883 const FunctionSamples *FS = findFunctionSamples(Inst); 884 if (!FS) 885 return std::error_code(); 886 887 // If a direct call/invoke instruction is inlined in profile 888 // (findCalleeFunctionSamples returns non-empty result), but not inlined here, 889 // it means that the inlined callsite has no sample, thus the call 890 // instruction should have 0 count. 891 if (const auto *CB = dyn_cast<CallBase>(&Inst)) 892 if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB)) 893 return 0; 894 895 const ErrorOr<uint64_t> &R = FS->findSamplesAt(Probe->Id, 0); 896 if (R) { 897 uint64_t Samples = R.get() * Probe->Factor; 898 bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples); 899 if (FirstMark) { 900 ORE->emit([&]() { 901 OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst); 902 Remark << "Applied " << ore::NV("NumSamples", Samples); 903 Remark << " samples from profile (ProbeId="; 904 Remark << ore::NV("ProbeId", Probe->Id); 905 Remark << ", Factor="; 906 Remark << ore::NV("Factor", Probe->Factor); 907 Remark << ", OriginalSamples="; 908 Remark << ore::NV("OriginalSamples", R.get()); 909 Remark << ")"; 910 return Remark; 911 }); 912 } 913 LLVM_DEBUG(dbgs() << " " << Probe->Id << ":" << Inst 914 << " - weight: " << R.get() << " - factor: " 915 << format("%0.2f", Probe->Factor) << ")\n"); 916 return Samples; 917 } 918 return R; 919 } 920 921 /// Compute the weight of a basic block. 922 /// 923 /// The weight of basic block \p BB is the maximum weight of all the 924 /// instructions in BB. 925 /// 926 /// \param BB The basic block to query. 927 /// 928 /// \returns the weight for \p BB. 929 ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) { 930 uint64_t Max = 0; 931 bool HasWeight = false; 932 for (auto &I : BB->getInstList()) { 933 const ErrorOr<uint64_t> &R = getInstWeight(I); 934 if (R) { 935 Max = std::max(Max, R.get()); 936 HasWeight = true; 937 } 938 } 939 return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code(); 940 } 941 942 /// Compute and store the weights of every basic block. 
943 /// 944 /// This populates the BlockWeights map by computing 945 /// the weights of every basic block in the CFG. 946 /// 947 /// \param F The function to query. 948 bool SampleProfileLoader::computeBlockWeights(Function &F) { 949 bool Changed = false; 950 LLVM_DEBUG(dbgs() << "Block weights\n"); 951 for (const auto &BB : F) { 952 ErrorOr<uint64_t> Weight = getBlockWeight(&BB); 953 if (Weight) { 954 BlockWeights[&BB] = Weight.get(); 955 VisitedBlocks.insert(&BB); 956 Changed = true; 957 } 958 LLVM_DEBUG(printBlockWeight(dbgs(), &BB)); 959 } 960 961 return Changed; 962 } 963 964 /// Get the FunctionSamples for a call instruction. 965 /// 966 /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined 967 /// instance in which that call instruction is calling to. It contains 968 /// all samples that resides in the inlined instance. We first find the 969 /// inlined instance in which the call instruction is from, then we 970 /// traverse its children to find the callsite with the matching 971 /// location. 972 /// 973 /// \param Inst Call/Invoke instruction to query. 974 /// 975 /// \returns The FunctionSamples pointer to the inlined instance. 976 const FunctionSamples * 977 SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const { 978 const DILocation *DIL = Inst.getDebugLoc(); 979 if (!DIL) { 980 return nullptr; 981 } 982 983 StringRef CalleeName; 984 if (Function *Callee = Inst.getCalledFunction()) 985 CalleeName = FunctionSamples::getCanonicalFnName(*Callee); 986 987 if (ProfileIsCS) 988 return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName); 989 990 const FunctionSamples *FS = findFunctionSamples(Inst); 991 if (FS == nullptr) 992 return nullptr; 993 994 return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL), 995 CalleeName, Reader->getRemapper()); 996 } 997 998 /// Returns a vector of FunctionSamples that are the indirect call targets 999 /// of \p Inst. The vector is sorted by the total number of samples. Stores 1000 /// the total call count of the indirect call in \p Sum. 1001 std::vector<const FunctionSamples *> 1002 SampleProfileLoader::findIndirectCallFunctionSamples( 1003 const Instruction &Inst, uint64_t &Sum) const { 1004 const DILocation *DIL = Inst.getDebugLoc(); 1005 std::vector<const FunctionSamples *> R; 1006 1007 if (!DIL) { 1008 return R; 1009 } 1010 1011 auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) { 1012 assert(L && R && "Expect non-null FunctionSamples"); 1013 if (L->getEntrySamples() != R->getEntrySamples()) 1014 return L->getEntrySamples() > R->getEntrySamples(); 1015 return FunctionSamples::getGUID(L->getName()) < 1016 FunctionSamples::getGUID(R->getName()); 1017 }; 1018 1019 if (ProfileIsCS) { 1020 auto CalleeSamples = 1021 ContextTracker->getIndirectCalleeContextSamplesFor(DIL); 1022 if (CalleeSamples.empty()) 1023 return R; 1024 1025 // For CSSPGO, we only use target context profile's entry count 1026 // as that already includes both inlined callee and non-inlined ones.. 
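    // Illustrative example (hypothetical counts): with two promoted targets
    // whose context profiles have entry counts of 800 and 200, Sum becomes
    // 1000 and the returned vector is sorted hottest-first by entry samples.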
    Sum = 0;
    for (const auto *const FS : CalleeSamples) {
      Sum += FS->getEntrySamples();
      R.push_back(FS);
    }
    llvm::sort(R, FSCompare);
    return R;
  }

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return R;

  auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
  auto T = FS->findCallTargetMapAt(CallSite);
  Sum = 0;
  if (T)
    for (const auto &T_C : T.get())
      Sum += T_C.second;
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
    if (M->empty())
      return R;
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getEntrySamples();
      R.push_back(&NameFS.second);
    }
    llvm::sort(R, FSCompare);
  }
  return R;
}

/// Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// that the instruction comes from. We traverse the inline stack of that
/// instruction, and match it with the tree nodes in the profile.
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
  if (FunctionSamples::ProfileIsProbeBased) {
    Optional<PseudoProbe> Probe = extractProbe(Inst);
    if (!Probe)
      return nullptr;
  }

  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL)
    return Samples;

  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
  if (it.second) {
    if (ProfileIsCS)
      it.first->second = ContextTracker->getContextSamplesFor(DIL);
    else
      it.first->second =
          Samples->findFunctionSamples(DIL, Reader->getRemapper());
  }
  return it.first->second;
}

/// Attempt to promote an indirect call and also inline the promoted call.
///
/// \param F Caller function.
/// \param Candidate ICP and inline candidate.
/// \param SumOrigin Original sum of target counts for the indirect call
///                  before promotion.
/// \param Sum Sum of target counts for the indirect call.
/// \param PromotedInsns Set used to keep track of indirect calls that have
///                      already been processed.
/// \param InlinedCallSite Output vector for new call sites exposed after
///                        inlining.
bool SampleProfileLoader::tryPromoteAndInlineCandidate(
    Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, uint64_t &Sum,
    DenseSet<Instruction *> &PromotedInsns,
    SmallVector<CallBase *, 8> *InlinedCallSite) {
  const char *Reason = "Callee function not available";
  // R->getValue() != &F is to prevent promoting a recursive call.
  // If it is a recursive call, we do not inline it as it could bloat
  // the code exponentially. There is a way to handle this better, e.g.
  // clone the caller first, and inline the cloned caller if it is
  // recursive. As LLVM does not inline recursive calls, we will
  // simply ignore it instead of handling it explicitly.
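  // Roughly, the checks below require that the callee symbol resolves to a
  // function defined in this module, that it carries debug info and the
  // "use-sample-profile" attribute, that it is not the caller itself, and
  // that promotion of this call site to it is legal.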
1110 auto R = SymbolMap.find(Candidate.CalleeSamples->getFuncName()); 1111 if (R != SymbolMap.end() && R->getValue() && 1112 !R->getValue()->isDeclaration() && R->getValue()->getSubprogram() && 1113 R->getValue()->hasFnAttribute("use-sample-profile") && 1114 R->getValue() != &F && 1115 isLegalToPromote(*Candidate.CallInstr, R->getValue(), &Reason)) { 1116 auto *DI = 1117 &pgo::promoteIndirectCall(*Candidate.CallInstr, R->getValue(), 1118 Candidate.CallsiteCount, Sum, false, ORE); 1119 if (DI) { 1120 Sum -= Candidate.CallsiteCount; 1121 // Prorate the indirect callsite distribution. 1122 // Do not update the promoted direct callsite distribution at this 1123 // point since the original distribution combined with the callee 1124 // profile will be used to prorate callsites from the callee if 1125 // inlined. Once not inlined, the direct callsite distribution should 1126 // be prorated so that the it will reflect the real callsite counts. 1127 setProbeDistributionFactor(*Candidate.CallInstr, 1128 Candidate.CallsiteDistribution * Sum / 1129 SumOrigin); 1130 PromotedInsns.insert(Candidate.CallInstr); 1131 Candidate.CallInstr = DI; 1132 if (isa<CallInst>(DI) || isa<InvokeInst>(DI)) { 1133 bool Inlined = tryInlineCandidate(Candidate, InlinedCallSite); 1134 if (!Inlined) { 1135 // Prorate the direct callsite distribution so that it reflects real 1136 // callsite counts. 1137 setProbeDistributionFactor(*DI, Candidate.CallsiteDistribution * 1138 Candidate.CallsiteCount / 1139 SumOrigin); 1140 } 1141 return Inlined; 1142 } 1143 } 1144 } else { 1145 LLVM_DEBUG(dbgs() << "\nFailed to promote indirect call to " 1146 << Candidate.CalleeSamples->getFuncName() << " because " 1147 << Reason << "\n"); 1148 } 1149 return false; 1150 } 1151 1152 bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) { 1153 if (!ProfileSizeInline) 1154 return false; 1155 1156 Function *Callee = CallInst.getCalledFunction(); 1157 if (Callee == nullptr) 1158 return false; 1159 1160 InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee), 1161 GetAC, GetTLI); 1162 1163 if (Cost.isNever()) 1164 return false; 1165 1166 if (Cost.isAlways()) 1167 return true; 1168 1169 return Cost.getCost() <= SampleColdCallSiteThreshold; 1170 } 1171 1172 void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates( 1173 const SmallVectorImpl<CallBase *> &Candidates, const Function &F, 1174 bool Hot) { 1175 for (auto I : Candidates) { 1176 Function *CalledFunction = I->getCalledFunction(); 1177 if (CalledFunction) { 1178 ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt", 1179 I->getDebugLoc(), I->getParent()) 1180 << "previous inlining reattempted for " 1181 << (Hot ? "hotness: '" : "size: '") 1182 << ore::NV("Callee", CalledFunction) << "' into '" 1183 << ore::NV("Caller", &F) << "'"); 1184 } 1185 } 1186 } 1187 1188 /// Iteratively inline hot callsites of a function. 1189 /// 1190 /// Iteratively traverse all callsites of the function \p F, and find if 1191 /// the corresponding inlined instance exists and is hot in profile. If 1192 /// it is hot enough, inline the callsites and adds new callsites of the 1193 /// callee into the caller. If the call is an indirect call, first promote 1194 /// it to direct call. Each indirect call is limited with a single target. 1195 /// 1196 /// \param F function to perform iterative inlining. 1197 /// \param InlinedGUIDs a set to be updated to include all GUIDs that are 1198 /// inlined in the profiled binary. 
1199 /// 1200 /// \returns True if there is any inline happened. 1201 bool SampleProfileLoader::inlineHotFunctions( 1202 Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) { 1203 DenseSet<Instruction *> PromotedInsns; 1204 1205 // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure 1206 // Profile symbol list is ignored when profile-sample-accurate is on. 1207 assert((!ProfAccForSymsInList || 1208 (!ProfileSampleAccurate && 1209 !F.hasFnAttribute("profile-sample-accurate"))) && 1210 "ProfAccForSymsInList should be false when profile-sample-accurate " 1211 "is enabled"); 1212 1213 DenseMap<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites; 1214 bool Changed = false; 1215 bool LocalChanged = true; 1216 while (LocalChanged) { 1217 LocalChanged = false; 1218 SmallVector<CallBase *, 10> CIS; 1219 for (auto &BB : F) { 1220 bool Hot = false; 1221 SmallVector<CallBase *, 10> AllCandidates; 1222 SmallVector<CallBase *, 10> ColdCandidates; 1223 for (auto &I : BB.getInstList()) { 1224 const FunctionSamples *FS = nullptr; 1225 if (auto *CB = dyn_cast<CallBase>(&I)) { 1226 if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) { 1227 assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) && 1228 "GUIDToFuncNameMap has to be populated"); 1229 AllCandidates.push_back(CB); 1230 if (FS->getEntrySamples() > 0 || ProfileIsCS) 1231 LocalNotInlinedCallSites.try_emplace(CB, FS); 1232 if (callsiteIsHot(FS, PSI, ProfAccForSymsInList)) 1233 Hot = true; 1234 else if (shouldInlineColdCallee(*CB)) 1235 ColdCandidates.push_back(CB); 1236 } 1237 } 1238 } 1239 if (Hot || ExternalInlineAdvisor) { 1240 CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end()); 1241 emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true); 1242 } else { 1243 CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end()); 1244 emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false); 1245 } 1246 } 1247 for (CallBase *I : CIS) { 1248 Function *CalledFunction = I->getCalledFunction(); 1249 InlineCandidate Candidate = { 1250 I, 1251 LocalNotInlinedCallSites.count(I) ? LocalNotInlinedCallSites[I] 1252 : nullptr, 1253 0 /* dummy count */, 1.0 /* dummy distribution factor */}; 1254 // Do not inline recursive calls. 
1255 if (CalledFunction == &F) 1256 continue; 1257 if (I->isIndirectCall()) { 1258 if (PromotedInsns.count(I)) 1259 continue; 1260 uint64_t Sum; 1261 for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) { 1262 uint64_t SumOrigin = Sum; 1263 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) { 1264 FS->findInlinedFunctions(InlinedGUIDs, F.getParent(), 1265 PSI->getOrCompHotCountThreshold()); 1266 continue; 1267 } 1268 if (!callsiteIsHot(FS, PSI, ProfAccForSymsInList)) 1269 continue; 1270 1271 Candidate = {I, FS, FS->getEntrySamples(), 1.0}; 1272 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum, 1273 PromotedInsns)) { 1274 LocalNotInlinedCallSites.erase(I); 1275 LocalChanged = true; 1276 } 1277 } 1278 } else if (CalledFunction && CalledFunction->getSubprogram() && 1279 !CalledFunction->isDeclaration()) { 1280 if (tryInlineCandidate(Candidate)) { 1281 LocalNotInlinedCallSites.erase(I); 1282 LocalChanged = true; 1283 } 1284 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) { 1285 findCalleeFunctionSamples(*I)->findInlinedFunctions( 1286 InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold()); 1287 } 1288 } 1289 Changed |= LocalChanged; 1290 } 1291 1292 // For CS profile, profile for not inlined context will be merged when 1293 // base profile is being trieved 1294 if (ProfileIsCS) 1295 return Changed; 1296 1297 // Accumulate not inlined callsite information into notInlinedSamples 1298 for (const auto &Pair : LocalNotInlinedCallSites) { 1299 CallBase *I = Pair.getFirst(); 1300 Function *Callee = I->getCalledFunction(); 1301 if (!Callee || Callee->isDeclaration()) 1302 continue; 1303 1304 ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline", 1305 I->getDebugLoc(), I->getParent()) 1306 << "previous inlining not repeated: '" 1307 << ore::NV("Callee", Callee) << "' into '" 1308 << ore::NV("Caller", &F) << "'"); 1309 1310 ++NumCSNotInlined; 1311 const FunctionSamples *FS = Pair.getSecond(); 1312 if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) { 1313 continue; 1314 } 1315 1316 if (ProfileMergeInlinee) { 1317 // A function call can be replicated by optimizations like callsite 1318 // splitting or jump threading and the replicates end up sharing the 1319 // sample nested callee profile instead of slicing the original inlinee's 1320 // profile. We want to do merge exactly once by filtering out callee 1321 // profiles with a non-zero head sample count. 1322 if (FS->getHeadSamples() == 0) { 1323 // Use entry samples as head samples during the merge, as inlinees 1324 // don't have head samples. 1325 const_cast<FunctionSamples *>(FS)->addHeadSamples( 1326 FS->getEntrySamples()); 1327 1328 // Note that we have to do the merge right after processing function. 1329 // This allows OutlineFS's profile to be used for annotation during 1330 // top-down processing of functions' annotation. 
1331 FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee); 1332 OutlineFS->merge(*FS); 1333 } 1334 } else { 1335 auto pair = 1336 notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0}); 1337 pair.first->second.entryCount += FS->getEntrySamples(); 1338 } 1339 } 1340 return Changed; 1341 } 1342 1343 bool SampleProfileLoader::tryInlineCandidate( 1344 InlineCandidate &Candidate, SmallVector<CallBase *, 8> *InlinedCallSites) { 1345 1346 CallBase &CB = *Candidate.CallInstr; 1347 Function *CalledFunction = CB.getCalledFunction(); 1348 assert(CalledFunction && "Expect a callee with definition"); 1349 DebugLoc DLoc = CB.getDebugLoc(); 1350 BasicBlock *BB = CB.getParent(); 1351 1352 InlineCost Cost = shouldInlineCandidate(Candidate); 1353 if (Cost.isNever()) { 1354 ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB) 1355 << "incompatible inlining"); 1356 return false; 1357 } 1358 1359 if (!Cost) 1360 return false; 1361 1362 InlineFunctionInfo IFI(nullptr, GetAC); 1363 if (InlineFunction(CB, IFI).isSuccess()) { 1364 // The call to InlineFunction erases I, so we can't pass it here. 1365 emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost, 1366 true, CSINLINE_DEBUG); 1367 1368 // Now populate the list of newly exposed call sites. 1369 if (InlinedCallSites) { 1370 InlinedCallSites->clear(); 1371 for (auto &I : IFI.InlinedCallSites) 1372 InlinedCallSites->push_back(I); 1373 } 1374 1375 if (ProfileIsCS) 1376 ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples); 1377 ++NumCSInlined; 1378 1379 // Prorate inlined probes for a duplicated inlining callsite which probably 1380 // has a distribution less than 100%. Samples for an inlinee should be 1381 // distributed among the copies of the original callsite based on each 1382 // callsite's distribution factor for counts accuracy. Note that an inlined 1383 // probe may come with its own distribution factor if it has been duplicated 1384 // in the inlinee body. The two factor are multiplied to reflect the 1385 // aggregation of duplication. 1386 if (Candidate.CallsiteDistribution < 1) { 1387 for (auto &I : IFI.InlinedCallSites) { 1388 if (Optional<PseudoProbe> Probe = extractProbe(*I)) 1389 setProbeDistributionFactor(*I, Probe->Factor * 1390 Candidate.CallsiteDistribution); 1391 } 1392 NumDuplicatedInlinesite++; 1393 } 1394 1395 return true; 1396 } 1397 return false; 1398 } 1399 1400 bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate, 1401 CallBase *CB) { 1402 assert(CB && "Expect non-null call instruction"); 1403 1404 if (isa<IntrinsicInst>(CB)) 1405 return false; 1406 1407 // Find the callee's profile. For indirect call, find hottest target profile. 
  const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
  if (!CalleeSamples)
    return false;

  float Factor = 1.0;
  if (Optional<PseudoProbe> Probe = extractProbe(*CB))
    Factor = Probe->Factor;

  uint64_t CallsiteCount = 0;
  ErrorOr<uint64_t> Weight = getBlockWeight(CB->getParent());
  if (Weight)
    CallsiteCount = Weight.get();
  if (CalleeSamples)
    CallsiteCount = std::max(
        CallsiteCount, uint64_t(CalleeSamples->getEntrySamples() * Factor));

  *NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
  return true;
}

InlineCost
SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
  std::unique_ptr<InlineAdvice> Advice = nullptr;
  if (ExternalInlineAdvisor) {
    Advice = ExternalInlineAdvisor->getAdvice(*Candidate.CallInstr);
    if (!Advice->isInliningRecommended()) {
      Advice->recordUnattemptedInlining();
      return InlineCost::getNever("not previously inlined");
    }
    Advice->recordInlining();
    return InlineCost::getAlways("previously inlined");
  }

  // Adjust the threshold based on call site hotness. Only do this for the
  // callsite-prioritized inliner because otherwise the cost-benefit check is
  // done earlier.
  int SampleThreshold = SampleColdCallSiteThreshold;
  if (CallsitePrioritizedInline) {
    if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
      SampleThreshold = SampleHotCallSiteThreshold;
    else if (!ProfileSizeInline)
      return InlineCost::getNever("cold callsite");
  }

  Function *Callee = Candidate.CallInstr->getCalledFunction();
  assert(Callee && "Expect a definition for inline candidate of direct call");

  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Check if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. We need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when the cost exceeds the threshold without checking all of the IR in
  // the callee. The actual cost does not matter because we only check
  // isNever() to see if it is legal to inline the callsite.
  InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
                                  GetTTI(*Callee), GetAC, GetTLI);

  // Honor always-inline and never-inline from the call analyzer.
  if (Cost.isNever() || Cost.isAlways())
    return Cost;

  // For the old FDO inliner, we inline the call site as long as the cost is
  // not "Never". The cost-benefit check is done earlier.
  if (!CallsitePrioritizedInline) {
    return InlineCost::get(Cost.getCost(), INT_MAX);
  }

  // Otherwise only use the cost from the call analyzer, but overwrite the
  // threshold with the Sample PGO threshold.
  return InlineCost::get(Cost.getCost(), SampleThreshold);
}

bool SampleProfileLoader::inlineHotFunctionsWithPriority(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  DenseSet<Instruction *> PromotedInsns;
  assert(ProfileIsCS && "Priority-based inliner only works with CSSPGO now");

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // the profile symbol list is ignored when profile-sample-accurate is on.
1487 assert((!ProfAccForSymsInList ||
1488 (!ProfileSampleAccurate &&
1489 !F.hasFnAttribute("profile-sample-accurate"))) &&
1490 "ProfAccForSymsInList should be false when profile-sample-accurate "
1491 "is enabled");
1492
1493 // Populate the worklist with the initial call sites from the root inliner,
1494 // along with the call site weights.
1495 CandidateQueue CQueue;
1496 InlineCandidate NewCandidate;
1497 for (auto &BB : F) {
1498 for (auto &I : BB.getInstList()) {
1499 auto *CB = dyn_cast<CallBase>(&I);
1500 if (!CB)
1501 continue;
1502 if (getInlineCandidate(&NewCandidate, CB))
1503 CQueue.push(NewCandidate);
1504 }
1505 }
1506
1507 // Cap the size growth from profile guided inlining. This is needed even
1508 // though the cost of each inline candidate already accounts for callee size,
1509 // because with top-down inlining, we can grow the inlined size significantly
1510 // with a large number of smaller inlinees that each pass the cost check.
1511 assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
1512 "Max inline size limit should not be smaller than min inline size "
1513 "limit.");
1514 unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
1515 SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
1516 SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
1517 if (ExternalInlineAdvisor)
1518 SizeLimit = std::numeric_limits<unsigned>::max();
1519
1520 // Perform iterative BFS call site prioritized inlining.
1521 bool Changed = false;
1522 while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
1523 InlineCandidate Candidate = CQueue.top();
1524 CQueue.pop();
1525 CallBase *I = Candidate.CallInstr;
1526 Function *CalledFunction = I->getCalledFunction();
1527
1528 if (CalledFunction == &F)
1529 continue;
1530 if (I->isIndirectCall()) {
1531 if (PromotedInsns.count(I))
1532 continue;
1533 uint64_t Sum;
1534 auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
1535 uint64_t SumOrigin = Sum;
1536 Sum *= Candidate.CallsiteDistribution;
1537 for (const auto *FS : CalleeSamples) {
1538 // TODO: Consider disabling pre-LTO ICP for MonoLTO as well.
1539 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1540 FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
1541 PSI->getOrCompHotCountThreshold());
1542 continue;
1543 }
1544 uint64_t EntryCountDistributed =
1545 FS->getEntrySamples() * Candidate.CallsiteDistribution;
1546 // In addition to the regular inline cost check, we also need to make sure
1547 // ICP isn't introducing excessive speculative checks even if an individual
1548 // target looks beneficial to promote and inline. That means we should
1549 // only do ICP when there is a small number of dominant targets.
1550 if (EntryCountDistributed < SumOrigin / ProfileICPThreshold)
1551 break;
1552 // TODO: Fix CallAnalyzer to handle all indirect calls.
1553 // For indirect calls, we don't run CallAnalyzer to get an InlineCost
1554 // before actual inlining. This is because we could see two different
1555 // types from the same definition, which makes CallAnalyzer choke as
1556 // it expects matching parameter types on both the caller and callee
1557 // side. See the example in PR18962 for the triggering cases (the bug was
1558 // fixed, but we generate different types).
1559 if (!PSI->isHotCount(EntryCountDistributed))
1560 break;
1561 SmallVector<CallBase *, 8> InlinedCallSites;
1562 // Attach the function profile for the promoted indirect callee, and update
1563 // the call site count for the promoted inline candidate too.
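// For example (illustrative numbers only): if this candidate's distribution
// factor is 0.5 and the promoted target's entry samples are 600, the promoted
// candidate below is created with a callsite count of 300.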
1564 Candidate = {I, FS, EntryCountDistributed, 1565 Candidate.CallsiteDistribution}; 1566 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum, 1567 PromotedInsns, &InlinedCallSites)) { 1568 for (auto *CB : InlinedCallSites) { 1569 if (getInlineCandidate(&NewCandidate, CB)) 1570 CQueue.emplace(NewCandidate); 1571 } 1572 Changed = true; 1573 } 1574 } 1575 } else if (CalledFunction && CalledFunction->getSubprogram() && 1576 !CalledFunction->isDeclaration()) { 1577 SmallVector<CallBase *, 8> InlinedCallSites; 1578 if (tryInlineCandidate(Candidate, &InlinedCallSites)) { 1579 for (auto *CB : InlinedCallSites) { 1580 if (getInlineCandidate(&NewCandidate, CB)) 1581 CQueue.emplace(NewCandidate); 1582 } 1583 Changed = true; 1584 } 1585 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) { 1586 findCalleeFunctionSamples(*I)->findInlinedFunctions( 1587 InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold()); 1588 } 1589 } 1590 1591 if (!CQueue.empty()) { 1592 if (SizeLimit == (unsigned)ProfileInlineLimitMax) 1593 ++NumCSInlinedHitMaxLimit; 1594 else if (SizeLimit == (unsigned)ProfileInlineLimitMin) 1595 ++NumCSInlinedHitMinLimit; 1596 else 1597 ++NumCSInlinedHitGrowthLimit; 1598 } 1599 1600 return Changed; 1601 } 1602 1603 /// Find equivalence classes for the given block. 1604 /// 1605 /// This finds all the blocks that are guaranteed to execute the same 1606 /// number of times as \p BB1. To do this, it traverses all the 1607 /// descendants of \p BB1 in the dominator or post-dominator tree. 1608 /// 1609 /// A block BB2 will be in the same equivalence class as \p BB1 if 1610 /// the following holds: 1611 /// 1612 /// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2 1613 /// is a descendant of \p BB1 in the dominator tree, then BB2 should 1614 /// dominate BB1 in the post-dominator tree. 1615 /// 1616 /// 2- Both BB2 and \p BB1 must be in the same loop. 1617 /// 1618 /// For every block BB2 that meets those two requirements, we set BB2's 1619 /// equivalence class to \p BB1. 1620 /// 1621 /// \param BB1 Block to check. 1622 /// \param Descendants Descendants of \p BB1 in either the dom or pdom tree. 1623 /// \param DomTree Opposite dominator tree. If \p Descendants is filled 1624 /// with blocks from \p BB1's dominator tree, then 1625 /// this is the post-dominator tree, and vice versa. 1626 template <bool IsPostDom> 1627 void SampleProfileLoader::findEquivalencesFor( 1628 BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants, 1629 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) { 1630 const BasicBlock *EC = EquivalenceClass[BB1]; 1631 uint64_t Weight = BlockWeights[EC]; 1632 for (const auto *BB2 : Descendants) { 1633 bool IsDomParent = DomTree->dominates(BB2, BB1); 1634 bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2); 1635 if (BB1 != BB2 && IsDomParent && IsInSameLoop) { 1636 EquivalenceClass[BB2] = EC; 1637 // If BB2 is visited, then the entire EC should be marked as visited. 1638 if (VisitedBlocks.count(BB2)) { 1639 VisitedBlocks.insert(EC); 1640 } 1641 1642 // If BB2 is heavier than BB1, make BB2 have the same weight 1643 // as BB1. 1644 // 1645 // Note that we don't worry about the opposite situation here 1646 // (when BB2 is lighter than BB1). We will deal with this 1647 // during the propagation phase. Right now, we just want to 1648 // make sure that BB1 has the largest weight of all the 1649 // members of its equivalence set. 
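// For example (illustrative numbers only): if the running class weight is
// currently 120 and the equivalent descendant BB2 weighs 200, the class
// weight becomes 200 via the std::max below.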
1650 Weight = std::max(Weight, BlockWeights[BB2]); 1651 } 1652 } 1653 if (EC == &EC->getParent()->getEntryBlock()) { 1654 BlockWeights[EC] = Samples->getHeadSamples() + 1; 1655 } else { 1656 BlockWeights[EC] = Weight; 1657 } 1658 } 1659 1660 /// Find equivalence classes. 1661 /// 1662 /// Since samples may be missing from blocks, we can fill in the gaps by setting 1663 /// the weights of all the blocks in the same equivalence class to the same 1664 /// weight. To compute the concept of equivalence, we use dominance and loop 1665 /// information. Two blocks B1 and B2 are in the same equivalence class if B1 1666 /// dominates B2, B2 post-dominates B1 and both are in the same loop. 1667 /// 1668 /// \param F The function to query. 1669 void SampleProfileLoader::findEquivalenceClasses(Function &F) { 1670 SmallVector<BasicBlock *, 8> DominatedBBs; 1671 LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n"); 1672 // Find equivalence sets based on dominance and post-dominance information. 1673 for (auto &BB : F) { 1674 BasicBlock *BB1 = &BB; 1675 1676 // Compute BB1's equivalence class once. 1677 if (EquivalenceClass.count(BB1)) { 1678 LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1)); 1679 continue; 1680 } 1681 1682 // By default, blocks are in their own equivalence class. 1683 EquivalenceClass[BB1] = BB1; 1684 1685 // Traverse all the blocks dominated by BB1. We are looking for 1686 // every basic block BB2 such that: 1687 // 1688 // 1- BB1 dominates BB2. 1689 // 2- BB2 post-dominates BB1. 1690 // 3- BB1 and BB2 are in the same loop nest. 1691 // 1692 // If all those conditions hold, it means that BB2 is executed 1693 // as many times as BB1, so they are placed in the same equivalence 1694 // class by making BB2's equivalence class be BB1. 1695 DominatedBBs.clear(); 1696 DT->getDescendants(BB1, DominatedBBs); 1697 findEquivalencesFor(BB1, DominatedBBs, PDT.get()); 1698 1699 LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1)); 1700 } 1701 1702 // Assign weights to equivalence classes. 1703 // 1704 // All the basic blocks in the same equivalence class will execute 1705 // the same number of times. Since we know that the head block in 1706 // each equivalence class has the largest weight, assign that weight 1707 // to all the blocks in that equivalence class. 1708 LLVM_DEBUG( 1709 dbgs() << "\nAssign the same weight to all blocks in the same class\n"); 1710 for (auto &BI : F) { 1711 const BasicBlock *BB = &BI; 1712 const BasicBlock *EquivBB = EquivalenceClass[BB]; 1713 if (BB != EquivBB) 1714 BlockWeights[BB] = BlockWeights[EquivBB]; 1715 LLVM_DEBUG(printBlockWeight(dbgs(), BB)); 1716 } 1717 } 1718 1719 /// Visit the given edge to decide if it has a valid weight. 1720 /// 1721 /// If \p E has not been visited before, we copy to \p UnknownEdge 1722 /// and increment the count of unknown edges. 1723 /// 1724 /// \param E Edge to visit. 1725 /// \param NumUnknownEdges Current number of unknown edges. 1726 /// \param UnknownEdge Set if E has not been visited before. 1727 /// 1728 /// \returns E's weight, if known. Otherwise, return 0. 1729 uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges, 1730 Edge *UnknownEdge) { 1731 if (!VisitedEdges.count(E)) { 1732 (*NumUnknownEdges)++; 1733 *UnknownEdge = E; 1734 return 0; 1735 } 1736 1737 return EdgeWeights[E]; 1738 } 1739 1740 /// Propagate weights through incoming/outgoing edges. 1741 /// 1742 /// If the weight of a basic block is known, and there is only one edge 1743 /// with an unknown weight, we can calculate the weight of that edge. 
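/// For example (illustrative numbers only): if a block is known to weigh 100
/// and two of its three outgoing edges are known to weigh 60 and 30, the
/// remaining edge is assigned a weight of 10.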
1744 ///
1745 /// Similarly, if all the edges have a known count, we can calculate the
1746 /// count of the basic block, if needed.
1747 ///
1748 /// \param F Function to process.
1749 /// \param UpdateBlockCount Whether we should update basic block counts that
1750 /// have already been annotated.
1751 ///
1752 /// \returns True if new weights were assigned to edges or blocks.
1753 bool SampleProfileLoader::propagateThroughEdges(Function &F,
1754 bool UpdateBlockCount) {
1755 bool Changed = false;
1756 LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
1757 for (const auto &BI : F) {
1758 const BasicBlock *BB = &BI;
1759 const BasicBlock *EC = EquivalenceClass[BB];
1760
1761 // Visit all the predecessor and successor edges to determine
1762 // which ones have a weight assigned already. Note that it doesn't
1763 // matter that we only keep track of a single unknown edge. The
1764 // only case we are interested in handling is when only a single
1765 // edge is unknown (see setEdgeOrBlockWeight).
1766 for (unsigned i = 0; i < 2; i++) {
1767 uint64_t TotalWeight = 0;
1768 unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
1769 Edge UnknownEdge, SelfReferentialEdge, SingleEdge;
1770
1771 if (i == 0) {
1772 // First, visit all predecessor edges.
1773 NumTotalEdges = Predecessors[BB].size();
1774 for (auto *Pred : Predecessors[BB]) {
1775 Edge E = std::make_pair(Pred, BB);
1776 TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1777 if (E.first == E.second)
1778 SelfReferentialEdge = E;
1779 }
1780 if (NumTotalEdges == 1) {
1781 SingleEdge = std::make_pair(Predecessors[BB][0], BB);
1782 }
1783 } else {
1784 // On the second round, visit all successor edges.
1785 NumTotalEdges = Successors[BB].size();
1786 for (auto *Succ : Successors[BB]) {
1787 Edge E = std::make_pair(BB, Succ);
1788 TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1789 }
1790 if (NumTotalEdges == 1) {
1791 SingleEdge = std::make_pair(BB, Successors[BB][0]);
1792 }
1793 }
1794
1795 // After visiting all the edges, there are three cases that we
1796 // can handle immediately:
1797 //
1798 // - All the edge weights are known (i.e., NumUnknownEdges == 0).
1799 // In this case, we simply check that the sum of all the edges
1800 // is the same as BB's weight. If not, we change BB's weight
1801 // to match. Additionally, if BB had not been visited before,
1802 // we mark it visited.
1803 //
1804 // - Only one edge is unknown and BB has already been visited.
1805 // In this case, we can compute the weight of the edge by
1806 // subtracting the sum of the known edge weights from the block
1807 // weight. If the known edges weigh more than BB, the weight of
1808 // the last remaining edge is set to zero.
1809 //
1810 // - There exists a self-referential edge and the weight of BB is
1811 // known. In this case, the weight of that edge can be derived from
1812 // BB's weight. We add up all the other known edges and set the weight
1813 // on the self-referential edge as we did in the previous case.
1814 //
1815 // In any other case, we must continue iterating. Eventually,
1816 // all edges will get a weight, or iteration will stop when
1817 // it reaches SampleProfileMaxPropagateIterations.
1818 if (NumUnknownEdges <= 1) {
1819 uint64_t &BBWeight = BlockWeights[EC];
1820 if (NumUnknownEdges == 0) {
1821 if (!VisitedBlocks.count(EC)) {
1822 // If we already know the weight of all edges, the weight of the
1823 // basic block can be computed. It should be no larger than the sum
1824 // of all edge weights.
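// For example (illustrative numbers only): incoming edges of 30 and 70 with
// a previously recorded block weight of 50 raise the block weight to 100.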
1825 if (TotalWeight > BBWeight) { 1826 BBWeight = TotalWeight; 1827 Changed = true; 1828 LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName() 1829 << " known. Set weight for block: "; 1830 printBlockWeight(dbgs(), BB);); 1831 } 1832 } else if (NumTotalEdges == 1 && 1833 EdgeWeights[SingleEdge] < BlockWeights[EC]) { 1834 // If there is only one edge for the visited basic block, use the 1835 // block weight to adjust edge weight if edge weight is smaller. 1836 EdgeWeights[SingleEdge] = BlockWeights[EC]; 1837 Changed = true; 1838 } 1839 } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) { 1840 // If there is a single unknown edge and the block has been 1841 // visited, then we can compute E's weight. 1842 if (BBWeight >= TotalWeight) 1843 EdgeWeights[UnknownEdge] = BBWeight - TotalWeight; 1844 else 1845 EdgeWeights[UnknownEdge] = 0; 1846 const BasicBlock *OtherEC; 1847 if (i == 0) 1848 OtherEC = EquivalenceClass[UnknownEdge.first]; 1849 else 1850 OtherEC = EquivalenceClass[UnknownEdge.second]; 1851 // Edge weights should never exceed the BB weights it connects. 1852 if (VisitedBlocks.count(OtherEC) && 1853 EdgeWeights[UnknownEdge] > BlockWeights[OtherEC]) 1854 EdgeWeights[UnknownEdge] = BlockWeights[OtherEC]; 1855 VisitedEdges.insert(UnknownEdge); 1856 Changed = true; 1857 LLVM_DEBUG(dbgs() << "Set weight for edge: "; 1858 printEdgeWeight(dbgs(), UnknownEdge)); 1859 } 1860 } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) { 1861 // If a block Weights 0, all its in/out edges should weight 0. 1862 if (i == 0) { 1863 for (auto *Pred : Predecessors[BB]) { 1864 Edge E = std::make_pair(Pred, BB); 1865 EdgeWeights[E] = 0; 1866 VisitedEdges.insert(E); 1867 } 1868 } else { 1869 for (auto *Succ : Successors[BB]) { 1870 Edge E = std::make_pair(BB, Succ); 1871 EdgeWeights[E] = 0; 1872 VisitedEdges.insert(E); 1873 } 1874 } 1875 } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) { 1876 uint64_t &BBWeight = BlockWeights[BB]; 1877 // We have a self-referential edge and the weight of BB is known. 1878 if (BBWeight >= TotalWeight) 1879 EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight; 1880 else 1881 EdgeWeights[SelfReferentialEdge] = 0; 1882 VisitedEdges.insert(SelfReferentialEdge); 1883 Changed = true; 1884 LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: "; 1885 printEdgeWeight(dbgs(), SelfReferentialEdge)); 1886 } 1887 if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) { 1888 BlockWeights[EC] = TotalWeight; 1889 VisitedBlocks.insert(EC); 1890 Changed = true; 1891 } 1892 } 1893 } 1894 1895 return Changed; 1896 } 1897 1898 /// Build in/out edge lists for each basic block in the CFG. 1899 /// 1900 /// We are interested in unique edges. If a block B1 has multiple 1901 /// edges to another block B2, we only add a single B1->B2 edge. 1902 void SampleProfileLoader::buildEdges(Function &F) { 1903 for (auto &BI : F) { 1904 BasicBlock *B1 = &BI; 1905 1906 // Add predecessors for B1. 1907 SmallPtrSet<BasicBlock *, 16> Visited; 1908 if (!Predecessors[B1].empty()) 1909 llvm_unreachable("Found a stale predecessors list in a basic block."); 1910 for (BasicBlock *B2 : predecessors(B1)) 1911 if (Visited.insert(B2).second) 1912 Predecessors[B1].push_back(B2); 1913 1914 // Add successors for B1. 
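// As with predecessors, duplicate edges are collapsed; e.g. a switch whose
// cases all branch to the same block contributes a single B1->B2 edge.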
1915 Visited.clear();
1916 if (!Successors[B1].empty())
1917 llvm_unreachable("Found a stale successors list in a basic block.");
1918 for (BasicBlock *B2 : successors(B1))
1919 if (Visited.insert(B2).second)
1920 Successors[B1].push_back(B2);
1921 }
1922 }
1923
1924 /// Returns the call targets in \p M sorted by count in descending order.
1925 static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
1926 const SampleRecord::CallTargetMap & M) {
1927 SmallVector<InstrProfValueData, 2> R;
1928 for (const auto &I : SampleRecord::SortCallTargets(M)) {
1929 R.emplace_back(InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
1930 }
1931 return R;
1932 }
1933
1934 /// Propagate weights into edges.
1935 ///
1936 /// The following rules are applied to every block BB in the CFG:
1937 ///
1938 /// - If BB has a single predecessor/successor, then the weight
1939 /// of that edge is the weight of the block.
1940 ///
1941 /// - If all incoming or outgoing edges are known except one, and the
1942 /// weight of the block is already known, the weight of the unknown
1943 /// edge will be the weight of the block minus the sum of all the known
1944 /// edges. If the sum of all the known edges is larger than BB's weight,
1945 /// we set the unknown edge weight to zero.
1946 ///
1947 /// - If there is a self-referential edge, and the weight of the block is
1948 /// known, the weight for that edge is set to the weight of the block
1949 /// minus the weight of the other incoming edges to that block (if
1950 /// known).
1951 void SampleProfileLoader::propagateWeights(Function &F) {
1952 bool Changed = true;
1953 unsigned I = 0;
1954
1955 // If a BB weight is larger than its corresponding loop header's BB weight,
1956 // use the BB weight to replace the loop header BB weight.
1957 for (auto &BI : F) {
1958 BasicBlock *BB = &BI;
1959 Loop *L = LI->getLoopFor(BB);
1960 if (!L) {
1961 continue;
1962 }
1963 BasicBlock *Header = L->getHeader();
1964 if (Header && BlockWeights[BB] > BlockWeights[Header]) {
1965 BlockWeights[Header] = BlockWeights[BB];
1966 }
1967 }
1968
1969 // Before propagation starts, build, for each block, a list of
1970 // unique predecessors and successors. This is necessary to handle
1971 // identical edges in multiway branches. Since we visit all blocks and all
1972 // edges of the CFG, it is cleaner to build these lists once at the start
1973 // of the pass.
1974 buildEdges(F);
1975
1976 // Propagate until we converge or we go past the iteration limit.
1977 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
1978 Changed = propagateThroughEdges(F, false);
1979 }
1980
1981 // The first propagation pass propagates BB counts from annotated BBs to
1982 // unknown BBs. The second propagation pass resets edge weights and uses all
1983 // BB weights to propagate edge weights.
1984 VisitedEdges.clear();
1985 Changed = true;
1986 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
1987 Changed = propagateThroughEdges(F, false);
1988 }
1989
1990 // The third propagation pass allows adjusting annotated BB weights that are
1991 // obviously wrong.
1992 Changed = true;
1993 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
1994 Changed = propagateThroughEdges(F, true);
1995 }
1996
1997 // Generate MD_prof metadata for every branch instruction using the
1998 // edge weights computed during propagation.
1999 LLVM_DEBUG(dbgs() << "\nPropagation complete.
Setting branch weights\n"); 2000 LLVMContext &Ctx = F.getContext(); 2001 MDBuilder MDB(Ctx); 2002 for (auto &BI : F) { 2003 BasicBlock *BB = &BI; 2004 2005 if (BlockWeights[BB]) { 2006 for (auto &I : BB->getInstList()) { 2007 if (!isa<CallInst>(I) && !isa<InvokeInst>(I)) 2008 continue; 2009 if (!cast<CallBase>(I).getCalledFunction()) { 2010 const DebugLoc &DLoc = I.getDebugLoc(); 2011 if (!DLoc) 2012 continue; 2013 const DILocation *DIL = DLoc; 2014 const FunctionSamples *FS = findFunctionSamples(I); 2015 if (!FS) 2016 continue; 2017 auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL); 2018 auto T = FS->findCallTargetMapAt(CallSite); 2019 if (!T || T.get().empty()) 2020 continue; 2021 // Prorate the callsite counts to reflect what is already done to the 2022 // callsite, such as ICP or calliste cloning. 2023 if (FunctionSamples::ProfileIsProbeBased) { 2024 if (Optional<PseudoProbe> Probe = extractProbe(I)) { 2025 if (Probe->Factor < 1) 2026 T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor); 2027 } 2028 } 2029 SmallVector<InstrProfValueData, 2> SortedCallTargets = 2030 GetSortedValueDataFromCallTargets(T.get()); 2031 uint64_t Sum; 2032 findIndirectCallFunctionSamples(I, Sum); 2033 annotateValueSite(*I.getParent()->getParent()->getParent(), I, 2034 SortedCallTargets, Sum, IPVK_IndirectCallTarget, 2035 SortedCallTargets.size()); 2036 } else if (!isa<IntrinsicInst>(&I)) { 2037 I.setMetadata(LLVMContext::MD_prof, 2038 MDB.createBranchWeights( 2039 {static_cast<uint32_t>(BlockWeights[BB])})); 2040 } 2041 } 2042 } 2043 Instruction *TI = BB->getTerminator(); 2044 if (TI->getNumSuccessors() == 1) 2045 continue; 2046 if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI)) 2047 continue; 2048 2049 DebugLoc BranchLoc = TI->getDebugLoc(); 2050 LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line " 2051 << ((BranchLoc) ? Twine(BranchLoc.getLine()) 2052 : Twine("<UNKNOWN LOCATION>")) 2053 << ".\n"); 2054 SmallVector<uint32_t, 4> Weights; 2055 uint32_t MaxWeight = 0; 2056 Instruction *MaxDestInst; 2057 for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) { 2058 BasicBlock *Succ = TI->getSuccessor(I); 2059 Edge E = std::make_pair(BB, Succ); 2060 uint64_t Weight = EdgeWeights[E]; 2061 LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E)); 2062 // Use uint32_t saturated arithmetic to adjust the incoming weights, 2063 // if needed. Sample counts in profiles are 64-bit unsigned values, 2064 // but internally branch weights are expressed as 32-bit values. 2065 if (Weight > std::numeric_limits<uint32_t>::max()) { 2066 LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)"); 2067 Weight = std::numeric_limits<uint32_t>::max(); 2068 } 2069 // Weight is added by one to avoid propagation errors introduced by 2070 // 0 weights. 2071 Weights.push_back(static_cast<uint32_t>(Weight + 1)); 2072 if (Weight != 0) { 2073 if (Weight > MaxWeight) { 2074 MaxWeight = Weight; 2075 MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime(); 2076 } 2077 } 2078 } 2079 2080 uint64_t TempWeight; 2081 // Only set weights if there is at least one non-zero weight. 2082 // In any other case, let the analyzer set weights. 2083 // Do not set weights if the weights are present. In ThinLTO, the profile 2084 // annotation is done twice. If the first annotation already set the 2085 // weights, the second pass does not need to set it. 2086 if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) { 2087 LLVM_DEBUG(dbgs() << "SUCCESS. 
Found non-zero weights.\n"); 2088 TI->setMetadata(LLVMContext::MD_prof, 2089 MDB.createBranchWeights(Weights)); 2090 ORE->emit([&]() { 2091 return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst) 2092 << "most popular destination for conditional branches at " 2093 << ore::NV("CondBranchesLoc", BranchLoc); 2094 }); 2095 } else { 2096 LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n"); 2097 } 2098 } 2099 } 2100 2101 /// Get the line number for the function header. 2102 /// 2103 /// This looks up function \p F in the current compilation unit and 2104 /// retrieves the line number where the function is defined. This is 2105 /// line 0 for all the samples read from the profile file. Every line 2106 /// number is relative to this line. 2107 /// 2108 /// \param F Function object to query. 2109 /// 2110 /// \returns the line number where \p F is defined. If it returns 0, 2111 /// it means that there is no debug information available for \p F. 2112 unsigned SampleProfileLoader::getFunctionLoc(Function &F) { 2113 if (DISubprogram *S = F.getSubprogram()) 2114 return S->getLine(); 2115 2116 if (NoWarnSampleUnused) 2117 return 0; 2118 2119 // If the start of \p F is missing, emit a diagnostic to inform the user 2120 // about the missed opportunity. 2121 F.getContext().diagnose(DiagnosticInfoSampleProfile( 2122 "No debug information found in function " + F.getName() + 2123 ": Function profile not used", 2124 DS_Warning)); 2125 return 0; 2126 } 2127 2128 void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) { 2129 DT.reset(new DominatorTree); 2130 DT->recalculate(F); 2131 2132 PDT.reset(new PostDominatorTree(F)); 2133 2134 LI.reset(new LoopInfo); 2135 LI->analyze(*DT); 2136 } 2137 2138 /// Generate branch weight metadata for all branches in \p F. 2139 /// 2140 /// Branch weights are computed out of instruction samples using a 2141 /// propagation heuristic. Propagation proceeds in 3 phases: 2142 /// 2143 /// 1- Assignment of block weights. All the basic blocks in the function 2144 /// are initial assigned the same weight as their most frequently 2145 /// executed instruction. 2146 /// 2147 /// 2- Creation of equivalence classes. Since samples may be missing from 2148 /// blocks, we can fill in the gaps by setting the weights of all the 2149 /// blocks in the same equivalence class to the same weight. To compute 2150 /// the concept of equivalence, we use dominance and loop information. 2151 /// Two blocks B1 and B2 are in the same equivalence class if B1 2152 /// dominates B2, B2 post-dominates B1 and both are in the same loop. 2153 /// 2154 /// 3- Propagation of block weights into edges. This uses a simple 2155 /// propagation heuristic. The following rules are applied to every 2156 /// block BB in the CFG: 2157 /// 2158 /// - If BB has a single predecessor/successor, then the weight 2159 /// of that edge is the weight of the block. 2160 /// 2161 /// - If all the edges are known except one, and the weight of the 2162 /// block is already known, the weight of the unknown edge will 2163 /// be the weight of the block minus the sum of all the known 2164 /// edges. If the sum of all the known edges is larger than BB's weight, 2165 /// we set the unknown edge weight to zero. 2166 /// 2167 /// - If there is a self-referential edge, and the weight of the block is 2168 /// known, the weight for that edge is set to the weight of the block 2169 /// minus the weight of the other incoming edges to that block (if 2170 /// known). 
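/// For example (illustrative numbers only): a block weighing 100 with a
/// self-referential edge and one other incoming edge known to weigh 10 gets
/// a self-referential edge weight of 90.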
2171 /// 2172 /// Since this propagation is not guaranteed to finalize for every CFG, we 2173 /// only allow it to proceed for a limited number of iterations (controlled 2174 /// by -sample-profile-max-propagate-iterations). 2175 /// 2176 /// FIXME: Try to replace this propagation heuristic with a scheme 2177 /// that is guaranteed to finalize. A work-list approach similar to 2178 /// the standard value propagation algorithm used by SSA-CCP might 2179 /// work here. 2180 /// 2181 /// Once all the branch weights are computed, we emit the MD_prof 2182 /// metadata on BB using the computed values for each of its branches. 2183 /// 2184 /// \param F The function to query. 2185 /// 2186 /// \returns true if \p F was modified. Returns false, otherwise. 2187 bool SampleProfileLoader::emitAnnotations(Function &F) { 2188 bool Changed = false; 2189 2190 if (FunctionSamples::ProfileIsProbeBased) { 2191 if (!ProbeManager->profileIsValid(F, *Samples)) { 2192 LLVM_DEBUG( 2193 dbgs() << "Profile is invalid due to CFG mismatch for Function " 2194 << F.getName()); 2195 ++NumMismatchedProfile; 2196 return false; 2197 } 2198 ++NumMatchedProfile; 2199 } else { 2200 if (getFunctionLoc(F) == 0) 2201 return false; 2202 2203 LLVM_DEBUG(dbgs() << "Line number for the first instruction in " 2204 << F.getName() << ": " << getFunctionLoc(F) << "\n"); 2205 } 2206 2207 DenseSet<GlobalValue::GUID> InlinedGUIDs; 2208 if (ProfileIsCS && CallsitePrioritizedInline) 2209 Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs); 2210 else 2211 Changed |= inlineHotFunctions(F, InlinedGUIDs); 2212 2213 // Compute basic block weights. 2214 Changed |= computeBlockWeights(F); 2215 2216 if (Changed) { 2217 // Add an entry count to the function using the samples gathered at the 2218 // function entry. 2219 // Sets the GUIDs that are inlined in the profiled binary. This is used 2220 // for ThinLink to make correct liveness analysis, and also make the IR 2221 // match the profiled binary before annotation. 2222 F.setEntryCount( 2223 ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real), 2224 &InlinedGUIDs); 2225 2226 // Compute dominance and loop info needed for propagation. 2227 computeDominanceAndLoopInfo(F); 2228 2229 // Find equivalence classes. 2230 findEquivalenceClasses(F); 2231 2232 // Propagate weights to all edges. 2233 propagateWeights(F); 2234 } 2235 2236 // If coverage checking was requested, compute it now. 
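// For example (illustrative numbers only): if 8 of 10 profile records were
// matched to the IR, coverage is 80%, and a warning is emitted when that is
// below the -sample-profile-check-record-coverage threshold.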
2237 if (SampleProfileRecordCoverage) {
2238 unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
2239 unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
2240 unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2241 if (Coverage < SampleProfileRecordCoverage) {
2242 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2243 F.getSubprogram()->getFilename(), getFunctionLoc(F),
2244 Twine(Used) + " of " + Twine(Total) + " available profile records (" +
2245 Twine(Coverage) + "%) were applied",
2246 DS_Warning));
2247 }
2248 }
2249
2250 if (SampleProfileSampleCoverage) {
2251 uint64_t Used = CoverageTracker.getTotalUsedSamples();
2252 uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
2253 unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2254 if (Coverage < SampleProfileSampleCoverage) {
2255 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2256 F.getSubprogram()->getFilename(), getFunctionLoc(F),
2257 Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
2258 Twine(Coverage) + "%) were applied",
2259 DS_Warning));
2260 }
2261 }
2262 return Changed;
2263 }
2264
2265 char SampleProfileLoaderLegacyPass::ID = 0;
2266
2267 INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
2268 "Sample Profile loader", false, false)
2269 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2270 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
2271 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2272 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
2273 INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
2274 "Sample Profile loader", false, false)
2275
2276 std::vector<Function *>
2277 SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
2278 std::vector<Function *> FunctionOrderList;
2279 FunctionOrderList.reserve(M.size());
2280
2281 if (!ProfileTopDownLoad || CG == nullptr) {
2282 if (ProfileMergeInlinee) {
2283 // Disable ProfileMergeInlinee if the profile is not loaded in top-down
2284 // order, because the profile for a function may be used for the profile
2285 // annotation of its outline copy before the profiles of its non-inlined
2286 // inline instances are merged into it, and that is not how
2287 // ProfileMergeInlinee is supposed to work.
2288 ProfileMergeInlinee = false; 2289 } 2290 2291 for (Function &F : M) 2292 if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile")) 2293 FunctionOrderList.push_back(&F); 2294 return FunctionOrderList; 2295 } 2296 2297 assert(&CG->getModule() == &M); 2298 scc_iterator<CallGraph *> CGI = scc_begin(CG); 2299 while (!CGI.isAtEnd()) { 2300 for (CallGraphNode *node : *CGI) { 2301 auto F = node->getFunction(); 2302 if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile")) 2303 FunctionOrderList.push_back(F); 2304 } 2305 ++CGI; 2306 } 2307 2308 std::reverse(FunctionOrderList.begin(), FunctionOrderList.end()); 2309 return FunctionOrderList; 2310 } 2311 2312 bool SampleProfileLoader::doInitialization(Module &M, 2313 FunctionAnalysisManager *FAM) { 2314 auto &Ctx = M.getContext(); 2315 2316 auto ReaderOrErr = 2317 SampleProfileReader::create(Filename, Ctx, RemappingFilename); 2318 if (std::error_code EC = ReaderOrErr.getError()) { 2319 std::string Msg = "Could not open profile: " + EC.message(); 2320 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg)); 2321 return false; 2322 } 2323 Reader = std::move(ReaderOrErr.get()); 2324 Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink); 2325 Reader->collectFuncsFrom(M); 2326 if (std::error_code EC = Reader->read()) { 2327 std::string Msg = "profile reading failed: " + EC.message(); 2328 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg)); 2329 return false; 2330 } 2331 2332 PSL = Reader->getProfileSymbolList(); 2333 2334 // While profile-sample-accurate is on, ignore symbol list. 2335 ProfAccForSymsInList = 2336 ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate; 2337 if (ProfAccForSymsInList) { 2338 NamesInProfile.clear(); 2339 if (auto NameTable = Reader->getNameTable()) 2340 NamesInProfile.insert(NameTable->begin(), NameTable->end()); 2341 CoverageTracker.setProfAccForSymsInList(true); 2342 } 2343 2344 if (FAM && !ProfileInlineReplayFile.empty()) { 2345 ExternalInlineAdvisor = std::make_unique<ReplayInlineAdvisor>( 2346 M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr, ProfileInlineReplayFile, 2347 /*EmitRemarks=*/false); 2348 if (!ExternalInlineAdvisor->areReplayRemarksLoaded()) 2349 ExternalInlineAdvisor.reset(); 2350 } 2351 2352 // Apply tweaks if context-sensitive profile is available. 2353 if (Reader->profileIsCS()) { 2354 ProfileIsCS = true; 2355 FunctionSamples::ProfileIsCS = true; 2356 2357 // Enable priority-base inliner and size inline by default for CSSPGO. 2358 if (!ProfileSizeInline.getNumOccurrences()) 2359 ProfileSizeInline = true; 2360 if (!CallsitePrioritizedInline.getNumOccurrences()) 2361 CallsitePrioritizedInline = true; 2362 2363 // Tracker for profiles under different context 2364 ContextTracker = 2365 std::make_unique<SampleContextTracker>(Reader->getProfiles()); 2366 } 2367 2368 // Load pseudo probe descriptors for probe-based function samples. 
2369 if (Reader->profileIsProbeBased()) {
2370 ProbeManager = std::make_unique<PseudoProbeManager>(M);
2371 if (!ProbeManager->moduleIsProbed(M)) {
2372 const char *Msg =
2373 "Pseudo-probe-based profile requires SampleProfileProbePass";
2374 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2375 return false;
2376 }
2377 }
2378
2379 return true;
2380 }
2381
2382 ModulePass *llvm::createSampleProfileLoaderPass() {
2383 return new SampleProfileLoaderLegacyPass();
2384 }
2385
2386 ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
2387 return new SampleProfileLoaderLegacyPass(Name);
2388 }
2389
2390 bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
2391 ProfileSummaryInfo *_PSI, CallGraph *CG) {
2392 GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
2393
2394 PSI = _PSI;
2395 if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
2396 M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
2397 ProfileSummary::PSK_Sample);
2398 PSI->refresh();
2399 }
2400 // Compute the total number of samples collected in this profile.
2401 for (const auto &I : Reader->getProfiles())
2402 TotalCollectedSamples += I.second.getTotalSamples();
2403
2404 auto Remapper = Reader->getRemapper();
2405 // Populate the symbol map.
2406 for (const auto &N_F : M.getValueSymbolTable()) {
2407 StringRef OrigName = N_F.getKey();
2408 Function *F = dyn_cast<Function>(N_F.getValue());
2409 if (F == nullptr)
2410 continue;
2411 SymbolMap[OrigName] = F;
2412 auto pos = OrigName.find('.');
2413 if (pos != StringRef::npos) {
2414 StringRef NewName = OrigName.substr(0, pos);
2415 auto r = SymbolMap.insert(std::make_pair(NewName, F));
2416 // Failing to insert means there is already an entry in SymbolMap,
2417 // thus there are multiple functions that are mapped to the same
2418 // stripped name. In this case of a name conflict, set the value
2419 // to nullptr to avoid confusion.
2420 if (!r.second)
2421 r.first->second = nullptr;
2422 OrigName = NewName;
2423 }
2424 // Insert the remapped names into SymbolMap.
2425 if (Remapper) {
2426 if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
2427 if (*MapName == OrigName)
2428 continue;
2429 SymbolMap.insert(std::make_pair(*MapName, F));
2430 }
2431 }
2432 }
2433
2434 bool retval = false;
2435 for (auto F : buildFunctionOrder(M, CG)) {
2436 assert(!F->isDeclaration());
2437 clearFunctionData();
2438 retval |= runOnFunction(*F, AM);
2439 }
2440
2441 // Account for cold calls not inlined.
2442 if (!ProfileIsCS)
2443 for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
2444 notInlinedCallInfo)
2445 updateProfileCallee(pair.first, pair.second.entryCount);
2446
2447 return retval;
2448 }
2449
2450 bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
2451 ACT = &getAnalysis<AssumptionCacheTracker>();
2452 TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
2453 TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
2454 ProfileSummaryInfo *PSI =
2455 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2456 return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
2457 }
2458
2459 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
2460 DILocation2SampleMap.clear();
2461 // By default the entry count is initialized to -1, which will be treated
2462 // conservatively by getEntryCount as the same as unknown (None). This is
2463 // to avoid newly added code being treated as cold. If we have samples,
2464 // this will be overwritten in emitAnnotations.
2465 uint64_t initialEntryCount = -1;
2466
2467 ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
2468 if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
2469 // Initialize all the function entry counts to 0. This means all
2470 // functions without a profile will be regarded as cold.
2471 initialEntryCount = 0;
2472 // profile-sample-accurate is a user assertion which has a higher precedence
2473 // than the symbol list. When profile-sample-accurate is on, ignore the symbol list.
2474 ProfAccForSymsInList = false;
2475 }
2476 CoverageTracker.setProfAccForSymsInList(ProfAccForSymsInList);
2477
2478 // PSL -- the profile symbol list includes all the symbols in the sampled binary.
2479 // If ProfileAccurateForSymsInList is enabled, PSL is used to treat
2480 // old functions without samples as cold, without having to worry
2481 // about new and hot functions being mistakenly treated as cold.
2482 if (ProfAccForSymsInList) {
2483 // Initialize the entry count to 0 for functions in the list.
2484 if (PSL->contains(F.getName()))
2485 initialEntryCount = 0;
2486
2487 // A function in the symbol list but without samples will be regarded as
2488 // cold. To minimize the potential negative performance impact this could
2489 // have, we want to be a little conservative here: if a function shows up
2490 // in the profile, whether as an outline function, an inline instance or
2491 // a call target, treat the function as not cold. This handles cases where
2492 // most callsites of a function are inlined in the sampled binary but not
2493 // inlined in the current build (because of source code drift, imprecise
2494 // debug information, or callsites that are all cold individually but not
2495 // cold accumulatively), so the outline function showing up as cold in the
2496 // sampled binary will actually not be cold in the current build.
2497 StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
2498 if (NamesInProfile.count(CanonName))
2499 initialEntryCount = -1;
2500 }
2501
2502 // Initialize the entry count when the function has no existing entry
2503 // count value.
2504 if (!F.getEntryCount().hasValue())
2505 F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
2506 std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
2507 if (AM) {
2508 auto &FAM =
2509 AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
2510 .getManager();
2511 ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2512 } else {
2513 OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2514 ORE = OwnedORE.get();
2515 }
2516
2517 if (ProfileIsCS)
2518 Samples = ContextTracker->getBaseSamplesFor(F);
2519 else
2520 Samples = Reader->getSamplesFor(F);
2521
2522 if (Samples && !Samples->empty())
2523 return emitAnnotations(F);
2524 return false;
2525 }
2526
2527 PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
2528 ModuleAnalysisManager &AM) {
2529 FunctionAnalysisManager &FAM =
2530 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2531
2532 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
2533 return FAM.getResult<AssumptionAnalysis>(F);
2534 };
2535 auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
2536 return FAM.getResult<TargetIRAnalysis>(F);
2537 };
2538 auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
2539 return FAM.getResult<TargetLibraryAnalysis>(F);
2540 };
2541
2542 SampleProfileLoader SampleLoader(
2543 ProfileFileName.empty() ?
SampleProfileFile : ProfileFileName, 2544 ProfileRemappingFileName.empty() ? SampleProfileRemappingFile 2545 : ProfileRemappingFileName, 2546 LTOPhase, GetAssumptionCache, GetTTI, GetTLI); 2547 2548 if (!SampleLoader.doInitialization(M, &FAM)) 2549 return PreservedAnalyses::all(); 2550 2551 ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M); 2552 CallGraph &CG = AM.getResult<CallGraphAnalysis>(M); 2553 if (!SampleLoader.runOnModule(M, &AM, PSI, &CG)) 2554 return PreservedAnalyses::all(); 2555 2556 return PreservedAnalyses::none(); 2557 } 2558
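// Typical invocation (illustrative): the pass is registered as "sample-profile",
// so a sample profile can be applied with, for example,
//   opt -passes=sample-profile -sample-profile-file=foo.prof input.ll -S -o output.ll
// or via the clang driver with -fprofile-sample-use=foo.prof.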