//===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SampleProfileLoader transformation. This pass
// reads a profile file generated by a sampling profiler (e.g. Linux Perf -
// http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
// profile information in the given profile.
//
// This pass generates branch weight annotations on the IR:
//
// - prof: Represents branch weights. This annotation is added to branches
//      to indicate the weights of each edge coming out of the branch.
//      The weight of each edge is the weight of the target block for
//      that edge. The weight of a block B is computed as the maximum
//      number of samples found in B.
//
//===----------------------------------------------------------------------===//
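
// As an illustrative sketch (not taken from a specific test), the annotation
// described above typically surfaces in the IR as branch_weights metadata
// attached to a terminator, for example:
//
//   br i1 %cmp, label %if.then, label %if.else, !prof !42
//   ...
//   !42 = !{!"branch_weights", i32 4000, i32 200}
//
// where the two i32 operands are the weights of the true and false edges.
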
#include "llvm/Transforms/Utils/Cloning.h" 85 #include <algorithm> 86 #include <cassert> 87 #include <cstdint> 88 #include <functional> 89 #include <limits> 90 #include <map> 91 #include <memory> 92 #include <queue> 93 #include <string> 94 #include <system_error> 95 #include <utility> 96 #include <vector> 97 98 using namespace llvm; 99 using namespace sampleprof; 100 using ProfileCount = Function::ProfileCount; 101 #define DEBUG_TYPE "sample-profile" 102 #define CSINLINE_DEBUG DEBUG_TYPE "-inline" 103 104 STATISTIC(NumCSInlined, 105 "Number of functions inlined with context sensitive profile"); 106 STATISTIC(NumCSNotInlined, 107 "Number of functions not inlined with context sensitive profile"); 108 STATISTIC(NumMismatchedProfile, 109 "Number of functions with CFG mismatched profile"); 110 STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile"); 111 STATISTIC(NumDuplicatedInlinesite, 112 "Number of inlined callsites with a partial distribution factor"); 113 114 STATISTIC(NumCSInlinedHitMinLimit, 115 "Number of functions with FDO inline stopped due to min size limit"); 116 STATISTIC(NumCSInlinedHitMaxLimit, 117 "Number of functions with FDO inline stopped due to max size limit"); 118 STATISTIC( 119 NumCSInlinedHitGrowthLimit, 120 "Number of functions with FDO inline stopped due to growth size limit"); 121 122 // Command line option to specify the file to read samples from. This is 123 // mainly used for debugging. 124 static cl::opt<std::string> SampleProfileFile( 125 "sample-profile-file", cl::init(""), cl::value_desc("filename"), 126 cl::desc("Profile file loaded by -sample-profile"), cl::Hidden); 127 128 // The named file contains a set of transformations that may have been applied 129 // to the symbol names between the program from which the sample data was 130 // collected and the current program's symbols. 131 static cl::opt<std::string> SampleProfileRemappingFile( 132 "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"), 133 cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden); 134 135 static cl::opt<unsigned> SampleProfileMaxPropagateIterations( 136 "sample-profile-max-propagate-iterations", cl::init(100), 137 cl::desc("Maximum number of iterations to go through when propagating " 138 "sample block/edge weights through the CFG.")); 139 140 static cl::opt<unsigned> SampleProfileRecordCoverage( 141 "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"), 142 cl::desc("Emit a warning if less than N% of records in the input profile " 143 "are matched to the IR.")); 144 145 static cl::opt<unsigned> SampleProfileSampleCoverage( 146 "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"), 147 cl::desc("Emit a warning if less than N% of samples in the input profile " 148 "are matched to the IR.")); 149 150 static cl::opt<bool> NoWarnSampleUnused( 151 "no-warn-sample-unused", cl::init(false), cl::Hidden, 152 cl::desc("Use this option to turn off/on warnings about function with " 153 "samples but without debug information to use those samples. ")); 154 155 static cl::opt<bool> ProfileSampleAccurate( 156 "profile-sample-accurate", cl::Hidden, cl::init(false), 157 cl::desc("If the sample profile is accurate, we will mark all un-sampled " 158 "callsite and function as having 0 samples. Otherwise, treat " 159 "un-sampled callsites and functions conservatively as unknown. 
")); 160 161 static cl::opt<bool> ProfileAccurateForSymsInList( 162 "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore, 163 cl::init(true), 164 cl::desc("For symbols in profile symbol list, regard their profiles to " 165 "be accurate. It may be overriden by profile-sample-accurate. ")); 166 167 static cl::opt<bool> ProfileMergeInlinee( 168 "sample-profile-merge-inlinee", cl::Hidden, cl::init(true), 169 cl::desc("Merge past inlinee's profile to outline version if sample " 170 "profile loader decided not to inline a call site. It will " 171 "only be enabled when top-down order of profile loading is " 172 "enabled. ")); 173 174 static cl::opt<bool> ProfileTopDownLoad( 175 "sample-profile-top-down-load", cl::Hidden, cl::init(true), 176 cl::desc("Do profile annotation and inlining for functions in top-down " 177 "order of call graph during sample profile loading. It only " 178 "works for new pass manager. ")); 179 180 static cl::opt<bool> ProfileSizeInline( 181 "sample-profile-inline-size", cl::Hidden, cl::init(false), 182 cl::desc("Inline cold call sites in profile loader if it's beneficial " 183 "for code size.")); 184 185 static cl::opt<int> ProfileInlineGrowthLimit( 186 "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12), 187 cl::desc("The size growth ratio limit for proirity-based sample profile " 188 "loader inlining.")); 189 190 static cl::opt<int> ProfileInlineLimitMin( 191 "sample-profile-inline-limit-min", cl::Hidden, cl::init(100), 192 cl::desc("The lower bound of size growth limit for " 193 "proirity-based sample profile loader inlining.")); 194 195 static cl::opt<int> ProfileInlineLimitMax( 196 "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000), 197 cl::desc("The upper bound of size growth limit for " 198 "proirity-based sample profile loader inlining.")); 199 200 static cl::opt<int> ProfileICPThreshold( 201 "sample-profile-icp-threshold", cl::Hidden, cl::init(5), 202 cl::desc( 203 "Relative hotness threshold for indirect " 204 "call promotion in proirity-based sample profile loader inlining.")); 205 206 static cl::opt<int> SampleHotCallSiteThreshold( 207 "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000), 208 cl::desc("Hot callsite threshold for proirity-based sample profile loader " 209 "inlining.")); 210 211 static cl::opt<bool> CallsitePrioritizedInline( 212 "sample-profile-prioritized-inline", cl::Hidden, cl::ZeroOrMore, 213 cl::init(false), 214 cl::desc("Use call site prioritized inlining for sample profile loader." 
215 "Currently only CSSPGO is supported.")); 216 217 static cl::opt<int> SampleColdCallSiteThreshold( 218 "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45), 219 cl::desc("Threshold for inlining cold callsites")); 220 221 static cl::opt<std::string> ProfileInlineReplayFile( 222 "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"), 223 cl::desc( 224 "Optimization remarks file containing inline remarks to be replayed " 225 "by inlining from sample profile loader."), 226 cl::Hidden); 227 228 namespace { 229 230 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>; 231 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>; 232 using Edge = std::pair<const BasicBlock *, const BasicBlock *>; 233 using EdgeWeightMap = DenseMap<Edge, uint64_t>; 234 using BlockEdgeMap = 235 DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>; 236 237 class SampleProfileLoader; 238 239 class SampleCoverageTracker { 240 public: 241 bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset, 242 uint32_t Discriminator, uint64_t Samples); 243 unsigned computeCoverage(unsigned Used, unsigned Total) const; 244 unsigned countUsedRecords(const FunctionSamples *FS, 245 ProfileSummaryInfo *PSI) const; 246 unsigned countBodyRecords(const FunctionSamples *FS, 247 ProfileSummaryInfo *PSI) const; 248 uint64_t getTotalUsedSamples() const { return TotalUsedSamples; } 249 uint64_t countBodySamples(const FunctionSamples *FS, 250 ProfileSummaryInfo *PSI) const; 251 252 void clear() { 253 SampleCoverage.clear(); 254 TotalUsedSamples = 0; 255 } 256 inline void setProfAccForSymsInList(bool V) { ProfAccForSymsInList = V; } 257 258 private: 259 using BodySampleCoverageMap = std::map<LineLocation, unsigned>; 260 using FunctionSamplesCoverageMap = 261 DenseMap<const FunctionSamples *, BodySampleCoverageMap>; 262 263 /// Coverage map for sampling records. 264 /// 265 /// This map keeps a record of sampling records that have been matched to 266 /// an IR instruction. This is used to detect some form of staleness in 267 /// profiles (see flag -sample-profile-check-coverage). 268 /// 269 /// Each entry in the map corresponds to a FunctionSamples instance. This is 270 /// another map that counts how many times the sample record at the 271 /// given location has been used. 272 FunctionSamplesCoverageMap SampleCoverage; 273 274 /// Number of samples used from the profile. 275 /// 276 /// When a sampling record is used for the first time, the samples from 277 /// that record are added to this accumulator. Coverage is later computed 278 /// based on the total number of samples available in this function and 279 /// its callsites. 280 /// 281 /// Note that this accumulator tracks samples used from a single function 282 /// and all the inlined callsites. Strictly, we should have a map of counters 283 /// keyed by FunctionSamples pointers, but these stats are cleared after 284 /// every function, so we just need to keep a single counter. 285 uint64_t TotalUsedSamples = 0; 286 287 // For symbol in profile symbol list, whether to regard their profiles 288 // to be accurate. This is passed from the SampleLoader instance. 
  bool ProfAccForSymsInList = false;
};

class GUIDToFuncNameMapper {
public:
  GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
                       DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
      : CurrentReader(Reader), CurrentModule(M),
        CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
    if (!CurrentReader.useMD5())
      return;

    for (const auto &F : CurrentModule) {
      StringRef OrigName = F.getName();
      CurrentGUIDToFuncNameMap.insert(
          {Function::getGUID(OrigName), OrigName});

      // Local to global var promotion used by optimizations like thinlto
      // will rename the var and add a suffix like ".llvm.xxx" to the
      // original local name. In the sample profile, the suffixes of function
      // names are all stripped. Since it is possible that the mapper is
      // built in the post-thin-link phase and var promotion has been done,
      // we need to add the substring of the function name without the suffix
      // into the GUIDToFuncNameMap.
      StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
      if (CanonName != OrigName)
        CurrentGUIDToFuncNameMap.insert(
            {Function::getGUID(CanonName), CanonName});
    }

    // Update GUIDToFuncNameMap for each function including inlinees.
    SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
  }

  ~GUIDToFuncNameMapper() {
    if (!CurrentReader.useMD5())
      return;

    CurrentGUIDToFuncNameMap.clear();

    // Reset the GUIDToFuncNameMap of each function as it is no
    // longer valid at this point.
    SetGUIDToFuncNameMapForAll(nullptr);
  }

private:
  void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
    std::queue<FunctionSamples *> FSToUpdate;
    for (auto &IFS : CurrentReader.getProfiles()) {
      FSToUpdate.push(&IFS.second);
    }

    while (!FSToUpdate.empty()) {
      FunctionSamples *FS = FSToUpdate.front();
      FSToUpdate.pop();
      FS->GUIDToFuncNameMap = Map;
      for (const auto &ICS : FS->getCallsiteSamples()) {
        const FunctionSamplesMap &FSMap = ICS.second;
        for (auto &IFS : FSMap) {
          FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
          FSToUpdate.push(&FS);
        }
      }
    }
  }

  SampleProfileReader &CurrentReader;
  Module &CurrentModule;
  DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
};

// Inline candidate used by the iterative callsite prioritized inliner.
struct InlineCandidate {
  CallBase *CallInstr;
  const FunctionSamples *CalleeSamples;
  // Prorated callsite count, which will be used to guide inlining. For example,
  // if a callsite is duplicated in LTO prelink, then in LTO postlink the two
  // copies will get their own distribution factors and their prorated counts
  // will be used to decide if they should be inlined independently.
  uint64_t CallsiteCount;
  // Call site distribution factor to prorate the profile samples for a
  // duplicated callsite. Default value is 1.0.
  float CallsiteDistribution;
};

// Inline candidate comparer using call site weight
struct CandidateComparer {
  bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) {
    if (LHS.CallsiteCount != RHS.CallsiteCount)
      return LHS.CallsiteCount < RHS.CallsiteCount;

    // Tie breaker using GUID so we have stable/deterministic inlining order
    assert(LHS.CalleeSamples && RHS.CalleeSamples &&
           "Expect non-null FunctionSamples");
    return LHS.CalleeSamples->getGUID(LHS.CalleeSamples->getName()) <
           RHS.CalleeSamples->getGUID(RHS.CalleeSamples->getName());
  }
};

using CandidateQueue =
    PriorityQueue<InlineCandidate, std::vector<InlineCandidate>,
                  CandidateComparer>;

/// Sample profile pass.
///
/// This pass reads profile data from the file specified by
/// -sample-profile-file and annotates every affected function with the
/// profile information found in that file.
class SampleProfileLoader {
public:
  SampleProfileLoader(
      StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase,
      std::function<AssumptionCache &(Function &)> GetAssumptionCache,
      std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
      std::function<const TargetLibraryInfo &(Function &)> GetTLI)
      : GetAC(std::move(GetAssumptionCache)),
        GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
        Filename(std::string(Name)), RemappingFilename(std::string(RemapName)),
        LTOPhase(LTOPhase) {}

  bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
  bool runOnModule(Module &M, ModuleAnalysisManager *AM,
                   ProfileSummaryInfo *_PSI, CallGraph *CG);

  void dump() { Reader->dump(); }

protected:
  friend class SampleCoverageTracker;

  bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
  unsigned getFunctionLoc(Function &F);
  bool emitAnnotations(Function &F);
  ErrorOr<uint64_t> getInstWeight(const Instruction &I);
  ErrorOr<uint64_t> getProbeWeight(const Instruction &I);
  ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB);
  const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
  std::vector<const FunctionSamples *>
  findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
  mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap;
  const FunctionSamples *findFunctionSamples(const Instruction &I) const;
  // Attempt to promote indirect call and also inline the promoted call
  bool tryPromoteAndInlineCandidate(
      Function &F, InlineCandidate &Candidate, uint64_t SumOrigin,
      uint64_t &Sum, DenseSet<Instruction *> &PromotedInsns,
      SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
  bool inlineHotFunctions(Function &F,
                          DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  InlineCost shouldInlineCandidate(InlineCandidate &Candidate);
  bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB);
  bool
  tryInlineCandidate(InlineCandidate &Candidate,
                     SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
  bool
  inlineHotFunctionsWithPriority(Function &F,
                                 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  // Inline cold/small functions in addition to hot ones
  bool shouldInlineColdCallee(CallBase &CallInst);
  void emitOptimizationRemarksForInlineCandidates(
      const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
      bool Hot);
  void printEdgeWeight(raw_ostream &OS, Edge E);
  void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const;
  void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB);
  bool computeBlockWeights(Function &F);
  void findEquivalenceClasses(Function &F);
  template <bool IsPostDom>
  void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
                           DominatorTreeBase<BasicBlock, IsPostDom> *DomTree);

  void propagateWeights(Function &F);
  uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
  void buildEdges(Function &F);
  std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
  bool propagateThroughEdges(Function &F, bool UpdateBlockCount);
  void computeDominanceAndLoopInfo(Function &F);
  void clearFunctionData();

  /// Map basic blocks to their computed weights.
  ///
  /// The weight of a basic block is defined to be the maximum
  /// of all the instruction weights in that block.
  BlockWeightMap BlockWeights;

  /// Map edges to their computed weights.
  ///
  /// Edge weights are computed by propagating basic block weights in
  /// SampleProfile::propagateWeights.
  EdgeWeightMap EdgeWeights;

  /// Set of visited blocks during propagation.
  SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;

  /// Set of visited edges during propagation.
  SmallSet<Edge, 32> VisitedEdges;

  /// Equivalence classes for block weights.
  ///
  /// Two blocks BB1 and BB2 are in the same equivalence class if they
  /// dominate and post-dominate each other, and they are in the same loop
  /// nest. When this happens, the two blocks are guaranteed to execute
  /// the same number of times.
  EquivalenceClassMap EquivalenceClass;

  /// Map from function name to Function *. Used to find the function from
  /// the function name. If the function name contains a suffix, an additional
  /// entry is added to map from the stripped name to the function if there
  /// is a one-to-one mapping.
  StringMap<Function *> SymbolMap;

  /// Dominance, post-dominance and loop information.
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<PostDominatorTree> PDT;
  std::unique_ptr<LoopInfo> LI;

  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;

  /// Predecessors for each basic block in the CFG.
  BlockEdgeMap Predecessors;

  /// Successors for each basic block in the CFG.
  BlockEdgeMap Successors;

  SampleCoverageTracker CoverageTracker;

  /// Profile reader object.
  std::unique_ptr<SampleProfileReader> Reader;

  /// Profile tracker for different contexts.
  std::unique_ptr<SampleContextTracker> ContextTracker;

  /// Samples collected for the body of this function.
  FunctionSamples *Samples = nullptr;

  /// Name of the profile file to load.
  std::string Filename;

  /// Name of the profile remapping file to load.
  std::string RemappingFilename;

  /// Flag indicating whether the profile input loaded successfully.
  bool ProfileIsValid = false;

  /// Flag indicating whether the input profile is context-sensitive.
  bool ProfileIsCS = false;

  /// Flag indicating which LTO/ThinLTO phase the pass is invoked in.
  ///
  /// We need to know the LTO phase because, for example, in the ThinLTOPrelink
  /// phase, during annotation, we should not promote indirect calls. Instead,
  /// we will mark GUIDs that need to be annotated to the function.
  ThinOrFullLTOPhase LTOPhase;

  /// Profile Summary Info computed from the sample profile.
  ProfileSummaryInfo *PSI = nullptr;

  /// Profile symbol list tells whether a function name appears in the binary
  /// used to generate the current profile.
  std::unique_ptr<ProfileSymbolList> PSL;

  /// Total number of samples collected in this profile.
  ///
  /// This is the sum of all the samples collected in all the functions executed
  /// at runtime.
  uint64_t TotalCollectedSamples = 0;

  /// Optimization Remark Emitter used to emit diagnostic remarks.
  OptimizationRemarkEmitter *ORE = nullptr;

  // Information recorded when we declined to inline a call site
  // because we have determined it is too cold is accumulated for
  // each callee function. Initially this is just the entry count.
  struct NotInlinedProfileInfo {
    uint64_t entryCount;
  };
  DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;

  // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  // all the function symbols defined or declared in the current module.
  DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;

  // All the names used in FunctionSamples, including outline function
  // names, inline instance names and call target names.
  StringSet<> NamesInProfile;

  // For symbols in the profile symbol list, whether to regard their profiles
  // as accurate. It is mainly decided by the existence of the profile symbol
  // list and the -profile-accurate-for-symsinlist flag, but it can be
  // overridden by -profile-sample-accurate or the profile-sample-accurate
  // attribute.
  bool ProfAccForSymsInList;

  // External inline advisor used to replay inline decisions from remarks.
  std::unique_ptr<ReplayInlineAdvisor> ExternalInlineAdvisor;

  // A pseudo probe helper to correlate the imported sample counts.
  std::unique_ptr<PseudoProbeManager> ProbeManager;
};

class SampleProfileLoaderLegacyPass : public ModulePass {
public:
  // Class identification, replacement for typeinfo
  static char ID;

  SampleProfileLoaderLegacyPass(
      StringRef Name = SampleProfileFile,
      ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
      : ModulePass(ID), SampleLoader(
                            Name, SampleProfileRemappingFile, LTOPhase,
                            [&](Function &F) -> AssumptionCache & {
                              return ACT->getAssumptionCache(F);
                            },
                            [&](Function &F) -> TargetTransformInfo & {
                              return TTIWP->getTTI(F);
                            },
                            [&](Function &F) -> TargetLibraryInfo & {
                              return TLIWP->getTLI(F);
                            }) {
    initializeSampleProfileLoaderLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void dump() { SampleLoader.dump(); }

  bool doInitialization(Module &M) override {
    return SampleLoader.doInitialization(M);
  }

  StringRef getPassName() const override { return "Sample profile pass"; }
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }

private:
  SampleProfileLoader SampleLoader;
  AssumptionCacheTracker *ACT = nullptr;
  TargetTransformInfoWrapperPass *TTIWP = nullptr;
  TargetLibraryInfoWrapperPass *TLIWP = nullptr;
};

} // end anonymous namespace

/// Return true if the given callsite is hot with regard to the hot cutoff
/// threshold.
///
/// Functions that were inlined in the original binary will be represented
/// in the inline stack in the sample profile. If the profile shows that
/// the original inline decision was "good" (i.e., the callsite is executed
/// frequently), then we will recreate the inline decision and apply the
/// profile from the inlined callsite.
///
/// To decide whether an inlined callsite is hot, we compare the callsite
/// sample count with the hot cutoff computed by ProfileSummaryInfo; it is
/// regarded as hot if the count is above the cutoff value.
///
/// When ProfileAccurateForSymsInList is enabled and a profile symbol list
/// is present, functions in the profile symbol list but without a profile will
/// be regarded as cold, and much less inlining will happen in the CGSCC
/// inlining pass, so we tend to lower the hot criteria here to allow more
/// early inlining to happen for warm callsites, which is helpful for
/// performance.
static bool callsiteIsHot(const FunctionSamples *CallsiteFS,
                          ProfileSummaryInfo *PSI, bool ProfAccForSymsInList) {
  if (!CallsiteFS)
    return false; // The callsite was not inlined in the original binary.

  assert(PSI && "PSI is expected to be non null");
  uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
  if (ProfAccForSymsInList)
    return !PSI->isColdCount(CallsiteTotalSamples);
  else
    return PSI->isHotCount(CallsiteTotalSamples);
}
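
// Illustrative example (cutoff values made up): with a profile summary whose
// hot cutoff count is 100 and cold cutoff count is 10, a callsite whose
// inlined FunctionSamples total 150 is considered hot. When
// ProfAccForSymsInList is set, a callsite totalling 50 also qualifies,
// because it only needs to be "not cold" rather than strictly hot.
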
/// Mark as used the sample record for the given function samples at
/// (LineOffset, Discriminator).
///
/// \returns true if this is the first time we mark the given record.
bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
                                            uint32_t LineOffset,
                                            uint32_t Discriminator,
                                            uint64_t Samples) {
  LineLocation Loc(LineOffset, Discriminator);
  unsigned &Count = SampleCoverage[FS][Loc];
  bool FirstTime = (++Count == 1);
  if (FirstTime)
    TotalUsedSamples += Samples;
  return FirstTime;
}

/// Return the number of sample records that were applied from this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  auto I = SampleCoverage.find(FS);

  // The size of the coverage map for FS represents the number of records
  // that were marked used at least once.
  unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0;

  // If there are inlined callsites in this function, count the samples found
  // in the respective bodies. However, do not bother counting callees with 0
  // total samples; these are callees that were never invoked at runtime.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
        Count += countUsedRecords(CalleeSamples, PSI);
    }

  return Count;
}

/// Return the number of sample records in the body of this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  unsigned Count = FS->getBodySamples().size();

  // Only count records in hot callsites.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
        Count += countBodyRecords(CalleeSamples, PSI);
    }

  return Count;
}

/// Return the number of samples collected in the body of this profile.
///
/// This count does not include samples from cold inlined callsites.
uint64_t
SampleCoverageTracker::countBodySamples(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  uint64_t Total = 0;
  for (const auto &I : FS->getBodySamples())
    Total += I.second.getSamples();

  // Only count samples in hot callsites.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
        Total += countBodySamples(CalleeSamples, PSI);
    }

  return Total;
}

/// Return the fraction of sample records used in this profile.
///
/// The returned value is an unsigned integer in the range 0-100 indicating
/// the percentage of sample records that were used while applying this
/// profile to the associated function.
unsigned SampleCoverageTracker::computeCoverage(unsigned Used,
                                                unsigned Total) const {
  assert(Used <= Total &&
         "number of used records cannot exceed the total number of records");
  return Total > 0 ? Used * 100 / Total : 100;
}
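
// Illustrative example (numbers made up): if 45 of the 60 sample records in a
// profile were matched to IR instructions, computeCoverage(45, 60) returns 75,
// and a warning is emitted when that percentage falls below the value given
// to -sample-profile-check-record-coverage.
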
/// Clear all the per-function data used to load samples and propagate weights.
void SampleProfileLoader::clearFunctionData() {
  BlockWeights.clear();
  EdgeWeights.clear();
  VisitedBlocks.clear();
  VisitedEdges.clear();
  EquivalenceClass.clear();
  DT = nullptr;
  PDT = nullptr;
  LI = nullptr;
  Predecessors.clear();
  Successors.clear();
  CoverageTracker.clear();
}

#ifndef NDEBUG
/// Print the weight of edge \p E on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param E Edge to print.
void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) {
  OS << "weight[" << E.first->getName() << "->" << E.second->getName()
     << "]: " << EdgeWeights[E] << "\n";
}

/// Print the equivalence class of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS,
                                                const BasicBlock *BB) {
  const BasicBlock *Equiv = EquivalenceClass[BB];
  OS << "equivalence[" << BB->getName()
     << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
}

/// Print the weight of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoader::printBlockWeight(raw_ostream &OS,
                                           const BasicBlock *BB) const {
  const auto &I = BlockWeights.find(BB);
  uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
  OS << "weight[" << BB->getName() << "]: " << W << "\n";
}
#endif

/// Get the weight for an instruction.
///
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
/// function. We use HeaderLineno to compute the offset. We then
/// look up the samples collected for \p Inst using BodySamples.
///
/// \param Inst Instruction to query.
///
/// \returns the weight of \p Inst.
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
  if (FunctionSamples::ProfileIsProbeBased)
    return getProbeWeight(Inst);

  const DebugLoc &DLoc = Inst.getDebugLoc();
  if (!DLoc)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  // Ignore all intrinsics, phi nodes and branch instructions.
  // Branch and phi node instructions usually contain debug info from sources
  // outside of their residing basic block, thus we ignore them during
  // annotation.
  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
    return std::error_code();

  // If a direct call/invoke instruction is inlined in the profile
  // (findCalleeFunctionSamples returns a non-empty result), but not inlined
  // here, it means that the inlined callsite has no sample, thus the call
  // instruction should have 0 count.
  if (!ProfileIsCS)
    if (const auto *CB = dyn_cast<CallBase>(&Inst))
      if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
        return 0;

  const DILocation *DIL = DLoc;
  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator = DIL->getBaseDiscriminator();
  ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
  if (R) {
    bool FirstMark =
        CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
    if (FirstMark) {
      ORE->emit([&]() {
        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", *R);
        Remark << " samples from profile (offset: ";
        Remark << ore::NV("LineOffset", LineOffset);
        if (Discriminator) {
          Remark << ".";
          Remark << ore::NV("Discriminator", Discriminator);
        }
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG(dbgs() << " " << DLoc.getLine() << "."
                      << DIL->getBaseDiscriminator() << ":" << Inst
                      << " (line offset: " << LineOffset << "."
                      << DIL->getBaseDiscriminator() << " - weight: " << R.get()
                      << ")\n");
  }
  return R;
}

ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) {
  assert(FunctionSamples::ProfileIsProbeBased &&
         "Profile is not pseudo probe based");
  Optional<PseudoProbe> Probe = extractProbe(Inst);
  if (!Probe)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  // If a direct call/invoke instruction is inlined in the profile
  // (findCalleeFunctionSamples returns a non-empty result), but not inlined
  // here, it means that the inlined callsite has no sample, thus the call
  // instruction should have 0 count.
  if (const auto *CB = dyn_cast<CallBase>(&Inst))
    if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
      return 0;

  const ErrorOr<uint64_t> &R = FS->findSamplesAt(Probe->Id, 0);
  if (R) {
    uint64_t Samples = R.get() * Probe->Factor;
    bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples);
    if (FirstMark) {
      ORE->emit([&]() {
        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", Samples);
        Remark << " samples from profile (ProbeId=";
        Remark << ore::NV("ProbeId", Probe->Id);
        Remark << ", Factor=";
        Remark << ore::NV("Factor", Probe->Factor);
        Remark << ", OriginalSamples=";
        Remark << ore::NV("OriginalSamples", R.get());
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG(dbgs() << " " << Probe->Id << ":" << Inst
                      << " - weight: " << R.get() << " - factor: "
                      << format("%0.2f", Probe->Factor) << ")\n");
    return Samples;
  }
  return R;
}

/// Compute the weight of a basic block.
///
/// The weight of basic block \p BB is the maximum weight of all the
/// instructions in BB.
///
/// \param BB The basic block to query.
///
/// \returns the weight for \p BB.
ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) {
  uint64_t Max = 0;
  bool HasWeight = false;
  for (auto &I : BB->getInstList()) {
    const ErrorOr<uint64_t> &R = getInstWeight(I);
    if (R) {
      Max = std::max(Max, R.get());
      HasWeight = true;
    }
  }
  return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code();
}
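
// Illustrative example (counts made up): for a block whose instructions carry
// sample counts {120, 0, 340, 340}, getBlockWeight returns 340, the maximum,
// matching the file-level comment that a block's weight is the maximum number
// of samples found in it rather than the sum.
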
/// Compute and store the weights of every basic block.
///
/// This populates the BlockWeights map by computing
/// the weights of every basic block in the CFG.
///
/// \param F The function to query.
bool SampleProfileLoader::computeBlockWeights(Function &F) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Block weights\n");
  for (const auto &BB : F) {
    ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
    if (Weight) {
      BlockWeights[&BB] = Weight.get();
      VisitedBlocks.insert(&BB);
      Changed = true;
    }
    LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
  }

  return Changed;
}

/// Get the FunctionSamples for a call instruction.
///
/// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
/// instance that the call instruction is calling into. It contains
/// all samples that reside in the inlined instance. We first find the
/// inlined instance that the call instruction comes from, then we
/// traverse its children to find the callsite with the matching
/// location.
///
/// \param Inst Call/Invoke instruction to query.
///
/// \returns The FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL) {
    return nullptr;
  }

  StringRef CalleeName;
  if (Function *Callee = Inst.getCalledFunction())
    CalleeName = FunctionSamples::getCanonicalFnName(*Callee);

  if (ProfileIsCS)
    return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName);

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return nullptr;

  return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL),
                                   CalleeName, Reader->getRemapper());
}

/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted by the total number of samples. Stores
/// the total call count of the indirect call in \p Sum.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
    const Instruction &Inst, uint64_t &Sum) const {
  const DILocation *DIL = Inst.getDebugLoc();
  std::vector<const FunctionSamples *> R;

  if (!DIL) {
    return R;
  }

  auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
    assert(L && R && "Expect non-null FunctionSamples");
    if (L->getEntrySamples() != R->getEntrySamples())
      return L->getEntrySamples() > R->getEntrySamples();
    return FunctionSamples::getGUID(L->getName()) <
           FunctionSamples::getGUID(R->getName());
  };

  if (ProfileIsCS) {
    auto CalleeSamples =
        ContextTracker->getIndirectCalleeContextSamplesFor(DIL);
    if (CalleeSamples.empty())
      return R;

    // For CSSPGO, we only use the target context profile's entry count
    // as that already includes both inlined callees and non-inlined ones.
    Sum = 0;
    for (const auto *const FS : CalleeSamples) {
      Sum += FS->getEntrySamples();
      R.push_back(FS);
    }
    llvm::sort(R, FSCompare);
    return R;
  }

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return R;

  auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
  auto T = FS->findCallTargetMapAt(CallSite);
  Sum = 0;
  if (T)
    for (const auto &T_C : T.get())
      Sum += T_C.second;
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
    if (M->empty())
      return R;
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getEntrySamples();
      R.push_back(&NameFS.second);
    }
    llvm::sort(R, FSCompare);
  }
  return R;
}

/// Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// that the instruction comes from. We traverse the inline stack
/// of that instruction, and match it with the tree nodes in the profile.
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
  if (FunctionSamples::ProfileIsProbeBased) {
    Optional<PseudoProbe> Probe = extractProbe(Inst);
    if (!Probe)
      return nullptr;
  }

  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL)
    return Samples;

  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
  if (it.second) {
    if (ProfileIsCS)
      it.first->second = ContextTracker->getContextSamplesFor(DIL);
    else
      it.first->second =
          Samples->findFunctionSamples(DIL, Reader->getRemapper());
  }
  return it.first->second;
}

/// Attempt to promote an indirect call and also inline the promoted call.
///
/// \param F Caller function.
/// \param Candidate ICP and inline candidate.
/// \param Sum Sum of target counts for the indirect call.
/// \param PromotedInsns Map to keep track of indirect calls already processed.
/// \param InlinedCallSite Output vector for new call sites exposed after
/// inlining.
bool SampleProfileLoader::tryPromoteAndInlineCandidate(
    Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, uint64_t &Sum,
    DenseSet<Instruction *> &PromotedInsns,
    SmallVector<CallBase *, 8> *InlinedCallSite) {
  const char *Reason = "Callee function not available";
  // R->getValue() != &F is to prevent promoting a recursive call.
  // If it is a recursive call, we do not inline it as it could bloat
  // the code exponentially. There is a way to better handle this, e.g.
  // clone the caller first, and inline the cloned caller if it is
  // recursive. As llvm does not inline recursive calls, we will
  // simply ignore it instead of handling it explicitly.
  auto R = SymbolMap.find(Candidate.CalleeSamples->getFuncName());
  if (R != SymbolMap.end() && R->getValue() &&
      !R->getValue()->isDeclaration() && R->getValue()->getSubprogram() &&
      R->getValue()->hasFnAttribute("use-sample-profile") &&
      R->getValue() != &F &&
      isLegalToPromote(*Candidate.CallInstr, R->getValue(), &Reason)) {
    auto *DI =
        &pgo::promoteIndirectCall(*Candidate.CallInstr, R->getValue(),
                                  Candidate.CallsiteCount, Sum, false, ORE);
    if (DI) {
      Sum -= Candidate.CallsiteCount;
      // Prorate the indirect callsite distribution.
      // Do not update the promoted direct callsite distribution at this
      // point since the original distribution combined with the callee
      // profile will be used to prorate callsites from the callee if
      // inlined. Once not inlined, the direct callsite distribution should
      // be prorated so that it will reflect the real callsite counts.
      setProbeDistributionFactor(*Candidate.CallInstr,
                                 Candidate.CallsiteDistribution * Sum /
                                     SumOrigin);
      PromotedInsns.insert(Candidate.CallInstr);
      Candidate.CallInstr = DI;
      if (isa<CallInst>(DI) || isa<InvokeInst>(DI)) {
        bool Inlined = tryInlineCandidate(Candidate, InlinedCallSite);
        if (!Inlined) {
          // Prorate the direct callsite distribution so that it reflects real
          // callsite counts.
          setProbeDistributionFactor(*DI, Candidate.CallsiteDistribution *
                                              Candidate.CallsiteCount /
                                              SumOrigin);
        }
        return Inlined;
      }
    }
  } else {
    LLVM_DEBUG(dbgs() << "\nFailed to promote indirect call to "
                      << Candidate.CalleeSamples->getFuncName() << " because "
                      << Reason << "\n");
  }
  return false;
}

bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
  if (!ProfileSizeInline)
    return false;

  Function *Callee = CallInst.getCalledFunction();
  if (Callee == nullptr)
    return false;

  InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
                                  GetAC, GetTLI);

  if (Cost.isNever())
    return false;

  if (Cost.isAlways())
    return true;

  return Cost.getCost() <= SampleColdCallSiteThreshold;
}

void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
    const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
    bool Hot) {
  for (auto I : Candidates) {
    Function *CalledFunction = I->getCalledFunction();
    if (CalledFunction) {
      ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
                                           I->getDebugLoc(), I->getParent())
                << "previous inlining reattempted for "
                << (Hot ? "hotness: '" : "size: '")
                << ore::NV("Callee", CalledFunction) << "' into '"
                << ore::NV("Caller", &F) << "'");
    }
  }
}

/// Iteratively inline hot callsites of a function.
///
/// Iteratively traverse all callsites of the function \p F, and find if
/// the corresponding inlined instance exists and is hot in the profile. If
/// it is hot enough, inline the callsites and add new callsites of the
/// callee into the caller. If the call is an indirect call, first promote
/// it to a direct call. Each indirect call is limited to a single target.
///
/// \param F function to perform iterative inlining.
/// \param InlinedGUIDs a set to be updated to include all GUIDs that are
/// inlined in the profiled binary.
///
/// \returns True if any inlining happened.
bool SampleProfileLoader::inlineHotFunctions(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  DenseSet<Instruction *> PromotedInsns;

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  DenseMap<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
  bool Changed = false;
  bool LocalChanged = true;
  while (LocalChanged) {
    LocalChanged = false;
    SmallVector<CallBase *, 10> CIS;
    for (auto &BB : F) {
      bool Hot = false;
      SmallVector<CallBase *, 10> AllCandidates;
      SmallVector<CallBase *, 10> ColdCandidates;
      for (auto &I : BB.getInstList()) {
        const FunctionSamples *FS = nullptr;
        if (auto *CB = dyn_cast<CallBase>(&I)) {
          if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) {
            assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
                   "GUIDToFuncNameMap has to be populated");
            AllCandidates.push_back(CB);
            if (FS->getEntrySamples() > 0 || ProfileIsCS)
              LocalNotInlinedCallSites.try_emplace(CB, FS);
            if (callsiteIsHot(FS, PSI, ProfAccForSymsInList))
              Hot = true;
            else if (shouldInlineColdCallee(*CB))
              ColdCandidates.push_back(CB);
          }
        }
      }
      if (Hot || ExternalInlineAdvisor) {
        CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
        emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
      } else {
        CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
        emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
      }
    }
    for (CallBase *I : CIS) {
      Function *CalledFunction = I->getCalledFunction();
      InlineCandidate Candidate = {
          I,
          LocalNotInlinedCallSites.count(I) ? LocalNotInlinedCallSites[I]
                                            : nullptr,
          0 /* dummy count */, 1.0 /* dummy distribution factor */};
      // Do not inline recursive calls.
      if (CalledFunction == &F)
        continue;
      if (I->isIndirectCall()) {
        if (PromotedInsns.count(I))
          continue;
        uint64_t Sum;
        for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
          uint64_t SumOrigin = Sum;
          if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
            FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                     PSI->getOrCompHotCountThreshold());
            continue;
          }
          if (!callsiteIsHot(FS, PSI, ProfAccForSymsInList))
            continue;

          Candidate = {I, FS, FS->getEntrySamples(), 1.0};
          if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
                                           PromotedInsns)) {
            LocalNotInlinedCallSites.erase(I);
            LocalChanged = true;
          }
        }
      } else if (CalledFunction && CalledFunction->getSubprogram() &&
                 !CalledFunction->isDeclaration()) {
        if (tryInlineCandidate(Candidate)) {
          LocalNotInlinedCallSites.erase(I);
          LocalChanged = true;
        }
      } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
        findCalleeFunctionSamples(*I)->findInlinedFunctions(
            InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
      }
    }
    Changed |= LocalChanged;
  }

  // For a CS profile, the profile for a not-inlined context will be merged
  // when the base profile is being retrieved.
  if (ProfileIsCS)
    return Changed;

  // Accumulate not-inlined callsite information into notInlinedCallInfo.
  for (const auto &Pair : LocalNotInlinedCallSites) {
    CallBase *I = Pair.getFirst();
    Function *Callee = I->getCalledFunction();
    if (!Callee || Callee->isDeclaration())
      continue;

    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline",
                                         I->getDebugLoc(), I->getParent())
              << "previous inlining not repeated: '"
              << ore::NV("Callee", Callee) << "' into '"
              << ore::NV("Caller", &F) << "'");

    ++NumCSNotInlined;
    const FunctionSamples *FS = Pair.getSecond();
    if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
      continue;
    }

    if (ProfileMergeInlinee) {
      // A function call can be replicated by optimizations like callsite
      // splitting or jump threading, and the replicates end up sharing the
      // sample nested callee profile instead of slicing the original inlinee's
      // profile. We want to do the merge exactly once by filtering out callee
      // profiles with a non-zero head sample count.
      if (FS->getHeadSamples() == 0) {
        // Use entry samples as head samples during the merge, as inlinees
        // don't have head samples.
        const_cast<FunctionSamples *>(FS)->addHeadSamples(
            FS->getEntrySamples());

        // Note that we have to do the merge right after processing the
        // function. This allows OutlineFS's profile to be used for annotation
        // during top-down processing of functions' annotation.
        FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
        OutlineFS->merge(*FS);
      }
    } else {
      auto pair =
          notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
      pair.first->second.entryCount += FS->getEntrySamples();
    }
  }
  return Changed;
}

bool SampleProfileLoader::tryInlineCandidate(
    InlineCandidate &Candidate, SmallVector<CallBase *, 8> *InlinedCallSites) {

  CallBase &CB = *Candidate.CallInstr;
  Function *CalledFunction = CB.getCalledFunction();
  assert(CalledFunction && "Expect a callee with definition");
  DebugLoc DLoc = CB.getDebugLoc();
  BasicBlock *BB = CB.getParent();

  InlineCost Cost = shouldInlineCandidate(Candidate);
  if (Cost.isNever()) {
    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
              << "incompatible inlining");
    return false;
  }

  if (!Cost)
    return false;

  InlineFunctionInfo IFI(nullptr, GetAC);
  if (InlineFunction(CB, IFI).isSuccess()) {
    // The call to InlineFunction erases I, so we can't pass it here.
    emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
                    true, CSINLINE_DEBUG);

    // Now populate the list of newly exposed call sites.
    if (InlinedCallSites) {
      InlinedCallSites->clear();
      for (auto &I : IFI.InlinedCallSites)
        InlinedCallSites->push_back(I);
    }

    if (ProfileIsCS)
      ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples);
    ++NumCSInlined;

    // Prorate inlined probes for a duplicated inlining callsite which probably
    // has a distribution less than 100%. Samples for an inlinee should be
    // distributed among the copies of the original callsite based on each
    // callsite's distribution factor for counts accuracy. Note that an inlined
    // probe may come with its own distribution factor if it has been duplicated
    // in the inlinee body. The two factors are multiplied to reflect the
    // aggregation of duplication.
    if (Candidate.CallsiteDistribution < 1) {
      for (auto &I : IFI.InlinedCallSites) {
        if (Optional<PseudoProbe> Probe = extractProbe(*I))
          setProbeDistributionFactor(*I, Probe->Factor *
                                             Candidate.CallsiteDistribution);
      }
      NumDuplicatedInlinesite++;
    }

    return true;
  }
  return false;
}

bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
                                             CallBase *CB) {
  assert(CB && "Expect non-null call instruction");

  if (isa<IntrinsicInst>(CB))
    return false;

  // Find the callee's profile. For an indirect call, find the hottest target
  // profile.
  const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
  if (!CalleeSamples)
    return false;

  float Factor = 1.0;
  if (Optional<PseudoProbe> Probe = extractProbe(*CB))
    Factor = Probe->Factor;

  uint64_t CallsiteCount = 0;
  ErrorOr<uint64_t> Weight = getBlockWeight(CB->getParent());
  if (Weight)
    CallsiteCount = Weight.get();
  if (CalleeSamples)
    CallsiteCount = std::max(
        CallsiteCount, uint64_t(CalleeSamples->getEntrySamples() * Factor));

  *NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
  return true;
}

InlineCost
SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
  std::unique_ptr<InlineAdvice> Advice = nullptr;
  if (ExternalInlineAdvisor) {
    Advice = ExternalInlineAdvisor->getAdvice(*Candidate.CallInstr);
    if (!Advice->isInliningRecommended()) {
      Advice->recordUnattemptedInlining();
      return InlineCost::getNever("not previously inlined");
    }
    Advice->recordInlining();
    return InlineCost::getAlways("previously inlined");
  }

  // Adjust the threshold based on call site hotness; only do this for the
  // callsite prioritized inliner because otherwise the cost-benefit check is
  // done earlier.
  int SampleThreshold = SampleColdCallSiteThreshold;
  if (CallsitePrioritizedInline) {
    if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
      SampleThreshold = SampleHotCallSiteThreshold;
    else if (!ProfileSizeInline)
      return InlineCost::getNever("cold callsite");
  }

  Function *Callee = Candidate.CallInstr->getCalledFunction();
  assert(Callee && "Expect a definition for inline candidate of direct call");

  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Checks if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. Need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when cost exceeds threshold without checking all IRs in the callee.
  // The actual cost does not matter because we only check isNever() to
  // see if it is legal to inline the callsite.
  InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
                                  GetTTI(*Callee), GetAC, GetTLI);

  // Honor always inline and never inline from the call analyzer.
  if (Cost.isNever() || Cost.isAlways())
    return Cost;

  // For the old FDO inliner, we inline the call site as long as cost is not
  // "Never". The cost-benefit check is done earlier.
  if (!CallsitePrioritizedInline) {
    return InlineCost::get(Cost.getCost(), INT_MAX);
  }

  // Otherwise only use the cost from the call analyzer, but overwrite the
  // threshold with the Sample PGO threshold.
  return InlineCost::get(Cost.getCost(), SampleThreshold);
}

bool SampleProfileLoader::inlineHotFunctionsWithPriority(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  DenseSet<Instruction *> PromotedInsns;
  assert(ProfileIsCS && "Priority-based inliner only works with CSSPGO now");

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
1486 assert((!ProfAccForSymsInList ||
1487 (!ProfileSampleAccurate &&
1488 !F.hasFnAttribute("profile-sample-accurate"))) &&
1489 "ProfAccForSymsInList should be false when profile-sample-accurate "
1490 "is enabled");
1491
1492 // Populate the worklist with initial call sites from the root inliner, along
1493 // with call site weights.
1494 CandidateQueue CQueue;
1495 InlineCandidate NewCandidate;
1496 for (auto &BB : F) {
1497 for (auto &I : BB.getInstList()) {
1498 auto *CB = dyn_cast<CallBase>(&I);
1499 if (!CB)
1500 continue;
1501 if (getInlineCandidate(&NewCandidate, CB))
1502 CQueue.push(NewCandidate);
1503 }
1504 }
1505
1506 // Cap the size growth from profile guided inlining. This is needed even
1507 // though the cost of each inline candidate already accounts for callee size,
1508 // because with top-down inlining, we can grow the inliner size significantly
1509 // with a large number of smaller inlinees that each pass the cost check.
1510 assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
1511 "Max inline size limit should not be smaller than min inline size "
1512 "limit.");
1513 unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
1514 SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
1515 SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
1516 if (ExternalInlineAdvisor)
1517 SizeLimit = std::numeric_limits<unsigned>::max();
1518
1519 // Perform iterative BFS call site prioritized inlining
1520 bool Changed = false;
1521 while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
1522 InlineCandidate Candidate = CQueue.top();
1523 CQueue.pop();
1524 CallBase *I = Candidate.CallInstr;
1525 Function *CalledFunction = I->getCalledFunction();
1526
1527 if (CalledFunction == &F)
1528 continue;
1529 if (I->isIndirectCall()) {
1530 if (PromotedInsns.count(I))
1531 continue;
1532 uint64_t Sum;
1533 auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
1534 uint64_t SumOrigin = Sum;
1535 Sum *= Candidate.CallsiteDistribution;
1536 for (const auto *FS : CalleeSamples) {
1537 // TODO: Consider disabling pre-LTO ICP for MonoLTO as well
1538 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1539 FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
1540 PSI->getOrCompHotCountThreshold());
1541 continue;
1542 }
1543 uint64_t EntryCountDistributed =
1544 FS->getEntrySamples() * Candidate.CallsiteDistribution;
1545 // In addition to the regular inline cost check, we also need to make sure
1546 // ICP isn't introducing excessive speculative checks even if an individual
1547 // target looks beneficial to promote and inline. That means we should
1548 // only do ICP when there is a small number of dominant targets.
1549 if (EntryCountDistributed < SumOrigin / ProfileICPThreshold)
1550 break;
1551 // TODO: Fix CallAnalyzer to handle all indirect calls.
1552 // For indirect calls, we don't run CallAnalyzer to get the InlineCost
1553 // before actual inlining. This is because we could see two different
1554 // types from the same definition, which makes CallAnalyzer choke as
1555 // it expects matching parameter types on both the caller and callee
1556 // sides. See the example from PR18962 for the triggering cases (the bug was
1557 // fixed, but we generate different types).
1558 if (!PSI->isHotCount(EntryCountDistributed))
1559 break;
1560 SmallVector<CallBase *, 8> InlinedCallSites;
1561 // Attach the function profile for the promoted indirect callee, and update
1562 // the call site count for the promoted inline candidate too.
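// For illustration (hypothetical numbers): a target with 600 entry samples
// at a callsite whose distribution factor is 0.5 yields
// EntryCountDistributed = 300, and that prorated count is what the rebuilt
// candidate below carries.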
1563 Candidate = {I, FS, EntryCountDistributed, 1564 Candidate.CallsiteDistribution}; 1565 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum, 1566 PromotedInsns, &InlinedCallSites)) { 1567 for (auto *CB : InlinedCallSites) { 1568 if (getInlineCandidate(&NewCandidate, CB)) 1569 CQueue.emplace(NewCandidate); 1570 } 1571 Changed = true; 1572 } 1573 } 1574 } else if (CalledFunction && CalledFunction->getSubprogram() && 1575 !CalledFunction->isDeclaration()) { 1576 SmallVector<CallBase *, 8> InlinedCallSites; 1577 if (tryInlineCandidate(Candidate, &InlinedCallSites)) { 1578 for (auto *CB : InlinedCallSites) { 1579 if (getInlineCandidate(&NewCandidate, CB)) 1580 CQueue.emplace(NewCandidate); 1581 } 1582 Changed = true; 1583 } 1584 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) { 1585 findCalleeFunctionSamples(*I)->findInlinedFunctions( 1586 InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold()); 1587 } 1588 } 1589 1590 if (!CQueue.empty()) { 1591 if (SizeLimit == (unsigned)ProfileInlineLimitMax) 1592 ++NumCSInlinedHitMaxLimit; 1593 else if (SizeLimit == (unsigned)ProfileInlineLimitMin) 1594 ++NumCSInlinedHitMinLimit; 1595 else 1596 ++NumCSInlinedHitGrowthLimit; 1597 } 1598 1599 return Changed; 1600 } 1601 1602 /// Find equivalence classes for the given block. 1603 /// 1604 /// This finds all the blocks that are guaranteed to execute the same 1605 /// number of times as \p BB1. To do this, it traverses all the 1606 /// descendants of \p BB1 in the dominator or post-dominator tree. 1607 /// 1608 /// A block BB2 will be in the same equivalence class as \p BB1 if 1609 /// the following holds: 1610 /// 1611 /// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2 1612 /// is a descendant of \p BB1 in the dominator tree, then BB2 should 1613 /// dominate BB1 in the post-dominator tree. 1614 /// 1615 /// 2- Both BB2 and \p BB1 must be in the same loop. 1616 /// 1617 /// For every block BB2 that meets those two requirements, we set BB2's 1618 /// equivalence class to \p BB1. 1619 /// 1620 /// \param BB1 Block to check. 1621 /// \param Descendants Descendants of \p BB1 in either the dom or pdom tree. 1622 /// \param DomTree Opposite dominator tree. If \p Descendants is filled 1623 /// with blocks from \p BB1's dominator tree, then 1624 /// this is the post-dominator tree, and vice versa. 1625 template <bool IsPostDom> 1626 void SampleProfileLoader::findEquivalencesFor( 1627 BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants, 1628 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) { 1629 const BasicBlock *EC = EquivalenceClass[BB1]; 1630 uint64_t Weight = BlockWeights[EC]; 1631 for (const auto *BB2 : Descendants) { 1632 bool IsDomParent = DomTree->dominates(BB2, BB1); 1633 bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2); 1634 if (BB1 != BB2 && IsDomParent && IsInSameLoop) { 1635 EquivalenceClass[BB2] = EC; 1636 // If BB2 is visited, then the entire EC should be marked as visited. 1637 if (VisitedBlocks.count(BB2)) { 1638 VisitedBlocks.insert(EC); 1639 } 1640 1641 // If BB2 is heavier than BB1, make BB2 have the same weight 1642 // as BB1. 1643 // 1644 // Note that we don't worry about the opposite situation here 1645 // (when BB2 is lighter than BB1). We will deal with this 1646 // during the propagation phase. Right now, we just want to 1647 // make sure that BB1 has the largest weight of all the 1648 // members of its equivalence set. 
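// For example (illustrative numbers): if the class weight so far is 40 and
// an equivalent block BB2 carries 90 samples, the class weight is raised to
// 90 below.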
1649 Weight = std::max(Weight, BlockWeights[BB2]); 1650 } 1651 } 1652 if (EC == &EC->getParent()->getEntryBlock()) { 1653 BlockWeights[EC] = Samples->getHeadSamples() + 1; 1654 } else { 1655 BlockWeights[EC] = Weight; 1656 } 1657 } 1658 1659 /// Find equivalence classes. 1660 /// 1661 /// Since samples may be missing from blocks, we can fill in the gaps by setting 1662 /// the weights of all the blocks in the same equivalence class to the same 1663 /// weight. To compute the concept of equivalence, we use dominance and loop 1664 /// information. Two blocks B1 and B2 are in the same equivalence class if B1 1665 /// dominates B2, B2 post-dominates B1 and both are in the same loop. 1666 /// 1667 /// \param F The function to query. 1668 void SampleProfileLoader::findEquivalenceClasses(Function &F) { 1669 SmallVector<BasicBlock *, 8> DominatedBBs; 1670 LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n"); 1671 // Find equivalence sets based on dominance and post-dominance information. 1672 for (auto &BB : F) { 1673 BasicBlock *BB1 = &BB; 1674 1675 // Compute BB1's equivalence class once. 1676 if (EquivalenceClass.count(BB1)) { 1677 LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1)); 1678 continue; 1679 } 1680 1681 // By default, blocks are in their own equivalence class. 1682 EquivalenceClass[BB1] = BB1; 1683 1684 // Traverse all the blocks dominated by BB1. We are looking for 1685 // every basic block BB2 such that: 1686 // 1687 // 1- BB1 dominates BB2. 1688 // 2- BB2 post-dominates BB1. 1689 // 3- BB1 and BB2 are in the same loop nest. 1690 // 1691 // If all those conditions hold, it means that BB2 is executed 1692 // as many times as BB1, so they are placed in the same equivalence 1693 // class by making BB2's equivalence class be BB1. 1694 DominatedBBs.clear(); 1695 DT->getDescendants(BB1, DominatedBBs); 1696 findEquivalencesFor(BB1, DominatedBBs, PDT.get()); 1697 1698 LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1)); 1699 } 1700 1701 // Assign weights to equivalence classes. 1702 // 1703 // All the basic blocks in the same equivalence class will execute 1704 // the same number of times. Since we know that the head block in 1705 // each equivalence class has the largest weight, assign that weight 1706 // to all the blocks in that equivalence class. 1707 LLVM_DEBUG( 1708 dbgs() << "\nAssign the same weight to all blocks in the same class\n"); 1709 for (auto &BI : F) { 1710 const BasicBlock *BB = &BI; 1711 const BasicBlock *EquivBB = EquivalenceClass[BB]; 1712 if (BB != EquivBB) 1713 BlockWeights[BB] = BlockWeights[EquivBB]; 1714 LLVM_DEBUG(printBlockWeight(dbgs(), BB)); 1715 } 1716 } 1717 1718 /// Visit the given edge to decide if it has a valid weight. 1719 /// 1720 /// If \p E has not been visited before, we copy to \p UnknownEdge 1721 /// and increment the count of unknown edges. 1722 /// 1723 /// \param E Edge to visit. 1724 /// \param NumUnknownEdges Current number of unknown edges. 1725 /// \param UnknownEdge Set if E has not been visited before. 1726 /// 1727 /// \returns E's weight, if known. Otherwise, return 0. 1728 uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges, 1729 Edge *UnknownEdge) { 1730 if (!VisitedEdges.count(E)) { 1731 (*NumUnknownEdges)++; 1732 *UnknownEdge = E; 1733 return 0; 1734 } 1735 1736 return EdgeWeights[E]; 1737 } 1738 1739 /// Propagate weights through incoming/outgoing edges. 1740 /// 1741 /// If the weight of a basic block is known, and there is only one edge 1742 /// with an unknown weight, we can calculate the weight of that edge. 
1743 ///
1744 /// Similarly, if all the edges have a known count, we can calculate the
1745 /// count of the basic block, if needed.
1746 ///
1747 /// \param F Function to process.
1748 /// \param UpdateBlockCount Whether we should update basic block counts that
1749 /// have already been annotated.
1750 ///
1751 /// \returns True if new weights were assigned to edges or blocks.
1752 bool SampleProfileLoader::propagateThroughEdges(Function &F,
1753 bool UpdateBlockCount) {
1754 bool Changed = false;
1755 LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
1756 for (const auto &BI : F) {
1757 const BasicBlock *BB = &BI;
1758 const BasicBlock *EC = EquivalenceClass[BB];
1759
1760 // Visit all the predecessor and successor edges to determine
1761 // which ones have a weight assigned already. Note that it doesn't
1762 // matter that we only keep track of a single unknown edge. The
1763 // only case we are interested in handling is when only a single
1764 // edge is unknown (see setEdgeOrBlockWeight).
1765 for (unsigned i = 0; i < 2; i++) {
1766 uint64_t TotalWeight = 0;
1767 unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
1768 Edge UnknownEdge, SelfReferentialEdge, SingleEdge;
1769
1770 if (i == 0) {
1771 // First, visit all predecessor edges.
1772 NumTotalEdges = Predecessors[BB].size();
1773 for (auto *Pred : Predecessors[BB]) {
1774 Edge E = std::make_pair(Pred, BB);
1775 TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1776 if (E.first == E.second)
1777 SelfReferentialEdge = E;
1778 }
1779 if (NumTotalEdges == 1) {
1780 SingleEdge = std::make_pair(Predecessors[BB][0], BB);
1781 }
1782 } else {
1783 // On the second round, visit all successor edges.
1784 NumTotalEdges = Successors[BB].size();
1785 for (auto *Succ : Successors[BB]) {
1786 Edge E = std::make_pair(BB, Succ);
1787 TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
1788 }
1789 if (NumTotalEdges == 1) {
1790 SingleEdge = std::make_pair(BB, Successors[BB][0]);
1791 }
1792 }
1793
1794 // After visiting all the edges, there are three cases that we
1795 // can handle immediately:
1796 //
1797 // - All the edge weights are known (i.e., NumUnknownEdges == 0).
1798 // In this case, we simply check that the sum of all the edges
1799 // is the same as BB's weight. If not, we change BB's weight
1800 // to match. Additionally, if BB had not been visited before,
1801 // we mark it visited.
1802 //
1803 // - Only one edge is unknown and BB has already been visited.
1804 // In this case, we can compute the weight of the edge by
1805 // subtracting the sum of the known edge weights from the
1806 // block weight. If the known edges weigh more than BB, then the
1807 // weight of the last remaining edge is set to zero.
1808 //
1809 // - There exists a self-referential edge and the weight of BB is
1810 // known. In this case, the weight of this edge can be derived from BB's weight.
1811 // We add up all the other known edges and set the weight on
1812 // the self-referential edge as we did in the previous case.
1813 //
1814 // In any other case, we must continue iterating. Eventually,
1815 // all edges will get a weight, or iteration will stop when
1816 // it reaches SampleProfileMaxPropagateIterations.
1817 if (NumUnknownEdges <= 1) {
1818 uint64_t &BBWeight = BlockWeights[EC];
1819 if (NumUnknownEdges == 0) {
1820 if (!VisitedBlocks.count(EC)) {
1821 // If we already know the weight of all edges, the weight of the
1822 // basic block can be computed. It should be no larger than the sum
1823 // of all edge weights.
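// For example (illustrative numbers): with known incoming edges of 30 and
// 50 and a current block weight of 60, TotalWeight is 80 and the block
// weight is raised to 80 below.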
1824 if (TotalWeight > BBWeight) {
1825 BBWeight = TotalWeight;
1826 Changed = true;
1827 LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
1828 << " known. Set weight for block: ";
1829 printBlockWeight(dbgs(), BB););
1830 }
1831 } else if (NumTotalEdges == 1 &&
1832 EdgeWeights[SingleEdge] < BlockWeights[EC]) {
1833 // If there is only one edge for the visited basic block, use the
1834 // block weight to adjust the edge weight if the edge weight is smaller.
1835 EdgeWeights[SingleEdge] = BlockWeights[EC];
1836 Changed = true;
1837 }
1838 } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
1839 // If there is a single unknown edge and the block has been
1840 // visited, then we can compute E's weight.
1841 if (BBWeight >= TotalWeight)
1842 EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
1843 else
1844 EdgeWeights[UnknownEdge] = 0;
1845 const BasicBlock *OtherEC;
1846 if (i == 0)
1847 OtherEC = EquivalenceClass[UnknownEdge.first];
1848 else
1849 OtherEC = EquivalenceClass[UnknownEdge.second];
1850 // An edge's weight should never exceed the weights of the BBs it connects.
1851 if (VisitedBlocks.count(OtherEC) &&
1852 EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
1853 EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
1854 VisitedEdges.insert(UnknownEdge);
1855 Changed = true;
1856 LLVM_DEBUG(dbgs() << "Set weight for edge: ";
1857 printEdgeWeight(dbgs(), UnknownEdge));
1858 }
1859 } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
1860 // If a block weighs 0, all its in/out edges should weigh 0.
1861 if (i == 0) {
1862 for (auto *Pred : Predecessors[BB]) {
1863 Edge E = std::make_pair(Pred, BB);
1864 EdgeWeights[E] = 0;
1865 VisitedEdges.insert(E);
1866 }
1867 } else {
1868 for (auto *Succ : Successors[BB]) {
1869 Edge E = std::make_pair(BB, Succ);
1870 EdgeWeights[E] = 0;
1871 VisitedEdges.insert(E);
1872 }
1873 }
1874 } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
1875 uint64_t &BBWeight = BlockWeights[BB];
1876 // We have a self-referential edge and the weight of BB is known.
1877 if (BBWeight >= TotalWeight)
1878 EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
1879 else
1880 EdgeWeights[SelfReferentialEdge] = 0;
1881 VisitedEdges.insert(SelfReferentialEdge);
1882 Changed = true;
1883 LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
1884 printEdgeWeight(dbgs(), SelfReferentialEdge));
1885 }
1886 if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
1887 BlockWeights[EC] = TotalWeight;
1888 VisitedBlocks.insert(EC);
1889 Changed = true;
1890 }
1891 }
1892 }
1893
1894 return Changed;
1895 }
1896
1897 /// Build in/out edge lists for each basic block in the CFG.
1898 ///
1899 /// We are interested in unique edges. If a block B1 has multiple
1900 /// edges to another block B2, we only add a single B1->B2 edge.
1901 void SampleProfileLoader::buildEdges(Function &F) {
1902 for (auto &BI : F) {
1903 BasicBlock *B1 = &BI;
1904
1905 // Add predecessors for B1.
1906 SmallPtrSet<BasicBlock *, 16> Visited;
1907 if (!Predecessors[B1].empty())
1908 llvm_unreachable("Found a stale predecessors list in a basic block.");
1909 for (BasicBlock *B2 : predecessors(B1))
1910 if (Visited.insert(B2).second)
1911 Predecessors[B1].push_back(B2);
1912
1913 // Add successors for B1.
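// For example, a switch whose three cases all branch to the same block
// contributes only a single B1->B2 entry to the successor list built below,
// keeping duplicate edges out of the propagation.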
1914 Visited.clear();
1915 if (!Successors[B1].empty())
1916 llvm_unreachable("Found a stale successors list in a basic block.");
1917 for (BasicBlock *B2 : successors(B1))
1918 if (Visited.insert(B2).second)
1919 Successors[B1].push_back(B2);
1920 }
1921 }
1922
1923 /// Returns the call targets in \p M, sorted by count in descending order.
1924 static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
1925 const SampleRecord::CallTargetMap &M) {
1926 SmallVector<InstrProfValueData, 2> R;
1927 for (const auto &I : SampleRecord::SortCallTargets(M)) {
1928 R.emplace_back(InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
1929 }
1930 return R;
1931 }
1932
1933 /// Propagate weights into edges.
1934 ///
1935 /// The following rules are applied to every block BB in the CFG:
1936 ///
1937 /// - If BB has a single predecessor/successor, then the weight
1938 /// of that edge is the weight of the block.
1939 ///
1940 /// - If all incoming or outgoing edges are known except one, and the
1941 /// weight of the block is already known, the weight of the unknown
1942 /// edge will be the weight of the block minus the sum of all the known
1943 /// edges. If the sum of all the known edges is larger than BB's weight,
1944 /// we set the unknown edge weight to zero.
1945 ///
1946 /// - If there is a self-referential edge, and the weight of the block is
1947 /// known, the weight for that edge is set to the weight of the block
1948 /// minus the weight of the other incoming edges to that block (if
1949 /// known).
1950 void SampleProfileLoader::propagateWeights(Function &F) {
1951 bool Changed = true;
1952 unsigned I = 0;
1953
1954 // If a BB's weight is larger than its corresponding loop header's weight,
1955 // use the BB weight to replace the loop header weight.
1956 for (auto &BI : F) {
1957 BasicBlock *BB = &BI;
1958 Loop *L = LI->getLoopFor(BB);
1959 if (!L) {
1960 continue;
1961 }
1962 BasicBlock *Header = L->getHeader();
1963 if (Header && BlockWeights[BB] > BlockWeights[Header]) {
1964 BlockWeights[Header] = BlockWeights[BB];
1965 }
1966 }
1967
1968 // Before propagation starts, build, for each block, a list of
1969 // unique predecessors and successors. This is necessary to handle
1970 // identical edges in multiway branches. Since we visit all blocks and all
1971 // edges of the CFG, it is cleaner to build these lists once at the start
1972 // of the pass.
1973 buildEdges(F);
1974
1975 // Propagate until we converge or we go past the iteration limit.
1976 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
1977 Changed = propagateThroughEdges(F, false);
1978 }
1979
1980 // The first propagation propagates BB counts from annotated BBs to unknown
1981 // BBs. The second propagation pass resets edge weights and uses all BB weights
1982 // to propagate edge weights.
1983 VisitedEdges.clear();
1984 Changed = true;
1985 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
1986 Changed = propagateThroughEdges(F, false);
1987 }
1988
1989 // The third propagation pass allows adjusting annotated BB weights that are
1990 // obviously wrong.
1991 Changed = true;
1992 while (Changed && I++ < SampleProfileMaxPropagateIterations) {
1993 Changed = propagateThroughEdges(F, true);
1994 }
1995
1996 // Generate MD_prof metadata for every branch instruction using the
1997 // edge weights computed during propagation.
1998 LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
1999 LLVMContext &Ctx = F.getContext();
2000 MDBuilder MDB(Ctx);
2001 for (auto &BI : F) {
2002 BasicBlock *BB = &BI;
2003
2004 if (BlockWeights[BB]) {
2005 for (auto &I : BB->getInstList()) {
2006 if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
2007 continue;
2008 if (!cast<CallBase>(I).getCalledFunction()) {
2009 const DebugLoc &DLoc = I.getDebugLoc();
2010 if (!DLoc)
2011 continue;
2012 const DILocation *DIL = DLoc;
2013 const FunctionSamples *FS = findFunctionSamples(I);
2014 if (!FS)
2015 continue;
2016 auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
2017 auto T = FS->findCallTargetMapAt(CallSite);
2018 if (!T || T.get().empty())
2019 continue;
2020 // Prorate the callsite counts to reflect what is already done to the
2021 // callsite, such as ICP or callsite cloning.
2022 if (FunctionSamples::ProfileIsProbeBased) {
2023 if (Optional<PseudoProbe> Probe = extractProbe(I)) {
2024 if (Probe->Factor < 1)
2025 T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor);
2026 }
2027 }
2028 SmallVector<InstrProfValueData, 2> SortedCallTargets =
2029 GetSortedValueDataFromCallTargets(T.get());
2030 uint64_t Sum;
2031 findIndirectCallFunctionSamples(I, Sum);
2032 annotateValueSite(*I.getParent()->getParent()->getParent(), I,
2033 SortedCallTargets, Sum, IPVK_IndirectCallTarget,
2034 SortedCallTargets.size());
2035 } else if (!isa<IntrinsicInst>(&I)) {
2036 I.setMetadata(LLVMContext::MD_prof,
2037 MDB.createBranchWeights(
2038 {static_cast<uint32_t>(BlockWeights[BB])}));
2039 }
2040 }
2041 }
2042 Instruction *TI = BB->getTerminator();
2043 if (TI->getNumSuccessors() == 1)
2044 continue;
2045 if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
2046 continue;
2047
2048 DebugLoc BranchLoc = TI->getDebugLoc();
2049 LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
2050 << ((BranchLoc) ? Twine(BranchLoc.getLine())
2051 : Twine("<UNKNOWN LOCATION>"))
2052 << ".\n");
2053 SmallVector<uint32_t, 4> Weights;
2054 uint32_t MaxWeight = 0;
2055 Instruction *MaxDestInst;
2056 for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
2057 BasicBlock *Succ = TI->getSuccessor(I);
2058 Edge E = std::make_pair(BB, Succ);
2059 uint64_t Weight = EdgeWeights[E];
2060 LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
2061 // Use uint32_t saturated arithmetic to adjust the incoming weights,
2062 // if needed. Sample counts in profiles are 64-bit unsigned values,
2063 // but internally branch weights are expressed as 32-bit values.
2064 if (Weight > std::numeric_limits<uint32_t>::max()) {
2065 LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
2066 Weight = std::numeric_limits<uint32_t>::max();
2067 }
2068 // One is added to each weight to avoid propagation errors introduced by
2069 // 0 weights.
2070 Weights.push_back(static_cast<uint32_t>(Weight + 1));
2071 if (Weight != 0) {
2072 if (Weight > MaxWeight) {
2073 MaxWeight = Weight;
2074 MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
2075 }
2076 }
2077 }
2078
2079 uint64_t TempWeight;
2080 // Only set weights if there is at least one non-zero weight.
2081 // In any other case, let the analyzer set weights.
2082 // Do not set weights if weights are already present. In ThinLTO, the profile
2083 // annotation is done twice. If the first annotation already set the
2084 // weights, the second pass does not need to set them.
2085 if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
2086 LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
2087 TI->setMetadata(LLVMContext::MD_prof,
2088 MDB.createBranchWeights(Weights));
2089 ORE->emit([&]() {
2090 return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
2091 << "most popular destination for conditional branches at "
2092 << ore::NV("CondBranchesLoc", BranchLoc);
2093 });
2094 } else {
2095 LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
2096 }
2097 }
2098 }
2099
2100 /// Get the line number for the function header.
2101 ///
2102 /// This looks up function \p F in the current compilation unit and
2103 /// retrieves the line number where the function is defined. This is
2104 /// line 0 for all the samples read from the profile file. Every line
2105 /// number is relative to this line.
2106 ///
2107 /// \param F Function object to query.
2108 ///
2109 /// \returns the line number where \p F is defined. If it returns 0,
2110 /// it means that there is no debug information available for \p F.
2111 unsigned SampleProfileLoader::getFunctionLoc(Function &F) {
2112 if (DISubprogram *S = F.getSubprogram())
2113 return S->getLine();
2114
2115 if (NoWarnSampleUnused)
2116 return 0;
2117
2118 // If the start of \p F is missing, emit a diagnostic to inform the user
2119 // about the missed opportunity.
2120 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2121 "No debug information found in function " + F.getName() +
2122 ": Function profile not used",
2123 DS_Warning));
2124 return 0;
2125 }
2126
2127 void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) {
2128 DT.reset(new DominatorTree);
2129 DT->recalculate(F);
2130
2131 PDT.reset(new PostDominatorTree(F));
2132
2133 LI.reset(new LoopInfo);
2134 LI->analyze(*DT);
2135 }
2136
2137 /// Generate branch weight metadata for all branches in \p F.
2138 ///
2139 /// Branch weights are computed out of instruction samples using a
2140 /// propagation heuristic. Propagation proceeds in 3 phases:
2141 ///
2142 /// 1- Assignment of block weights. All the basic blocks in the function
2143 /// are initially assigned the same weight as their most frequently
2144 /// executed instruction.
2145 ///
2146 /// 2- Creation of equivalence classes. Since samples may be missing from
2147 /// blocks, we can fill in the gaps by setting the weights of all the
2148 /// blocks in the same equivalence class to the same weight. To compute
2149 /// the concept of equivalence, we use dominance and loop information.
2150 /// Two blocks B1 and B2 are in the same equivalence class if B1
2151 /// dominates B2, B2 post-dominates B1 and both are in the same loop.
2152 ///
2153 /// 3- Propagation of block weights into edges. This uses a simple
2154 /// propagation heuristic. The following rules are applied to every
2155 /// block BB in the CFG:
2156 ///
2157 /// - If BB has a single predecessor/successor, then the weight
2158 /// of that edge is the weight of the block.
2159 ///
2160 /// - If all the edges are known except one, and the weight of the
2161 /// block is already known, the weight of the unknown edge will
2162 /// be the weight of the block minus the sum of all the known
2163 /// edges. If the sum of all the known edges is larger than BB's weight,
2164 /// we set the unknown edge weight to zero.
2165 ///
2166 /// - If there is a self-referential edge, and the weight of the block is
2167 /// known, the weight for that edge is set to the weight of the block
2168 /// minus the weight of the other incoming edges to that block (if
2169 /// known).
2170 /// 2171 /// Since this propagation is not guaranteed to finalize for every CFG, we 2172 /// only allow it to proceed for a limited number of iterations (controlled 2173 /// by -sample-profile-max-propagate-iterations). 2174 /// 2175 /// FIXME: Try to replace this propagation heuristic with a scheme 2176 /// that is guaranteed to finalize. A work-list approach similar to 2177 /// the standard value propagation algorithm used by SSA-CCP might 2178 /// work here. 2179 /// 2180 /// Once all the branch weights are computed, we emit the MD_prof 2181 /// metadata on BB using the computed values for each of its branches. 2182 /// 2183 /// \param F The function to query. 2184 /// 2185 /// \returns true if \p F was modified. Returns false, otherwise. 2186 bool SampleProfileLoader::emitAnnotations(Function &F) { 2187 bool Changed = false; 2188 2189 if (FunctionSamples::ProfileIsProbeBased) { 2190 if (!ProbeManager->profileIsValid(F, *Samples)) { 2191 LLVM_DEBUG( 2192 dbgs() << "Profile is invalid due to CFG mismatch for Function " 2193 << F.getName()); 2194 ++NumMismatchedProfile; 2195 return false; 2196 } 2197 ++NumMatchedProfile; 2198 } else { 2199 if (getFunctionLoc(F) == 0) 2200 return false; 2201 2202 LLVM_DEBUG(dbgs() << "Line number for the first instruction in " 2203 << F.getName() << ": " << getFunctionLoc(F) << "\n"); 2204 } 2205 2206 DenseSet<GlobalValue::GUID> InlinedGUIDs; 2207 if (ProfileIsCS && CallsitePrioritizedInline) 2208 Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs); 2209 else 2210 Changed |= inlineHotFunctions(F, InlinedGUIDs); 2211 2212 // Compute basic block weights. 2213 Changed |= computeBlockWeights(F); 2214 2215 if (Changed) { 2216 // Add an entry count to the function using the samples gathered at the 2217 // function entry. 2218 // Sets the GUIDs that are inlined in the profiled binary. This is used 2219 // for ThinLink to make correct liveness analysis, and also make the IR 2220 // match the profiled binary before annotation. 2221 F.setEntryCount( 2222 ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real), 2223 &InlinedGUIDs); 2224 2225 // Compute dominance and loop info needed for propagation. 2226 computeDominanceAndLoopInfo(F); 2227 2228 // Find equivalence classes. 2229 findEquivalenceClasses(F); 2230 2231 // Propagate weights to all edges. 2232 propagateWeights(F); 2233 } 2234 2235 // If coverage checking was requested, compute it now. 
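// For example (illustrative numbers): if 45 of the 50 body records in the
// profile were matched to the IR, record coverage is 90%, and the
// diagnostic below fires for any requested threshold above 90.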
2236 if (SampleProfileRecordCoverage) {
2237 unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
2238 unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
2239 unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2240 if (Coverage < SampleProfileRecordCoverage) {
2241 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2242 F.getSubprogram()->getFilename(), getFunctionLoc(F),
2243 Twine(Used) + " of " + Twine(Total) + " available profile records (" +
2244 Twine(Coverage) + "%) were applied",
2245 DS_Warning));
2246 }
2247 }
2248
2249 if (SampleProfileSampleCoverage) {
2250 uint64_t Used = CoverageTracker.getTotalUsedSamples();
2251 uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
2252 unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
2253 if (Coverage < SampleProfileSampleCoverage) {
2254 F.getContext().diagnose(DiagnosticInfoSampleProfile(
2255 F.getSubprogram()->getFilename(), getFunctionLoc(F),
2256 Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
2257 Twine(Coverage) + "%) were applied",
2258 DS_Warning));
2259 }
2260 }
2261 return Changed;
2262 }
2263
2264 char SampleProfileLoaderLegacyPass::ID = 0;
2265
2266 INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
2267 "Sample Profile loader", false, false)
2268 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2269 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
2270 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2271 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
2272 INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
2273 "Sample Profile loader", false, false)
2274
2275 std::vector<Function *>
2276 SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
2277 std::vector<Function *> FunctionOrderList;
2278 FunctionOrderList.reserve(M.size());
2279
2280 if (!ProfileTopDownLoad || CG == nullptr) {
2281 if (ProfileMergeInlinee) {
2282 // Disable ProfileMergeInlinee if the profile is not loaded in top-down order,
2283 // because the profile for a function may be used for the profile
2284 // annotation of its outline copy before the profile merging of its
2285 // non-inlined inline instances, which is not how
2286 // ProfileMergeInlinee is supposed to work.
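// For instance, if foo's profile carries inline instances of bar that were
// not inlined in this build, merging them back into bar's outline profile
// only helps when bar is processed after foo, which the top-down order
// guarantees; without that order the merge is disabled below.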
2287 ProfileMergeInlinee = false;
2288 }
2289
2290 for (Function &F : M)
2291 if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
2292 FunctionOrderList.push_back(&F);
2293 return FunctionOrderList;
2294 }
2295
2296 assert(&CG->getModule() == &M);
2297 scc_iterator<CallGraph *> CGI = scc_begin(CG);
2298 while (!CGI.isAtEnd()) {
2299 for (CallGraphNode *node : *CGI) {
2300 auto F = node->getFunction();
2301 if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
2302 FunctionOrderList.push_back(F);
2303 }
2304 ++CGI;
2305 }
2306
2307 std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
2308 return FunctionOrderList;
2309 }
2310
2311 bool SampleProfileLoader::doInitialization(Module &M,
2312 FunctionAnalysisManager *FAM) {
2313 auto &Ctx = M.getContext();
2314
2315 auto ReaderOrErr =
2316 SampleProfileReader::create(Filename, Ctx, RemappingFilename);
2317 if (std::error_code EC = ReaderOrErr.getError()) {
2318 std::string Msg = "Could not open profile: " + EC.message();
2319 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2320 return false;
2321 }
2322 Reader = std::move(ReaderOrErr.get());
2323 Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink);
2324 Reader->collectFuncsFrom(M);
2325 if (std::error_code EC = Reader->read()) {
2326 std::string Msg = "profile reading failed: " + EC.message();
2327 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2328 return false;
2329 }
2330
2331 PSL = Reader->getProfileSymbolList();
2332
2333 // When profile-sample-accurate is on, ignore the symbol list.
2334 ProfAccForSymsInList =
2335 ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
2336 if (ProfAccForSymsInList) {
2337 NamesInProfile.clear();
2338 if (auto NameTable = Reader->getNameTable())
2339 NamesInProfile.insert(NameTable->begin(), NameTable->end());
2340 CoverageTracker.setProfAccForSymsInList(true);
2341 }
2342
2343 if (FAM && !ProfileInlineReplayFile.empty()) {
2344 ExternalInlineAdvisor = std::make_unique<ReplayInlineAdvisor>(
2345 M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr, ProfileInlineReplayFile,
2346 /*EmitRemarks=*/false);
2347 if (!ExternalInlineAdvisor->areReplayRemarksLoaded())
2348 ExternalInlineAdvisor.reset();
2349 }
2350
2351 // Apply tweaks if a context-sensitive profile is available.
2352 if (Reader->profileIsCS()) {
2353 ProfileIsCS = true;
2354 FunctionSamples::ProfileIsCS = true;
2355
2356 // Enable the priority-based inliner and size inlining by default for CSSPGO.
2357 if (!ProfileSizeInline.getNumOccurrences())
2358 ProfileSizeInline = true;
2359 if (!CallsitePrioritizedInline.getNumOccurrences())
2360 CallsitePrioritizedInline = true;
2361
2362 // Tracker for profiles under different contexts.
2363 ContextTracker =
2364 std::make_unique<SampleContextTracker>(Reader->getProfiles());
2365 }
2366
2367 // Load pseudo probe descriptors for probe-based function samples.
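// Without the probe descriptors emitted by SampleProfileProbePass, a
// probe-based profile cannot be matched back to the IR, so initialization
// fails with a diagnostic below.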
2368 if (Reader->profileIsProbeBased()) {
2369 ProbeManager = std::make_unique<PseudoProbeManager>(M);
2370 if (!ProbeManager->moduleIsProbed(M)) {
2371 const char *Msg =
2372 "Pseudo-probe-based profile requires SampleProfileProbePass";
2373 Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
2374 return false;
2375 }
2376 }
2377
2378 return true;
2379 }
2380
2381 ModulePass *llvm::createSampleProfileLoaderPass() {
2382 return new SampleProfileLoaderLegacyPass();
2383 }
2384
2385 ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
2386 return new SampleProfileLoaderLegacyPass(Name);
2387 }
2388
2389 bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
2390 ProfileSummaryInfo *_PSI, CallGraph *CG) {
2391 GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
2392
2393 PSI = _PSI;
2394 if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
2395 M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
2396 ProfileSummary::PSK_Sample);
2397 PSI->refresh();
2398 }
2399 // Compute the total number of samples collected in this profile.
2400 for (const auto &I : Reader->getProfiles())
2401 TotalCollectedSamples += I.second.getTotalSamples();
2402
2403 auto Remapper = Reader->getRemapper();
2404 // Populate the symbol map.
2405 for (const auto &N_F : M.getValueSymbolTable()) {
2406 StringRef OrigName = N_F.getKey();
2407 Function *F = dyn_cast<Function>(N_F.getValue());
2408 if (F == nullptr)
2409 continue;
2410 SymbolMap[OrigName] = F;
2411 auto pos = OrigName.find('.');
2412 if (pos != StringRef::npos) {
2413 StringRef NewName = OrigName.substr(0, pos);
2414 auto r = SymbolMap.insert(std::make_pair(NewName, F));
2415 // Failing to insert means there is already an entry in SymbolMap;
2416 // thus there are multiple functions that are mapped to the same
2417 // stripped name. In this case of a name conflict, set the value
2418 // to nullptr to avoid confusion.
2419 if (!r.second)
2420 r.first->second = nullptr;
2421 OrigName = NewName;
2422 }
2423 // Insert the remapped names into SymbolMap.
2424 if (Remapper) {
2425 if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
2426 if (*MapName == OrigName)
2427 continue;
2428 SymbolMap.insert(std::make_pair(*MapName, F));
2429 }
2430 }
2431 }
2432
2433 bool retval = false;
2434 for (auto F : buildFunctionOrder(M, CG)) {
2435 assert(!F->isDeclaration());
2436 clearFunctionData();
2437 retval |= runOnFunction(*F, AM);
2438 }
2439
2440 // Account for cold calls not inlined....
2441 if (!ProfileIsCS)
2442 for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
2443 notInlinedCallInfo)
2444 updateProfileCallee(pair.first, pair.second.entryCount);
2445
2446 return retval;
2447 }
2448
2449 bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
2450 ACT = &getAnalysis<AssumptionCacheTracker>();
2451 TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
2452 TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
2453 ProfileSummaryInfo *PSI =
2454 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
2455 return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
2456 }
2457
2458 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
2459 DILocation2SampleMap.clear();
2460 // By default the entry count is initialized to -1, which will be treated
2461 // conservatively by getEntryCount as the same as unknown (None). This is
2462 // to avoid newly added code being treated as cold. If we have samples,
2463 // this will be overwritten in emitAnnotations.
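// For example, a function added after the profile was collected has no
// samples; leaving its entry count at the unknown sentinel (-1) keeps later
// passes from treating it as cold, whereas under profile-sample-accurate it
// is explicitly set to 0 below.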
2464 uint64_t initialEntryCount = -1;
2465
2466 ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
2467 if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
2468 // Initialize all the function entry counts to 0. It means all the
2469 // functions without a profile will be regarded as cold.
2470 initialEntryCount = 0;
2471 // profile-sample-accurate is a user assertion which has higher precedence
2472 // than the symbol list. When profile-sample-accurate is on, ignore the symbol list.
2473 ProfAccForSymsInList = false;
2474 }
2475 CoverageTracker.setProfAccForSymsInList(ProfAccForSymsInList);
2476
2477 // PSL -- the profile symbol list includes all the symbols in the sampled binary.
2478 // If ProfileAccurateForSymsInList is enabled, PSL is used to treat
2479 // old functions without samples as cold, without having to worry
2480 // about new and hot functions being mistakenly treated as cold.
2481 if (ProfAccForSymsInList) {
2482 // Initialize the entry count to 0 for functions in the list.
2483 if (PSL->contains(F.getName()))
2484 initialEntryCount = 0;
2485
2486 // Functions in the symbol list but without samples will be regarded as
2487 // cold. To minimize the potential negative performance impact this could
2488 // have, we want to be a little conservative here: if a function
2489 // shows up in the profile at all, whether as an outline function, an inline
2490 // instance or a call target, treat the function as not being cold. This handles
2491 // cases where most callsites of a function are inlined in the sampled
2492 // binary but not inlined in the current build (because of source code drift,
2493 // imprecise debug information, or callsites that are all cold individually
2494 // but not cold accumulatively...), so an outline function showing up as
2495 // cold in the sampled binary will actually not be cold after the current build.
2496 StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
2497 if (NamesInProfile.count(CanonName))
2498 initialEntryCount = -1;
2499 }
2500
2501 // Initialize the entry count when the function has no existing entry
2502 // count value.
2503 if (!F.getEntryCount().hasValue())
2504 F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
2505 std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
2506 if (AM) {
2507 auto &FAM =
2508 AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
2509 .getManager();
2510 ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2511 } else {
2512 OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2513 ORE = OwnedORE.get();
2514 }
2515
2516 if (ProfileIsCS)
2517 Samples = ContextTracker->getBaseSamplesFor(F);
2518 else
2519 Samples = Reader->getSamplesFor(F);
2520
2521 if (Samples && !Samples->empty())
2522 return emitAnnotations(F);
2523 return false;
2524 }
2525
2526 PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
2527 ModuleAnalysisManager &AM) {
2528 FunctionAnalysisManager &FAM =
2529 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2530
2531 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
2532 return FAM.getResult<AssumptionAnalysis>(F);
2533 };
2534 auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
2535 return FAM.getResult<TargetIRAnalysis>(F);
2536 };
2537 auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
2538 return FAM.getResult<TargetLibraryAnalysis>(F);
2539 };
2540
2541 SampleProfileLoader SampleLoader(
2542 ProfileFileName.empty() ?
SampleProfileFile : ProfileFileName, 2543 ProfileRemappingFileName.empty() ? SampleProfileRemappingFile 2544 : ProfileRemappingFileName, 2545 LTOPhase, GetAssumptionCache, GetTTI, GetTLI); 2546 2547 if (!SampleLoader.doInitialization(M, &FAM)) 2548 return PreservedAnalyses::all(); 2549 2550 ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M); 2551 CallGraph &CG = AM.getResult<CallGraphAnalysis>(M); 2552 if (!SampleLoader.runOnModule(M, &AM, PSI, &CG)) 2553 return PreservedAnalyses::all(); 2554 2555 return PreservedAnalyses::none(); 2556 } 2557