1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the SampleProfileLoader transformation. This pass 10 // reads a profile file generated by a sampling profiler (e.g. Linux Perf - 11 // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the 12 // profile information in the given profile. 13 // 14 // This pass generates branch weight annotations on the IR: 15 // 16 // - prof: Represents branch weights. This annotation is added to branches 17 // to indicate the weights of each edge coming out of the branch. 18 // The weight of each edge is the weight of the target block for 19 // that edge. The weight of a block B is computed as the maximum 20 // number of samples found in B. 
21 // 22 //===----------------------------------------------------------------------===// 23 24 #include "llvm/Transforms/IPO/SampleProfile.h" 25 #include "llvm/ADT/ArrayRef.h" 26 #include "llvm/ADT/DenseMap.h" 27 #include "llvm/ADT/DenseSet.h" 28 #include "llvm/ADT/None.h" 29 #include "llvm/ADT/PriorityQueue.h" 30 #include "llvm/ADT/SCCIterator.h" 31 #include "llvm/ADT/SmallPtrSet.h" 32 #include "llvm/ADT/SmallSet.h" 33 #include "llvm/ADT/SmallVector.h" 34 #include "llvm/ADT/Statistic.h" 35 #include "llvm/ADT/StringMap.h" 36 #include "llvm/ADT/StringRef.h" 37 #include "llvm/ADT/Twine.h" 38 #include "llvm/Analysis/AssumptionCache.h" 39 #include "llvm/Analysis/CallGraph.h" 40 #include "llvm/Analysis/CallGraphSCCPass.h" 41 #include "llvm/Analysis/InlineAdvisor.h" 42 #include "llvm/Analysis/InlineCost.h" 43 #include "llvm/Analysis/LoopInfo.h" 44 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 45 #include "llvm/Analysis/PostDominators.h" 46 #include "llvm/Analysis/ProfileSummaryInfo.h" 47 #include "llvm/Analysis/ReplayInlineAdvisor.h" 48 #include "llvm/Analysis/TargetLibraryInfo.h" 49 #include "llvm/Analysis/TargetTransformInfo.h" 50 #include "llvm/IR/BasicBlock.h" 51 #include "llvm/IR/CFG.h" 52 #include "llvm/IR/DebugInfoMetadata.h" 53 #include "llvm/IR/DebugLoc.h" 54 #include "llvm/IR/DiagnosticInfo.h" 55 #include "llvm/IR/Dominators.h" 56 #include "llvm/IR/Function.h" 57 #include "llvm/IR/GlobalValue.h" 58 #include "llvm/IR/InstrTypes.h" 59 #include "llvm/IR/Instruction.h" 60 #include "llvm/IR/Instructions.h" 61 #include "llvm/IR/IntrinsicInst.h" 62 #include "llvm/IR/LLVMContext.h" 63 #include "llvm/IR/MDBuilder.h" 64 #include "llvm/IR/Module.h" 65 #include "llvm/IR/PassManager.h" 66 #include "llvm/IR/ValueSymbolTable.h" 67 #include "llvm/InitializePasses.h" 68 #include "llvm/Pass.h" 69 #include "llvm/ProfileData/InstrProf.h" 70 #include "llvm/ProfileData/SampleProf.h" 71 #include "llvm/ProfileData/SampleProfReader.h" 72 #include "llvm/Support/Casting.h" 73 
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/SampleContextTracker.h"
#include "llvm/Transforms/IPO/SampleProfileProbe.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

using namespace llvm;
using namespace sampleprof;
using ProfileCount = Function::ProfileCount;
// Debug type strings used by LLVM_DEBUG and remark emission; the "-inline"
// variant scopes debug output of the context-sensitive inliner separately.
#define DEBUG_TYPE "sample-profile"
#define CSINLINE_DEBUG DEBUG_TYPE "-inline"

// Pass-wide counters, reported with -stats.
STATISTIC(NumCSInlined,
          "Number of functions inlined with context sensitive profile");
STATISTIC(NumCSNotInlined,
          "Number of functions not inlined with context sensitive profile");
STATISTIC(NumMismatchedProfile,
          "Number of functions with CFG mismatched profile");
STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile");
STATISTIC(NumDuplicatedInlinesite,
          "Number of inlined callsites with a partial distribution factor");

STATISTIC(NumCSInlinedHitMinLimit,
          "Number of functions with FDO inline stopped due to min size limit");
STATISTIC(NumCSInlinedHitMaxLimit,
          "Number of functions with FDO inline stopped due to max size limit");
STATISTIC(
    NumCSInlinedHitGrowthLimit,
    "Number of functions with FDO inline stopped due to growth size limit");

// Command line option to specify the file to read samples from. This is
// mainly used for debugging.
124 static cl::opt<std::string> SampleProfileFile( 125 "sample-profile-file", cl::init(""), cl::value_desc("filename"), 126 cl::desc("Profile file loaded by -sample-profile"), cl::Hidden); 127 128 // The named file contains a set of transformations that may have been applied 129 // to the symbol names between the program from which the sample data was 130 // collected and the current program's symbols. 131 static cl::opt<std::string> SampleProfileRemappingFile( 132 "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"), 133 cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden); 134 135 static cl::opt<unsigned> SampleProfileMaxPropagateIterations( 136 "sample-profile-max-propagate-iterations", cl::init(100), 137 cl::desc("Maximum number of iterations to go through when propagating " 138 "sample block/edge weights through the CFG.")); 139 140 static cl::opt<unsigned> SampleProfileRecordCoverage( 141 "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"), 142 cl::desc("Emit a warning if less than N% of records in the input profile " 143 "are matched to the IR.")); 144 145 static cl::opt<unsigned> SampleProfileSampleCoverage( 146 "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"), 147 cl::desc("Emit a warning if less than N% of samples in the input profile " 148 "are matched to the IR.")); 149 150 static cl::opt<bool> NoWarnSampleUnused( 151 "no-warn-sample-unused", cl::init(false), cl::Hidden, 152 cl::desc("Use this option to turn off/on warnings about function with " 153 "samples but without debug information to use those samples. ")); 154 155 static cl::opt<bool> ProfileSampleAccurate( 156 "profile-sample-accurate", cl::Hidden, cl::init(false), 157 cl::desc("If the sample profile is accurate, we will mark all un-sampled " 158 "callsite and function as having 0 samples. Otherwise, treat " 159 "un-sampled callsites and functions conservatively as unknown. 
")); 160 161 static cl::opt<bool> ProfileAccurateForSymsInList( 162 "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore, 163 cl::init(true), 164 cl::desc("For symbols in profile symbol list, regard their profiles to " 165 "be accurate. It may be overriden by profile-sample-accurate. ")); 166 167 static cl::opt<bool> ProfileMergeInlinee( 168 "sample-profile-merge-inlinee", cl::Hidden, cl::init(true), 169 cl::desc("Merge past inlinee's profile to outline version if sample " 170 "profile loader decided not to inline a call site. It will " 171 "only be enabled when top-down order of profile loading is " 172 "enabled. ")); 173 174 static cl::opt<bool> ProfileTopDownLoad( 175 "sample-profile-top-down-load", cl::Hidden, cl::init(true), 176 cl::desc("Do profile annotation and inlining for functions in top-down " 177 "order of call graph during sample profile loading. It only " 178 "works for new pass manager. ")); 179 180 static cl::opt<bool> ProfileSizeInline( 181 "sample-profile-inline-size", cl::Hidden, cl::init(false), 182 cl::desc("Inline cold call sites in profile loader if it's beneficial " 183 "for code size.")); 184 185 static cl::opt<int> ProfileInlineGrowthLimit( 186 "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12), 187 cl::desc("The size growth ratio limit for proirity-based sample profile " 188 "loader inlining.")); 189 190 static cl::opt<int> ProfileInlineLimitMin( 191 "sample-profile-inline-limit-min", cl::Hidden, cl::init(100), 192 cl::desc("The lower bound of size growth limit for " 193 "proirity-based sample profile loader inlining.")); 194 195 static cl::opt<int> ProfileInlineLimitMax( 196 "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000), 197 cl::desc("The upper bound of size growth limit for " 198 "proirity-based sample profile loader inlining.")); 199 200 static cl::opt<int> ProfileICPThreshold( 201 "sample-profile-icp-threshold", cl::Hidden, cl::init(5), 202 cl::desc( 203 "Relative hotness threshold for indirect " 
204 "call promotion in proirity-based sample profile loader inlining.")); 205 206 static cl::opt<int> SampleHotCallSiteThreshold( 207 "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000), 208 cl::desc("Hot callsite threshold for proirity-based sample profile loader " 209 "inlining.")); 210 211 static cl::opt<bool> CallsitePrioritizedInline( 212 "sample-profile-prioritized-inline", cl::Hidden, cl::ZeroOrMore, 213 cl::init(false), 214 cl::desc("Use call site prioritized inlining for sample profile loader." 215 "Currently only CSSPGO is supported.")); 216 217 static cl::opt<int> SampleColdCallSiteThreshold( 218 "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45), 219 cl::desc("Threshold for inlining cold callsites")); 220 221 static cl::opt<std::string> ProfileInlineReplayFile( 222 "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"), 223 cl::desc( 224 "Optimization remarks file containing inline remarks to be replayed " 225 "by inlining from sample profile loader."), 226 cl::Hidden); 227 228 namespace { 229 230 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>; 231 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>; 232 using Edge = std::pair<const BasicBlock *, const BasicBlock *>; 233 using EdgeWeightMap = DenseMap<Edge, uint64_t>; 234 using BlockEdgeMap = 235 DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>; 236 237 class SampleCoverageTracker { 238 public: 239 bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset, 240 uint32_t Discriminator, uint64_t Samples); 241 unsigned computeCoverage(unsigned Used, unsigned Total) const; 242 unsigned countUsedRecords(const FunctionSamples *FS, 243 ProfileSummaryInfo *PSI) const; 244 unsigned countBodyRecords(const FunctionSamples *FS, 245 ProfileSummaryInfo *PSI) const; 246 uint64_t getTotalUsedSamples() const { return TotalUsedSamples; } 247 uint64_t countBodySamples(const FunctionSamples *FS, 248 
ProfileSummaryInfo *PSI) const; 249 250 void clear() { 251 SampleCoverage.clear(); 252 TotalUsedSamples = 0; 253 } 254 inline void setProfAccForSymsInList(bool V) { ProfAccForSymsInList = V; } 255 256 private: 257 using BodySampleCoverageMap = std::map<LineLocation, unsigned>; 258 using FunctionSamplesCoverageMap = 259 DenseMap<const FunctionSamples *, BodySampleCoverageMap>; 260 261 /// Coverage map for sampling records. 262 /// 263 /// This map keeps a record of sampling records that have been matched to 264 /// an IR instruction. This is used to detect some form of staleness in 265 /// profiles (see flag -sample-profile-check-coverage). 266 /// 267 /// Each entry in the map corresponds to a FunctionSamples instance. This is 268 /// another map that counts how many times the sample record at the 269 /// given location has been used. 270 FunctionSamplesCoverageMap SampleCoverage; 271 272 /// Number of samples used from the profile. 273 /// 274 /// When a sampling record is used for the first time, the samples from 275 /// that record are added to this accumulator. Coverage is later computed 276 /// based on the total number of samples available in this function and 277 /// its callsites. 278 /// 279 /// Note that this accumulator tracks samples used from a single function 280 /// and all the inlined callsites. Strictly, we should have a map of counters 281 /// keyed by FunctionSamples pointers, but these stats are cleared after 282 /// every function, so we just need to keep a single counter. 283 uint64_t TotalUsedSamples = 0; 284 285 // For symbol in profile symbol list, whether to regard their profiles 286 // to be accurate. This is passed from the SampleLoader instance. 
  bool ProfAccForSymsInList = false;
};

/// RAII helper that, for MD5-based profiles, populates a GUID -> name map
/// for every function in the module (and every inlinee in the profile) on
/// construction, and tears the mapping down again on destruction.
class GUIDToFuncNameMapper {
public:
  GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
                       DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
      : CurrentReader(Reader), CurrentModule(M),
        CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
    // Mapping is only needed when the profile stores MD5 hashes instead of
    // plain function names.
    if (!CurrentReader.useMD5())
      return;

    for (const auto &F : CurrentModule) {
      StringRef OrigName = F.getName();
      CurrentGUIDToFuncNameMap.insert(
          {Function::getGUID(OrigName), OrigName});

      // Local to global var promotion used by optimization like thinlto
      // will rename the var and add suffix like ".llvm.xxx" to the
      // original local name. In sample profile, the suffixes of function
      // names are all stripped. Since it is possible that the mapper is
      // built in post-thin-link phase and var promotion has been done,
      // we need to add the substring of function name without the suffix
      // into the GUIDToFuncNameMap.
      StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
      if (CanonName != OrigName)
        CurrentGUIDToFuncNameMap.insert(
            {Function::getGUID(CanonName), CanonName});
    }

    // Update GUIDToFuncNameMap for each function including inlinees.
    SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
  }

  ~GUIDToFuncNameMapper() {
    if (!CurrentReader.useMD5())
      return;

    CurrentGUIDToFuncNameMap.clear();

    // Reset GUIDToFuncNameMap for each function as they're no
    // longer valid at this point.
    SetGUIDToFuncNameMapForAll(nullptr);
  }

private:
  // Walk every FunctionSamples in the reader (breadth-first through nested
  // callsite samples) and point its GUIDToFuncNameMap at \p Map (or null).
  void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
    std::queue<FunctionSamples *> FSToUpdate;
    for (auto &IFS : CurrentReader.getProfiles()) {
      FSToUpdate.push(&IFS.second);
    }

    while (!FSToUpdate.empty()) {
      FunctionSamples *FS = FSToUpdate.front();
      FSToUpdate.pop();
      FS->GUIDToFuncNameMap = Map;
      for (const auto &ICS : FS->getCallsiteSamples()) {
        const FunctionSamplesMap &FSMap = ICS.second;
        for (auto &IFS : FSMap) {
          // NOTE(review): this inner `FS` shadows the outer `FS` above;
          // behavior is correct but a distinct name would read better.
          FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
          FSToUpdate.push(&FS);
        }
      }
    }
  }

  SampleProfileReader &CurrentReader;
  Module &CurrentModule;
  DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
};

// Inline candidate used by iterative callsite prioritized inliner
struct InlineCandidate {
  CallBase *CallInstr;
  const FunctionSamples *CalleeSamples;
  // Prorated callsite count, which will be used to guide inlining. For example,
  // if a callsite is duplicated in LTO prelink, then in LTO postlink the two
  // copies will get their own distribution factors and their prorated counts
  // will be used to decide if they should be inlined independently.
  uint64_t CallsiteCount;
  // Call site distribution factor to prorate the profile samples for a
  // duplicated callsite. Default value is 1.0.
369 float CallsiteDistribution; 370 }; 371 372 // Inline candidate comparer using call site weight 373 struct CandidateComparer { 374 bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) { 375 if (LHS.CallsiteCount != RHS.CallsiteCount) 376 return LHS.CallsiteCount < RHS.CallsiteCount; 377 378 // Tie breaker using GUID so we have stable/deterministic inlining order 379 assert(LHS.CalleeSamples && RHS.CalleeSamples && 380 "Expect non-null FunctionSamples"); 381 return LHS.CalleeSamples->getGUID(LHS.CalleeSamples->getName()) < 382 RHS.CalleeSamples->getGUID(RHS.CalleeSamples->getName()); 383 } 384 }; 385 386 using CandidateQueue = 387 PriorityQueue<InlineCandidate, std::vector<InlineCandidate>, 388 CandidateComparer>; 389 390 class SampleProfileLoaderBaseImpl { 391 public: 392 SampleProfileLoaderBaseImpl(std::string Name) : Filename(Name) {} 393 void dump() { Reader->dump(); } 394 395 protected: 396 friend class SampleCoverageTracker; 397 398 ~SampleProfileLoaderBaseImpl() = default; 399 400 unsigned getFunctionLoc(Function &F); 401 virtual ErrorOr<uint64_t> getInstWeight(const Instruction &Inst); 402 ErrorOr<uint64_t> getInstWeightImpl(const Instruction &Inst); 403 ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB); 404 mutable DenseMap<const DILocation *, const FunctionSamples *> 405 DILocation2SampleMap; 406 virtual const FunctionSamples * 407 findFunctionSamples(const Instruction &I) const; 408 void printEdgeWeight(raw_ostream &OS, Edge E); 409 void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const; 410 void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB); 411 bool computeBlockWeights(Function &F); 412 void findEquivalenceClasses(Function &F); 413 template <bool IsPostDom> 414 void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants, 415 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree); 416 417 void propagateWeights(Function &F); 418 uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge 
*UnknownEdge); 419 void buildEdges(Function &F); 420 bool propagateThroughEdges(Function &F, bool UpdateBlockCount); 421 void clearFunctionData(); 422 void computeDominanceAndLoopInfo(Function &F); 423 bool 424 computeAndPropagateWeights(Function &F, 425 const DenseSet<GlobalValue::GUID> &InlinedGUIDs); 426 void emitCoverageRemarks(Function &F); 427 428 /// Map basic blocks to their computed weights. 429 /// 430 /// The weight of a basic block is defined to be the maximum 431 /// of all the instruction weights in that block. 432 BlockWeightMap BlockWeights; 433 434 /// Map edges to their computed weights. 435 /// 436 /// Edge weights are computed by propagating basic block weights in 437 /// SampleProfile::propagateWeights. 438 EdgeWeightMap EdgeWeights; 439 440 /// Set of visited blocks during propagation. 441 SmallPtrSet<const BasicBlock *, 32> VisitedBlocks; 442 443 /// Set of visited edges during propagation. 444 SmallSet<Edge, 32> VisitedEdges; 445 446 /// Equivalence classes for block weights. 447 /// 448 /// Two blocks BB1 and BB2 are in the same equivalence class if they 449 /// dominate and post-dominate each other, and they are in the same loop 450 /// nest. When this happens, the two blocks are guaranteed to execute 451 /// the same number of times. 452 EquivalenceClassMap EquivalenceClass; 453 454 /// Dominance, post-dominance and loop information. 455 std::unique_ptr<DominatorTree> DT; 456 std::unique_ptr<PostDominatorTree> PDT; 457 std::unique_ptr<LoopInfo> LI; 458 459 /// Predecessors for each basic block in the CFG. 460 BlockEdgeMap Predecessors; 461 462 /// Successors for each basic block in the CFG. 463 BlockEdgeMap Successors; 464 465 SampleCoverageTracker CoverageTracker; 466 467 /// Profile reader object. 468 std::unique_ptr<SampleProfileReader> Reader; 469 470 /// Samples collected for the body of this function. 471 FunctionSamples *Samples = nullptr; 472 473 /// Name of the profile file to load. 
  std::string Filename;

  /// Profile Summary Info computed from sample profile.
  ProfileSummaryInfo *PSI = nullptr;

  /// Optimization Remark Emitter used to emit diagnostic remarks.
  OptimizationRemarkEmitter *ORE = nullptr;
};

/// Sample profile pass.
///
/// This pass reads profile data from the file specified by
/// -sample-profile-file and annotates every affected function with the
/// profile information found in that file.
class SampleProfileLoader final : public SampleProfileLoaderBaseImpl {
public:
  SampleProfileLoader(
      StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase,
      std::function<AssumptionCache &(Function &)> GetAssumptionCache,
      std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
      std::function<const TargetLibraryInfo &(Function &)> GetTLI)
      : SampleProfileLoaderBaseImpl(std::string(Name)),
        GetAC(std::move(GetAssumptionCache)),
        GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
        RemappingFilename(std::string(RemapName)), LTOPhase(LTOPhase) {}

  bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
  bool runOnModule(Module &M, ModuleAnalysisManager *AM,
                   ProfileSummaryInfo *_PSI, CallGraph *CG);

protected:
  bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
  bool emitAnnotations(Function &F);
  ErrorOr<uint64_t> getInstWeight(const Instruction &I) override;
  ErrorOr<uint64_t> getProbeWeight(const Instruction &I);
  const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
  const FunctionSamples *
  findFunctionSamples(const Instruction &I) const override;
  std::vector<const FunctionSamples *>
  findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
  // Attempt to promote indirect call and also inline the promoted call
  bool tryPromoteAndInlineCandidate(
      Function &F, InlineCandidate &Candidate, uint64_t SumOrigin,
      uint64_t &Sum, DenseSet<Instruction *> &PromotedInsns,
      SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
  bool inlineHotFunctions(Function &F,
                          DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  InlineCost shouldInlineCandidate(InlineCandidate &Candidate);
  bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB);
  bool
  tryInlineCandidate(InlineCandidate &Candidate,
                     SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
  bool
  inlineHotFunctionsWithPriority(Function &F,
                                 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  // Inline cold/small functions in addition to hot ones
  bool shouldInlineColdCallee(CallBase &CallInst);
  void emitOptimizationRemarksForInlineCandidates(
      const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
      bool Hot);
  std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
  void generateMDProfMetadata(Function &F);

  /// Map from function name to Function *. Used to find the function from
  /// the function name. If the function name contains suffix, additional
  /// entry is added to map from the stripped name to the function if there
  /// is one-to-one mapping.
  StringMap<Function *> SymbolMap;

  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;

  /// Profile tracker for different context.
  std::unique_ptr<SampleContextTracker> ContextTracker;

  /// Name of the profile remapping file to load.
  std::string RemappingFilename;

  /// Flag indicating whether the profile input loaded successfully.
  bool ProfileIsValid = false;

  /// Flag indicating whether input profile is context-sensitive
  bool ProfileIsCS = false;

  /// Flag indicating which LTO/ThinLTO phase the pass is invoked in.
  ///
  /// We need to know the LTO phase because for example in ThinLTOPrelink
  /// phase, in annotation, we should not promote indirect calls. Instead,
  /// we will mark GUIDs that need to be annotated to the function.
  ThinOrFullLTOPhase LTOPhase;

  /// Profile Symbol list tells whether a function name appears in the binary
  /// used to generate the current profile.
  std::unique_ptr<ProfileSymbolList> PSL;

  /// Total number of samples collected in this profile.
  ///
  /// This is the sum of all the samples collected in all the functions executed
  /// at runtime.
  uint64_t TotalCollectedSamples = 0;

  // Information recorded when we declined to inline a call site
  // because we have determined it is too cold is accumulated for
  // each callee function. Initially this is just the entry count.
  struct NotInlinedProfileInfo {
    uint64_t entryCount;
  };
  DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;

  // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  // all the function symbols defined or declared in current module.
  DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;

  // All the Names used in FunctionSamples including outline function
  // names, inline instance names and call target names.
  StringSet<> NamesInProfile;

  // For symbol in profile symbol list, whether to regard their profiles
  // to be accurate. It is mainly decided by existence of profile symbol
  // list and -profile-accurate-for-symsinlist flag, but it can be
  // overridden by -profile-sample-accurate or profile-sample-accurate
  // attribute.
  bool ProfAccForSymsInList;

  // External inline advisor used to replay inline decision from remarks.
  std::unique_ptr<ReplayInlineAdvisor> ExternalInlineAdvisor;

  // A pseudo probe helper to correlate the imported sample counts.
  std::unique_ptr<PseudoProbeManager> ProbeManager;
};

/// Legacy pass manager wrapper around SampleProfileLoader.
class SampleProfileLoaderLegacyPass : public ModulePass {
public:
  // Class identification, replacement for typeinfo
  static char ID;

  SampleProfileLoaderLegacyPass(
      StringRef Name = SampleProfileFile,
      ThinOrFullLTOPhase LTOPhase = ThinOrFullLTOPhase::None)
      : ModulePass(ID), SampleLoader(
                            Name, SampleProfileRemappingFile, LTOPhase,
                            // The analysis-getter lambdas capture `this` so
                            // they resolve the wrapper passes lazily at the
                            // time runOnModule populates ACT/TTIWP/TLIWP.
                            [&](Function &F) -> AssumptionCache & {
                              return ACT->getAssumptionCache(F);
                            },
                            [&](Function &F) -> TargetTransformInfo & {
                              return TTIWP->getTTI(F);
                            },
                            [&](Function &F) -> TargetLibraryInfo & {
                              return TLIWP->getTLI(F);
                            }) {
    initializeSampleProfileLoaderLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void dump() { SampleLoader.dump(); }

  bool doInitialization(Module &M) override {
    return SampleLoader.doInitialization(M);
  }

  StringRef getPassName() const override { return "Sample profile pass"; }
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }

private:
  SampleProfileLoader SampleLoader;
  AssumptionCacheTracker *ACT = nullptr;
  TargetTransformInfoWrapperPass *TTIWP = nullptr;
  TargetLibraryInfoWrapperPass *TLIWP = nullptr;
};

} // end anonymous namespace

/// Return true if the given callsite is hot wrt to hot cutoff threshold.
///
/// Functions that were inlined in the original binary will be represented
/// in the inline stack in the sample profile. If the profile shows that
/// the original inline decision was "good" (i.e., the callsite is executed
/// frequently), then we will recreate the inline decision and apply the
/// profile from the inlined callsite.
///
/// To decide whether an inlined callsite is hot, we compare the callsite
/// sample count with the hot cutoff computed by ProfileSummaryInfo, it is
/// regarded as hot if the count is above the cutoff value.
///
/// When ProfileAccurateForSymsInList is enabled and profile symbol list
/// is present, functions in the profile symbol list but without profile will
/// be regarded as cold and much less inlining will happen in CGSCC inlining
/// pass, so we tend to lower the hot criteria here to allow more early
/// inlining to happen for warm callsites and it is helpful for performance.
static bool callsiteIsHot(const FunctionSamples *CallsiteFS,
                          ProfileSummaryInfo *PSI, bool ProfAccForSymsInList) {
  if (!CallsiteFS)
    return false; // The callsite was not inlined in the original binary.

  assert(PSI && "PSI is expected to be non null");
  uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
  if (ProfAccForSymsInList)
    // Relaxed criterion: anything not cold counts as hot.
    return !PSI->isColdCount(CallsiteTotalSamples);
  else
    return PSI->isHotCount(CallsiteTotalSamples);
}

/// Mark as used the sample record for the given function samples at
/// (LineOffset, Discriminator).
///
/// \returns true if this is the first time we mark the given record.
bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
                                            uint32_t LineOffset,
                                            uint32_t Discriminator,
                                            uint64_t Samples) {
  LineLocation Loc(LineOffset, Discriminator);
  unsigned &Count = SampleCoverage[FS][Loc];
  bool FirstTime = (++Count == 1);
  if (FirstTime)
    TotalUsedSamples += Samples;
  return FirstTime;
}

/// Return the number of sample records that were applied from this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  auto I = SampleCoverage.find(FS);

  // The size of the coverage map for FS represents the number of records
  // that were marked used at least once.
  unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0;

  // If there are inlined callsites in this function, count the samples found
  // in the respective bodies. However, do not bother counting callees with 0
  // total samples, these are callees that were never invoked at runtime.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
        Count += countUsedRecords(CalleeSamples, PSI);
    }

  return Count;
}

/// Return the number of sample records in the body of this profile.
///
/// This count does not include records from cold inlined callsites.
unsigned
SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  unsigned Count = FS->getBodySamples().size();

  // Only count records in hot callsites.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
        Count += countBodyRecords(CalleeSamples, PSI);
    }

  return Count;
}

/// Return the number of samples collected in the body of this profile.
///
/// This count does not include samples from cold inlined callsites.
uint64_t
SampleCoverageTracker::countBodySamples(const FunctionSamples *FS,
                                        ProfileSummaryInfo *PSI) const {
  uint64_t Total = 0;
  for (const auto &I : FS->getBodySamples())
    Total += I.second.getSamples();

  // Only count samples in hot callsites.
  for (const auto &I : FS->getCallsiteSamples())
    for (const auto &J : I.second) {
      const FunctionSamples *CalleeSamples = &J.second;
      if (callsiteIsHot(CalleeSamples, PSI, ProfAccForSymsInList))
        Total += countBodySamples(CalleeSamples, PSI);
    }

  return Total;
}

/// Return the fraction of sample records used in this profile.
///
/// The returned value is an unsigned integer in the range 0-100 indicating
/// the percentage of sample records that were used while applying this
/// profile to the associated function.
unsigned SampleCoverageTracker::computeCoverage(unsigned Used,
                                                unsigned Total) const {
  assert(Used <= Total &&
         "number of used records cannot exceed the total number of records");
  // An empty profile (Total == 0) is trivially fully covered.
  return Total > 0 ? Used * 100 / Total : 100;
}

/// Clear all the per-function data used to load samples and propagate weights.
void SampleProfileLoaderBaseImpl::clearFunctionData() {
  BlockWeights.clear();
  EdgeWeights.clear();
  VisitedBlocks.clear();
  VisitedEdges.clear();
  EquivalenceClass.clear();
  DT = nullptr;
  PDT = nullptr;
  LI = nullptr;
  Predecessors.clear();
  Successors.clear();
  CoverageTracker.clear();
}

#ifndef NDEBUG
/// Print the weight of edge \p E on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param E Edge to print.
void SampleProfileLoaderBaseImpl::printEdgeWeight(raw_ostream &OS, Edge E) {
  OS << "weight[" << E.first->getName() << "->" << E.second->getName()
     << "]: " << EdgeWeights[E] << "\n";
}

/// Print the equivalence class of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoaderBaseImpl::printBlockEquivalence(raw_ostream &OS,
                                                        const BasicBlock *BB) {
  const BasicBlock *Equiv = EquivalenceClass[BB];
  OS << "equivalence[" << BB->getName()
     << "]: " << ((Equiv) ? EquivalenceClass[BB]->getName() : "NONE") << "\n";
}

/// Print the weight of block \p BB on stream \p OS.
///
/// \param OS Stream to emit the output to.
/// \param BB Block to print.
void SampleProfileLoaderBaseImpl::printBlockWeight(raw_ostream &OS,
                                                   const BasicBlock *BB) const {
  const auto &I = BlockWeights.find(BB);
  // A block that never got a weight assigned is printed as weight 0.
  uint64_t W = (I == BlockWeights.end() ? 0 : I->second);
  OS << "weight[" << BB->getName() << "]: " << W << "\n";
}
#endif

/// Get the weight for an instruction.
///
/// The "weight" of an instruction \p Inst is the number of samples
/// collected on that instruction at runtime. To retrieve it, we
/// need to compute the line number of \p Inst relative to the start of its
/// function. We use HeaderLineno to compute the offset. We then
/// look up the samples collected for \p Inst using BodySamples.
///
/// \param Inst Instruction to query.
///
/// \returns the weight of \p Inst.
ErrorOr<uint64_t>
SampleProfileLoaderBaseImpl::getInstWeight(const Instruction &Inst) {
  return getInstWeightImpl(Inst);
}

/// Loader override of getInstWeight that filters out instructions whose
/// debug locations would produce misleading weights and dispatches to the
/// probe-based lookup when the profile uses pseudo probes.
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
  // Pseudo-probe based profiles key samples by probe id, not line offset.
  if (FunctionSamples::ProfileIsProbeBased)
    return getProbeWeight(Inst);

  const DebugLoc &DLoc = Inst.getDebugLoc();
  if (!DLoc)
    return std::error_code();

  // Ignore all intrinsics, phinodes and branch instructions.
  // Branch and phinodes instruction usually contains debug info from sources
  // outside of the residing basic block, thus we ignore them during annotation.
  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
    return std::error_code();

  // If a direct call/invoke instruction is inlined in profile
  // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
  // it means that the inlined callsite has no sample, thus the call
  // instruction should have 0 count.
  if (!ProfileIsCS)
    if (const auto *CB = dyn_cast<CallBase>(&Inst))
      if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
        return 0;

  return getInstWeightImpl(Inst);
}

/// Shared line-offset/discriminator based weight lookup. Returns an error
/// code when the instruction has no debug location or no enclosing profile.
ErrorOr<uint64_t>
SampleProfileLoaderBaseImpl::getInstWeightImpl(const Instruction &Inst) {
  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  const DebugLoc &DLoc = Inst.getDebugLoc();
  if (!DLoc)
    return std::error_code();

  const DILocation *DIL = DLoc;
  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator = DIL->getBaseDiscriminator();
  ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator);
  if (R) {
    // Emit the remark only the first time samples at this location are
    // marked used, so each source location is reported once.
    bool FirstMark =
        CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get());
    if (FirstMark) {
      ORE->emit([&]() {
        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", *R);
        Remark << " samples from profile (offset: ";
        Remark << ore::NV("LineOffset", LineOffset);
        if (Discriminator) {
          Remark << ".";
          Remark << ore::NV("Discriminator", Discriminator);
        }
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG(dbgs() << "    " << DLoc.getLine() << "."
                      << DIL->getBaseDiscriminator() << ":" << Inst
                      << " (line offset: " << LineOffset << "."
                      << DIL->getBaseDiscriminator() << " - weight: " << R.get()
                      << ")\n");
  }
  return R;
}

/// Probe-based weight lookup: samples are keyed by the pseudo-probe id and
/// scaled by the probe's distribution factor.
ErrorOr<uint64_t> SampleProfileLoader::getProbeWeight(const Instruction &Inst) {
  assert(FunctionSamples::ProfileIsProbeBased &&
         "Profile is not pseudo probe based");
  Optional<PseudoProbe> Probe = extractProbe(Inst);
  if (!Probe)
    return std::error_code();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (!FS)
    return std::error_code();

  // If a direct call/invoke instruction is inlined in profile
  // (findCalleeFunctionSamples returns non-empty result), but not inlined here,
  // it means that the inlined callsite has no sample, thus the call
  // instruction should have 0 count.
  if (const auto *CB = dyn_cast<CallBase>(&Inst))
    if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
      return 0;

  const ErrorOr<uint64_t> &R = FS->findSamplesAt(Probe->Id, 0);
  if (R) {
    // Scale raw samples by the probe's distribution factor to account for
    // callsite duplication (see tryInlineCandidate's proration logic).
    uint64_t Samples = R.get() * Probe->Factor;
    bool FirstMark = CoverageTracker.markSamplesUsed(FS, Probe->Id, 0, Samples);
    if (FirstMark) {
      ORE->emit([&]() {
        OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst);
        Remark << "Applied " << ore::NV("NumSamples", Samples);
        Remark << " samples from profile (ProbeId=";
        Remark << ore::NV("ProbeId", Probe->Id);
        Remark << ", Factor=";
        Remark << ore::NV("Factor", Probe->Factor);
        Remark << ", OriginalSamples=";
        Remark << ore::NV("OriginalSamples", R.get());
        Remark << ")";
        return Remark;
      });
    }
    LLVM_DEBUG(dbgs() << "    " << Probe->Id << ":" << Inst
                      << " - weight: " << R.get() << " - factor: "
                      << format("%0.2f", Probe->Factor) << ")\n");
    return Samples;
  }
  return R;
}

/// Compute the weight of a basic block.
///
/// The weight of basic block \p BB is the maximum weight of all the
/// instructions in BB.
///
/// \param BB The basic block to query.
958 /// 959 /// \returns the weight for \p BB. 960 ErrorOr<uint64_t> 961 SampleProfileLoaderBaseImpl::getBlockWeight(const BasicBlock *BB) { 962 uint64_t Max = 0; 963 bool HasWeight = false; 964 for (auto &I : BB->getInstList()) { 965 const ErrorOr<uint64_t> &R = getInstWeight(I); 966 if (R) { 967 Max = std::max(Max, R.get()); 968 HasWeight = true; 969 } 970 } 971 return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code(); 972 } 973 974 /// Compute and store the weights of every basic block. 975 /// 976 /// This populates the BlockWeights map by computing 977 /// the weights of every basic block in the CFG. 978 /// 979 /// \param F The function to query. 980 bool SampleProfileLoaderBaseImpl::computeBlockWeights(Function &F) { 981 bool Changed = false; 982 LLVM_DEBUG(dbgs() << "Block weights\n"); 983 for (const auto &BB : F) { 984 ErrorOr<uint64_t> Weight = getBlockWeight(&BB); 985 if (Weight) { 986 BlockWeights[&BB] = Weight.get(); 987 VisitedBlocks.insert(&BB); 988 Changed = true; 989 } 990 LLVM_DEBUG(printBlockWeight(dbgs(), &BB)); 991 } 992 993 return Changed; 994 } 995 996 /// Get the FunctionSamples for a call instruction. 997 /// 998 /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined 999 /// instance in which that call instruction is calling to. It contains 1000 /// all samples that resides in the inlined instance. We first find the 1001 /// inlined instance in which the call instruction is from, then we 1002 /// traverse its children to find the callsite with the matching 1003 /// location. 1004 /// 1005 /// \param Inst Call/Invoke instruction to query. 1006 /// 1007 /// \returns The FunctionSamples pointer to the inlined instance. 
const FunctionSamples *
SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL) {
    return nullptr;
  }

  // For indirect calls getCalledFunction() is null, so CalleeName stays
  // empty and the lookup below is done by location only.
  StringRef CalleeName;
  if (Function *Callee = Inst.getCalledFunction())
    CalleeName = FunctionSamples::getCanonicalFnName(*Callee);

  if (ProfileIsCS)
    return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName);

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return nullptr;

  return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL),
                                   CalleeName, Reader->getRemapper());
}

/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted by the total number of samples. Stores
/// the total call count of the indirect call in \p Sum. \p Sum is only
/// meaningful when the returned vector is non-empty.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
    const Instruction &Inst, uint64_t &Sum) const {
  const DILocation *DIL = Inst.getDebugLoc();
  std::vector<const FunctionSamples *> R;

  if (!DIL) {
    return R;
  }

  // Sort hottest-first; break entry-sample ties by GUID so the ordering is
  // deterministic across runs.
  auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
    assert(L && R && "Expect non-null FunctionSamples");
    if (L->getEntrySamples() != R->getEntrySamples())
      return L->getEntrySamples() > R->getEntrySamples();
    return FunctionSamples::getGUID(L->getName()) <
           FunctionSamples::getGUID(R->getName());
  };

  if (ProfileIsCS) {
    auto CalleeSamples =
        ContextTracker->getIndirectCalleeContextSamplesFor(DIL);
    if (CalleeSamples.empty())
      return R;

    // For CSSPGO, we only use target context profile's entry count
    // as that already includes both inlined callee and non-inlined ones.
    Sum = 0;
    for (const auto *const FS : CalleeSamples) {
      Sum += FS->getEntrySamples();
      R.push_back(FS);
    }
    llvm::sort(R, FSCompare);
    return R;
  }

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return R;

  auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
  auto T = FS->findCallTargetMapAt(CallSite);
  Sum = 0;
  // For non-CS profiles, Sum accumulates both the raw call-target counts
  // recorded at this location and the entry samples of any inlined target
  // profiles added below.
  if (T)
    for (const auto &T_C : T.get())
      Sum += T_C.second;
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
    if (M->empty())
      return R;
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getEntrySamples();
      R.push_back(&NameFS.second);
    }
    llvm::sort(R, FSCompare);
  }
  return R;
}

/// Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// in which that instruction is coming from. We traverse the inline stack
/// of that instruction, and match it with the tree nodes in the profile.
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *SampleProfileLoaderBaseImpl::findFunctionSamples(
    const Instruction &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL)
    return Samples;

  // Cache lookups per DILocation; try_emplace's .second tells us whether
  // this is the first query for DIL.
  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
  if (it.second) {
    it.first->second = Samples->findFunctionSamples(DIL, Reader->getRemapper());
  }
  return it.first->second;
}

/// Loader override: additionally requires a pseudo probe on the instruction
/// for probe-based profiles, and routes CS profiles through the context
/// tracker. Results are cached per DILocation.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
  if (FunctionSamples::ProfileIsProbeBased) {
    Optional<PseudoProbe> Probe = extractProbe(Inst);
    if (!Probe)
      return nullptr;
  }

  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL)
    return Samples;

  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
  if (it.second) {
    if (ProfileIsCS)
      it.first->second = ContextTracker->getContextSamplesFor(DIL);
    else
      it.first->second =
          Samples->findFunctionSamples(DIL, Reader->getRemapper());
  }
  return it.first->second;
}

/// Attempt to promote indirect call and also inline the promoted call.
///
/// \param F Caller function.
/// \param Candidate ICP and inline candidate.
/// \param SumOrigin Original sum of target counts for the indirect call
///                  before any promotion at this site.
/// \param Sum Sum of target counts for indirect call.
/// \param PromotedInsns Map to keep track of indirect call already processed.
/// \param InlinedCallSite Output vector for new call sites exposed after
/// inlining.
bool SampleProfileLoader::tryPromoteAndInlineCandidate(
    Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, uint64_t &Sum,
    DenseSet<Instruction *> &PromotedInsns,
    SmallVector<CallBase *, 8> *InlinedCallSite) {
  const char *Reason = "Callee function not available";
  // R->getValue() != &F is to prevent promoting a recursive call.
  // If it is a recursive call, we do not inline it as it could bloat
  // the code exponentially. There are ways to better handle this, e.g.
  // clone the caller first, and inline the cloned caller if it is
  // recursive. As llvm does not inline recursive calls, we will
  // simply ignore it instead of handling it explicitly.
  auto R = SymbolMap.find(Candidate.CalleeSamples->getFuncName());
  if (R != SymbolMap.end() && R->getValue() &&
      !R->getValue()->isDeclaration() && R->getValue()->getSubprogram() &&
      R->getValue()->hasFnAttribute("use-sample-profile") &&
      R->getValue() != &F &&
      isLegalToPromote(*Candidate.CallInstr, R->getValue(), &Reason)) {
    // NOTE(review): DI is the address of promoteIndirectCall's result and
    // can never be null, so the following check is vacuous -- verify intent.
    auto *DI =
        &pgo::promoteIndirectCall(*Candidate.CallInstr, R->getValue(),
                                  Candidate.CallsiteCount, Sum, false, ORE);
    if (DI) {
      Sum -= Candidate.CallsiteCount;
      // Prorate the indirect callsite distribution.
      // Do not update the promoted direct callsite distribution at this
      // point since the original distribution combined with the callee
      // profile will be used to prorate callsites from the callee if
      // inlined. Once not inlined, the direct callsite distribution should
      // be prorated so that it will reflect the real callsite counts.
      setProbeDistributionFactor(*Candidate.CallInstr,
                                 Candidate.CallsiteDistribution * Sum /
                                     SumOrigin);
      PromotedInsns.insert(Candidate.CallInstr);
      Candidate.CallInstr = DI;
      if (isa<CallInst>(DI) || isa<InvokeInst>(DI)) {
        bool Inlined = tryInlineCandidate(Candidate, InlinedCallSite);
        if (!Inlined) {
          // Prorate the direct callsite distribution so that it reflects real
          // callsite counts.
          setProbeDistributionFactor(*DI, Candidate.CallsiteDistribution *
                                              Candidate.CallsiteCount /
                                              SumOrigin);
        }
        return Inlined;
      }
    }
  } else {
    LLVM_DEBUG(dbgs() << "\nFailed to promote indirect call to "
                      << Candidate.CalleeSamples->getFuncName() << " because "
                      << Reason << "\n");
  }
  return false;
}

/// Return true if a cold callsite should still be inlined for size. Only
/// active when ProfileSizeInline is enabled; uses the regular inline cost
/// against SampleColdCallSiteThreshold.
bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
  if (!ProfileSizeInline)
    return false;

  Function *Callee = CallInst.getCalledFunction();
  if (Callee == nullptr)
    return false;

  InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
                                  GetAC, GetTLI);

  if (Cost.isNever())
    return false;

  if (Cost.isAlways())
    return true;

  return Cost.getCost() <= SampleColdCallSiteThreshold;
}

/// Emit an "InlineAttempt" remark for each direct-call candidate. \p Hot
/// selects the wording (hotness vs. size) of the message.
void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
    const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
    bool Hot) {
  for (auto I : Candidates) {
    Function *CalledFunction = I->getCalledFunction();
    if (CalledFunction) {
      ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
                                           I->getDebugLoc(), I->getParent())
                << "previous inlining reattempted for "
                << (Hot ? "hotness: '" : "size: '")
                << ore::NV("Callee", CalledFunction) << "' into '"
                << ore::NV("Caller", &F) << "'");
    }
  }
}

/// Iteratively inline hot callsites of a function.
///
/// Iteratively traverse all callsites of the function \p F, and find if
/// the corresponding inlined instance exists and is hot in profile. If
/// it is hot enough, inline the callsites and adds new callsites of the
/// callee into the caller. If the call is an indirect call, first promote
/// it to direct call. Each indirect call is limited with a single target.
///
/// \param F function to perform iterative inlining.
/// \param InlinedGUIDs a set to be updated to include all GUIDs that are
/// inlined in the profiled binary.
///
/// \returns True if there is any inline happened.
bool SampleProfileLoader::inlineHotFunctions(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  DenseSet<Instruction *> PromotedInsns;

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  DenseMap<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
  bool Changed = false;
  bool LocalChanged = true;
  // Iterate to a fixed point: inlining can expose new callsites.
  while (LocalChanged) {
    LocalChanged = false;
    SmallVector<CallBase *, 10> CIS;
    for (auto &BB : F) {
      bool Hot = false;
      SmallVector<CallBase *, 10> AllCandidates;
      SmallVector<CallBase *, 10> ColdCandidates;
      for (auto &I : BB.getInstList()) {
        const FunctionSamples *FS = nullptr;
        if (auto *CB = dyn_cast<CallBase>(&I)) {
          if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) {
            assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
                   "GUIDToFuncNameMap has to be populated");
            AllCandidates.push_back(CB);
            if (FS->getEntrySamples() > 0 || ProfileIsCS)
              LocalNotInlinedCallSites.try_emplace(CB, FS);
            if (callsiteIsHot(FS, PSI, ProfAccForSymsInList))
              Hot = true;
            else if (shouldInlineColdCallee(*CB))
              ColdCandidates.push_back(CB);
          }
        }
      }
      // A block with any hot callsite (or an external advisor) re-attempts
      // all of its candidates; otherwise only size-driven cold candidates.
      if (Hot || ExternalInlineAdvisor) {
        CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
        emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
      } else {
        CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
        emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
      }
    }
    for (CallBase *I : CIS) {
      Function *CalledFunction = I->getCalledFunction();
      InlineCandidate Candidate = {
          I,
          LocalNotInlinedCallSites.count(I) ? LocalNotInlinedCallSites[I]
                                            : nullptr,
          0 /* dummy count */, 1.0 /* dummy distribution factor */};
      // Do not inline recursive calls.
      if (CalledFunction == &F)
        continue;
      if (I->isIndirectCall()) {
        if (PromotedInsns.count(I))
          continue;
        uint64_t Sum;
        for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
          uint64_t SumOrigin = Sum;
          if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
            FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                     PSI->getOrCompHotCountThreshold());
            continue;
          }
          if (!callsiteIsHot(FS, PSI, ProfAccForSymsInList))
            continue;

          Candidate = {I, FS, FS->getEntrySamples(), 1.0};
          if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
                                           PromotedInsns)) {
            LocalNotInlinedCallSites.erase(I);
            LocalChanged = true;
          }
        }
      } else if (CalledFunction && CalledFunction->getSubprogram() &&
                 !CalledFunction->isDeclaration()) {
        if (tryInlineCandidate(Candidate)) {
          LocalNotInlinedCallSites.erase(I);
          LocalChanged = true;
        }
      } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
        // NOTE(review): findCalleeFunctionSamples() can return null (see its
        // early returns); this unguarded dereference assumes samples exist
        // for every such callsite -- verify.
        findCalleeFunctionSamples(*I)->findInlinedFunctions(
            InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
      }
    }
    Changed |= LocalChanged;
  }

  // For CS profile, profile for not inlined context will be merged when
  // base profile is being retrieved.
  if (ProfileIsCS)
    return Changed;

  // Accumulate not inlined callsite information into notInlinedSamples
  for (const auto &Pair : LocalNotInlinedCallSites) {
    CallBase *I = Pair.getFirst();
    Function *Callee = I->getCalledFunction();
    if (!Callee || Callee->isDeclaration())
      continue;

    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline",
                                         I->getDebugLoc(), I->getParent())
              << "previous inlining not repeated: '"
              << ore::NV("Callee", Callee) << "' into '"
              << ore::NV("Caller", &F) << "'");

    ++NumCSNotInlined;
    const FunctionSamples *FS = Pair.getSecond();
    if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
      continue;
    }

    if (ProfileMergeInlinee) {
      // A function call can be replicated by optimizations like callsite
      // splitting or jump threading and the replicates end up sharing the
      // sample nested callee profile instead of slicing the original inlinee's
      // profile. We want to do merge exactly once by filtering out callee
      // profiles with a non-zero head sample count.
      if (FS->getHeadSamples() == 0) {
        // Use entry samples as head samples during the merge, as inlinees
        // don't have head samples.
        const_cast<FunctionSamples *>(FS)->addHeadSamples(
            FS->getEntrySamples());

        // Note that we have to do the merge right after processing function.
        // This allows OutlineFS's profile to be used for annotation during
        // top-down processing of functions' annotation.
        FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
        OutlineFS->merge(*FS);
      }
    } else {
      auto pair =
          notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
      pair.first->second.entryCount += FS->getEntrySamples();
    }
  }
  return Changed;
}

/// Inline \p Candidate if its cost check passes. On success, optionally
/// returns the newly exposed callsites through \p InlinedCallSites and
/// prorates inlined pseudo-probe distribution factors.
bool SampleProfileLoader::tryInlineCandidate(
    InlineCandidate &Candidate, SmallVector<CallBase *, 8> *InlinedCallSites) {

  CallBase &CB = *Candidate.CallInstr;
  Function *CalledFunction = CB.getCalledFunction();
  assert(CalledFunction && "Expect a callee with definition");
  DebugLoc DLoc = CB.getDebugLoc();
  BasicBlock *BB = CB.getParent();

  InlineCost Cost = shouldInlineCandidate(Candidate);
  if (Cost.isNever()) {
    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
              << "incompatible inlining");
    return false;
  }

  if (!Cost)
    return false;

  InlineFunctionInfo IFI(nullptr, GetAC);
  if (InlineFunction(CB, IFI).isSuccess()) {
    // The call to InlineFunction erases I, so we can't pass it here.
    emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
                    true, CSINLINE_DEBUG);

    // Now populate the list of newly exposed call sites.
    if (InlinedCallSites) {
      InlinedCallSites->clear();
      for (auto &I : IFI.InlinedCallSites)
        InlinedCallSites->push_back(I);
    }

    if (ProfileIsCS)
      ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples);
    ++NumCSInlined;

    // Prorate inlined probes for a duplicated inlining callsite which probably
    // has a distribution less than 100%. Samples for an inlinee should be
    // distributed among the copies of the original callsite based on each
    // callsite's distribution factor for counts accuracy. Note that an inlined
    // probe may come with its own distribution factor if it has been duplicated
    // in the inlinee body. The two factors are multiplied to reflect the
    // aggregation of duplication.
    if (Candidate.CallsiteDistribution < 1) {
      for (auto &I : IFI.InlinedCallSites) {
        if (Optional<PseudoProbe> Probe = extractProbe(*I))
          setProbeDistributionFactor(*I, Probe->Factor *
                                             Candidate.CallsiteDistribution);
      }
      NumDuplicatedInlinesite++;
    }

    return true;
  }
  return false;
}

/// Build an InlineCandidate for \p CB into \p NewCandidate. Returns false
/// for intrinsics or callsites without a profile.
bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
                                             CallBase *CB) {
  assert(CB && "Expect non-null call instruction");

  if (isa<IntrinsicInst>(CB))
    return false;

  // Find the callee's profile. For indirect call, find hottest target profile.
  const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
  if (!CalleeSamples)
    return false;

  float Factor = 1.0;
  if (Optional<PseudoProbe> Probe = extractProbe(*CB))
    Factor = Probe->Factor;

  // The callsite count is the larger of the enclosing block's weight and
  // the callee profile's (factor-scaled) entry samples.
  uint64_t CallsiteCount = 0;
  ErrorOr<uint64_t> Weight = getBlockWeight(CB->getParent());
  if (Weight)
    CallsiteCount = Weight.get();
  if (CalleeSamples)
    CallsiteCount = std::max(
        CallsiteCount, uint64_t(CalleeSamples->getEntrySamples() * Factor));

  *NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
  return true;
}

/// Decide whether \p Candidate should be inlined. An external replay
/// advisor, when present, overrides all profile-driven heuristics.
InlineCost
SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
  std::unique_ptr<InlineAdvice> Advice = nullptr;
  if (ExternalInlineAdvisor) {
    Advice = ExternalInlineAdvisor->getAdvice(*Candidate.CallInstr);
    if (!Advice->isInliningRecommended()) {
      Advice->recordUnattemptedInlining();
      return InlineCost::getNever("not previously inlined");
    }
    Advice->recordInlining();
    return InlineCost::getAlways("previously inlined");
  }

  // Adjust threshold based on call site hotness, only do this for callsite
  // prioritized inliner because otherwise cost-benefit check is done earlier.
  int SampleThreshold = SampleColdCallSiteThreshold;
  if (CallsitePrioritizedInline) {
    if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
      SampleThreshold = SampleHotCallSiteThreshold;
    else if (!ProfileSizeInline)
      return InlineCost::getNever("cold callsite");
  }

  Function *Callee = Candidate.CallInstr->getCalledFunction();
  assert(Callee && "Expect a definition for inline candidate of direct call");

  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Checks if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. Need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when cost exceeds threshold without checking all IRs in the callee.
  // The actual cost does not matter because we only check isNever() to
  // see if it is legal to inline the callsite.
  InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
                                  GetTTI(*Callee), GetAC, GetTLI);

  // Honor always inline and never inline from call analyzer
  if (Cost.isNever() || Cost.isAlways())
    return Cost;

  // For old FDO inliner, we inline the call site as long as cost is not
  // "Never". The cost-benefit check is done earlier.
  if (!CallsitePrioritizedInline) {
    return InlineCost::get(Cost.getCost(), INT_MAX);
  }

  // Otherwise only use the cost from call analyzer, but overwrite threshold
  // with Sample PGO threshold.
  return InlineCost::get(Cost.getCost(), SampleThreshold);
}

/// Priority-queue driven (BFS) inliner for CSSPGO: callsites are processed
/// hottest-first, subject to a function size growth cap.
bool SampleProfileLoader::inlineHotFunctionsWithPriority(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  DenseSet<Instruction *> PromotedInsns;
  assert(ProfileIsCS && "Prioritiy based inliner only works with CSSPGO now");

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  // Populating worklist with initial call sites from root inliner, along
  // with call site weights.
  CandidateQueue CQueue;
  InlineCandidate NewCandidate;
  for (auto &BB : F) {
    for (auto &I : BB.getInstList()) {
      auto *CB = dyn_cast<CallBase>(&I);
      if (!CB)
        continue;
      if (getInlineCandidate(&NewCandidate, CB))
        CQueue.push(NewCandidate);
    }
  }

  // Cap the size growth from profile guided inlining. This is needed even
  // though cost of each inline candidate already accounts for callee size,
  // because with top-down inlining, we can grow inliner size significantly
  // with large number of smaller inlinees that each pass the cost check.
  assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
         "Max inline size limit should not be smaller than min inline size "
         "limit.");
  unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
  SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
  SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
  if (ExternalInlineAdvisor)
    SizeLimit = std::numeric_limits<unsigned>::max();

  // Perform iterative BFS call site prioritized inlining
  bool Changed = false;
  while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
    InlineCandidate Candidate = CQueue.top();
    CQueue.pop();
    CallBase *I = Candidate.CallInstr;
    Function *CalledFunction = I->getCalledFunction();

    // Do not inline recursive calls.
    if (CalledFunction == &F)
      continue;
    if (I->isIndirectCall()) {
      if (PromotedInsns.count(I))
        continue;
      uint64_t Sum;
      auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
      uint64_t SumOrigin = Sum;
      Sum *= Candidate.CallsiteDistribution;
      for (const auto *FS : CalleeSamples) {
        // TODO: Consider disable pre-LTO ICP for MonoLTO as well
        if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
          FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                   PSI->getOrCompHotCountThreshold());
          continue;
        }
        uint64_t EntryCountDistributed =
            FS->getEntrySamples() * Candidate.CallsiteDistribution;
        // In addition to regular inline cost check, we also need to make sure
        // ICP isn't introducing excessive speculative checks even if individual
        // target looks beneficial to promote and inline. That means we should
        // only do ICP when there's a small number of dominant targets.
        if (EntryCountDistributed < SumOrigin / ProfileICPThreshold)
          break;
        // TODO: Fix CallAnalyzer to handle all indirect calls.
        // For indirect call, we don't run CallAnalyzer to get InlineCost
        // before actual inlining. This is because we could see two different
        // types from the same definition, which makes CallAnalyzer choke as
        // it's expecting matching parameter type on both caller and callee
        // side. See example from PR18962 for the triggering cases (the bug was
        // fixed, but we generate different types).
        if (!PSI->isHotCount(EntryCountDistributed))
          break;
        SmallVector<CallBase *, 8> InlinedCallSites;
        // Attach function profile for promoted indirect callee, and update
        // call site count for the promoted inline candidate too.
        Candidate = {I, FS, EntryCountDistributed,
                     Candidate.CallsiteDistribution};
        if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
                                         PromotedInsns, &InlinedCallSites)) {
          for (auto *CB : InlinedCallSites) {
            if (getInlineCandidate(&NewCandidate, CB))
              CQueue.emplace(NewCandidate);
          }
          Changed = true;
        }
      }
    } else if (CalledFunction && CalledFunction->getSubprogram() &&
               !CalledFunction->isDeclaration()) {
      SmallVector<CallBase *, 8> InlinedCallSites;
      if (tryInlineCandidate(Candidate, &InlinedCallSites)) {
        for (auto *CB : InlinedCallSites) {
          if (getInlineCandidate(&NewCandidate, CB))
            CQueue.emplace(NewCandidate);
        }
        Changed = true;
      }
    } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
      // NOTE(review): findCalleeFunctionSamples() can return null (see its
      // early returns); this unguarded dereference assumes samples exist
      // for every such callsite -- verify.
      findCalleeFunctionSamples(*I)->findInlinedFunctions(
          InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
    }
  }

  // Record which limit stopped inlining, for statistics.
  if (!CQueue.empty()) {
    if (SizeLimit == (unsigned)ProfileInlineLimitMax)
      ++NumCSInlinedHitMaxLimit;
    else if (SizeLimit == (unsigned)ProfileInlineLimitMin)
      ++NumCSInlinedHitMinLimit;
    else
      ++NumCSInlinedHitGrowthLimit;
  }

  return Changed;
}

/// Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
/// the following holds:
///
/// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
///    is a descendant of \p BB1 in the dominator tree, then BB2 should
///    dominate BB1 in the post-dominator tree.
///
/// 2- Both BB2 and \p BB1 must be in the same loop.
///
/// For every block BB2 that meets those two requirements, we set BB2's
/// equivalence class to \p BB1.
///
/// \param BB1 Block to check.
/// \param Descendants Descendants of \p BB1 in either the dom or pdom tree.
/// \param DomTree Opposite dominator tree. If \p Descendants is filled
///                with blocks from \p BB1's dominator tree, then
///                this is the post-dominator tree, and vice versa.
template <bool IsPostDom>
void SampleProfileLoaderBaseImpl::findEquivalencesFor(
    BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
    DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) {
  const BasicBlock *EC = EquivalenceClass[BB1];
  // Start from the weight already assigned to the class representative; the
  // loop below raises it to the max over all merged members.
  uint64_t Weight = BlockWeights[EC];
  for (const auto *BB2 : Descendants) {
    bool IsDomParent = DomTree->dominates(BB2, BB1);
    bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
    if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
      EquivalenceClass[BB2] = EC;
      // If BB2 is visited, then the entire EC should be marked as visited.
      if (VisitedBlocks.count(BB2)) {
        VisitedBlocks.insert(EC);
      }

      // If BB2 is heavier than BB1, make BB2 have the same weight
      // as BB1.
      //
      // Note that we don't worry about the opposite situation here
      // (when BB2 is lighter than BB1). We will deal with this
      // during the propagation phase. Right now, we just want to
      // make sure that BB1 has the largest weight of all the
      // members of its equivalence set.
      Weight = std::max(Weight, BlockWeights[BB2]);
    }
  }
  if (EC == &EC->getParent()->getEntryBlock()) {
    // The entry block's weight comes from the profile's head samples; the +1
    // keeps it non-zero even when no head samples were recorded.
    BlockWeights[EC] = Samples->getHeadSamples() + 1;
  } else {
    BlockWeights[EC] = Weight;
  }
}

/// Find equivalence classes.
///
/// Since samples may be missing from blocks, we can fill in the gaps by setting
/// the weights of all the blocks in the same equivalence class to the same
/// weight. To compute the concept of equivalence, we use dominance and loop
/// information. Two blocks B1 and B2 are in the same equivalence class if B1
/// dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// \param F The function to query.
void SampleProfileLoaderBaseImpl::findEquivalenceClasses(Function &F) {
  SmallVector<BasicBlock *, 8> DominatedBBs;
  LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
  // Find equivalence sets based on dominance and post-dominance information.
  for (auto &BB : F) {
    BasicBlock *BB1 = &BB;

    // Compute BB1's equivalence class once.
    if (EquivalenceClass.count(BB1)) {
      LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
      continue;
    }

    // By default, blocks are in their own equivalence class.
    EquivalenceClass[BB1] = BB1;

    // Traverse all the blocks dominated by BB1. We are looking for
    // every basic block BB2 such that:
    //
    // 1- BB1 dominates BB2.
    // 2- BB2 post-dominates BB1.
    // 3- BB1 and BB2 are in the same loop nest.
    //
    // If all those conditions hold, it means that BB2 is executed
    // as many times as BB1, so they are placed in the same equivalence
    // class by making BB2's equivalence class be BB1.
    DominatedBBs.clear();
    DT->getDescendants(BB1, DominatedBBs);
    findEquivalencesFor(BB1, DominatedBBs, PDT.get());

    LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
  }

  // Assign weights to equivalence classes.
  //
  // All the basic blocks in the same equivalence class will execute
  // the same number of times. Since we know that the head block in
  // each equivalence class has the largest weight, assign that weight
  // to all the blocks in that equivalence class.
  LLVM_DEBUG(
      dbgs() << "\nAssign the same weight to all blocks in the same class\n");
  for (auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EquivBB = EquivalenceClass[BB];
    if (BB != EquivBB)
      BlockWeights[BB] = BlockWeights[EquivBB];
    LLVM_DEBUG(printBlockWeight(dbgs(), BB));
  }
}

/// Visit the given edge to decide if it has a valid weight.
///
/// If \p E has not been visited before, we copy to \p UnknownEdge
/// and increment the count of unknown edges.
///
/// \param E Edge to visit.
/// \param NumUnknownEdges Current number of unknown edges.
/// \param UnknownEdge Set if E has not been visited before.
///
/// \returns E's weight, if known. Otherwise, return 0.
uint64_t SampleProfileLoaderBaseImpl::visitEdge(Edge E,
                                                unsigned *NumUnknownEdges,
                                                Edge *UnknownEdge) {
  if (!VisitedEdges.count(E)) {
    // Unknown weight: report it through the out-parameters and contribute
    // nothing to the caller's running total.
    (*NumUnknownEdges)++;
    *UnknownEdge = E;
    return 0;
  }

  return EdgeWeights[E];
}

/// Propagate weights through incoming/outgoing edges.
///
/// If the weight of a basic block is known, and there is only one edge
/// with an unknown weight, we can calculate the weight of that edge.
///
/// Similarly, if all the edges have a known count, we can calculate the
/// count of the basic block, if needed.
///
/// \param F Function to process.
/// \param UpdateBlockCount Whether we should update basic block counts that
///                         have already been annotated.
///
/// \returns True if new weights were assigned to edges or blocks.
bool SampleProfileLoaderBaseImpl::propagateThroughEdges(Function &F,
                                                        bool UpdateBlockCount) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
  for (const auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EC = EquivalenceClass[BB];

    // Visit all the predecessor and successor edges to determine
    // which ones have a weight assigned already. Note that it doesn't
    // matter that we only keep track of a single unknown edge. The
    // only case we are interested in handling is when only a single
    // edge is unknown (see setEdgeOrBlockWeight).
    // Round i == 0 examines predecessor edges, round i == 1 successor edges.
    for (unsigned i = 0; i < 2; i++) {
      uint64_t TotalWeight = 0;
      unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
      Edge UnknownEdge, SelfReferentialEdge, SingleEdge;

      if (i == 0) {
        // First, visit all predecessor edges.
        NumTotalEdges = Predecessors[BB].size();
        for (auto *Pred : Predecessors[BB]) {
          Edge E = std::make_pair(Pred, BB);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
          if (E.first == E.second)
            SelfReferentialEdge = E;
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(Predecessors[BB][0], BB);
        }
      } else {
        // On the second round, visit all successor edges.
        NumTotalEdges = Successors[BB].size();
        for (auto *Succ : Successors[BB]) {
          Edge E = std::make_pair(BB, Succ);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(BB, Successors[BB][0]);
        }
      }

      // After visiting all the edges, there are three cases that we
      // can handle immediately:
      //
      // - All the edge weights are known (i.e., NumUnknownEdges == 0).
      //   In this case, we simply check that the sum of all the edges
      //   is the same as BB's weight. If not, we change BB's weight
      //   to match. Additionally, if BB had not been visited before,
      //   we mark it visited.
      //
      // - Only one edge is unknown and BB has already been visited.
      //   In this case, we can compute the weight of the edge by
      //   subtracting the total block weight from all the known
      //   edge weights. If the edges weight more than BB, then the
      //   edge of the last remaining edge is set to zero.
      //
      // - There exists a self-referential edge and the weight of BB is
      //   known. In this case, this edge can be based on BB's weight.
      //   We add up all the other known edges and set the weight on
      //   the self-referential edge as we did in the previous case.
      //
      // In any other case, we must continue iterating. Eventually,
      // all edges will get a weight, or iteration will stop when
      // it reaches SampleProfileMaxPropagateIterations.
      if (NumUnknownEdges <= 1) {
        uint64_t &BBWeight = BlockWeights[EC];
        if (NumUnknownEdges == 0) {
          if (!VisitedBlocks.count(EC)) {
            // If we already know the weight of all edges, the weight of the
            // basic block can be computed. It should be no larger than the sum
            // of all edge weights.
            if (TotalWeight > BBWeight) {
              BBWeight = TotalWeight;
              Changed = true;
              LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
                                << " known. Set weight for block: ";
                         printBlockWeight(dbgs(), BB););
            }
          } else if (NumTotalEdges == 1 &&
                     EdgeWeights[SingleEdge] < BlockWeights[EC]) {
            // If there is only one edge for the visited basic block, use the
            // block weight to adjust edge weight if edge weight is smaller.
            EdgeWeights[SingleEdge] = BlockWeights[EC];
            Changed = true;
          }
        } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
          // If there is a single unknown edge and the block has been
          // visited, then we can compute E's weight.
          if (BBWeight >= TotalWeight)
            EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
          else
            EdgeWeights[UnknownEdge] = 0;
          const BasicBlock *OtherEC;
          if (i == 0)
            OtherEC = EquivalenceClass[UnknownEdge.first];
          else
            OtherEC = EquivalenceClass[UnknownEdge.second];
          // Edge weights should never exceed the BB weights it connects.
          if (VisitedBlocks.count(OtherEC) &&
              EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
            EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
          VisitedEdges.insert(UnknownEdge);
          Changed = true;
          LLVM_DEBUG(dbgs() << "Set weight for edge: ";
                     printEdgeWeight(dbgs(), UnknownEdge));
        }
      } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
        // If a block weighs 0, all its in/out edges should weigh 0.
        if (i == 0) {
          for (auto *Pred : Predecessors[BB]) {
            Edge E = std::make_pair(Pred, BB);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        } else {
          for (auto *Succ : Successors[BB]) {
            Edge E = std::make_pair(BB, Succ);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        }
      } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
        uint64_t &BBWeight = BlockWeights[BB];
        // We have a self-referential edge and the weight of BB is known.
        if (BBWeight >= TotalWeight)
          EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
        else
          EdgeWeights[SelfReferentialEdge] = 0;
        VisitedEdges.insert(SelfReferentialEdge);
        Changed = true;
        LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
                   printEdgeWeight(dbgs(), SelfReferentialEdge));
      }
      if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
        // Third-pass mode: adopt the edge-weight sum as the block count for
        // blocks that were never annotated directly.
        BlockWeights[EC] = TotalWeight;
        VisitedBlocks.insert(EC);
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Build in/out edge lists for each basic block in the CFG.
///
/// We are interested in unique edges. If a block B1 has multiple
/// edges to another block B2, we only add a single B1->B2 edge.
void SampleProfileLoaderBaseImpl::buildEdges(Function &F) {
  for (auto &BI : F) {
    BasicBlock *B1 = &BI;

    // Add predecessors for B1, de-duplicated via the Visited set.
    SmallPtrSet<BasicBlock *, 16> Visited;
    if (!Predecessors[B1].empty())
      llvm_unreachable("Found a stale predecessors list in a basic block.");
    for (BasicBlock *B2 : predecessors(B1))
      if (Visited.insert(B2).second)
        Predecessors[B1].push_back(B2);

    // Add successors for B1, de-duplicated the same way.
    Visited.clear();
    if (!Successors[B1].empty())
      llvm_unreachable("Found a stale successors list in a basic block.");
    for (BasicBlock *B2 : successors(B1))
      if (Visited.insert(B2).second)
        Successors[B1].push_back(B2);
  }
}

/// Returns the sorted CallTargetMap \p M by count in descending order.
static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
    const SampleRecord::CallTargetMap &M) {
  SmallVector<InstrProfValueData, 2> R;
  // SortCallTargets returns the targets ordered by descending count; convert
  // each (name, count) pair into the GUID-keyed form value profiling expects.
  for (const auto &I : SampleRecord::SortCallTargets(M)) {
    R.emplace_back(
        InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
  }
  return R;
}

/// Propagate weights into edges
///
/// The following rules are applied to every block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
///   of that edge is the weight of the block.
///
/// - If all incoming or outgoing edges are known except one, and the
///   weight of the block is already known, the weight of the unknown
///   edge will be the weight of the block minus the sum of all the known
///   edges. If the sum of all the known edges is larger than BB's weight,
///   we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
///   known, the weight for that edge is set to the weight of the block
///   minus the weight of the other incoming edges to that block (if
///   known).
void SampleProfileLoaderBaseImpl::propagateWeights(Function &F) {
  bool Changed = true;
  // I counts iterations across all three propagation passes, so the combined
  // work is bounded by SampleProfileMaxPropagateIterations overall.
  unsigned I = 0;

  // If BB weight is larger than its corresponding loop's header BB weight,
  // use the BB weight to replace the loop header BB weight.
  for (auto &BI : F) {
    BasicBlock *BB = &BI;
    Loop *L = LI->getLoopFor(BB);
    if (!L) {
      continue;
    }
    BasicBlock *Header = L->getHeader();
    if (Header && BlockWeights[BB] > BlockWeights[Header]) {
      BlockWeights[Header] = BlockWeights[BB];
    }
  }

  // Before propagation starts, build, for each block, a list of
  // unique predecessors and successors. This is necessary to handle
  // identical edges in multiway branches. Since we visit all blocks and all
  // edges of the CFG, it is cleaner to build these lists once at the start
  // of the pass.
  buildEdges(F);

  // Propagate until we converge or we go past the iteration limit.
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The first propagation propagates BB counts from annotated BBs to unknown
  // BBs. The 2nd propagation pass resets edges weights, and use all BB weights
  // to propagate edge weights.
  VisitedEdges.clear();
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The 3rd propagation pass adjusts annotated BB weights that are
  // obviously wrong (UpdateBlockCount == true).
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, true);
  }
}

/// Generate branch weight metadata for all branches in \p F.
///
/// Branch weights are computed out of instruction samples using a
/// propagation heuristic. Propagation proceeds in 3 phases:
///
/// 1- Assignment of block weights. All the basic blocks in the function
///    are initial assigned the same weight as their most frequently
///    executed instruction.
///
/// 2- Creation of equivalence classes. Since samples may be missing from
///    blocks, we can fill in the gaps by setting the weights of all the
///    blocks in the same equivalence class to the same weight. To compute
///    the concept of equivalence, we use dominance and loop information.
///    Two blocks B1 and B2 are in the same equivalence class if B1
///    dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// 3- Propagation of block weights into edges. This uses a simple
///    propagation heuristic. The following rules are applied to every
///    block BB in the CFG:
///
///    - If BB has a single predecessor/successor, then the weight
///      of that edge is the weight of the block.
///
///    - If all the edges are known except one, and the weight of the
///      block is already known, the weight of the unknown edge will
///      be the weight of the block minus the sum of all the known
///      edges. If the sum of all the known edges is larger than BB's weight,
///      we set the unknown edge weight to zero.
///
///    - If there is a self-referential edge, and the weight of the block is
///      known, the weight for that edge is set to the weight of the block
///      minus the weight of the other incoming edges to that block (if
///      known).
///
/// Since this propagation is not guaranteed to finalize for every CFG, we
/// only allow it to proceed for a limited number of iterations (controlled
/// by -sample-profile-max-propagate-iterations).
///
/// FIXME: Try to replace this propagation heuristic with a scheme
/// that is guaranteed to finalize. A work-list approach similar to
/// the standard value propagation algorithm used by SSA-CCP might
/// work here.
///
/// \param F The function to query.
///
/// \returns true if \p F was modified. Returns false, otherwise.
bool SampleProfileLoaderBaseImpl::computeAndPropagateWeights(
    Function &F, const DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  bool Changed = (InlinedGUIDs.size() != 0);

  // Compute basic block weights.
  Changed |= computeBlockWeights(F);

  if (Changed) {
    // Add an entry count to the function using the samples gathered at the
    // function entry.
    // Sets the GUIDs that are inlined in the profiled binary. This is used
    // for ThinLink to make correct liveness analysis, and also make the IR
    // match the profiled binary before annotation.
    F.setEntryCount(
        ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
        &InlinedGUIDs);

    // Compute dominance and loop info needed for propagation.
    computeDominanceAndLoopInfo(F);

    // Find equivalence classes.
    findEquivalenceClasses(F);

    // Propagate weights to all edges.
    propagateWeights(F);
  }

  return Changed;
}

void SampleProfileLoaderBaseImpl::emitCoverageRemarks(Function &F) {
  // If record-level coverage checking was requested, compute it now and warn
  // when the applied fraction falls below the requested threshold.
  if (SampleProfileRecordCoverage) {
    unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
    unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileRecordCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile records (" +
          Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }

  // Same check at sample granularity.
  if (SampleProfileSampleCoverage) {
    uint64_t Used = CoverageTracker.getTotalUsedSamples();
    uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileSampleCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
          Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }
}

// Generate MD_prof metadata for every branch instruction using the
// edge weights computed during propagation.
void SampleProfileLoader::generateMDProfMetadata(Function &F) {
  // Generate MD_prof metadata for every branch instruction using the
  // edge weights computed during propagation.
  LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
  LLVMContext &Ctx = F.getContext();
  MDBuilder MDB(Ctx);
  for (auto &BI : F) {
    BasicBlock *BB = &BI;

    if (BlockWeights[BB]) {
      for (auto &I : BB->getInstList()) {
        if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
          continue;
        if (!cast<CallBase>(I).getCalledFunction()) {
          // Indirect call: attach value-profile metadata for its targets.
          const DebugLoc &DLoc = I.getDebugLoc();
          if (!DLoc)
            continue;
          const DILocation *DIL = DLoc;
          const FunctionSamples *FS = findFunctionSamples(I);
          if (!FS)
            continue;
          auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
          auto T = FS->findCallTargetMapAt(CallSite);
          if (!T || T.get().empty())
            continue;
          // Prorate the callsite counts to reflect what is already done to the
          // callsite, such as ICP or callsite cloning.
          if (FunctionSamples::ProfileIsProbeBased) {
            if (Optional<PseudoProbe> Probe = extractProbe(I)) {
              if (Probe->Factor < 1)
                T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor);
            }
          }
          SmallVector<InstrProfValueData, 2> SortedCallTargets =
              GetSortedValueDataFromCallTargets(T.get());
          uint64_t Sum;
          findIndirectCallFunctionSamples(I, Sum);
          annotateValueSite(*I.getParent()->getParent()->getParent(), I,
                            SortedCallTargets, Sum, IPVK_IndirectCallTarget,
                            SortedCallTargets.size());
        } else if (!isa<IntrinsicInst>(&I)) {
          // Direct, non-intrinsic call: record the block weight as this
          // call's branch weight.
          I.setMetadata(LLVMContext::MD_prof,
                        MDB.createBranchWeights(
                            {static_cast<uint32_t>(BlockWeights[BB])}));
        }
      }
    }
    // Only multi-successor conditional branches and switches get branch
    // weight metadata below.
    Instruction *TI = BB->getTerminator();
    if (TI->getNumSuccessors() == 1)
      continue;
    if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
      continue;

    DebugLoc BranchLoc = TI->getDebugLoc();
    LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
                      << ((BranchLoc) ? Twine(BranchLoc.getLine())
                                      : Twine("<UNKNOWN LOCATION>"))
                      << ".\n");
    SmallVector<uint32_t, 4> Weights;
    uint32_t MaxWeight = 0;
    // Only read below when MaxWeight > 0, which guarantees it was assigned.
    Instruction *MaxDestInst;
    for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
      BasicBlock *Succ = TI->getSuccessor(I);
      Edge E = std::make_pair(BB, Succ);
      uint64_t Weight = EdgeWeights[E];
      LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
      // Use uint32_t saturated arithmetic to adjust the incoming weights,
      // if needed. Sample counts in profiles are 64-bit unsigned values,
      // but internally branch weights are expressed as 32-bit values.
      if (Weight > std::numeric_limits<uint32_t>::max()) {
        LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
        Weight = std::numeric_limits<uint32_t>::max();
      }
      // Weight is added by one to avoid propagation errors introduced by
      // 0 weights.
      Weights.push_back(static_cast<uint32_t>(Weight + 1));
      if (Weight != 0) {
        // Keep track of the hottest destination for the remark below.
        if (Weight > MaxWeight) {
          MaxWeight = Weight;
          MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
        }
      }
    }

    uint64_t TempWeight;
    // Only set weights if there is at least one non-zero weight.
    // In any other case, let the analyzer set weights.
    // Do not set weights if the weights are present. In ThinLTO, the profile
    // annotation is done twice. If the first annotation already set the
    // weights, the second pass does not need to set it.
    if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
      LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
      TI->setMetadata(LLVMContext::MD_prof,
                      MDB.createBranchWeights(Weights));
      ORE->emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
               << "most popular destination for conditional branches at "
               << ore::NV("CondBranchesLoc", BranchLoc);
      });
    } else {
      LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
    }
  }
}

/// Get the line number for the function header.
///
/// This looks up function \p F in the current compilation unit and
/// retrieves the line number where the function is defined. This is
/// line 0 for all the samples read from the profile file. Every line
/// number is relative to this line.
///
/// \param F Function object to query.
///
/// \returns the line number where \p F is defined. If it returns 0,
/// it means that there is no debug information available for \p F.
unsigned SampleProfileLoaderBaseImpl::getFunctionLoc(Function &F) {
  if (DISubprogram *S = F.getSubprogram())
    return S->getLine();

  if (NoWarnSampleUnused)
    return 0;

  // If the start of \p F is missing, emit a diagnostic to inform the user
  // about the missed opportunity.
  F.getContext().diagnose(DiagnosticInfoSampleProfile(
      "No debug information found in function " + F.getName() +
          ": Function profile not used",
      DS_Warning));
  return 0;
}

/// Recompute the dominator tree, post-dominator tree and loop info for \p F,
/// replacing any analyses left over from the previously processed function.
void SampleProfileLoaderBaseImpl::computeDominanceAndLoopInfo(Function &F) {
  DT.reset(new DominatorTree);
  DT->recalculate(F);

  PDT.reset(new PostDominatorTree(F));

  LI.reset(new LoopInfo);
  LI->analyze(*DT);
}

/// Once all the branch weights are computed, we emit the MD_prof
/// metadata on BB using the computed values for each of its branches.
///
/// \param F The function to query.
///
/// \returns true if \p F was modified. Returns false, otherwise.
bool SampleProfileLoader::emitAnnotations(Function &F) {
  bool Changed = false;

  if (FunctionSamples::ProfileIsProbeBased) {
    // Probe-based profiles carry a CFG checksum; bail out on mismatch rather
    // than annotate with a stale profile.
    if (!ProbeManager->profileIsValid(F, *Samples)) {
      LLVM_DEBUG(
          dbgs() << "Profile is invalid due to CFG mismatch for Function "
                 << F.getName());
      ++NumMismatchedProfile;
      return false;
    }
    ++NumMatchedProfile;
  } else {
    // Line-based profiles need debug info to map samples back to the IR.
    if (getFunctionLoc(F) == 0)
      return false;

    LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
                      << F.getName() << ": " << getFunctionLoc(F) << "\n");
  }

  DenseSet<GlobalValue::GUID> InlinedGUIDs;
  if (ProfileIsCS && CallsitePrioritizedInline)
    Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs);
  else
    Changed |= inlineHotFunctions(F, InlinedGUIDs);

  Changed |= computeAndPropagateWeights(F, InlinedGUIDs);

  if (Changed)
    generateMDProfMetadata(F);

  emitCoverageRemarks(F);
  return Changed;
}

char SampleProfileLoaderLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
                      "Sample Profile loader", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
                    "Sample Profile loader", false, false)

/// Build the list of functions to process, either in module order or (when
/// top-down loading is enabled and a call graph is available) in reverse
/// SCC post-order so callers are processed before callees.
std::vector<Function *>
SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
  std::vector<Function *> FunctionOrderList;
  FunctionOrderList.reserve(M.size());

  if (!ProfileTopDownLoad || CG == nullptr) {
    if (ProfileMergeInlinee) {
      // Disable ProfileMergeInlinee if profile is not loaded in top down order,
      // because the profile for a function may be used for the profile
      // annotation of its outline copy before the profile merging of its
      // non-inlined inline instances, and that is not the way how
      // ProfileMergeInlinee is supposed to work.
      ProfileMergeInlinee = false;
    }

    for (Function &F : M)
      if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
        FunctionOrderList.push_back(&F);
    return FunctionOrderList;
  }

  assert(&CG->getModule() == &M);
  scc_iterator<CallGraph *> CGI = scc_begin(CG);
  while (!CGI.isAtEnd()) {
    for (CallGraphNode *node : *CGI) {
      auto F = node->getFunction();
      if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
        FunctionOrderList.push_back(F);
    }
    ++CGI;
  }

  // scc_begin yields bottom-up SCC order; reverse it to get top-down.
  std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
  return FunctionOrderList;
}

bool SampleProfileLoader::doInitialization(Module &M,
                                           FunctionAnalysisManager *FAM) {
  auto &Ctx = M.getContext();

  // Create and run the profile reader; any failure is reported as a module
  // diagnostic and disables the pass for this module.
  auto ReaderOrErr =
      SampleProfileReader::create(Filename, Ctx, RemappingFilename);
  if (std::error_code EC = ReaderOrErr.getError()) {
    std::string Msg = "Could not open profile: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
    return false;
  }
  Reader = std::move(ReaderOrErr.get());
  Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink);
  // Restrict reading to functions actually present in this module.
  Reader->collectFuncsFrom(M);
  if (std::error_code EC = Reader->read()) {
    std::string Msg = "profile reading failed: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
    return false;
  }

  PSL = Reader->getProfileSymbolList();

  // While profile-sample-accurate is on, ignore symbol list.
  ProfAccForSymsInList =
      ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
  if (ProfAccForSymsInList) {
    // Cache the profile's name table so runOnFunction can tell whether a
    // listed-but-unsampled function appears anywhere in the profile.
    NamesInProfile.clear();
    if (auto NameTable = Reader->getNameTable())
      NamesInProfile.insert(NameTable->begin(), NameTable->end());
    CoverageTracker.setProfAccForSymsInList(true);
  }

  if (FAM && !ProfileInlineReplayFile.empty()) {
    // Optional replay advisor: re-apply inline decisions recorded in a
    // previous compilation's remarks. Dropped if the remarks fail to load.
    ExternalInlineAdvisor = std::make_unique<ReplayInlineAdvisor>(
        M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr, ProfileInlineReplayFile,
        /*EmitRemarks=*/false);
    if (!ExternalInlineAdvisor->areReplayRemarksLoaded())
      ExternalInlineAdvisor.reset();
  }

  // Apply tweaks if context-sensitive profile is available.
  if (Reader->profileIsCS()) {
    ProfileIsCS = true;
    FunctionSamples::ProfileIsCS = true;

    // Enable priority-based inliner and size inline by default for CSSPGO.
    // Only flip the defaults when the user did not set the flags explicitly.
    if (!ProfileSizeInline.getNumOccurrences())
      ProfileSizeInline = true;
    if (!CallsitePrioritizedInline.getNumOccurrences())
      CallsitePrioritizedInline = true;

    // Tracker for profiles under different context.
    ContextTracker =
        std::make_unique<SampleContextTracker>(Reader->getProfiles());
  }

  // Load pseudo probe descriptors for probe-based function samples.
  if (Reader->profileIsProbeBased()) {
    ProbeManager = std::make_unique<PseudoProbeManager>(M);
    if (!ProbeManager->moduleIsProbed(M)) {
      const char *Msg =
          "Pseudo-probe-based profile requires SampleProfileProbePass";
      Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
      return false;
    }
  }

  return true;
}

/// Legacy-PM factory for the sample profile loader pass (default profile
/// file name from the command line).
ModulePass *llvm::createSampleProfileLoaderPass() {
  return new SampleProfileLoaderLegacyPass();
}

/// Legacy-PM factory taking an explicit profile file name.
ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
  return new SampleProfileLoaderLegacyPass(Name);
}

bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
                                      ProfileSummaryInfo *_PSI, CallGraph *CG) {
  GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);

  PSI = _PSI;
  // Install the sample profile summary on the module if none exists yet, so
  // PSI hot/cold thresholds reflect this profile.
  if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
    M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
                        ProfileSummary::PSK_Sample);
    PSI->refresh();
  }
  // Compute the total number of samples collected in this profile.
  for (const auto &I : Reader->getProfiles())
    TotalCollectedSamples += I.second.getTotalSamples();

  auto Remapper = Reader->getRemapper();
  // Populate the symbol map.
  for (const auto &N_F : M.getValueSymbolTable()) {
    StringRef OrigName = N_F.getKey();
    Function *F = dyn_cast<Function>(N_F.getValue());
    if (F == nullptr)
      continue;
    SymbolMap[OrigName] = F;
    // Also map the name with any ".suffix" stripped (e.g. ".llvm." suffixes)
    // back to the same function.
    auto pos = OrigName.find('.');
    if (pos != StringRef::npos) {
      StringRef NewName = OrigName.substr(0, pos);
      auto r = SymbolMap.insert(std::make_pair(NewName, F));
      // Failing to insert means there is already an entry in SymbolMap,
      // thus there are multiple functions that are mapped to the same
      // stripped name. In this case of name conflicting, set the value
      // to nullptr to avoid confusion.
2488 if (!r.second) 2489 r.first->second = nullptr; 2490 OrigName = NewName; 2491 } 2492 // Insert the remapped names into SymbolMap. 2493 if (Remapper) { 2494 if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) { 2495 if (*MapName == OrigName) 2496 continue; 2497 SymbolMap.insert(std::make_pair(*MapName, F)); 2498 } 2499 } 2500 } 2501 2502 bool retval = false; 2503 for (auto F : buildFunctionOrder(M, CG)) { 2504 assert(!F->isDeclaration()); 2505 clearFunctionData(); 2506 retval |= runOnFunction(*F, AM); 2507 } 2508 2509 // Account for cold calls not inlined.... 2510 if (!ProfileIsCS) 2511 for (const std::pair<Function *, NotInlinedProfileInfo> &pair : 2512 notInlinedCallInfo) 2513 updateProfileCallee(pair.first, pair.second.entryCount); 2514 2515 return retval; 2516 } 2517 2518 bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) { 2519 ACT = &getAnalysis<AssumptionCacheTracker>(); 2520 TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>(); 2521 TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>(); 2522 ProfileSummaryInfo *PSI = 2523 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 2524 return SampleLoader.runOnModule(M, nullptr, PSI, nullptr); 2525 } 2526 2527 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) { 2528 DILocation2SampleMap.clear(); 2529 // By default the entry count is initialized to -1, which will be treated 2530 // conservatively by getEntryCount as the same as unknown (None). This is 2531 // to avoid newly added code to be treated as cold. If we have samples 2532 // this will be overwritten in emitAnnotations. 2533 uint64_t initialEntryCount = -1; 2534 2535 ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL; 2536 if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) { 2537 // initialize all the function entry counts to 0. It means all the 2538 // functions without profile will be regarded as cold. 
2539 initialEntryCount = 0; 2540 // profile-sample-accurate is a user assertion which has a higher precedence 2541 // than symbol list. When profile-sample-accurate is on, ignore symbol list. 2542 ProfAccForSymsInList = false; 2543 } 2544 CoverageTracker.setProfAccForSymsInList(ProfAccForSymsInList); 2545 2546 // PSL -- profile symbol list include all the symbols in sampled binary. 2547 // If ProfileAccurateForSymsInList is enabled, PSL is used to treat 2548 // old functions without samples being cold, without having to worry 2549 // about new and hot functions being mistakenly treated as cold. 2550 if (ProfAccForSymsInList) { 2551 // Initialize the entry count to 0 for functions in the list. 2552 if (PSL->contains(F.getName())) 2553 initialEntryCount = 0; 2554 2555 // Function in the symbol list but without sample will be regarded as 2556 // cold. To minimize the potential negative performance impact it could 2557 // have, we want to be a little conservative here saying if a function 2558 // shows up in the profile, no matter as outline function, inline instance 2559 // or call targets, treat the function as not being cold. This will handle 2560 // the cases such as most callsites of a function are inlined in sampled 2561 // binary but not inlined in current build (because of source code drift, 2562 // imprecise debug information, or the callsites are all cold individually 2563 // but not cold accumulatively...), so the outline function showing up as 2564 // cold in sampled binary will actually not be cold after current build. 2565 StringRef CanonName = FunctionSamples::getCanonicalFnName(F); 2566 if (NamesInProfile.count(CanonName)) 2567 initialEntryCount = -1; 2568 } 2569 2570 // Initialize entry count when the function has no existing entry 2571 // count value. 
2572 if (!F.getEntryCount().hasValue()) 2573 F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real)); 2574 std::unique_ptr<OptimizationRemarkEmitter> OwnedORE; 2575 if (AM) { 2576 auto &FAM = 2577 AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent()) 2578 .getManager(); 2579 ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F); 2580 } else { 2581 OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F); 2582 ORE = OwnedORE.get(); 2583 } 2584 2585 if (ProfileIsCS) 2586 Samples = ContextTracker->getBaseSamplesFor(F); 2587 else 2588 Samples = Reader->getSamplesFor(F); 2589 2590 if (Samples && !Samples->empty()) 2591 return emitAnnotations(F); 2592 return false; 2593 } 2594 2595 PreservedAnalyses SampleProfileLoaderPass::run(Module &M, 2596 ModuleAnalysisManager &AM) { 2597 FunctionAnalysisManager &FAM = 2598 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager(); 2599 2600 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & { 2601 return FAM.getResult<AssumptionAnalysis>(F); 2602 }; 2603 auto GetTTI = [&](Function &F) -> TargetTransformInfo & { 2604 return FAM.getResult<TargetIRAnalysis>(F); 2605 }; 2606 auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & { 2607 return FAM.getResult<TargetLibraryAnalysis>(F); 2608 }; 2609 2610 SampleProfileLoader SampleLoader( 2611 ProfileFileName.empty() ? SampleProfileFile : ProfileFileName, 2612 ProfileRemappingFileName.empty() ? SampleProfileRemappingFile 2613 : ProfileRemappingFileName, 2614 LTOPhase, GetAssumptionCache, GetTTI, GetTLI); 2615 2616 if (!SampleLoader.doInitialization(M, &FAM)) 2617 return PreservedAnalyses::all(); 2618 2619 ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M); 2620 CallGraph &CG = AM.getResult<CallGraphAnalysis>(M); 2621 if (!SampleLoader.runOnModule(M, &AM, PSI, &CG)) 2622 return PreservedAnalyses::all(); 2623 2624 return PreservedAnalyses::none(); 2625 } 2626