1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the SampleProfileLoader transformation. This pass 10 // reads a profile file generated by a sampling profiler (e.g. Linux Perf - 11 // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the 12 // profile information in the given profile. 13 // 14 // This pass generates branch weight annotations on the IR: 15 // 16 // - prof: Represents branch weights. This annotation is added to branches 17 // to indicate the weights of each edge coming out of the branch. 18 // The weight of each edge is the weight of the target block for 19 // that edge. The weight of a block B is computed as the maximum 20 // number of samples found in B. 
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/SampleProfile.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ValueSymbolTable.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ProfileData/SampleProf.h"
#include "llvm/ProfileData/SampleProfReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/MisExpect.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

using namespace llvm;
using namespace sampleprof;
using ProfileCount = Function::ProfileCount;
// Debug-type strings used by LLVM_DEBUG and optimization remarks. The
// context-sensitive inlining done by this pass reports under a distinct
// "-inline" suffixed category.
#define DEBUG_TYPE "sample-profile"
#define CSINLINE_DEBUG DEBUG_TYPE "-inline"

STATISTIC(NumCSInlined,
          "Number of functions inlined with context sensitive profile");
STATISTIC(NumCSNotInlined,
          "Number of functions not inlined with context sensitive profile");

// Command line option to specify the file to read samples from. This is
// mainly used for debugging.
static cl::opt<std::string> SampleProfileFile(
    "sample-profile-file", cl::init(""), cl::value_desc("filename"),
    cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);

// The named file contains a set of transformations that may have been applied
// to the symbol names between the program from which the sample data was
// collected and the current program's symbols.
115 static cl::opt<std::string> SampleProfileRemappingFile( 116 "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"), 117 cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden); 118 119 static cl::opt<unsigned> SampleProfileMaxPropagateIterations( 120 "sample-profile-max-propagate-iterations", cl::init(100), 121 cl::desc("Maximum number of iterations to go through when propagating " 122 "sample block/edge weights through the CFG.")); 123 124 static cl::opt<unsigned> SampleProfileRecordCoverage( 125 "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"), 126 cl::desc("Emit a warning if less than N% of records in the input profile " 127 "are matched to the IR.")); 128 129 static cl::opt<unsigned> SampleProfileSampleCoverage( 130 "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"), 131 cl::desc("Emit a warning if less than N% of samples in the input profile " 132 "are matched to the IR.")); 133 134 static cl::opt<bool> NoWarnSampleUnused( 135 "no-warn-sample-unused", cl::init(false), cl::Hidden, 136 cl::desc("Use this option to turn off/on warnings about function with " 137 "samples but without debug information to use those samples. ")); 138 139 static cl::opt<bool> ProfileSampleAccurate( 140 "profile-sample-accurate", cl::Hidden, cl::init(false), 141 cl::desc("If the sample profile is accurate, we will mark all un-sampled " 142 "callsite and function as having 0 samples. Otherwise, treat " 143 "un-sampled callsites and functions conservatively as unknown. ")); 144 145 static cl::opt<bool> ProfileAccurateForSymsInList( 146 "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore, 147 cl::init(true), 148 cl::desc("For symbols in profile symbol list, regard their profiles to " 149 "be accurate. It may be overriden by profile-sample-accurate. 
")); 150 151 static cl::opt<bool> ProfileMergeInlinee( 152 "sample-profile-merge-inlinee", cl::Hidden, cl::init(true), 153 cl::desc("Merge past inlinee's profile to outline version if sample " 154 "profile loader decided not to inline a call site. It will " 155 "only be enabled when top-down order of profile loading is " 156 "enabled. ")); 157 158 static cl::opt<bool> ProfileTopDownLoad( 159 "sample-profile-top-down-load", cl::Hidden, cl::init(true), 160 cl::desc("Do profile annotation and inlining for functions in top-down " 161 "order of call graph during sample profile loading. It only " 162 "works for new pass manager. ")); 163 164 static cl::opt<bool> ProfileSizeInline( 165 "sample-profile-inline-size", cl::Hidden, cl::init(false), 166 cl::desc("Inline cold call sites in profile loader if it's beneficial " 167 "for code size.")); 168 169 static cl::opt<int> SampleColdCallSiteThreshold( 170 "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45), 171 cl::desc("Threshold for inlining cold callsites")); 172 173 namespace { 174 175 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>; 176 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>; 177 using Edge = std::pair<const BasicBlock *, const BasicBlock *>; 178 using EdgeWeightMap = DenseMap<Edge, uint64_t>; 179 using BlockEdgeMap = 180 DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>; 181 182 class SampleProfileLoader; 183 184 class SampleCoverageTracker { 185 public: 186 SampleCoverageTracker(SampleProfileLoader &SPL) : SPLoader(SPL){}; 187 188 bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset, 189 uint32_t Discriminator, uint64_t Samples); 190 unsigned computeCoverage(unsigned Used, unsigned Total) const; 191 unsigned countUsedRecords(const FunctionSamples *FS, 192 ProfileSummaryInfo *PSI) const; 193 unsigned countBodyRecords(const FunctionSamples *FS, 194 ProfileSummaryInfo *PSI) const; 195 uint64_t getTotalUsedSamples() const { 
return TotalUsedSamples; } 196 uint64_t countBodySamples(const FunctionSamples *FS, 197 ProfileSummaryInfo *PSI) const; 198 199 void clear() { 200 SampleCoverage.clear(); 201 TotalUsedSamples = 0; 202 } 203 204 private: 205 using BodySampleCoverageMap = std::map<LineLocation, unsigned>; 206 using FunctionSamplesCoverageMap = 207 DenseMap<const FunctionSamples *, BodySampleCoverageMap>; 208 209 /// Coverage map for sampling records. 210 /// 211 /// This map keeps a record of sampling records that have been matched to 212 /// an IR instruction. This is used to detect some form of staleness in 213 /// profiles (see flag -sample-profile-check-coverage). 214 /// 215 /// Each entry in the map corresponds to a FunctionSamples instance. This is 216 /// another map that counts how many times the sample record at the 217 /// given location has been used. 218 FunctionSamplesCoverageMap SampleCoverage; 219 220 /// Number of samples used from the profile. 221 /// 222 /// When a sampling record is used for the first time, the samples from 223 /// that record are added to this accumulator. Coverage is later computed 224 /// based on the total number of samples available in this function and 225 /// its callsites. 226 /// 227 /// Note that this accumulator tracks samples used from a single function 228 /// and all the inlined callsites. Strictly, we should have a map of counters 229 /// keyed by FunctionSamples pointers, but these stats are cleared after 230 /// every function, so we just need to keep a single counter. 
231 uint64_t TotalUsedSamples = 0; 232 233 SampleProfileLoader &SPLoader; 234 }; 235 236 class GUIDToFuncNameMapper { 237 public: 238 GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader, 239 DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap) 240 : CurrentReader(Reader), CurrentModule(M), 241 CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) { 242 if (!CurrentReader.useMD5()) 243 return; 244 245 for (const auto &F : CurrentModule) { 246 StringRef OrigName = F.getName(); 247 CurrentGUIDToFuncNameMap.insert( 248 {Function::getGUID(OrigName), OrigName}); 249 250 // Local to global var promotion used by optimization like thinlto 251 // will rename the var and add suffix like ".llvm.xxx" to the 252 // original local name. In sample profile, the suffixes of function 253 // names are all stripped. Since it is possible that the mapper is 254 // built in post-thin-link phase and var promotion has been done, 255 // we need to add the substring of function name without the suffix 256 // into the GUIDToFuncNameMap. 257 StringRef CanonName = FunctionSamples::getCanonicalFnName(F); 258 if (CanonName != OrigName) 259 CurrentGUIDToFuncNameMap.insert( 260 {Function::getGUID(CanonName), CanonName}); 261 } 262 263 // Update GUIDToFuncNameMap for each function including inlinees. 264 SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap); 265 } 266 267 ~GUIDToFuncNameMapper() { 268 if (!CurrentReader.useMD5()) 269 return; 270 271 CurrentGUIDToFuncNameMap.clear(); 272 273 // Reset GUIDToFuncNameMap for of each function as they're no 274 // longer valid at this point. 
275 SetGUIDToFuncNameMapForAll(nullptr); 276 } 277 278 private: 279 void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) { 280 std::queue<FunctionSamples *> FSToUpdate; 281 for (auto &IFS : CurrentReader.getProfiles()) { 282 FSToUpdate.push(&IFS.second); 283 } 284 285 while (!FSToUpdate.empty()) { 286 FunctionSamples *FS = FSToUpdate.front(); 287 FSToUpdate.pop(); 288 FS->GUIDToFuncNameMap = Map; 289 for (const auto &ICS : FS->getCallsiteSamples()) { 290 const FunctionSamplesMap &FSMap = ICS.second; 291 for (auto &IFS : FSMap) { 292 FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second); 293 FSToUpdate.push(&FS); 294 } 295 } 296 } 297 } 298 299 SampleProfileReader &CurrentReader; 300 Module &CurrentModule; 301 DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap; 302 }; 303 304 /// Sample profile pass. 305 /// 306 /// This pass reads profile data from the file specified by 307 /// -sample-profile-file and annotates every affected function with the 308 /// profile information found in that file. 
309 class SampleProfileLoader { 310 public: 311 SampleProfileLoader( 312 StringRef Name, StringRef RemapName, bool IsThinLTOPreLink, 313 std::function<AssumptionCache &(Function &)> GetAssumptionCache, 314 std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo, 315 std::function<const TargetLibraryInfo &(Function &)> GetTLI) 316 : GetAC(std::move(GetAssumptionCache)), 317 GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)), 318 CoverageTracker(*this), Filename(std::string(Name)), 319 RemappingFilename(std::string(RemapName)), 320 IsThinLTOPreLink(IsThinLTOPreLink) {} 321 322 bool doInitialization(Module &M); 323 bool runOnModule(Module &M, ModuleAnalysisManager *AM, 324 ProfileSummaryInfo *_PSI, CallGraph *CG); 325 326 void dump() { Reader->dump(); } 327 328 protected: 329 friend class SampleCoverageTracker; 330 331 bool runOnFunction(Function &F, ModuleAnalysisManager *AM); 332 unsigned getFunctionLoc(Function &F); 333 bool emitAnnotations(Function &F); 334 ErrorOr<uint64_t> getInstWeight(const Instruction &I); 335 ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB); 336 const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const; 337 std::vector<const FunctionSamples *> 338 findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const; 339 mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap; 340 const FunctionSamples *findFunctionSamples(const Instruction &I) const; 341 bool inlineCallInstruction(CallBase &CB); 342 bool inlineHotFunctions(Function &F, 343 DenseSet<GlobalValue::GUID> &InlinedGUIDs); 344 // Inline cold/small functions in addition to hot ones 345 bool shouldInlineColdCallee(CallBase &CallInst); 346 void emitOptimizationRemarksForInlineCandidates( 347 const SmallVectorImpl<CallBase *> &Candidates, const Function &F, 348 bool Hot); 349 void printEdgeWeight(raw_ostream &OS, Edge E); 350 void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const; 351 
void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB); 352 bool computeBlockWeights(Function &F); 353 void findEquivalenceClasses(Function &F); 354 template <bool IsPostDom> 355 void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants, 356 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree); 357 358 void propagateWeights(Function &F); 359 uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge); 360 void buildEdges(Function &F); 361 std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG); 362 bool propagateThroughEdges(Function &F, bool UpdateBlockCount); 363 void computeDominanceAndLoopInfo(Function &F); 364 void clearFunctionData(); 365 bool callsiteIsHot(const FunctionSamples *CallsiteFS, 366 ProfileSummaryInfo *PSI); 367 368 /// Map basic blocks to their computed weights. 369 /// 370 /// The weight of a basic block is defined to be the maximum 371 /// of all the instruction weights in that block. 372 BlockWeightMap BlockWeights; 373 374 /// Map edges to their computed weights. 375 /// 376 /// Edge weights are computed by propagating basic block weights in 377 /// SampleProfile::propagateWeights. 378 EdgeWeightMap EdgeWeights; 379 380 /// Set of visited blocks during propagation. 381 SmallPtrSet<const BasicBlock *, 32> VisitedBlocks; 382 383 /// Set of visited edges during propagation. 384 SmallSet<Edge, 32> VisitedEdges; 385 386 /// Equivalence classes for block weights. 387 /// 388 /// Two blocks BB1 and BB2 are in the same equivalence class if they 389 /// dominate and post-dominate each other, and they are in the same loop 390 /// nest. When this happens, the two blocks are guaranteed to execute 391 /// the same number of times. 392 EquivalenceClassMap EquivalenceClass; 393 394 /// Map from function name to Function *. Used to find the function from 395 /// the function name. 
If the function name contains suffix, additional 396 /// entry is added to map from the stripped name to the function if there 397 /// is one-to-one mapping. 398 StringMap<Function *> SymbolMap; 399 400 /// Dominance, post-dominance and loop information. 401 std::unique_ptr<DominatorTree> DT; 402 std::unique_ptr<PostDominatorTree> PDT; 403 std::unique_ptr<LoopInfo> LI; 404 405 std::function<AssumptionCache &(Function &)> GetAC; 406 std::function<TargetTransformInfo &(Function &)> GetTTI; 407 std::function<const TargetLibraryInfo &(Function &)> GetTLI; 408 409 /// Predecessors for each basic block in the CFG. 410 BlockEdgeMap Predecessors; 411 412 /// Successors for each basic block in the CFG. 413 BlockEdgeMap Successors; 414 415 SampleCoverageTracker CoverageTracker; 416 417 /// Profile reader object. 418 std::unique_ptr<SampleProfileReader> Reader; 419 420 /// Samples collected for the body of this function. 421 FunctionSamples *Samples = nullptr; 422 423 /// Name of the profile file to load. 424 std::string Filename; 425 426 /// Name of the profile remapping file to load. 427 std::string RemappingFilename; 428 429 /// Flag indicating whether the profile input loaded successfully. 430 bool ProfileIsValid = false; 431 432 /// Flag indicating if the pass is invoked in ThinLTO compile phase. 433 /// 434 /// In this phase, in annotation, we should not promote indirect calls. 435 /// Instead, we will mark GUIDs that needs to be annotated to the function. 436 bool IsThinLTOPreLink; 437 438 /// Profile Summary Info computed from sample profile. 439 ProfileSummaryInfo *PSI = nullptr; 440 441 /// Profle Symbol list tells whether a function name appears in the binary 442 /// used to generate the current profile. 443 std::unique_ptr<ProfileSymbolList> PSL; 444 445 /// Total number of samples collected in this profile. 446 /// 447 /// This is the sum of all the samples collected in all the functions executed 448 /// at runtime. 
449 uint64_t TotalCollectedSamples = 0; 450 451 /// Optimization Remark Emitter used to emit diagnostic remarks. 452 OptimizationRemarkEmitter *ORE = nullptr; 453 454 // Information recorded when we declined to inline a call site 455 // because we have determined it is too cold is accumulated for 456 // each callee function. Initially this is just the entry count. 457 struct NotInlinedProfileInfo { 458 uint64_t entryCount; 459 }; 460 DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo; 461 462 // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for 463 // all the function symbols defined or declared in current module. 464 DenseMap<uint64_t, StringRef> GUIDToFuncNameMap; 465 466 // All the Names used in FunctionSamples including outline function 467 // names, inline instance names and call target names. 468 StringSet<> NamesInProfile; 469 470 // For symbol in profile symbol list, whether to regard their profiles 471 // to be accurate. It is mainly decided by existance of profile symbol 472 // list and -profile-accurate-for-symsinlist flag, but it can be 473 // overriden by -profile-sample-accurate or profile-sample-accurate 474 // attribute. 
475 bool ProfAccForSymsInList; 476 }; 477 478 class SampleProfileLoaderLegacyPass : public ModulePass { 479 public: 480 // Class identification, replacement for typeinfo 481 static char ID; 482 483 SampleProfileLoaderLegacyPass(StringRef Name = SampleProfileFile, 484 bool IsThinLTOPreLink = false) 485 : ModulePass(ID), SampleLoader( 486 Name, SampleProfileRemappingFile, IsThinLTOPreLink, 487 [&](Function &F) -> AssumptionCache & { 488 return ACT->getAssumptionCache(F); 489 }, 490 [&](Function &F) -> TargetTransformInfo & { 491 return TTIWP->getTTI(F); 492 }, 493 [&](Function &F) -> TargetLibraryInfo & { 494 return TLIWP->getTLI(F); 495 }) { 496 initializeSampleProfileLoaderLegacyPassPass( 497 *PassRegistry::getPassRegistry()); 498 } 499 500 void dump() { SampleLoader.dump(); } 501 502 bool doInitialization(Module &M) override { 503 return SampleLoader.doInitialization(M); 504 } 505 506 StringRef getPassName() const override { return "Sample profile pass"; } 507 bool runOnModule(Module &M) override; 508 509 void getAnalysisUsage(AnalysisUsage &AU) const override { 510 AU.addRequired<AssumptionCacheTracker>(); 511 AU.addRequired<TargetTransformInfoWrapperPass>(); 512 AU.addRequired<TargetLibraryInfoWrapperPass>(); 513 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 514 } 515 516 private: 517 SampleProfileLoader SampleLoader; 518 AssumptionCacheTracker *ACT = nullptr; 519 TargetTransformInfoWrapperPass *TTIWP = nullptr; 520 TargetLibraryInfoWrapperPass *TLIWP = nullptr; 521 }; 522 523 } // end anonymous namespace 524 525 /// Return true if the given callsite is hot wrt to hot cutoff threshold. 526 /// 527 /// Functions that were inlined in the original binary will be represented 528 /// in the inline stack in the sample profile. If the profile shows that 529 /// the original inline decision was "good" (i.e., the callsite is executed 530 /// frequently), then we will recreate the inline decision and apply the 531 /// profile from the inlined callsite. 
532 /// 533 /// To decide whether an inlined callsite is hot, we compare the callsite 534 /// sample count with the hot cutoff computed by ProfileSummaryInfo, it is 535 /// regarded as hot if the count is above the cutoff value. 536 /// 537 /// When ProfileAccurateForSymsInList is enabled and profile symbol list 538 /// is present, functions in the profile symbol list but without profile will 539 /// be regarded as cold and much less inlining will happen in CGSCC inlining 540 /// pass, so we tend to lower the hot criteria here to allow more early 541 /// inlining to happen for warm callsites and it is helpful for performance. 542 bool SampleProfileLoader::callsiteIsHot(const FunctionSamples *CallsiteFS, 543 ProfileSummaryInfo *PSI) { 544 if (!CallsiteFS) 545 return false; // The callsite was not inlined in the original binary. 546 547 assert(PSI && "PSI is expected to be non null"); 548 uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples(); 549 if (ProfAccForSymsInList) 550 return !PSI->isColdCount(CallsiteTotalSamples); 551 else 552 return PSI->isHotCount(CallsiteTotalSamples); 553 } 554 555 /// Mark as used the sample record for the given function samples at 556 /// (LineOffset, Discriminator). 557 /// 558 /// \returns true if this is the first time we mark the given record. 559 bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS, 560 uint32_t LineOffset, 561 uint32_t Discriminator, 562 uint64_t Samples) { 563 LineLocation Loc(LineOffset, Discriminator); 564 unsigned &Count = SampleCoverage[FS][Loc]; 565 bool FirstTime = (++Count == 1); 566 if (FirstTime) 567 TotalUsedSamples += Samples; 568 return FirstTime; 569 } 570 571 /// Return the number of sample records that were applied from this profile. 572 /// 573 /// This count does not include records from cold inlined callsites. 
574 unsigned 575 SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS, 576 ProfileSummaryInfo *PSI) const { 577 auto I = SampleCoverage.find(FS); 578 579 // The size of the coverage map for FS represents the number of records 580 // that were marked used at least once. 581 unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0; 582 583 // If there are inlined callsites in this function, count the samples found 584 // in the respective bodies. However, do not bother counting callees with 0 585 // total samples, these are callees that were never invoked at runtime. 586 for (const auto &I : FS->getCallsiteSamples()) 587 for (const auto &J : I.second) { 588 const FunctionSamples *CalleeSamples = &J.second; 589 if (SPLoader.callsiteIsHot(CalleeSamples, PSI)) 590 Count += countUsedRecords(CalleeSamples, PSI); 591 } 592 593 return Count; 594 } 595 596 /// Return the number of sample records in the body of this profile. 597 /// 598 /// This count does not include records from cold inlined callsites. 599 unsigned 600 SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS, 601 ProfileSummaryInfo *PSI) const { 602 unsigned Count = FS->getBodySamples().size(); 603 604 // Only count records in hot callsites. 605 for (const auto &I : FS->getCallsiteSamples()) 606 for (const auto &J : I.second) { 607 const FunctionSamples *CalleeSamples = &J.second; 608 if (SPLoader.callsiteIsHot(CalleeSamples, PSI)) 609 Count += countBodyRecords(CalleeSamples, PSI); 610 } 611 612 return Count; 613 } 614 615 /// Return the number of samples collected in the body of this profile. 616 /// 617 /// This count does not include samples from cold inlined callsites. 618 uint64_t 619 SampleCoverageTracker::countBodySamples(const FunctionSamples *FS, 620 ProfileSummaryInfo *PSI) const { 621 uint64_t Total = 0; 622 for (const auto &I : FS->getBodySamples()) 623 Total += I.second.getSamples(); 624 625 // Only count samples in hot callsites. 
626 for (const auto &I : FS->getCallsiteSamples()) 627 for (const auto &J : I.second) { 628 const FunctionSamples *CalleeSamples = &J.second; 629 if (SPLoader.callsiteIsHot(CalleeSamples, PSI)) 630 Total += countBodySamples(CalleeSamples, PSI); 631 } 632 633 return Total; 634 } 635 636 /// Return the fraction of sample records used in this profile. 637 /// 638 /// The returned value is an unsigned integer in the range 0-100 indicating 639 /// the percentage of sample records that were used while applying this 640 /// profile to the associated function. 641 unsigned SampleCoverageTracker::computeCoverage(unsigned Used, 642 unsigned Total) const { 643 assert(Used <= Total && 644 "number of used records cannot exceed the total number of records"); 645 return Total > 0 ? Used * 100 / Total : 100; 646 } 647 648 /// Clear all the per-function data used to load samples and propagate weights. 649 void SampleProfileLoader::clearFunctionData() { 650 BlockWeights.clear(); 651 EdgeWeights.clear(); 652 VisitedBlocks.clear(); 653 VisitedEdges.clear(); 654 EquivalenceClass.clear(); 655 DT = nullptr; 656 PDT = nullptr; 657 LI = nullptr; 658 Predecessors.clear(); 659 Successors.clear(); 660 CoverageTracker.clear(); 661 } 662 663 #ifndef NDEBUG 664 /// Print the weight of edge \p E on stream \p OS. 665 /// 666 /// \param OS Stream to emit the output to. 667 /// \param E Edge to print. 668 void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) { 669 OS << "weight[" << E.first->getName() << "->" << E.second->getName() 670 << "]: " << EdgeWeights[E] << "\n"; 671 } 672 673 /// Print the equivalence class of block \p BB on stream \p OS. 674 /// 675 /// \param OS Stream to emit the output to. 676 /// \param BB Block to print. 677 void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS, 678 const BasicBlock *BB) { 679 const BasicBlock *Equiv = EquivalenceClass[BB]; 680 OS << "equivalence[" << BB->getName() 681 << "]: " << ((Equiv) ? 
EquivalenceClass[BB]->getName() : "NONE") << "\n"; 682 } 683 684 /// Print the weight of block \p BB on stream \p OS. 685 /// 686 /// \param OS Stream to emit the output to. 687 /// \param BB Block to print. 688 void SampleProfileLoader::printBlockWeight(raw_ostream &OS, 689 const BasicBlock *BB) const { 690 const auto &I = BlockWeights.find(BB); 691 uint64_t W = (I == BlockWeights.end() ? 0 : I->second); 692 OS << "weight[" << BB->getName() << "]: " << W << "\n"; 693 } 694 #endif 695 696 /// Get the weight for an instruction. 697 /// 698 /// The "weight" of an instruction \p Inst is the number of samples 699 /// collected on that instruction at runtime. To retrieve it, we 700 /// need to compute the line number of \p Inst relative to the start of its 701 /// function. We use HeaderLineno to compute the offset. We then 702 /// look up the samples collected for \p Inst using BodySamples. 703 /// 704 /// \param Inst Instruction to query. 705 /// 706 /// \returns the weight of \p Inst. 707 ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) { 708 const DebugLoc &DLoc = Inst.getDebugLoc(); 709 if (!DLoc) 710 return std::error_code(); 711 712 const FunctionSamples *FS = findFunctionSamples(Inst); 713 if (!FS) 714 return std::error_code(); 715 716 // Ignore all intrinsics, phinodes and branch instructions. 717 // Branch and phinodes instruction usually contains debug info from sources outside of 718 // the residing basic block, thus we ignore them during annotation. 719 if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst)) 720 return std::error_code(); 721 722 // If a direct call/invoke instruction is inlined in profile 723 // (findCalleeFunctionSamples returns non-empty result), but not inlined here, 724 // it means that the inlined callsite has no sample, thus the call 725 // instruction should have 0 count. 
726 if (auto *CB = dyn_cast<CallBase>(&Inst)) 727 if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB)) 728 return 0; 729 730 const DILocation *DIL = DLoc; 731 uint32_t LineOffset = FunctionSamples::getOffset(DIL); 732 uint32_t Discriminator = DIL->getBaseDiscriminator(); 733 ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator); 734 if (R) { 735 bool FirstMark = 736 CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get()); 737 if (FirstMark) { 738 ORE->emit([&]() { 739 OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst); 740 Remark << "Applied " << ore::NV("NumSamples", *R); 741 Remark << " samples from profile (offset: "; 742 Remark << ore::NV("LineOffset", LineOffset); 743 if (Discriminator) { 744 Remark << "."; 745 Remark << ore::NV("Discriminator", Discriminator); 746 } 747 Remark << ")"; 748 return Remark; 749 }); 750 } 751 LLVM_DEBUG(dbgs() << " " << DLoc.getLine() << "." 752 << DIL->getBaseDiscriminator() << ":" << Inst 753 << " (line offset: " << LineOffset << "." 754 << DIL->getBaseDiscriminator() << " - weight: " << R.get() 755 << ")\n"); 756 } 757 return R; 758 } 759 760 /// Compute the weight of a basic block. 761 /// 762 /// The weight of basic block \p BB is the maximum weight of all the 763 /// instructions in BB. 764 /// 765 /// \param BB The basic block to query. 766 /// 767 /// \returns the weight for \p BB. 768 ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) { 769 uint64_t Max = 0; 770 bool HasWeight = false; 771 for (auto &I : BB->getInstList()) { 772 const ErrorOr<uint64_t> &R = getInstWeight(I); 773 if (R) { 774 Max = std::max(Max, R.get()); 775 HasWeight = true; 776 } 777 } 778 return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code(); 779 } 780 781 /// Compute and store the weights of every basic block. 782 /// 783 /// This populates the BlockWeights map by computing 784 /// the weights of every basic block in the CFG. 
///
/// \param F The function to query.
bool SampleProfileLoader::computeBlockWeights(Function &F) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Block weights\n");
  for (const auto &BB : F) {
    ErrorOr<uint64_t> Weight = getBlockWeight(&BB);
    if (Weight) {
      // Record the weight and mark the block as carrying real profile data.
      BlockWeights[&BB] = Weight.get();
      VisitedBlocks.insert(&BB);
      Changed = true;
    }
    LLVM_DEBUG(printBlockWeight(dbgs(), &BB));
  }

  return Changed;
}

/// Get the FunctionSamples for a call instruction.
///
/// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
/// instance in which that call instruction is calling to. It contains
/// all samples that reside in the inlined instance. We first find the
/// inlined instance in which the call instruction is from, then we
/// traverse its children to find the callsite with the matching
/// location.
///
/// \param Inst Call/Invoke instruction to query.
///
/// \returns The FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  if (!DIL) {
    return nullptr;
  }

  // For indirect calls the callee is unknown, so CalleeName stays empty and
  // the lookup below matches on location only.
  StringRef CalleeName;
  if (Function *Callee = Inst.getCalledFunction())
    CalleeName = Callee->getName();

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return nullptr;

  return FS->findFunctionSamplesAt(LineLocation(FunctionSamples::getOffset(DIL),
                                                DIL->getBaseDiscriminator()),
                                   CalleeName);
}

/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted by the total number of samples. Stores
/// the total call count of the indirect call in \p Sum.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
    const Instruction &Inst, uint64_t &Sum) const {
  const DILocation *DIL = Inst.getDebugLoc();
  std::vector<const FunctionSamples *> R;

  if (!DIL) {
    return R;
  }

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return R;

  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator = DIL->getBaseDiscriminator();

  // Sum accumulates the explicit call-target counts recorded at this
  // location, plus (below) the entry samples of any inlined instances.
  auto T = FS->findCallTargetMapAt(LineOffset, Discriminator);
  Sum = 0;
  if (T)
    for (const auto &T_C : T.get())
      Sum += T_C.second;
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(LineLocation(
          FunctionSamples::getOffset(DIL), DIL->getBaseDiscriminator()))) {
    if (M->empty())
      return R;
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getEntrySamples();
      R.push_back(&NameFS.second);
    }
    // Sort hottest-first; ties broken by GUID to keep the order
    // deterministic across runs.
    llvm::sort(R, [](const FunctionSamples *L, const FunctionSamples *R) {
      if (L->getEntrySamples() != R->getEntrySamples())
        return L->getEntrySamples() > R->getEntrySamples();
      return FunctionSamples::getGUID(L->getName()) <
             FunctionSamples::getGUID(R->getName());
    });
  }
  return R;
}

/// Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// in which that instruction is coming from. We traverse the inline stack
/// of that instruction, and match it with the tree nodes in the profile.
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  // Without debug location, attribute the instruction to the outermost
  // function profile.
  if (!DIL)
    return Samples;

  // Cache per-DILocation lookups; the same location is queried repeatedly.
  auto it = DILocation2SampleMap.try_emplace(DIL, nullptr);
  if (it.second)
    it.first->second = Samples->findFunctionSamples(DIL);
  return it.first->second;
}

/// Inline the direct call \p CB if it is legal to do so.
///
/// \returns true if the callee was inlined at this callsite.
bool SampleProfileLoader::inlineCallInstruction(CallBase &CB) {
  Function *CalledFunction = CB.getCalledFunction();
  assert(CalledFunction);
  DebugLoc DLoc = CB.getDebugLoc();
  BasicBlock *BB = CB.getParent();
  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Checks if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. Need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when cost exceeds threshold without checking all IRs in the callee.
  // The actual cost does not matter because we only check isNever() to
  // see if it is legal to inline the callsite.
  InlineCost Cost =
      getInlineCost(CB, Params, GetTTI(*CalledFunction), GetAC, GetTLI);
  if (Cost.isNever()) {
    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
              << "incompatible inlining");
    return false;
  }
  InlineFunctionInfo IFI(nullptr, GetAC);
  if (InlineFunction(CB, IFI).isSuccess()) {
    // The call to InlineFunction erases I, so we can't pass it here.
    emitInlinedInto(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(), Cost,
                    true, CSINLINE_DEBUG);
    return true;
  }
  return false;
}

/// Decide whether a cold callsite is worth inlining for size.
///
/// Only active when ProfileSizeInline is enabled; a cold direct call is
/// inlined when its inline cost is within SampleColdCallSiteThreshold.
bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
  if (!ProfileSizeInline)
    return false;

  Function *Callee = CallInst.getCalledFunction();
  if (Callee == nullptr)
    return false;

  InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
                                  GetAC, GetTLI);

  return Cost.getCost() <= SampleColdCallSiteThreshold;
}

/// Emit an analysis remark for each candidate whose callee is known,
/// noting that an inlining seen in the profiled binary is being reattempted.
void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
    const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
    bool Hot) {
  for (auto I : Candidates) {
    Function *CalledFunction = I->getCalledFunction();
    if (CalledFunction) {
      ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
                                           I->getDebugLoc(), I->getParent())
                << "previous inlining reattempted for "
                << (Hot ? "hotness: '" : "size: '")
                << ore::NV("Callee", CalledFunction) << "' into '"
                << ore::NV("Caller", &F) << "'");
    }
  }
}

/// Iteratively inline hot callsites of a function.
///
/// Iteratively traverse all callsites of the function \p F, and find if
/// the corresponding inlined instance exists and is hot in profile. If
/// it is hot enough, inline the callsites and adds new callsites of the
/// callee into the caller. If the call is an indirect call, first promote
/// it to direct call. Each indirect call is limited with a single target.
///
/// \param F function to perform iterative inlining.
/// \param InlinedGUIDs a set to be updated to include all GUIDs that are
/// inlined in the profiled binary.
///
/// \returns True if any inlining happened.
bool SampleProfileLoader::inlineHotFunctions(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  // Track indirect calls that were already promoted so we never promote the
  // same callsite twice across iterations.
  DenseSet<Instruction *> PromotedInsns;

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  // Callsites seen with samples but (so far) not inlined; entries are erased
  // as callsites get inlined, and the remainder is reported after the loop.
  DenseMap<CallBase *, const FunctionSamples *> localNotInlinedCallSites;
  bool Changed = false;
  // Iterate to a fixed point: inlining can expose new callsites from the
  // callee's body, which are re-examined on the next pass.
  while (true) {
    bool LocalChanged = false;
    SmallVector<CallBase *, 10> CIS;
    for (auto &BB : F) {
      bool Hot = false;
      SmallVector<CallBase *, 10> AllCandidates;
      SmallVector<CallBase *, 10> ColdCandidates;
      for (auto &I : BB.getInstList()) {
        const FunctionSamples *FS = nullptr;
        if (auto *CB = dyn_cast<CallBase>(&I)) {
          if (!isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(*CB))) {
            assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
                   "GUIDToFuncNameMap has to be populated");
            AllCandidates.push_back(CB);
            if (FS->getEntrySamples() > 0)
              localNotInlinedCallSites.try_emplace(CB, FS);
            if (callsiteIsHot(FS, PSI))
              Hot = true;
            else if (shouldInlineColdCallee(*CB))
              ColdCandidates.push_back(CB);
          }
        }
      }
      // If the block has any hot callsite, consider every candidate in it;
      // otherwise only the size-driven cold candidates.
      if (Hot) {
        CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
        emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
      } else {
        CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
        emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
      }
    }
    for (CallBase *I : CIS) {
      Function *CalledFunction = I->getCalledFunction();
      // Do not inline recursive calls.
      if (CalledFunction == &F)
        continue;
      if (I->isIndirectCall()) {
        if (PromotedInsns.count(I))
          continue;
        uint64_t Sum;
        for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
          if (IsThinLTOPreLink) {
            // In the pre-link step, only record the GUIDs for ThinLink
            // liveness analysis; no promotion/inlining is performed.
            FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                     PSI->getOrCompHotCountThreshold());
            continue;
          }
          auto CalleeFunctionName = FS->getFuncName();
          // If it is a recursive call, we do not inline it as it could bloat
          // the code exponentially. There is a way to better handle this, e.g.
          // clone the caller first, and inline the cloned caller if it is
          // recursive. As llvm does not inline recursive calls, we will
          // simply ignore it instead of handling it explicitly.
          if (CalleeFunctionName == F.getName())
            continue;

          if (!callsiteIsHot(FS, PSI))
            continue;

          const char *Reason = "Callee function not available";
          auto R = SymbolMap.find(CalleeFunctionName);
          if (R != SymbolMap.end() && R->getValue() &&
              !R->getValue()->isDeclaration() &&
              R->getValue()->getSubprogram() &&
              R->getValue()->hasFnAttribute("use-sample-profile") &&
              isLegalToPromote(*I, R->getValue(), &Reason)) {
            uint64_t C = FS->getEntrySamples();
            auto &DI =
                pgo::promoteIndirectCall(*I, R->getValue(), C, Sum, false, ORE);
            // The promoted target accounts for C of the indirect call's
            // samples; the rest stays with the fallback indirect call.
            Sum -= C;
            PromotedInsns.insert(I);
            // If profile mismatches, we should not attempt to inline DI.
1057 if ((isa<CallInst>(DI) || isa<InvokeInst>(DI)) && 1058 inlineCallInstruction(cast<CallBase>(DI))) { 1059 localNotInlinedCallSites.erase(I); 1060 LocalChanged = true; 1061 ++NumCSInlined; 1062 } 1063 } else { 1064 LLVM_DEBUG(dbgs() 1065 << "\nFailed to promote indirect call to " 1066 << CalleeFunctionName << " because " << Reason << "\n"); 1067 } 1068 } 1069 } else if (CalledFunction && CalledFunction->getSubprogram() && 1070 !CalledFunction->isDeclaration()) { 1071 if (inlineCallInstruction(*I)) { 1072 localNotInlinedCallSites.erase(I); 1073 LocalChanged = true; 1074 ++NumCSInlined; 1075 } 1076 } else if (IsThinLTOPreLink) { 1077 findCalleeFunctionSamples(*I)->findInlinedFunctions( 1078 InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold()); 1079 } 1080 } 1081 if (LocalChanged) { 1082 Changed = true; 1083 } else { 1084 break; 1085 } 1086 } 1087 1088 // Accumulate not inlined callsite information into notInlinedSamples 1089 for (const auto &Pair : localNotInlinedCallSites) { 1090 CallBase *I = Pair.getFirst(); 1091 Function *Callee = I->getCalledFunction(); 1092 if (!Callee || Callee->isDeclaration()) 1093 continue; 1094 1095 ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline", 1096 I->getDebugLoc(), I->getParent()) 1097 << "previous inlining not repeated: '" 1098 << ore::NV("Callee", Callee) << "' into '" 1099 << ore::NV("Caller", &F) << "'"); 1100 1101 ++NumCSNotInlined; 1102 const FunctionSamples *FS = Pair.getSecond(); 1103 if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) { 1104 continue; 1105 } 1106 1107 if (ProfileMergeInlinee) { 1108 // A function call can be replicated by optimizations like callsite 1109 // splitting or jump threading and the replicates end up sharing the 1110 // sample nested callee profile instead of slicing the original inlinee's 1111 // profile. We want to do merge exactly once by filtering out callee 1112 // profiles with a non-zero head sample count. 
      if (FS->getHeadSamples() == 0) {
        // Use entry samples as head samples during the merge, as inlinees
        // don't have head samples.
        const_cast<FunctionSamples *>(FS)->addHeadSamples(
            FS->getEntrySamples());

        // Note that we have to do the merge right after processing function.
        // This allows OutlineFS's profile to be used for annotation during
        // top-down processing of functions' annotation.
        FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
        OutlineFS->merge(*FS);
      } else
        assert(FS->getHeadSamples() == FS->getEntrySamples() &&
               "Expect same head and entry sample counts for profiles already "
               "merged.");
    } else {
      // Without inlinee merging, just accumulate the entry count per callee.
      auto pair =
          notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
      pair.first->second.entryCount += FS->getEntrySamples();
    }
  }
  return Changed;
}

/// Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
/// the following holds:
///
/// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
///    is a descendant of \p BB1 in the dominator tree, then BB2 should
///    dominate BB1 in the post-dominator tree.
///
/// 2- Both BB2 and \p BB1 must be in the same loop.
///
/// For every block BB2 that meets those two requirements, we set BB2's
/// equivalence class to \p BB1.
///
/// \param BB1  Block to check.
/// \param Descendants  Descendants of \p BB1 in either the dom or pdom tree.
/// \param DomTree  Opposite dominator tree. If \p Descendants is filled
///                 with blocks from \p BB1's dominator tree, then
///                 this is the post-dominator tree, and vice versa.
template <bool IsPostDom>
void SampleProfileLoader::findEquivalencesFor(
    BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
    DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) {
  const BasicBlock *EC = EquivalenceClass[BB1];
  uint64_t Weight = BlockWeights[EC];
  for (const auto *BB2 : Descendants) {
    bool IsDomParent = DomTree->dominates(BB2, BB1);
    bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
    if (BB1 != BB2 && IsDomParent && IsInSameLoop) {
      EquivalenceClass[BB2] = EC;
      // If BB2 is visited, then the entire EC should be marked as visited.
      if (VisitedBlocks.count(BB2)) {
        VisitedBlocks.insert(EC);
      }

      // If BB2 is heavier than BB1, make BB2 have the same weight
      // as BB1.
      //
      // Note that we don't worry about the opposite situation here
      // (when BB2 is lighter than BB1). We will deal with this
      // during the propagation phase. Right now, we just want to
      // make sure that BB1 has the largest weight of all the
      // members of its equivalence set.
      Weight = std::max(Weight, BlockWeights[BB2]);
    }
  }
  // The entry block's class gets the head samples (+1 so it is never zero);
  // every other class gets the maximum member weight computed above.
  if (EC == &EC->getParent()->getEntryBlock()) {
    BlockWeights[EC] = Samples->getHeadSamples() + 1;
  } else {
    BlockWeights[EC] = Weight;
  }
}

/// Find equivalence classes.
///
/// Since samples may be missing from blocks, we can fill in the gaps by setting
/// the weights of all the blocks in the same equivalence class to the same
/// weight. To compute the concept of equivalence, we use dominance and loop
/// information. Two blocks B1 and B2 are in the same equivalence class if B1
/// dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// \param F The function to query.
void SampleProfileLoader::findEquivalenceClasses(Function &F) {
  SmallVector<BasicBlock *, 8> DominatedBBs;
  LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
  // Find equivalence sets based on dominance and post-dominance information.
  for (auto &BB : F) {
    BasicBlock *BB1 = &BB;

    // Compute BB1's equivalence class once.
    if (EquivalenceClass.count(BB1)) {
      LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
      continue;
    }

    // By default, blocks are in their own equivalence class.
    EquivalenceClass[BB1] = BB1;

    // Traverse all the blocks dominated by BB1. We are looking for
    // every basic block BB2 such that:
    //
    // 1- BB1 dominates BB2.
    // 2- BB2 post-dominates BB1.
    // 3- BB1 and BB2 are in the same loop nest.
    //
    // If all those conditions hold, it means that BB2 is executed
    // as many times as BB1, so they are placed in the same equivalence
    // class by making BB2's equivalence class be BB1.
    DominatedBBs.clear();
    DT->getDescendants(BB1, DominatedBBs);
    findEquivalencesFor(BB1, DominatedBBs, PDT.get());

    LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
  }

  // Assign weights to equivalence classes.
  //
  // All the basic blocks in the same equivalence class will execute
  // the same number of times. Since we know that the head block in
  // each equivalence class has the largest weight, assign that weight
  // to all the blocks in that equivalence class.
  LLVM_DEBUG(
      dbgs() << "\nAssign the same weight to all blocks in the same class\n");
  for (auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EquivBB = EquivalenceClass[BB];
    if (BB != EquivBB)
      BlockWeights[BB] = BlockWeights[EquivBB];
    LLVM_DEBUG(printBlockWeight(dbgs(), BB));
  }
}

/// Visit the given edge to decide if it has a valid weight.
///
/// If \p E has not been visited before, we copy to \p UnknownEdge
/// and increment the count of unknown edges.
///
/// \param E  Edge to visit.
/// \param NumUnknownEdges  Current number of unknown edges.
/// \param UnknownEdge  Set if E has not been visited before.
///
/// \returns E's weight, if known. Otherwise, return 0.
uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges,
                                        Edge *UnknownEdge) {
  if (!VisitedEdges.count(E)) {
    (*NumUnknownEdges)++;
    *UnknownEdge = E;
    return 0;
  }

  return EdgeWeights[E];
}

/// Propagate weights through incoming/outgoing edges.
///
/// If the weight of a basic block is known, and there is only one edge
/// with an unknown weight, we can calculate the weight of that edge.
///
/// Similarly, if all the edges have a known count, we can calculate the
/// count of the basic block, if needed.
///
/// \param F  Function to process.
/// \param UpdateBlockCount  Whether we should update basic block counts that
///                          has already been annotated.
///
/// \returns True if new weights were assigned to edges or blocks.
bool SampleProfileLoader::propagateThroughEdges(Function &F,
                                                bool UpdateBlockCount) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
  for (const auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EC = EquivalenceClass[BB];

    // Visit all the predecessor and successor edges to determine
    // which ones have a weight assigned already. Note that it doesn't
    // matter that we only keep track of a single unknown edge. The
    // only case we are interested in handling is when only a single
    // edge is unknown (see setEdgeOrBlockWeight).
    for (unsigned i = 0; i < 2; i++) {
      uint64_t TotalWeight = 0;
      unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
      Edge UnknownEdge, SelfReferentialEdge, SingleEdge;

      if (i == 0) {
        // First, visit all predecessor edges.
        NumTotalEdges = Predecessors[BB].size();
        for (auto *Pred : Predecessors[BB]) {
          Edge E = std::make_pair(Pred, BB);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
          if (E.first == E.second)
            SelfReferentialEdge = E;
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(Predecessors[BB][0], BB);
        }
      } else {
        // On the second round, visit all successor edges.
        NumTotalEdges = Successors[BB].size();
        for (auto *Succ : Successors[BB]) {
          Edge E = std::make_pair(BB, Succ);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(BB, Successors[BB][0]);
        }
      }

      // After visiting all the edges, there are three cases that we
      // can handle immediately:
      //
      // - All the edge weights are known (i.e., NumUnknownEdges == 0).
      //   In this case, we simply check that the sum of all the edges
      //   is the same as BB's weight. If not, we change BB's weight
      //   to match. Additionally, if BB had not been visited before,
      //   we mark it visited.
      //
      // - Only one edge is unknown and BB has already been visited.
      //   In this case, we can compute the weight of the edge by
      //   subtracting the total block weight from all the known
      //   edge weights. If the edges weight more than BB, then the
      //   edge of the last remaining edge is set to zero.
      //
      // - There exists a self-referential edge and the weight of BB is
      //   known. In this case, this edge can be based on BB's weight.
      //   We add up all the other known edges and set the weight on
      //   the self-referential edge as we did in the previous case.
      //
      // In any other case, we must continue iterating. Eventually,
      // all edges will get a weight, or iteration will stop when
      // it reaches SampleProfileMaxPropagateIterations.
      if (NumUnknownEdges <= 1) {
        uint64_t &BBWeight = BlockWeights[EC];
        if (NumUnknownEdges == 0) {
          if (!VisitedBlocks.count(EC)) {
            // If we already know the weight of all edges, the weight of the
            // basic block can be computed. It should be no larger than the sum
            // of all edge weights.
            if (TotalWeight > BBWeight) {
              BBWeight = TotalWeight;
              Changed = true;
              LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
                                << " known. Set weight for block: ";
                         printBlockWeight(dbgs(), BB););
            }
          } else if (NumTotalEdges == 1 &&
                     EdgeWeights[SingleEdge] < BlockWeights[EC]) {
            // If there is only one edge for the visited basic block, use the
            // block weight to adjust edge weight if edge weight is smaller.
            EdgeWeights[SingleEdge] = BlockWeights[EC];
            Changed = true;
          }
        } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
          // If there is a single unknown edge and the block has been
          // visited, then we can compute E's weight.
          if (BBWeight >= TotalWeight)
            EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
          else
            EdgeWeights[UnknownEdge] = 0;
          const BasicBlock *OtherEC;
          if (i == 0)
            OtherEC = EquivalenceClass[UnknownEdge.first];
          else
            OtherEC = EquivalenceClass[UnknownEdge.second];
          // Edge weights should never exceed the BB weights it connects.
          if (VisitedBlocks.count(OtherEC) &&
              EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
            EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
          VisitedEdges.insert(UnknownEdge);
          Changed = true;
          LLVM_DEBUG(dbgs() << "Set weight for edge: ";
                     printEdgeWeight(dbgs(), UnknownEdge));
        }
      } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
        // If a block Weights 0, all its in/out edges should weight 0.
        if (i == 0) {
          for (auto *Pred : Predecessors[BB]) {
            Edge E = std::make_pair(Pred, BB);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        } else {
          for (auto *Succ : Successors[BB]) {
            Edge E = std::make_pair(BB, Succ);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        }
      } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
        uint64_t &BBWeight = BlockWeights[BB];
        // We have a self-referential edge and the weight of BB is known.
        if (BBWeight >= TotalWeight)
          EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
        else
          EdgeWeights[SelfReferentialEdge] = 0;
        VisitedEdges.insert(SelfReferentialEdge);
        Changed = true;
        LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
                   printEdgeWeight(dbgs(), SelfReferentialEdge));
      }
      // Only done on the final propagation pass (UpdateBlockCount == true):
      // adopt the edge total as the block count for still-unvisited blocks.
      if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
        BlockWeights[EC] = TotalWeight;
        VisitedBlocks.insert(EC);
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Build in/out edge lists for each basic block in the CFG.
///
/// We are interested in unique edges. If a block B1 has multiple
/// edges to another block B2, we only add a single B1->B2 edge.
void SampleProfileLoader::buildEdges(Function &F) {
  for (auto &BI : F) {
    BasicBlock *B1 = &BI;

    // Add predecessors for B1.
    SmallPtrSet<BasicBlock *, 16> Visited;
    if (!Predecessors[B1].empty())
      llvm_unreachable("Found a stale predecessors list in a basic block.");
    for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
      BasicBlock *B2 = *PI;
      // Visited de-duplicates multiple CFG edges between the same pair.
      if (Visited.insert(B2).second)
        Predecessors[B1].push_back(B2);
    }

    // Add successors for B1.
    Visited.clear();
    if (!Successors[B1].empty())
      llvm_unreachable("Found a stale successors list in a basic block.");
    for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
      BasicBlock *B2 = *SI;
      if (Visited.insert(B2).second)
        Successors[B1].push_back(B2);
    }
  }
}

/// Returns the sorted CallTargetMap \p M by count in descending order.
static SmallVector<InstrProfValueData, 2>
GetSortedValueDataFromCallTargets(const SampleRecord::CallTargetMap &M) {
  SmallVector<InstrProfValueData, 2> R;
  for (const auto &I : SampleRecord::SortCallTargets(M)) {
    R.emplace_back(
        InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
  }
  return R;
}

/// Propagate weights into edges
///
/// The following rules are applied to every block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
///   of that edge is the weight of the block.
///
/// - If all incoming or outgoing edges are known except one, and the
///   weight of the block is already known, the weight of the unknown
///   edge will be the weight of the block minus the sum of all the known
///   edges. If the sum of all the known edges is larger than BB's weight,
///   we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
///   known, the weight for that edge is set to the weight of the block
///   minus the weight of the other incoming edges to that block (if
///   known).
void SampleProfileLoader::propagateWeights(Function &F) {
  bool Changed = true;
  unsigned I = 0;

  // If BB weight is larger than its corresponding loop's header BB weight,
  // use the BB weight to replace the loop header BB weight.
  for (auto &BI : F) {
    BasicBlock *BB = &BI;
    Loop *L = LI->getLoopFor(BB);
    if (!L) {
      continue;
    }
    BasicBlock *Header = L->getHeader();
    if (Header && BlockWeights[BB] > BlockWeights[Header]) {
      BlockWeights[Header] = BlockWeights[BB];
    }
  }

  // Before propagation starts, build, for each block, a list of
  // unique predecessors and successors. This is necessary to handle
  // identical edges in multiway branches. Since we visit all blocks and all
  // edges of the CFG, it is cleaner to build these lists once at the start
  // of the pass.
  buildEdges(F);

  // Propagate until we converge or we go past the iteration limit.
  // Note: the shared counter I caps the total iterations across all three
  // propagation phases below.
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The first propagation propagates BB counts from annotated BBs to unknown
  // BBs. The 2nd propagation pass resets edges weights, and use all BB weights
  // to propagate edge weights.
  VisitedEdges.clear();
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The 3rd propagation pass allows adjust annotated BB weights that are
  // obviously wrong.
1530 Changed = true; 1531 while (Changed && I++ < SampleProfileMaxPropagateIterations) { 1532 Changed = propagateThroughEdges(F, true); 1533 } 1534 1535 // Generate MD_prof metadata for every branch instruction using the 1536 // edge weights computed during propagation. 1537 LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n"); 1538 LLVMContext &Ctx = F.getContext(); 1539 MDBuilder MDB(Ctx); 1540 for (auto &BI : F) { 1541 BasicBlock *BB = &BI; 1542 1543 if (BlockWeights[BB]) { 1544 for (auto &I : BB->getInstList()) { 1545 if (!isa<CallInst>(I) && !isa<InvokeInst>(I)) 1546 continue; 1547 if (!cast<CallBase>(I).getCalledFunction()) { 1548 const DebugLoc &DLoc = I.getDebugLoc(); 1549 if (!DLoc) 1550 continue; 1551 const DILocation *DIL = DLoc; 1552 uint32_t LineOffset = FunctionSamples::getOffset(DIL); 1553 uint32_t Discriminator = DIL->getBaseDiscriminator(); 1554 1555 const FunctionSamples *FS = findFunctionSamples(I); 1556 if (!FS) 1557 continue; 1558 auto T = FS->findCallTargetMapAt(LineOffset, Discriminator); 1559 if (!T || T.get().empty()) 1560 continue; 1561 SmallVector<InstrProfValueData, 2> SortedCallTargets = 1562 GetSortedValueDataFromCallTargets(T.get()); 1563 uint64_t Sum; 1564 findIndirectCallFunctionSamples(I, Sum); 1565 annotateValueSite(*I.getParent()->getParent()->getParent(), I, 1566 SortedCallTargets, Sum, IPVK_IndirectCallTarget, 1567 SortedCallTargets.size()); 1568 } else if (!isa<IntrinsicInst>(&I)) { 1569 I.setMetadata(LLVMContext::MD_prof, 1570 MDB.createBranchWeights( 1571 {static_cast<uint32_t>(BlockWeights[BB])})); 1572 } 1573 } 1574 } 1575 Instruction *TI = BB->getTerminator(); 1576 if (TI->getNumSuccessors() == 1) 1577 continue; 1578 if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI)) 1579 continue; 1580 1581 DebugLoc BranchLoc = TI->getDebugLoc(); 1582 LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line " 1583 << ((BranchLoc) ? 
Twine(BranchLoc.getLine()) 1584 : Twine("<UNKNOWN LOCATION>")) 1585 << ".\n"); 1586 SmallVector<uint32_t, 4> Weights; 1587 uint32_t MaxWeight = 0; 1588 Instruction *MaxDestInst; 1589 for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) { 1590 BasicBlock *Succ = TI->getSuccessor(I); 1591 Edge E = std::make_pair(BB, Succ); 1592 uint64_t Weight = EdgeWeights[E]; 1593 LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E)); 1594 // Use uint32_t saturated arithmetic to adjust the incoming weights, 1595 // if needed. Sample counts in profiles are 64-bit unsigned values, 1596 // but internally branch weights are expressed as 32-bit values. 1597 if (Weight > std::numeric_limits<uint32_t>::max()) { 1598 LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)"); 1599 Weight = std::numeric_limits<uint32_t>::max(); 1600 } 1601 // Weight is added by one to avoid propagation errors introduced by 1602 // 0 weights. 1603 Weights.push_back(static_cast<uint32_t>(Weight + 1)); 1604 if (Weight != 0) { 1605 if (Weight > MaxWeight) { 1606 MaxWeight = Weight; 1607 MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime(); 1608 } 1609 } 1610 } 1611 1612 misexpect::verifyMisExpect(TI, Weights, TI->getContext()); 1613 1614 uint64_t TempWeight; 1615 // Only set weights if there is at least one non-zero weight. 1616 // In any other case, let the analyzer set weights. 1617 // Do not set weights if the weights are present. In ThinLTO, the profile 1618 // annotation is done twice. If the first annotation already set the 1619 // weights, the second pass does not need to set it. 1620 if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) { 1621 LLVM_DEBUG(dbgs() << "SUCCESS. 
Found non-zero weights.\n"); 1622 TI->setMetadata(LLVMContext::MD_prof, 1623 MDB.createBranchWeights(Weights)); 1624 ORE->emit([&]() { 1625 return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst) 1626 << "most popular destination for conditional branches at " 1627 << ore::NV("CondBranchesLoc", BranchLoc); 1628 }); 1629 } else { 1630 LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n"); 1631 } 1632 } 1633 } 1634 1635 /// Get the line number for the function header. 1636 /// 1637 /// This looks up function \p F in the current compilation unit and 1638 /// retrieves the line number where the function is defined. This is 1639 /// line 0 for all the samples read from the profile file. Every line 1640 /// number is relative to this line. 1641 /// 1642 /// \param F Function object to query. 1643 /// 1644 /// \returns the line number where \p F is defined. If it returns 0, 1645 /// it means that there is no debug information available for \p F. 1646 unsigned SampleProfileLoader::getFunctionLoc(Function &F) { 1647 if (DISubprogram *S = F.getSubprogram()) 1648 return S->getLine(); 1649 1650 if (NoWarnSampleUnused) 1651 return 0; 1652 1653 // If the start of \p F is missing, emit a diagnostic to inform the user 1654 // about the missed opportunity. 1655 F.getContext().diagnose(DiagnosticInfoSampleProfile( 1656 "No debug information found in function " + F.getName() + 1657 ": Function profile not used", 1658 DS_Warning)); 1659 return 0; 1660 } 1661 1662 void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) { 1663 DT.reset(new DominatorTree); 1664 DT->recalculate(F); 1665 1666 PDT.reset(new PostDominatorTree(F)); 1667 1668 LI.reset(new LoopInfo); 1669 LI->analyze(*DT); 1670 } 1671 1672 /// Generate branch weight metadata for all branches in \p F. 1673 /// 1674 /// Branch weights are computed out of instruction samples using a 1675 /// propagation heuristic. Propagation proceeds in 3 phases: 1676 /// 1677 /// 1- Assignment of block weights. 
///    All the basic blocks in the function are initially assigned the same
///    weight as their most frequently executed instruction.
///
/// 2- Creation of equivalence classes. Since samples may be missing from
///    blocks, we can fill in the gaps by setting the weights of all the
///    blocks in the same equivalence class to the same weight. To compute
///    the concept of equivalence, we use dominance and loop information.
///    Two blocks B1 and B2 are in the same equivalence class if B1
///    dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// 3- Propagation of block weights into edges. This uses a simple
///    propagation heuristic. The following rules are applied to every
///    block BB in the CFG:
///
///    - If BB has a single predecessor/successor, then the weight
///      of that edge is the weight of the block.
///
///    - If all the edges are known except one, and the weight of the
///      block is already known, the weight of the unknown edge will
///      be the weight of the block minus the sum of all the known
///      edges. If the sum of all the known edges is larger than BB's weight,
///      we set the unknown edge weight to zero.
///
///    - If there is a self-referential edge, and the weight of the block is
///      known, the weight for that edge is set to the weight of the block
///      minus the weight of the other incoming edges to that block (if
///      known).
///
/// Since this propagation is not guaranteed to finalize for every CFG, we
/// only allow it to proceed for a limited number of iterations (controlled
/// by -sample-profile-max-propagate-iterations).
///
/// FIXME: Try to replace this propagation heuristic with a scheme
/// that is guaranteed to finalize. A work-list approach similar to
/// the standard value propagation algorithm used by SSA-CCP might
/// work here.
///
/// Once all the branch weights are computed, we emit the MD_prof
/// metadata on BB using the computed values for each of its branches.
///
/// \param F The function to query.
///
/// \returns true if \p F was modified. Returns false, otherwise.
bool SampleProfileLoader::emitAnnotations(Function &F) {
  bool Changed = false;

  // Without a known header line no sample line offsets can be mapped.
  if (getFunctionLoc(F) == 0)
    return false;

  LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
                    << F.getName() << ": " << getFunctionLoc(F) << "\n");

  DenseSet<GlobalValue::GUID> InlinedGUIDs;
  Changed |= inlineHotFunctions(F, InlinedGUIDs);

  // Compute basic block weights.
  Changed |= computeBlockWeights(F);

  if (Changed) {
    // Add an entry count to the function using the samples gathered at the
    // function entry.
    // Sets the GUIDs that are inlined in the profiled binary. This is used
    // for ThinLink to make correct liveness analysis, and also make the IR
    // match the profiled binary before annotation.
    F.setEntryCount(
        ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
        &InlinedGUIDs);

    // Compute dominance and loop info needed for propagation.
    computeDominanceAndLoopInfo(F);

    // Find equivalence classes.
    findEquivalenceClasses(F);

    // Propagate weights to all edges.
    propagateWeights(F);
  }

  // If coverage checking was requested, compute it now.
  if (SampleProfileRecordCoverage) {
    unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
    unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileRecordCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile records (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }

  if (SampleProfileSampleCoverage) {
    uint64_t Used = CoverageTracker.getTotalUsedSamples();
    uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileSampleCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }
  return Changed;
}

char SampleProfileLoaderLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
                      "Sample Profile loader", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
                    "Sample Profile loader", false, false)

/// Build the order in which functions will be annotated.
///
/// When top-down loading is enabled and a call graph is available, SCCs are
/// gathered bottom-up and the list is reversed, so callers come before
/// callees; otherwise functions are returned in module order. Only defined
/// functions carrying the "use-sample-profile" attribute are included.
std::vector<Function *>
SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
  std::vector<Function *> FunctionOrderList;
  FunctionOrderList.reserve(M.size());

  if (!ProfileTopDownLoad || CG == nullptr) {
    if (ProfileMergeInlinee) {
      // Disable ProfileMergeInlinee if profile is not loaded in top down order,
      // because the profile for a function may be used for the profile
      // annotation of its outline copy before the profile merging of its
      // non-inlined inline instances, and that is not the way how
      // ProfileMergeInlinee is supposed to work.
      ProfileMergeInlinee = false;
    }

    for (Function &F : M)
      if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
        FunctionOrderList.push_back(&F);
    return FunctionOrderList;
  }

  assert(&CG->getModule() == &M);
  scc_iterator<CallGraph *> CGI = scc_begin(CG);
  while (!CGI.isAtEnd()) {
    for (CallGraphNode *node : *CGI) {
      auto F = node->getFunction();
      if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
        FunctionOrderList.push_back(F);
    }
    ++CGI;
  }

  // scc_iterator visits callees first; reverse to get a top-down order.
  std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
  return FunctionOrderList;
}

/// Open and read the sample profile (and optional remapping file).
///
/// \returns false (after emitting a diagnostic) if the profile file cannot
/// be opened; true otherwise. Read errors are recorded in ProfileIsValid
/// rather than failing initialization.
bool SampleProfileLoader::doInitialization(Module &M) {
  auto &Ctx = M.getContext();

  std::unique_ptr<SampleProfileReaderItaniumRemapper> RemapReader;
  auto ReaderOrErr =
      SampleProfileReader::create(Filename, Ctx, RemappingFilename);
  if (std::error_code EC = ReaderOrErr.getError()) {
    std::string Msg = "Could not open profile: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
    return false;
  }
  Reader = std::move(ReaderOrErr.get());
  Reader->collectFuncsFrom(M);
  ProfileIsValid = (Reader->read() == sampleprof_error::success);
  PSL = Reader->getProfileSymbolList();

  // While profile-sample-accurate is on, ignore symbol list.
  ProfAccForSymsInList =
      ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
  if (ProfAccForSymsInList) {
    // Cache the profile's name table for fast membership checks later.
    NamesInProfile.clear();
    if (auto NameTable = Reader->getNameTable())
      NamesInProfile.insert(NameTable->begin(), NameTable->end());
  }

  return true;
}

/// Factory for the legacy sample-profile loader pass (default profile name).
ModulePass *llvm::createSampleProfileLoaderPass() {
  return new SampleProfileLoaderLegacyPass();
}

/// Factory for the legacy sample-profile loader pass with an explicit
/// profile file name.
ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
  return new SampleProfileLoaderLegacyPass(Name);
}

/// Annotate every selected function in \p M with its sample profile.
///
/// \returns true if any function in the module was modified.
bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
                                      ProfileSummaryInfo *_PSI, CallGraph *CG) {
  if (!ProfileIsValid)
    return false;
  GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);

  PSI = _PSI;
  if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
    M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
                        ProfileSummary::PSK_Sample);
    PSI->refresh();
  }
  // Compute the total number of samples collected in this profile.
  for (const auto &I : Reader->getProfiles())
    TotalCollectedSamples += I.second.getTotalSamples();

  // Populate the symbol map.
  for (const auto &N_F : M.getValueSymbolTable()) {
    StringRef OrigName = N_F.getKey();
    Function *F = dyn_cast<Function>(N_F.getValue());
    if (F == nullptr)
      continue;
    SymbolMap[OrigName] = F;
    // Also index the name with any "."-suffix stripped (e.g. internal-linkage
    // or clone suffixes), so profiles keyed by the base name still match.
    auto pos = OrigName.find('.');
    if (pos != StringRef::npos) {
      StringRef NewName = OrigName.substr(0, pos);
      auto r = SymbolMap.insert(std::make_pair(NewName, F));
      // Failing to insert means there is already an entry in SymbolMap,
      // thus there are multiple functions that are mapped to the same
      // stripped name. In this case of name conflicting, set the value
      // to nullptr to avoid confusion.
if (!r.second)
        r.first->second = nullptr;
    }
  }

  bool retval = false;
  // Annotate each function in the computed order, resetting per-function
  // state first.
  for (auto F : buildFunctionOrder(M, CG)) {
    assert(!F->isDeclaration());
    clearFunctionData();
    retval |= runOnFunction(*F, AM);
  }

  // Account for cold calls not inlined....
  for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
       notInlinedCallInfo)
    updateProfileCallee(pair.first, pair.second.entryCount);

  return retval;
}

/// Legacy pass-manager entry point: collect the required analyses and
/// delegate to SampleProfileLoader::runOnModule.
bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
  ACT = &getAnalysis<AssumptionCacheTracker>();
  TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
  TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
}

/// Annotate a single function with its sample profile, if it has one.
///
/// Chooses the initial entry count (distinguishing "unknown" from
/// "provably cold"), wires up the remark emitter for the active pass
/// manager, and calls emitAnnotations when samples are available.
///
/// \returns true if \p F was modified.
bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {

  DILocation2SampleMap.clear();
  // By default the entry count is initialized to -1, which will be treated
  // conservatively by getEntryCount as the same as unknown (None). This is
  // to avoid newly added code to be treated as cold. If we have samples
  // this will be overwritten in emitAnnotations.
  uint64_t initialEntryCount = -1;

  ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
  if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
    // initialize all the function entry counts to 0. It means all the
    // functions without profile will be regarded as cold.
    initialEntryCount = 0;
    // profile-sample-accurate is a user assertion which has a higher precedence
    // than symbol list. When profile-sample-accurate is on, ignore symbol list.
    ProfAccForSymsInList = false;
  }

  // PSL -- profile symbol list include all the symbols in sampled binary.
  // If ProfileAccurateForSymsInList is enabled, PSL is used to treat
  // old functions without samples being cold, without having to worry
  // about new and hot functions being mistakenly treated as cold.
  if (ProfAccForSymsInList) {
    // Initialize the entry count to 0 for functions in the list.
    if (PSL->contains(F.getName()))
      initialEntryCount = 0;

    // Function in the symbol list but without sample will be regarded as
    // cold. To minimize the potential negative performance impact it could
    // have, we want to be a little conservative here saying if a function
    // shows up in the profile, no matter as outline function, inline instance
    // or call targets, treat the function as not being cold. This will handle
    // the cases such as most callsites of a function are inlined in sampled
    // binary but not inlined in current build (because of source code drift,
    // imprecise debug information, or the callsites are all cold individually
    // but not cold accumulatively...), so the outline function showing up as
    // cold in sampled binary will actually not be cold after current build.
    StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
    if (NamesInProfile.count(CanonName))
      initialEntryCount = -1;
  }

  F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
  std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
  if (AM) {
    // New pass manager: reuse the cached remark emitter from the function
    // analysis manager.
    auto &FAM =
        AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
            .getManager();
    ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  } else {
    // Legacy pass manager: create and own a fresh remark emitter.
    OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
    ORE = OwnedORE.get();
  }
  Samples = Reader->getSamplesFor(F);
  if (Samples && !Samples->empty())
    return emitAnnotations(F);
  return false;
}

/// New pass-manager entry point for the sample profile loader.
PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  // Pass parameters take precedence; fall back to the command-line options
  // when they are empty.
  SampleProfileLoader SampleLoader(
      ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
      ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
                                       : ProfileRemappingFileName,
      IsThinLTOPreLink, GetAssumptionCache, GetTTI, GetTLI);

  if (!SampleLoader.doInitialization(M))
    return PreservedAnalyses::all();

  ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
  CallGraph &CG = AM.getResult<CallGraphAnalysis>(M);
  if (!SampleLoader.runOnModule(M, &AM, PSI, &CG))
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}