1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the SampleProfileLoader transformation. This pass 10 // reads a profile file generated by a sampling profiler (e.g. Linux Perf - 11 // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the 12 // profile information in the given profile. 13 // 14 // This pass generates branch weight annotations on the IR: 15 // 16 // - prof: Represents branch weights. This annotation is added to branches 17 // to indicate the weights of each edge coming out of the branch. 18 // The weight of each edge is the weight of the target block for 19 // that edge. The weight of a block B is computed as the maximum 20 // number of samples found in B. 
21 // 22 //===----------------------------------------------------------------------===// 23 24 #include "llvm/Transforms/IPO/SampleProfile.h" 25 #include "llvm/ADT/ArrayRef.h" 26 #include "llvm/ADT/DenseMap.h" 27 #include "llvm/ADT/DenseSet.h" 28 #include "llvm/ADT/None.h" 29 #include "llvm/ADT/SCCIterator.h" 30 #include "llvm/ADT/SmallPtrSet.h" 31 #include "llvm/ADT/SmallSet.h" 32 #include "llvm/ADT/SmallVector.h" 33 #include "llvm/ADT/Statistic.h" 34 #include "llvm/ADT/StringMap.h" 35 #include "llvm/ADT/StringRef.h" 36 #include "llvm/ADT/Twine.h" 37 #include "llvm/Analysis/AssumptionCache.h" 38 #include "llvm/Analysis/CallGraph.h" 39 #include "llvm/Analysis/CallGraphSCCPass.h" 40 #include "llvm/Analysis/InlineCost.h" 41 #include "llvm/Analysis/LoopInfo.h" 42 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 43 #include "llvm/Analysis/PostDominators.h" 44 #include "llvm/Analysis/ProfileSummaryInfo.h" 45 #include "llvm/Analysis/TargetLibraryInfo.h" 46 #include "llvm/Analysis/TargetTransformInfo.h" 47 #include "llvm/IR/BasicBlock.h" 48 #include "llvm/IR/CFG.h" 49 #include "llvm/IR/DebugInfoMetadata.h" 50 #include "llvm/IR/DebugLoc.h" 51 #include "llvm/IR/DiagnosticInfo.h" 52 #include "llvm/IR/Dominators.h" 53 #include "llvm/IR/Function.h" 54 #include "llvm/IR/GlobalValue.h" 55 #include "llvm/IR/InstrTypes.h" 56 #include "llvm/IR/Instruction.h" 57 #include "llvm/IR/Instructions.h" 58 #include "llvm/IR/IntrinsicInst.h" 59 #include "llvm/IR/LLVMContext.h" 60 #include "llvm/IR/MDBuilder.h" 61 #include "llvm/IR/Module.h" 62 #include "llvm/IR/PassManager.h" 63 #include "llvm/IR/ValueSymbolTable.h" 64 #include "llvm/InitializePasses.h" 65 #include "llvm/Pass.h" 66 #include "llvm/ProfileData/InstrProf.h" 67 #include "llvm/ProfileData/SampleProf.h" 68 #include "llvm/ProfileData/SampleProfReader.h" 69 #include "llvm/Support/Casting.h" 70 #include "llvm/Support/CommandLine.h" 71 #include "llvm/Support/Debug.h" 72 #include "llvm/Support/ErrorHandling.h" 73 #include 
"llvm/Support/ErrorOr.h" 74 #include "llvm/Support/GenericDomTree.h" 75 #include "llvm/Support/raw_ostream.h" 76 #include "llvm/Transforms/IPO.h" 77 #include "llvm/Transforms/Instrumentation.h" 78 #include "llvm/Transforms/Utils/CallPromotionUtils.h" 79 #include "llvm/Transforms/Utils/Cloning.h" 80 #include "llvm/Transforms/Utils/MisExpect.h" 81 #include <algorithm> 82 #include <cassert> 83 #include <cstdint> 84 #include <functional> 85 #include <limits> 86 #include <map> 87 #include <memory> 88 #include <queue> 89 #include <string> 90 #include <system_error> 91 #include <utility> 92 #include <vector> 93 94 using namespace llvm; 95 using namespace sampleprof; 96 using ProfileCount = Function::ProfileCount; 97 #define DEBUG_TYPE "sample-profile" 98 #define CSINLINE_DEBUG DEBUG_TYPE "-inline" 99 100 STATISTIC(NumCSInlined, 101 "Number of functions inlined with context sensitive profile"); 102 STATISTIC(NumCSNotInlined, 103 "Number of functions not inlined with context sensitive profile"); 104 105 // Command line option to specify the file to read samples from. This is 106 // mainly used for debugging. 107 static cl::opt<std::string> SampleProfileFile( 108 "sample-profile-file", cl::init(""), cl::value_desc("filename"), 109 cl::desc("Profile file loaded by -sample-profile"), cl::Hidden); 110 111 // The named file contains a set of transformations that may have been applied 112 // to the symbol names between the program from which the sample data was 113 // collected and the current program's symbols. 
static cl::opt<std::string> SampleProfileRemappingFile(
    "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
    cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);

static cl::opt<unsigned> SampleProfileMaxPropagateIterations(
    "sample-profile-max-propagate-iterations", cl::init(100),
    cl::desc("Maximum number of iterations to go through when propagating "
             "sample block/edge weights through the CFG."));

// Coverage checking knobs: both are percentages in [0, 100]; 0 (the
// default) disables the corresponding warning.
static cl::opt<unsigned> SampleProfileRecordCoverage(
    "sample-profile-check-record-coverage", cl::init(0), cl::value_desc("N"),
    cl::desc("Emit a warning if less than N% of records in the input profile "
             "are matched to the IR."));

static cl::opt<unsigned> SampleProfileSampleCoverage(
    "sample-profile-check-sample-coverage", cl::init(0), cl::value_desc("N"),
    cl::desc("Emit a warning if less than N% of samples in the input profile "
             "are matched to the IR."));

static cl::opt<bool> NoWarnSampleUnused(
    "no-warn-sample-unused", cl::init(false), cl::Hidden,
    cl::desc("Use this option to turn off/on warnings about function with "
             "samples but without debug information to use those samples. "));

static cl::opt<bool> ProfileSampleAccurate(
    "profile-sample-accurate", cl::Hidden, cl::init(false),
    cl::desc("If the sample profile is accurate, we will mark all un-sampled "
             "callsite and function as having 0 samples. Otherwise, treat "
             "un-sampled callsites and functions conservatively as unknown. "));

static cl::opt<bool> ProfileAccurateForSymsInList(
    "profile-accurate-for-symsinlist", cl::Hidden, cl::ZeroOrMore,
    cl::init(true),
    cl::desc("For symbols in profile symbol list, regard their profiles to "
             "be accurate. It may be overriden by profile-sample-accurate. "));

static cl::opt<bool> ProfileMergeInlinee(
    "sample-profile-merge-inlinee", cl::Hidden, cl::init(false),
    cl::desc("Merge past inlinee's profile to outline version if sample "
             "profile loader decided not to inline a call site."));

static cl::opt<bool> ProfileTopDownLoad(
    "sample-profile-top-down-load", cl::Hidden, cl::init(false),
    cl::desc("Do profile annotation and inlining for functions in top-down "
             "order of call graph during sample profile loading."));

static cl::opt<bool> ProfileSizeInline(
    "sample-profile-inline-size", cl::Hidden, cl::init(false),
    cl::desc("Inline cold call sites in profile loader if it's beneficial "
             "for code size."));

static cl::opt<int> SampleColdCallSiteThreshold(
    "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45),
    cl::desc("Threshold for inlining cold callsites"));

namespace {

// Shorthands for the CFG-weight bookkeeping maps used by the loader.
using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>;
using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>;
using Edge = std::pair<const BasicBlock *, const BasicBlock *>;
using EdgeWeightMap = DenseMap<Edge, uint64_t>;
using BlockEdgeMap =
    DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>;

class SampleProfileLoader;

/// Tracks which sample records and how many raw samples from the input
/// profile were actually matched to IR instructions, so the loader can
/// report record/sample coverage (see the -sample-profile-check-*-coverage
/// flags above). State is per-function and reset via clear().
class SampleCoverageTracker {
public:
  SampleCoverageTracker(SampleProfileLoader &SPL) : SPLoader(SPL){};

  bool markSamplesUsed(const FunctionSamples *FS, uint32_t LineOffset,
                       uint32_t Discriminator, uint64_t Samples);
  unsigned computeCoverage(unsigned Used, unsigned Total) const;
  unsigned countUsedRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  unsigned countBodyRecords(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;
  uint64_t getTotalUsedSamples() const { return TotalUsedSamples; }
  uint64_t countBodySamples(const FunctionSamples *FS,
                            ProfileSummaryInfo *PSI) const;

  /// Reset all per-function coverage state.
  void clear() {
    SampleCoverage.clear();
    TotalUsedSamples = 0;
  }

private:
  using BodySampleCoverageMap = std::map<LineLocation, unsigned>;
  using FunctionSamplesCoverageMap =
      DenseMap<const FunctionSamples *, BodySampleCoverageMap>;

  /// Coverage map for sampling records.
  ///
  /// This map keeps a record of sampling records that have been matched to
  /// an IR instruction. This is used to detect some form of staleness in
  /// profiles (see flag -sample-profile-check-coverage).
  ///
  /// Each entry in the map corresponds to a FunctionSamples instance. This is
  /// another map that counts how many times the sample record at the
  /// given location has been used.
  FunctionSamplesCoverageMap SampleCoverage;

  /// Number of samples used from the profile.
  ///
  /// When a sampling record is used for the first time, the samples from
  /// that record are added to this accumulator. Coverage is later computed
  /// based on the total number of samples available in this function and
  /// its callsites.
  ///
  /// Note that this accumulator tracks samples used from a single function
  /// and all the inlined callsites. Strictly, we should have a map of counters
  /// keyed by FunctionSamples pointers, but these stats are cleared after
  /// every function, so we just need to keep a single counter.
  uint64_t TotalUsedSamples = 0;

  SampleProfileLoader &SPLoader;
};

/// RAII helper that populates the GUID -> function-name map for MD5-based
/// profiles on construction and clears it (and every FunctionSamples'
/// pointer to it) on destruction. A no-op when the reader does not use MD5
/// names.
class GUIDToFuncNameMapper {
public:
  GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
                       DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
      : CurrentReader(Reader), CurrentModule(M),
        CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
    if (!CurrentReader.useMD5())
      return;

    for (const auto &F : CurrentModule) {
      StringRef OrigName = F.getName();
      CurrentGUIDToFuncNameMap.insert(
          {Function::getGUID(OrigName), OrigName});

      // Local to global var promotion used by optimization like thinlto
      // will rename the var and add suffix like ".llvm.xxx" to the
      // original local name. In sample profile, the suffixes of function
      // names are all stripped. Since it is possible that the mapper is
      // built in post-thin-link phase and var promotion has been done,
      // we need to add the substring of function name without the suffix
      // into the GUIDToFuncNameMap.
      StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
      if (CanonName != OrigName)
        CurrentGUIDToFuncNameMap.insert(
            {Function::getGUID(CanonName), CanonName});
    }

    // Update GUIDToFuncNameMap for each function including inlinees.
    SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
  }

  ~GUIDToFuncNameMapper() {
    if (!CurrentReader.useMD5())
      return;

    CurrentGUIDToFuncNameMap.clear();

    // Reset GUIDToFuncNameMap for each function as they're no
    // longer valid at this point.
    SetGUIDToFuncNameMapForAll(nullptr);
  }

private:
  // Walk every FunctionSamples in the reader (breadth-first through the
  // inlined-callsite trees) and point its GUIDToFuncNameMap at \p Map.
  void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
    std::queue<FunctionSamples *> FSToUpdate;
    for (auto &IFS : CurrentReader.getProfiles()) {
      FSToUpdate.push(&IFS.second);
    }

    while (!FSToUpdate.empty()) {
      FunctionSamples *FS = FSToUpdate.front();
      FSToUpdate.pop();
      FS->GUIDToFuncNameMap = Map;
      for (const auto &ICS : FS->getCallsiteSamples()) {
        const FunctionSamplesMap &FSMap = ICS.second;
        for (auto &IFS : FSMap) {
          // The map stores const values; we need mutable access to set
          // the back-pointer, hence the const_cast.
          FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
          FSToUpdate.push(&FS);
        }
      }
    }
  }

  SampleProfileReader &CurrentReader;
  Module &CurrentModule;
  DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
};

/// Sample profile pass.
///
/// This pass reads profile data from the file specified by
/// -sample-profile-file and annotates every affected function with the
/// profile information found in that file.
class SampleProfileLoader {
public:
  SampleProfileLoader(
      StringRef Name, StringRef RemapName, bool IsThinLTOPreLink,
      std::function<AssumptionCache &(Function &)> GetAssumptionCache,
      std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
      std::function<const TargetLibraryInfo &(Function &)> GetTLI)
      : GetAC(std::move(GetAssumptionCache)),
        GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
        CoverageTracker(*this), Filename(std::string(Name)),
        RemappingFilename(std::string(RemapName)),
        IsThinLTOPreLink(IsThinLTOPreLink) {}

  bool doInitialization(Module &M);
  bool runOnModule(Module &M, ModuleAnalysisManager *AM,
                   ProfileSummaryInfo *_PSI, CallGraph *CG);

  void dump() { Reader->dump(); }

protected:
  friend class SampleCoverageTracker;

  bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
  unsigned getFunctionLoc(Function &F);
  bool emitAnnotations(Function &F);
  ErrorOr<uint64_t> getInstWeight(const Instruction &I);
  ErrorOr<uint64_t> getBlockWeight(const BasicBlock *BB);
  const FunctionSamples *findCalleeFunctionSamples(const Instruction &I) const;
  std::vector<const FunctionSamples *>
  findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
  // Cache for findFunctionSamples: maps a debug location to the inlined
  // FunctionSamples instance it resolves to (nullptr when none).
  mutable DenseMap<const DILocation *, const FunctionSamples *> DILocation2SampleMap;
  const FunctionSamples *findFunctionSamples(const Instruction &I) const;
  bool inlineCallInstruction(Instruction *I);
  bool inlineHotFunctions(Function &F,
                          DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  // Inline cold/small functions in addition to hot ones
  bool shouldInlineColdCallee(Instruction &CallInst);
  void emitOptimizationRemarksForInlineCandidates(
      const SmallVector<Instruction *, 10> &Candidates, const Function &F, bool Hot);
  void printEdgeWeight(raw_ostream &OS, Edge E);
  void printBlockWeight(raw_ostream &OS, const BasicBlock *BB) const;
  void printBlockEquivalence(raw_ostream &OS, const BasicBlock *BB);
  bool computeBlockWeights(Function &F);
  void findEquivalenceClasses(Function &F);
  template <bool IsPostDom>
  void findEquivalencesFor(BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants,
                           DominatorTreeBase<BasicBlock, IsPostDom> *DomTree);

  void propagateWeights(Function &F);
  uint64_t visitEdge(Edge E, unsigned *NumUnknownEdges, Edge *UnknownEdge);
  void buildEdges(Function &F);
  std::vector<Function *> buildFunctionOrder(Module &M, CallGraph *CG);
  bool propagateThroughEdges(Function &F, bool UpdateBlockCount);
  void computeDominanceAndLoopInfo(Function &F);
  void clearFunctionData();
  bool callsiteIsHot(const FunctionSamples *CallsiteFS,
                     ProfileSummaryInfo *PSI);

  /// Map basic blocks to their computed weights.
  ///
  /// The weight of a basic block is defined to be the maximum
  /// of all the instruction weights in that block.
  BlockWeightMap BlockWeights;

  /// Map edges to their computed weights.
  ///
  /// Edge weights are computed by propagating basic block weights in
  /// SampleProfile::propagateWeights.
  EdgeWeightMap EdgeWeights;

  /// Set of visited blocks during propagation.
  SmallPtrSet<const BasicBlock *, 32> VisitedBlocks;

  /// Set of visited edges during propagation.
  SmallSet<Edge, 32> VisitedEdges;

  /// Equivalence classes for block weights.
  ///
  /// Two blocks BB1 and BB2 are in the same equivalence class if they
  /// dominate and post-dominate each other, and they are in the same loop
  /// nest. When this happens, the two blocks are guaranteed to execute
  /// the same number of times.
  EquivalenceClassMap EquivalenceClass;

  /// Map from function name to Function *. Used to find the function from
  /// the function name. If the function name contains suffix, additional
  /// entry is added to map from the stripped name to the function if there
  /// is one-to-one mapping.
  StringMap<Function *> SymbolMap;

  /// Dominance, post-dominance and loop information.
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<PostDominatorTree> PDT;
  std::unique_ptr<LoopInfo> LI;

  /// Callbacks supplied by the pass manager to fetch per-function analyses.
  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;

  /// Predecessors for each basic block in the CFG.
  BlockEdgeMap Predecessors;

  /// Successors for each basic block in the CFG.
  BlockEdgeMap Successors;

  SampleCoverageTracker CoverageTracker;

  /// Profile reader object.
  std::unique_ptr<SampleProfileReader> Reader;

  /// Samples collected for the body of this function.
  FunctionSamples *Samples = nullptr;

  /// Name of the profile file to load.
  std::string Filename;

  /// Name of the profile remapping file to load.
  std::string RemappingFilename;

  /// Flag indicating whether the profile input loaded successfully.
  bool ProfileIsValid = false;

  /// Flag indicating if the pass is invoked in ThinLTO compile phase.
  ///
  /// In this phase, in annotation, we should not promote indirect calls.
  /// Instead, we will mark GUIDs that needs to be annotated to the function.
  bool IsThinLTOPreLink;

  /// Profile Summary Info computed from sample profile.
  ProfileSummaryInfo *PSI = nullptr;

  /// Profile symbol list tells whether a function name appears in the binary
  /// used to generate the current profile.
  std::unique_ptr<ProfileSymbolList> PSL;

  /// Total number of samples collected in this profile.
  ///
  /// This is the sum of all the samples collected in all the functions executed
  /// at runtime.
  uint64_t TotalCollectedSamples = 0;

  /// Optimization Remark Emitter used to emit diagnostic remarks.
  OptimizationRemarkEmitter *ORE = nullptr;

  // Information recorded when we declined to inline a call site
  // because we have determined it is too cold is accumulated for
  // each callee function. Initially this is just the entry count.
  struct NotInlinedProfileInfo {
    uint64_t entryCount;
  };
  DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;

  // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  // all the function symbols defined or declared in current module.
  DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;

  // All the Names used in FunctionSamples including outline function
  // names, inline instance names and call target names.
  StringSet<> NamesInProfile;

  // For symbol in profile symbol list, whether to regard their profiles
  // to be accurate. It is mainly decided by existence of profile symbol
  // list and -profile-accurate-for-symsinlist flag, but it can be
  // overridden by -profile-sample-accurate or profile-sample-accurate
  // attribute.
  bool ProfAccForSymsInList;
};

/// Legacy pass-manager wrapper around SampleProfileLoader.
class SampleProfileLoaderLegacyPass : public ModulePass {
public:
  // Class identification, replacement for typeinfo
  static char ID;

  SampleProfileLoaderLegacyPass(StringRef Name = SampleProfileFile,
                                bool IsThinLTOPreLink = false)
      : ModulePass(ID), SampleLoader(
            Name, SampleProfileRemappingFile, IsThinLTOPreLink,
            [&](Function &F) -> AssumptionCache & {
              return ACT->getAssumptionCache(F);
            },
            [&](Function &F) -> TargetTransformInfo & {
              return TTIWP->getTTI(F);
            },
            [&](Function &F) -> TargetLibraryInfo & {
              return TLIWP->getTLI(F);
            }) {
    initializeSampleProfileLoaderLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void dump() { SampleLoader.dump(); }

  bool doInitialization(Module &M) override {
    return SampleLoader.doInitialization(M);
  }

  StringRef getPassName() const override { return "Sample profile pass"; }
  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
  }

private:
  SampleProfileLoader SampleLoader;
  AssumptionCacheTracker *ACT = nullptr;
  TargetTransformInfoWrapperPass *TTIWP = nullptr;
  TargetLibraryInfoWrapperPass *TLIWP = nullptr;
};

} // end anonymous namespace

/// Return true if the given callsite is hot wrt to hot cutoff threshold.
///
/// Functions that were inlined in the original binary will be represented
/// in the inline stack in the sample profile. If the profile shows that
/// the original inline decision was "good" (i.e., the callsite is executed
/// frequently), then we will recreate the inline decision and apply the
/// profile from the inlined callsite.
///
/// To decide whether an inlined callsite is hot, we compare the callsite
/// sample count with the hot cutoff computed by ProfileSummaryInfo, it is
/// regarded as hot if the count is above the cutoff value.
///
/// When ProfileAccurateForSymsInList is enabled and profile symbol list
/// is present, functions in the profile symbol list but without profile will
/// be regarded as cold and much less inlining will happen in CGSCC inlining
/// pass, so we tend to lower the hot criteria here to allow more early
/// inlining to happen for warm callsites and it is helpful for performance.
bool SampleProfileLoader::callsiteIsHot(const FunctionSamples *CallsiteFS,
                                        ProfileSummaryInfo *PSI) {
  if (!CallsiteFS)
    return false; // The callsite was not inlined in the original binary.

  assert(PSI && "PSI is expected to be non null");
  uint64_t CallsiteTotalSamples = CallsiteFS->getTotalSamples();
  if (ProfAccForSymsInList)
    // Lowered bar: anything not provably cold counts as hot.
    return !PSI->isColdCount(CallsiteTotalSamples);
  else
    return PSI->isHotCount(CallsiteTotalSamples);
}

/// Mark as used the sample record for the given function samples at
/// (LineOffset, Discriminator).
///
/// \returns true if this is the first time we mark the given record.
bool SampleCoverageTracker::markSamplesUsed(const FunctionSamples *FS,
                                            uint32_t LineOffset,
                                            uint32_t Discriminator,
                                            uint64_t Samples) {
  LineLocation Loc(LineOffset, Discriminator);
  unsigned &Count = SampleCoverage[FS][Loc];
  bool FirstTime = (++Count == 1);
  if (FirstTime)
    TotalUsedSamples += Samples;
  return FirstTime;
}

/// Return the number of sample records that were applied from this profile.
///
/// This count does not include records from cold inlined callsites.
569 unsigned 570 SampleCoverageTracker::countUsedRecords(const FunctionSamples *FS, 571 ProfileSummaryInfo *PSI) const { 572 auto I = SampleCoverage.find(FS); 573 574 // The size of the coverage map for FS represents the number of records 575 // that were marked used at least once. 576 unsigned Count = (I != SampleCoverage.end()) ? I->second.size() : 0; 577 578 // If there are inlined callsites in this function, count the samples found 579 // in the respective bodies. However, do not bother counting callees with 0 580 // total samples, these are callees that were never invoked at runtime. 581 for (const auto &I : FS->getCallsiteSamples()) 582 for (const auto &J : I.second) { 583 const FunctionSamples *CalleeSamples = &J.second; 584 if (SPLoader.callsiteIsHot(CalleeSamples, PSI)) 585 Count += countUsedRecords(CalleeSamples, PSI); 586 } 587 588 return Count; 589 } 590 591 /// Return the number of sample records in the body of this profile. 592 /// 593 /// This count does not include records from cold inlined callsites. 594 unsigned 595 SampleCoverageTracker::countBodyRecords(const FunctionSamples *FS, 596 ProfileSummaryInfo *PSI) const { 597 unsigned Count = FS->getBodySamples().size(); 598 599 // Only count records in hot callsites. 600 for (const auto &I : FS->getCallsiteSamples()) 601 for (const auto &J : I.second) { 602 const FunctionSamples *CalleeSamples = &J.second; 603 if (SPLoader.callsiteIsHot(CalleeSamples, PSI)) 604 Count += countBodyRecords(CalleeSamples, PSI); 605 } 606 607 return Count; 608 } 609 610 /// Return the number of samples collected in the body of this profile. 611 /// 612 /// This count does not include samples from cold inlined callsites. 613 uint64_t 614 SampleCoverageTracker::countBodySamples(const FunctionSamples *FS, 615 ProfileSummaryInfo *PSI) const { 616 uint64_t Total = 0; 617 for (const auto &I : FS->getBodySamples()) 618 Total += I.second.getSamples(); 619 620 // Only count samples in hot callsites. 
621 for (const auto &I : FS->getCallsiteSamples()) 622 for (const auto &J : I.second) { 623 const FunctionSamples *CalleeSamples = &J.second; 624 if (SPLoader.callsiteIsHot(CalleeSamples, PSI)) 625 Total += countBodySamples(CalleeSamples, PSI); 626 } 627 628 return Total; 629 } 630 631 /// Return the fraction of sample records used in this profile. 632 /// 633 /// The returned value is an unsigned integer in the range 0-100 indicating 634 /// the percentage of sample records that were used while applying this 635 /// profile to the associated function. 636 unsigned SampleCoverageTracker::computeCoverage(unsigned Used, 637 unsigned Total) const { 638 assert(Used <= Total && 639 "number of used records cannot exceed the total number of records"); 640 return Total > 0 ? Used * 100 / Total : 100; 641 } 642 643 /// Clear all the per-function data used to load samples and propagate weights. 644 void SampleProfileLoader::clearFunctionData() { 645 BlockWeights.clear(); 646 EdgeWeights.clear(); 647 VisitedBlocks.clear(); 648 VisitedEdges.clear(); 649 EquivalenceClass.clear(); 650 DT = nullptr; 651 PDT = nullptr; 652 LI = nullptr; 653 Predecessors.clear(); 654 Successors.clear(); 655 CoverageTracker.clear(); 656 } 657 658 #ifndef NDEBUG 659 /// Print the weight of edge \p E on stream \p OS. 660 /// 661 /// \param OS Stream to emit the output to. 662 /// \param E Edge to print. 663 void SampleProfileLoader::printEdgeWeight(raw_ostream &OS, Edge E) { 664 OS << "weight[" << E.first->getName() << "->" << E.second->getName() 665 << "]: " << EdgeWeights[E] << "\n"; 666 } 667 668 /// Print the equivalence class of block \p BB on stream \p OS. 669 /// 670 /// \param OS Stream to emit the output to. 671 /// \param BB Block to print. 672 void SampleProfileLoader::printBlockEquivalence(raw_ostream &OS, 673 const BasicBlock *BB) { 674 const BasicBlock *Equiv = EquivalenceClass[BB]; 675 OS << "equivalence[" << BB->getName() 676 << "]: " << ((Equiv) ? 
EquivalenceClass[BB]->getName() : "NONE") << "\n"; 677 } 678 679 /// Print the weight of block \p BB on stream \p OS. 680 /// 681 /// \param OS Stream to emit the output to. 682 /// \param BB Block to print. 683 void SampleProfileLoader::printBlockWeight(raw_ostream &OS, 684 const BasicBlock *BB) const { 685 const auto &I = BlockWeights.find(BB); 686 uint64_t W = (I == BlockWeights.end() ? 0 : I->second); 687 OS << "weight[" << BB->getName() << "]: " << W << "\n"; 688 } 689 #endif 690 691 /// Get the weight for an instruction. 692 /// 693 /// The "weight" of an instruction \p Inst is the number of samples 694 /// collected on that instruction at runtime. To retrieve it, we 695 /// need to compute the line number of \p Inst relative to the start of its 696 /// function. We use HeaderLineno to compute the offset. We then 697 /// look up the samples collected for \p Inst using BodySamples. 698 /// 699 /// \param Inst Instruction to query. 700 /// 701 /// \returns the weight of \p Inst. 702 ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) { 703 const DebugLoc &DLoc = Inst.getDebugLoc(); 704 if (!DLoc) 705 return std::error_code(); 706 707 const FunctionSamples *FS = findFunctionSamples(Inst); 708 if (!FS) 709 return std::error_code(); 710 711 // Ignore all intrinsics, phinodes and branch instructions. 712 // Branch and phinodes instruction usually contains debug info from sources outside of 713 // the residing basic block, thus we ignore them during annotation. 714 if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst)) 715 return std::error_code(); 716 717 // If a direct call/invoke instruction is inlined in profile 718 // (findCalleeFunctionSamples returns non-empty result), but not inlined here, 719 // it means that the inlined callsite has no sample, thus the call 720 // instruction should have 0 count. 
721 if ((isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) && 722 !ImmutableCallSite(&Inst).isIndirectCall() && 723 findCalleeFunctionSamples(Inst)) 724 return 0; 725 726 const DILocation *DIL = DLoc; 727 uint32_t LineOffset = FunctionSamples::getOffset(DIL); 728 uint32_t Discriminator = DIL->getBaseDiscriminator(); 729 ErrorOr<uint64_t> R = FS->findSamplesAt(LineOffset, Discriminator); 730 if (R) { 731 bool FirstMark = 732 CoverageTracker.markSamplesUsed(FS, LineOffset, Discriminator, R.get()); 733 if (FirstMark) { 734 ORE->emit([&]() { 735 OptimizationRemarkAnalysis Remark(DEBUG_TYPE, "AppliedSamples", &Inst); 736 Remark << "Applied " << ore::NV("NumSamples", *R); 737 Remark << " samples from profile (offset: "; 738 Remark << ore::NV("LineOffset", LineOffset); 739 if (Discriminator) { 740 Remark << "."; 741 Remark << ore::NV("Discriminator", Discriminator); 742 } 743 Remark << ")"; 744 return Remark; 745 }); 746 } 747 LLVM_DEBUG(dbgs() << " " << DLoc.getLine() << "." 748 << DIL->getBaseDiscriminator() << ":" << Inst 749 << " (line offset: " << LineOffset << "." 750 << DIL->getBaseDiscriminator() << " - weight: " << R.get() 751 << ")\n"); 752 } 753 return R; 754 } 755 756 /// Compute the weight of a basic block. 757 /// 758 /// The weight of basic block \p BB is the maximum weight of all the 759 /// instructions in BB. 760 /// 761 /// \param BB The basic block to query. 762 /// 763 /// \returns the weight for \p BB. 764 ErrorOr<uint64_t> SampleProfileLoader::getBlockWeight(const BasicBlock *BB) { 765 uint64_t Max = 0; 766 bool HasWeight = false; 767 for (auto &I : BB->getInstList()) { 768 const ErrorOr<uint64_t> &R = getInstWeight(I); 769 if (R) { 770 Max = std::max(Max, R.get()); 771 HasWeight = true; 772 } 773 } 774 return HasWeight ? ErrorOr<uint64_t>(Max) : std::error_code(); 775 } 776 777 /// Compute and store the weights of every basic block. 778 /// 779 /// This populates the BlockWeights map by computing 780 /// the weights of every basic block in the CFG. 
781 /// 782 /// \param F The function to query. 783 bool SampleProfileLoader::computeBlockWeights(Function &F) { 784 bool Changed = false; 785 LLVM_DEBUG(dbgs() << "Block weights\n"); 786 for (const auto &BB : F) { 787 ErrorOr<uint64_t> Weight = getBlockWeight(&BB); 788 if (Weight) { 789 BlockWeights[&BB] = Weight.get(); 790 VisitedBlocks.insert(&BB); 791 Changed = true; 792 } 793 LLVM_DEBUG(printBlockWeight(dbgs(), &BB)); 794 } 795 796 return Changed; 797 } 798 799 /// Get the FunctionSamples for a call instruction. 800 /// 801 /// The FunctionSamples of a call/invoke instruction \p Inst is the inlined 802 /// instance in which that call instruction is calling to. It contains 803 /// all samples that resides in the inlined instance. We first find the 804 /// inlined instance in which the call instruction is from, then we 805 /// traverse its children to find the callsite with the matching 806 /// location. 807 /// 808 /// \param Inst Call/Invoke instruction to query. 809 /// 810 /// \returns The FunctionSamples pointer to the inlined instance. 811 const FunctionSamples * 812 SampleProfileLoader::findCalleeFunctionSamples(const Instruction &Inst) const { 813 const DILocation *DIL = Inst.getDebugLoc(); 814 if (!DIL) { 815 return nullptr; 816 } 817 818 StringRef CalleeName; 819 if (const CallInst *CI = dyn_cast<CallInst>(&Inst)) 820 if (Function *Callee = CI->getCalledFunction()) 821 CalleeName = Callee->getName(); 822 823 const FunctionSamples *FS = findFunctionSamples(Inst); 824 if (FS == nullptr) 825 return nullptr; 826 827 return FS->findFunctionSamplesAt(LineLocation(FunctionSamples::getOffset(DIL), 828 DIL->getBaseDiscriminator()), 829 CalleeName); 830 } 831 832 /// Returns a vector of FunctionSamples that are the indirect call targets 833 /// of \p Inst. The vector is sorted by the total number of samples. Stores 834 /// the total call count of the indirect call in \p Sum. 
// NOTE(review): \p Sum is only written when the callsite maps to profile
// data (it stays untouched on the early returns below) — callers appear to
// only read it when the returned vector is non-empty; confirm before reuse.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
    const Instruction &Inst, uint64_t &Sum) const {
  const DILocation *DIL = Inst.getDebugLoc();
  std::vector<const FunctionSamples *> R;

  // Without debug info we cannot map the callsite to the profile.
  if (!DIL) {
    return R;
  }

  const FunctionSamples *FS = findFunctionSamples(Inst);
  if (FS == nullptr)
    return R;

  uint32_t LineOffset = FunctionSamples::getOffset(DIL);
  uint32_t Discriminator = DIL->getBaseDiscriminator();

  // Start the total with the call-target counts recorded at the callsite.
  auto T = FS->findCallTargetMapAt(LineOffset, Discriminator);
  Sum = 0;
  if (T)
    for (const auto &T_C : T.get())
      Sum += T_C.second;
  // Then add the entry samples of every inlined target recorded at this
  // location, collecting those targets into the result vector.
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(LineLocation(
          FunctionSamples::getOffset(DIL), DIL->getBaseDiscriminator()))) {
    if (M->empty())
      return R;
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getEntrySamples();
      R.push_back(&NameFS.second);
    }
    // Sort by entry samples, descending; break ties by GUID so the order is
    // deterministic across runs.
    llvm::sort(R, [](const FunctionSamples *L, const FunctionSamples *R) {
      if (L->getEntrySamples() != R->getEntrySamples())
        return L->getEntrySamples() > R->getEntrySamples();
      return FunctionSamples::getGUID(L->getName()) <
             FunctionSamples::getGUID(R->getName());
    });
  }
  return R;
}

/// Get the FunctionSamples for an instruction.
///
/// The FunctionSamples of an instruction \p Inst is the inlined instance
/// in which that instruction is coming from. We traverse the inline stack
/// of that instruction, and match it with the tree nodes in the profile.
///
/// \param Inst Instruction to query.
///
/// \returns the FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
  const DILocation *DIL = Inst.getDebugLoc();
  // Without a debug location the instruction is attributed to the
  // outermost (non-inlined) function profile.
  if (!DIL)
    return Samples;

  // Cache the lookup per DILocation: try_emplace inserts a null entry on
  // first query, and only then do we compute the real samples pointer.
  auto it = DILocation2SampleMap.try_emplace(DIL,nullptr);
  if (it.second)
    it.first->second = Samples->findFunctionSamples(DIL);
  return it.first->second;
}

// FIXME(CallSite): Parameter should be CallBase&, as it's assumed to be that,
// and non-null.
//
// Attempt to inline the direct call \p I. Returns true if the callee was
// inlined; emits an optimization remark either way.
bool SampleProfileLoader::inlineCallInstruction(Instruction *I) {
  assert(isa<CallInst>(I) || isa<InvokeInst>(I));
  CallBase &CS = *cast<CallBase>(I);
  Function *CalledFunction = CS.getCalledFunction();
  assert(CalledFunction);
  DebugLoc DLoc = I->getDebugLoc();
  BasicBlock *BB = I->getParent();
  InlineParams Params = getInlineParams();
  Params.ComputeFullInlineCost = true;
  // Checks if there is anything in the reachable portion of the callee at
  // this callsite that makes this inlining potentially illegal. Need to
  // set ComputeFullInlineCost, otherwise getInlineCost may return early
  // when cost exceeds threshold without checking all IRs in the callee.
  // The actual cost does not matter because we only check isNever() to
  // see if it is legal to inline the callsite.
  InlineCost Cost =
      getInlineCost(cast<CallBase>(*I), Params, GetTTI(*CalledFunction), GetAC,
                    None, GetTLI, nullptr, nullptr);
  if (Cost.isNever()) {
    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineFail", DLoc, BB)
              << "incompatible inlining");
    return false;
  }
  InlineFunctionInfo IFI(nullptr, &GetAC);
  if (InlineFunction(CS, IFI).isSuccess()) {
    // The call to InlineFunction erases I, so we can't pass it here.
    ORE->emit(OptimizationRemark(CSINLINE_DEBUG, "InlineSuccess", DLoc, BB)
              << "inlined callee '" << ore::NV("Callee", CalledFunction)
              << "' into '" << ore::NV("Caller", BB->getParent()) << "'");
    return true;
  }
  return false;
}

// Returns true when \p CallInst is a direct callsite worth inlining for
// size: size-based inlining must be enabled and the inline cost must not
// exceed the cold-callsite threshold.
bool SampleProfileLoader::shouldInlineColdCallee(Instruction &CallInst) {
  if (!ProfileSizeInline)
    return false;

  // Indirect calls have no statically known callee; nothing to evaluate.
  Function *Callee = CallSite(&CallInst).getCalledFunction();
  if (Callee == nullptr)
    return false;

  InlineCost Cost =
      getInlineCost(cast<CallBase>(CallInst), getInlineParams(),
                    GetTTI(*Callee), GetAC, None, GetTLI, nullptr, nullptr);

  return Cost.getCost() <= SampleColdCallSiteThreshold;
}

// Emit an analysis remark for every direct-call inline candidate in
// \p Candidates, noting whether it was selected for hotness or for size.
void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
    const SmallVector<Instruction *, 10> &Candidates, const Function &F,
    bool Hot) {
  for (auto I : Candidates) {
    Function *CalledFunction = CallSite(I).getCalledFunction();
    if (CalledFunction) {
      ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "InlineAttempt",
                                           I->getDebugLoc(), I->getParent())
                << "previous inlining reattempted for "
                << (Hot ? "hotness: '" : "size: '")
                << ore::NV("Callee", CalledFunction) << "' into '"
                << ore::NV("Caller", &F) << "'");
    }
  }
}

/// Iteratively inline hot callsites of a function.
///
/// Iteratively traverse all callsites of the function \p F, and find if
/// the corresponding inlined instance exists and is hot in profile. If
/// it is hot enough, inline the callsites and adds new callsites of the
/// callee into the caller. If the call is an indirect call, first promote
/// it to direct call. Each indirect call is limited with a single target.
///
/// \param F function to perform iterative inlining.
/// \param InlinedGUIDs a set to be updated to include all GUIDs that are
/// inlined in the profiled binary.
///
/// \returns True if any inlining happened.
bool SampleProfileLoader::inlineHotFunctions(
    Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
  // Tracks indirect callsites already promoted, so each one is promoted to
  // at most a single target.
  DenseSet<Instruction *> PromotedInsns;

  // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
  // Profile symbol list is ignored when profile-sample-accurate is on.
  assert((!ProfAccForSymsInList ||
          (!ProfileSampleAccurate &&
           !F.hasFnAttribute("profile-sample-accurate"))) &&
         "ProfAccForSymsInList should be false when profile-sample-accurate "
         "is enabled");

  // FIXME(CallSite): refactor the vectors here, as they operate with CallBase
  // values
  DenseMap<Instruction *, const FunctionSamples *> localNotInlinedCallSites;
  bool Changed = false;
  // Iterate to a fixpoint: inlining exposes callsites from the inlined
  // bodies that may themselves be candidates on the next round.
  while (true) {
    bool LocalChanged = false;
    SmallVector<Instruction *, 10> CIS;
    for (auto &BB : F) {
      bool Hot = false;
      SmallVector<Instruction *, 10> AllCandidates;
      SmallVector<Instruction *, 10> ColdCandidates;
      for (auto &I : BB.getInstList()) {
        const FunctionSamples *FS = nullptr;
        if ((isa<CallInst>(I) || isa<InvokeInst>(I)) &&
            !isa<IntrinsicInst>(I) && (FS = findCalleeFunctionSamples(I))) {
          AllCandidates.push_back(&I);
          if (FS->getEntrySamples() > 0)
            localNotInlinedCallSites.try_emplace(&I, FS);
          if (callsiteIsHot(FS, PSI))
            Hot = true;
          else if (shouldInlineColdCallee(I))
            ColdCandidates.push_back(&I);
        }
      }
      // If any callsite in this block is hot, attempt every candidate in
      // the block; otherwise attempt only the size-selected cold ones.
      if (Hot) {
        CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
        emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
      } else {
        CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
        emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
      }
    }
    for (auto I : CIS) {
      Function *CalledFunction = CallSite(I).getCalledFunction();
      // Do not inline recursive calls.
      if (CalledFunction == &F)
        continue;
      if (CallSite(I).isIndirectCall()) {
        if (PromotedInsns.count(I))
          continue;
        uint64_t Sum;
        for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
          if (IsThinLTOPreLink) {
            // In the ThinLTO pre-link phase, just record the GUIDs of the
            // would-be-inlined functions instead of transforming the IR.
            FS->findInlinedFunctions(InlinedGUIDs, F.getParent(),
                                     PSI->getOrCompHotCountThreshold());
            continue;
          }
          auto CalleeFunctionName = FS->getFuncNameInModule(F.getParent());
          // If it is a recursive call, we do not inline it as it could bloat
          // the code exponentially. There is a way to better handle this,
          // e.g. clone the caller first, and inline the cloned caller if it
          // is recursive. As llvm does not inline recursive calls, we will
          // simply ignore it instead of handling it explicitly.
          if (CalleeFunctionName == F.getName())
            continue;

          if (!callsiteIsHot(FS, PSI))
            continue;

          const char *Reason = "Callee function not available";
          auto R = SymbolMap.find(CalleeFunctionName);
          if (R != SymbolMap.end() && R->getValue() &&
              !R->getValue()->isDeclaration() &&
              R->getValue()->getSubprogram() &&
              isLegalToPromote(*cast<CallBase>(I), R->getValue(), &Reason)) {
            uint64_t C = FS->getEntrySamples();
            Instruction *DI =
                pgo::promoteIndirectCall(I, R->getValue(), C, Sum, false, ORE);
            // The promoted target's entry count no longer belongs to the
            // remaining (unpromoted) indirect-call total.
            Sum -= C;
            PromotedInsns.insert(I);
            // If profile mismatches, we should not attempt to inline DI.
            if ((isa<CallInst>(DI) || isa<InvokeInst>(DI)) &&
                inlineCallInstruction(DI)) {
              localNotInlinedCallSites.erase(I);
              LocalChanged = true;
              ++NumCSInlined;
            }
          } else {
            LLVM_DEBUG(dbgs()
                       << "\nFailed to promote indirect call to "
                       << CalleeFunctionName << " because " << Reason << "\n");
          }
        }
      } else if (CalledFunction && CalledFunction->getSubprogram() &&
                 !CalledFunction->isDeclaration()) {
        if (inlineCallInstruction(I)) {
          localNotInlinedCallSites.erase(I);
          LocalChanged = true;
          ++NumCSInlined;
        }
      } else if (IsThinLTOPreLink) {
        findCalleeFunctionSamples(*I)->findInlinedFunctions(
            InlinedGUIDs, F.getParent(), PSI->getOrCompHotCountThreshold());
      }
    }
    if (LocalChanged) {
      Changed = true;
    } else {
      break;
    }
  }

  // Accumulate not inlined callsite information into notInlinedSamples
  for (const auto &Pair : localNotInlinedCallSites) {
    Instruction *I = Pair.getFirst();
    Function *Callee = CallSite(I).getCalledFunction();
    if (!Callee || Callee->isDeclaration())
      continue;

    ORE->emit(OptimizationRemarkAnalysis(CSINLINE_DEBUG, "NotInline",
                                         I->getDebugLoc(), I->getParent())
              << "previous inlining not repeated: '"
              << ore::NV("Callee", Callee) << "' into '"
              << ore::NV("Caller", &F) << "'");

    ++NumCSNotInlined;
    const FunctionSamples *FS = Pair.getSecond();
    if (FS->getTotalSamples() == 0 && FS->getEntrySamples() == 0) {
      continue;
    }

    if (ProfileMergeInlinee) {
      // Use entry samples as head samples during the merge, as inlinees
      // don't have head samples.
      assert(FS->getHeadSamples() == 0 && "Expect 0 head sample for inlinee");
      const_cast<FunctionSamples *>(FS)->addHeadSamples(FS->getEntrySamples());

      // Note that we have to do the merge right after processing function.
      // This allows OutlineFS's profile to be used for annotation during
      // top-down processing of functions' annotation.
      FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
      OutlineFS->merge(*FS);
    } else {
      auto pair =
          notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
      pair.first->second.entryCount += FS->getEntrySamples();
    }
  }
  return Changed;
}

/// Find equivalence classes for the given block.
///
/// This finds all the blocks that are guaranteed to execute the same
/// number of times as \p BB1. To do this, it traverses all the
/// descendants of \p BB1 in the dominator or post-dominator tree.
///
/// A block BB2 will be in the same equivalence class as \p BB1 if
/// the following holds:
///
/// 1- \p BB1 is a descendant of BB2 in the opposite tree. So, if BB2
///    is a descendant of \p BB1 in the dominator tree, then BB2 should
///    dominate BB1 in the post-dominator tree.
///
/// 2- Both BB2 and \p BB1 must be in the same loop.
///
/// For every block BB2 that meets those two requirements, we set BB2's
/// equivalence class to \p BB1.
///
/// \param BB1 Block to check.
/// \param Descendants Descendants of \p BB1 in either the dom or pdom tree.
/// \param DomTree Opposite dominator tree. If \p Descendants is filled
///                with blocks from \p BB1's dominator tree, then
///                this is the post-dominator tree, and vice versa.
1153 template <bool IsPostDom> 1154 void SampleProfileLoader::findEquivalencesFor( 1155 BasicBlock *BB1, ArrayRef<BasicBlock *> Descendants, 1156 DominatorTreeBase<BasicBlock, IsPostDom> *DomTree) { 1157 const BasicBlock *EC = EquivalenceClass[BB1]; 1158 uint64_t Weight = BlockWeights[EC]; 1159 for (const auto *BB2 : Descendants) { 1160 bool IsDomParent = DomTree->dominates(BB2, BB1); 1161 bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2); 1162 if (BB1 != BB2 && IsDomParent && IsInSameLoop) { 1163 EquivalenceClass[BB2] = EC; 1164 // If BB2 is visited, then the entire EC should be marked as visited. 1165 if (VisitedBlocks.count(BB2)) { 1166 VisitedBlocks.insert(EC); 1167 } 1168 1169 // If BB2 is heavier than BB1, make BB2 have the same weight 1170 // as BB1. 1171 // 1172 // Note that we don't worry about the opposite situation here 1173 // (when BB2 is lighter than BB1). We will deal with this 1174 // during the propagation phase. Right now, we just want to 1175 // make sure that BB1 has the largest weight of all the 1176 // members of its equivalence set. 1177 Weight = std::max(Weight, BlockWeights[BB2]); 1178 } 1179 } 1180 if (EC == &EC->getParent()->getEntryBlock()) { 1181 BlockWeights[EC] = Samples->getHeadSamples() + 1; 1182 } else { 1183 BlockWeights[EC] = Weight; 1184 } 1185 } 1186 1187 /// Find equivalence classes. 1188 /// 1189 /// Since samples may be missing from blocks, we can fill in the gaps by setting 1190 /// the weights of all the blocks in the same equivalence class to the same 1191 /// weight. To compute the concept of equivalence, we use dominance and loop 1192 /// information. Two blocks B1 and B2 are in the same equivalence class if B1 1193 /// dominates B2, B2 post-dominates B1 and both are in the same loop. 1194 /// 1195 /// \param F The function to query. 
void SampleProfileLoader::findEquivalenceClasses(Function &F) {
  SmallVector<BasicBlock *, 8> DominatedBBs;
  LLVM_DEBUG(dbgs() << "\nBlock equivalence classes\n");
  // Find equivalence sets based on dominance and post-dominance information.
  for (auto &BB : F) {
    BasicBlock *BB1 = &BB;

    // Compute BB1's equivalence class once.
    if (EquivalenceClass.count(BB1)) {
      LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
      continue;
    }

    // By default, blocks are in their own equivalence class.
    EquivalenceClass[BB1] = BB1;

    // Traverse all the blocks dominated by BB1. We are looking for
    // every basic block BB2 such that:
    //
    // 1- BB1 dominates BB2.
    // 2- BB2 post-dominates BB1.
    // 3- BB1 and BB2 are in the same loop nest.
    //
    // If all those conditions hold, it means that BB2 is executed
    // as many times as BB1, so they are placed in the same equivalence
    // class by making BB2's equivalence class be BB1.
    DominatedBBs.clear();
    DT->getDescendants(BB1, DominatedBBs);
    findEquivalencesFor(BB1, DominatedBBs, PDT.get());

    LLVM_DEBUG(printBlockEquivalence(dbgs(), BB1));
  }

  // Assign weights to equivalence classes.
  //
  // All the basic blocks in the same equivalence class will execute
  // the same number of times. Since we know that the head block in
  // each equivalence class has the largest weight, assign that weight
  // to all the blocks in that equivalence class.
  LLVM_DEBUG(
      dbgs() << "\nAssign the same weight to all blocks in the same class\n");
  for (auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EquivBB = EquivalenceClass[BB];
    if (BB != EquivBB)
      BlockWeights[BB] = BlockWeights[EquivBB];
    LLVM_DEBUG(printBlockWeight(dbgs(), BB));
  }
}

/// Visit the given edge to decide if it has a valid weight.
///
/// If \p E has not been visited before, we copy to \p UnknownEdge
/// and increment the count of unknown edges.
///
/// \param E Edge to visit.
/// \param NumUnknownEdges Current number of unknown edges.
/// \param UnknownEdge Set if E has not been visited before.
///
/// \returns E's weight, if known. Otherwise, return 0.
uint64_t SampleProfileLoader::visitEdge(Edge E, unsigned *NumUnknownEdges,
                                        Edge *UnknownEdge) {
  if (!VisitedEdges.count(E)) {
    (*NumUnknownEdges)++;
    *UnknownEdge = E;
    return 0;
  }

  return EdgeWeights[E];
}

/// Propagate weights through incoming/outgoing edges.
///
/// If the weight of a basic block is known, and there is only one edge
/// with an unknown weight, we can calculate the weight of that edge.
///
/// Similarly, if all the edges have a known count, we can calculate the
/// count of the basic block, if needed.
///
/// \param F Function to process.
/// \param UpdateBlockCount Whether we should update basic block counts that
///                         has already been annotated.
///
/// \returns True if new weights were assigned to edges or blocks.
bool SampleProfileLoader::propagateThroughEdges(Function &F,
                                                bool UpdateBlockCount) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "\nPropagation through edges\n");
  for (const auto &BI : F) {
    const BasicBlock *BB = &BI;
    const BasicBlock *EC = EquivalenceClass[BB];

    // Visit all the predecessor and successor edges to determine
    // which ones have a weight assigned already. Note that it doesn't
    // matter that we only keep track of a single unknown edge. The
    // only case we are interested in handling is when only a single
    // edge is unknown (see setEdgeOrBlockWeight).
    // Round i == 0 examines predecessor edges; i == 1 examines successors.
    for (unsigned i = 0; i < 2; i++) {
      uint64_t TotalWeight = 0;
      unsigned NumUnknownEdges = 0, NumTotalEdges = 0;
      Edge UnknownEdge, SelfReferentialEdge, SingleEdge;

      if (i == 0) {
        // First, visit all predecessor edges.
        NumTotalEdges = Predecessors[BB].size();
        for (auto *Pred : Predecessors[BB]) {
          Edge E = std::make_pair(Pred, BB);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
          if (E.first == E.second)
            SelfReferentialEdge = E;
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(Predecessors[BB][0], BB);
        }
      } else {
        // On the second round, visit all successor edges.
        NumTotalEdges = Successors[BB].size();
        for (auto *Succ : Successors[BB]) {
          Edge E = std::make_pair(BB, Succ);
          TotalWeight += visitEdge(E, &NumUnknownEdges, &UnknownEdge);
        }
        if (NumTotalEdges == 1) {
          SingleEdge = std::make_pair(BB, Successors[BB][0]);
        }
      }

      // After visiting all the edges, there are three cases that we
      // can handle immediately:
      //
      // - All the edge weights are known (i.e., NumUnknownEdges == 0).
      //   In this case, we simply check that the sum of all the edges
      //   is the same as BB's weight. If not, we change BB's weight
      //   to match. Additionally, if BB had not been visited before,
      //   we mark it visited.
      //
      // - Only one edge is unknown and BB has already been visited.
      //   In this case, we can compute the weight of the edge by
      //   subtracting the total block weight from all the known
      //   edge weights. If the known edges weigh more than BB, then the
      //   weight of the last remaining edge is set to zero.
      //
      // - There exists a self-referential edge and the weight of BB is
      //   known. In this case, this edge can be based on BB's weight.
      //   We add up all the other known edges and set the weight on
      //   the self-referential edge as we did in the previous case.
      //
      // In any other case, we must continue iterating. Eventually,
      // all edges will get a weight, or iteration will stop when
      // it reaches SampleProfileMaxPropagateIterations.
      if (NumUnknownEdges <= 1) {
        uint64_t &BBWeight = BlockWeights[EC];
        if (NumUnknownEdges == 0) {
          if (!VisitedBlocks.count(EC)) {
            // If we already know the weight of all edges, the weight of the
            // basic block can be computed. It should be no larger than the sum
            // of all edge weights.
            if (TotalWeight > BBWeight) {
              BBWeight = TotalWeight;
              Changed = true;
              LLVM_DEBUG(dbgs() << "All edge weights for " << BB->getName()
                                << " known. Set weight for block: ";
                         printBlockWeight(dbgs(), BB););
            }
          } else if (NumTotalEdges == 1 &&
                     EdgeWeights[SingleEdge] < BlockWeights[EC]) {
            // If there is only one edge for the visited basic block, use the
            // block weight to adjust edge weight if edge weight is smaller.
            EdgeWeights[SingleEdge] = BlockWeights[EC];
            Changed = true;
          }
        } else if (NumUnknownEdges == 1 && VisitedBlocks.count(EC)) {
          // If there is a single unknown edge and the block has been
          // visited, then we can compute E's weight.
          if (BBWeight >= TotalWeight)
            EdgeWeights[UnknownEdge] = BBWeight - TotalWeight;
          else
            EdgeWeights[UnknownEdge] = 0;
          const BasicBlock *OtherEC;
          if (i == 0)
            OtherEC = EquivalenceClass[UnknownEdge.first];
          else
            OtherEC = EquivalenceClass[UnknownEdge.second];
          // Edge weights should never exceed the BB weights it connects.
          if (VisitedBlocks.count(OtherEC) &&
              EdgeWeights[UnknownEdge] > BlockWeights[OtherEC])
            EdgeWeights[UnknownEdge] = BlockWeights[OtherEC];
          VisitedEdges.insert(UnknownEdge);
          Changed = true;
          LLVM_DEBUG(dbgs() << "Set weight for edge: ";
                     printEdgeWeight(dbgs(), UnknownEdge));
        }
      } else if (VisitedBlocks.count(EC) && BlockWeights[EC] == 0) {
        // If a block weighs 0, all its in/out edges should weigh 0.
        if (i == 0) {
          for (auto *Pred : Predecessors[BB]) {
            Edge E = std::make_pair(Pred, BB);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        } else {
          for (auto *Succ : Successors[BB]) {
            Edge E = std::make_pair(BB, Succ);
            EdgeWeights[E] = 0;
            VisitedEdges.insert(E);
          }
        }
      } else if (SelfReferentialEdge.first && VisitedBlocks.count(EC)) {
        uint64_t &BBWeight = BlockWeights[BB];
        // We have a self-referential edge and the weight of BB is known.
        if (BBWeight >= TotalWeight)
          EdgeWeights[SelfReferentialEdge] = BBWeight - TotalWeight;
        else
          EdgeWeights[SelfReferentialEdge] = 0;
        VisitedEdges.insert(SelfReferentialEdge);
        Changed = true;
        LLVM_DEBUG(dbgs() << "Set self-referential edge weight to: ";
                   printEdgeWeight(dbgs(), SelfReferentialEdge));
      }
      // In the final pass (UpdateBlockCount), adopt the edge-weight sum as
      // the count of blocks the profile never annotated.
      if (UpdateBlockCount && !VisitedBlocks.count(EC) && TotalWeight > 0) {
        BlockWeights[EC] = TotalWeight;
        VisitedBlocks.insert(EC);
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Build in/out edge lists for each basic block in the CFG.
///
/// We are interested in unique edges. If a block B1 has multiple
/// edges to another block B2, we only add a single B1->B2 edge.
void SampleProfileLoader::buildEdges(Function &F) {
  for (auto &BI : F) {
    BasicBlock *B1 = &BI;

    // Add predecessors for B1.
    SmallPtrSet<BasicBlock *, 16> Visited;
    if (!Predecessors[B1].empty())
      llvm_unreachable("Found a stale predecessors list in a basic block.");
    for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
      BasicBlock *B2 = *PI;
      // Visited de-duplicates multi-edges (e.g. a switch with several cases
      // branching to the same block).
      if (Visited.insert(B2).second)
        Predecessors[B1].push_back(B2);
    }

    // Add successors for B1.
    Visited.clear();
    if (!Successors[B1].empty())
      llvm_unreachable("Found a stale successors list in a basic block.");
    for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
      BasicBlock *B2 = *SI;
      if (Visited.insert(B2).second)
        Successors[B1].push_back(B2);
    }
  }
}

/// Returns the sorted CallTargetMap \p M by count in descending order.
static SmallVector<InstrProfValueData, 2> GetSortedValueDataFromCallTargets(
    const SampleRecord::CallTargetMap & M) {
  SmallVector<InstrProfValueData, 2> R;
  // Convert each sorted (name, count) pair into an InstrProfValueData keyed
  // by the target function's GUID.
  for (const auto &I : SampleRecord::SortCallTargets(M)) {
    R.emplace_back(InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
  }
  return R;
}

/// Propagate weights into edges
///
/// The following rules are applied to every block BB in the CFG:
///
/// - If BB has a single predecessor/successor, then the weight
///   of that edge is the weight of the block.
///
/// - If all incoming or outgoing edges are known except one, and the
///   weight of the block is already known, the weight of the unknown
///   edge will be the weight of the block minus the sum of all the known
///   edges. If the sum of all the known edges is larger than BB's weight,
///   we set the unknown edge weight to zero.
///
/// - If there is a self-referential edge, and the weight of the block is
///   known, the weight for that edge is set to the weight of the block
///   minus the weight of the other incoming edges to that block (if
///   known).
void SampleProfileLoader::propagateWeights(Function &F) {
  bool Changed = true;
  unsigned I = 0;

  // If BB weight is larger than its corresponding loop's header BB weight,
  // use the BB weight to replace the loop header BB weight.
  for (auto &BI : F) {
    BasicBlock *BB = &BI;
    Loop *L = LI->getLoopFor(BB);
    if (!L) {
      continue;
    }
    BasicBlock *Header = L->getHeader();
    if (Header && BlockWeights[BB] > BlockWeights[Header]) {
      BlockWeights[Header] = BlockWeights[BB];
    }
  }

  // Before propagation starts, build, for each block, a list of
  // unique predecessors and successors. This is necessary to handle
  // identical edges in multiway branches. Since we visit all blocks and all
  // edges of the CFG, it is cleaner to build these lists once at the start
  // of the pass.
  buildEdges(F);

  // Propagate until we converge or we go past the iteration limit.
  // Note: I accumulates across all three phases, so the limit bounds the
  // combined number of propagation iterations.
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The first propagation propagates BB counts from annotated BBs to unknown
  // BBs. The 2nd propagation pass resets edge weights, and uses all BB
  // weights to propagate edge weights.
  VisitedEdges.clear();
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, false);
  }

  // The 3rd propagation pass allows adjusting annotated BB weights that are
  // obviously wrong.
  Changed = true;
  while (Changed && I++ < SampleProfileMaxPropagateIterations) {
    Changed = propagateThroughEdges(F, true);
  }

  // Generate MD_prof metadata for every branch instruction using the
  // edge weights computed during propagation.
  LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
  LLVMContext &Ctx = F.getContext();
  MDBuilder MDB(Ctx);
  for (auto &BI : F) {
    BasicBlock *BB = &BI;

    if (BlockWeights[BB]) {
      for (auto &I : BB->getInstList()) {
        if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
          continue;
        CallSite CS(&I);
        if (!CS.getCalledFunction()) {
          // Indirect call: annotate its value profile (target GUIDs and
          // counts) so later passes can promote or specialize it.
          const DebugLoc &DLoc = I.getDebugLoc();
          if (!DLoc)
            continue;
          const DILocation *DIL = DLoc;
          uint32_t LineOffset = FunctionSamples::getOffset(DIL);
          uint32_t Discriminator = DIL->getBaseDiscriminator();

          const FunctionSamples *FS = findFunctionSamples(I);
          if (!FS)
            continue;
          auto T = FS->findCallTargetMapAt(LineOffset, Discriminator);
          if (!T || T.get().empty())
            continue;
          SmallVector<InstrProfValueData, 2> SortedCallTargets =
              GetSortedValueDataFromCallTargets(T.get());
          uint64_t Sum;
          findIndirectCallFunctionSamples(I, Sum);
          annotateValueSite(*I.getParent()->getParent()->getParent(), I,
                            SortedCallTargets, Sum, IPVK_IndirectCallTarget,
                            SortedCallTargets.size());
        } else if (!isa<IntrinsicInst>(&I)) {
          // Direct call: attach the block weight as the call count.
          I.setMetadata(LLVMContext::MD_prof,
                        MDB.createBranchWeights(
                            {static_cast<uint32_t>(BlockWeights[BB])}));
        }
      }
    }
    Instruction *TI = BB->getTerminator();
    if (TI->getNumSuccessors() == 1)
      continue;
    if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
      continue;

    DebugLoc BranchLoc = TI->getDebugLoc();
    LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
                      << ((BranchLoc) ? Twine(BranchLoc.getLine())
                                      : Twine("<UNKNOWN LOCATION>"))
                      << ".\n");
    SmallVector<uint32_t, 4> Weights;
    uint32_t MaxWeight = 0;
    // MaxDestInst is only read when MaxWeight > 0, which guarantees it has
    // been assigned in the loop below.
    Instruction *MaxDestInst;
    for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
      BasicBlock *Succ = TI->getSuccessor(I);
      Edge E = std::make_pair(BB, Succ);
      uint64_t Weight = EdgeWeights[E];
      LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
      // Use uint32_t saturated arithmetic to adjust the incoming weights,
      // if needed. Sample counts in profiles are 64-bit unsigned values,
      // but internally branch weights are expressed as 32-bit values.
      if (Weight > std::numeric_limits<uint32_t>::max()) {
        LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
        Weight = std::numeric_limits<uint32_t>::max();
      }
      // Weight is added by one to avoid propagation errors introduced by
      // 0 weights.
      Weights.push_back(static_cast<uint32_t>(Weight + 1));
      if (Weight != 0) {
        // Remember the most popular destination for the remark below.
        if (Weight > MaxWeight) {
          MaxWeight = Weight;
          MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
        }
      }
    }

    misexpect::verifyMisExpect(TI, Weights, TI->getContext());

    uint64_t TempWeight;
    // Only set weights if there is at least one non-zero weight.
    // In any other case, let the analyzer set weights.
    // Do not set weights if the weights are present. In ThinLTO, the profile
    // annotation is done twice. If the first annotation already set the
    // weights, the second pass does not need to set it.
    if (MaxWeight > 0 && !TI->extractProfTotalWeight(TempWeight)) {
      LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
      TI->setMetadata(LLVMContext::MD_prof,
                      MDB.createBranchWeights(Weights));
      ORE->emit([&]() {
        return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
               << "most popular destination for conditional branches at "
               << ore::NV("CondBranchesLoc", BranchLoc);
      });
    } else {
      LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
    }
  }
}

/// Get the line number for the function header.
///
/// This looks up function \p F in the current compilation unit and
/// retrieves the line number where the function is defined. This is
/// line 0 for all the samples read from the profile file. Every line
/// number is relative to this line.
///
/// \param F Function object to query.
///
/// \returns the line number where \p F is defined. If it returns 0,
/// it means that there is no debug information available for \p F.
unsigned SampleProfileLoader::getFunctionLoc(Function &F) {
  if (DISubprogram *S = F.getSubprogram())
    return S->getLine();

  if (NoWarnSampleUnused)
    return 0;

  // If the start of \p F is missing, emit a diagnostic to inform the user
  // about the missed opportunity.
  F.getContext().diagnose(DiagnosticInfoSampleProfile(
      "No debug information found in function " + F.getName() +
      ": Function profile not used",
      DS_Warning));
  return 0;
}

// Recompute, from scratch for function \p F, the dominator tree,
// post-dominator tree, and loop info that weight propagation relies on.
void SampleProfileLoader::computeDominanceAndLoopInfo(Function &F) {
  DT.reset(new DominatorTree);
  DT->recalculate(F);

  PDT.reset(new PostDominatorTree(F));

  LI.reset(new LoopInfo);
  LI->analyze(*DT);
}

/// Generate branch weight metadata for all branches in \p F.
///
/// Branch weights are computed out of instruction samples using a
/// propagation heuristic. Propagation proceeds in 3 phases:
///
/// 1- Assignment of block weights.
All the basic blocks in the function
///    are initially assigned the same weight as their most frequently
///    executed instruction.
///
/// 2- Creation of equivalence classes. Since samples may be missing from
///    blocks, we can fill in the gaps by setting the weights of all the
///    blocks in the same equivalence class to the same weight. To compute
///    the concept of equivalence, we use dominance and loop information.
///    Two blocks B1 and B2 are in the same equivalence class if B1
///    dominates B2, B2 post-dominates B1 and both are in the same loop.
///
/// 3- Propagation of block weights into edges. This uses a simple
///    propagation heuristic. The following rules are applied to every
///    block BB in the CFG:
///
///    - If BB has a single predecessor/successor, then the weight
///      of that edge is the weight of the block.
///
///    - If all the edges are known except one, and the weight of the
///      block is already known, the weight of the unknown edge will
///      be the weight of the block minus the sum of all the known
///      edges. If the sum of all the known edges is larger than BB's weight,
///      we set the unknown edge weight to zero.
///
///    - If there is a self-referential edge, and the weight of the block is
///      known, the weight for that edge is set to the weight of the block
///      minus the weight of the other incoming edges to that block (if
///      known).
///
/// Since this propagation is not guaranteed to finalize for every CFG, we
/// only allow it to proceed for a limited number of iterations (controlled
/// by -sample-profile-max-propagate-iterations).
///
/// FIXME: Try to replace this propagation heuristic with a scheme
/// that is guaranteed to finalize. A work-list approach similar to
/// the standard value propagation algorithm used by SSA-CCP might
/// work here.
///
/// Once all the branch weights are computed, we emit the MD_prof
/// metadata on BB using the computed values for each of its branches.
///
/// \param F The function to query.
///
/// \returns true if \p F was modified. Returns false, otherwise.
bool SampleProfileLoader::emitAnnotations(Function &F) {
  bool Changed = false;

  // Without a valid function start line there is no way to map profile
  // offsets onto instructions; bail out without touching the IR.
  if (getFunctionLoc(F) == 0)
    return false;

  LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
                    << F.getName() << ": " << getFunctionLoc(F) << "\n");

  // Inlining may add new inline instances whose samples we can use, and
  // collects the GUIDs of functions inlined in the profiled binary.
  DenseSet<GlobalValue::GUID> InlinedGUIDs;
  Changed |= inlineHotFunctions(F, InlinedGUIDs);

  // Compute basic block weights.
  Changed |= computeBlockWeights(F);

  if (Changed) {
    // Add an entry count to the function using the samples gathered at the
    // function entry.
    // Sets the GUIDs that are inlined in the profiled binary. This is used
    // for ThinLink to make correct liveness analysis, and also make the IR
    // match the profiled binary before annotation.
    F.setEntryCount(
        ProfileCount(Samples->getHeadSamples() + 1, Function::PCT_Real),
        &InlinedGUIDs);

    // Compute dominance and loop info needed for propagation.
    computeDominanceAndLoopInfo(F);

    // Find equivalence classes.
    findEquivalenceClasses(F);

    // Propagate weights to all edges.
    propagateWeights(F);
  }

  // If coverage checking was requested, compute it now.
  // Record coverage: fraction of profile records applied to the function.
  if (SampleProfileRecordCoverage) {
    unsigned Used = CoverageTracker.countUsedRecords(Samples, PSI);
    unsigned Total = CoverageTracker.countBodyRecords(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileRecordCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile records (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }

  // Sample coverage: fraction of total samples actually attributed to IR.
  if (SampleProfileSampleCoverage) {
    uint64_t Used = CoverageTracker.getTotalUsedSamples();
    uint64_t Total = CoverageTracker.countBodySamples(Samples, PSI);
    unsigned Coverage = CoverageTracker.computeCoverage(Used, Total);
    if (Coverage < SampleProfileSampleCoverage) {
      F.getContext().diagnose(DiagnosticInfoSampleProfile(
          F.getSubprogram()->getFilename(), getFunctionLoc(F),
          Twine(Used) + " of " + Twine(Total) + " available profile samples (" +
              Twine(Coverage) + "%) were applied",
          DS_Warning));
    }
  }
  return Changed;
}

char SampleProfileLoaderLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SampleProfileLoaderLegacyPass, "sample-profile",
                      "Sample Profile loader", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(SampleProfileLoaderLegacyPass, "sample-profile",
                    "Sample Profile loader", false, false)

// Return the functions of \p M in the order their profiles should be
// applied. When top-down loading is disabled or no call graph is
// available, module order is used; otherwise callers are visited before
// callees (see below).
std::vector<Function *>
SampleProfileLoader::buildFunctionOrder(Module &M, CallGraph *CG) {
  std::vector<Function *> FunctionOrderList;
  FunctionOrderList.reserve(M.size());

  if (!ProfileTopDownLoad || CG == nullptr) {
    for (Function &F : M)
      if
(!F.isDeclaration())
        FunctionOrderList.push_back(&F);
    return FunctionOrderList;
  }

  assert(&CG->getModule() == &M);
  // Collect definitions in post-order over the call-graph SCCs (callees
  // first), then reverse the list to obtain a top-down, callers-first
  // order.
  scc_iterator<CallGraph *> CGI = scc_begin(CG);
  while (!CGI.isAtEnd()) {
    for (CallGraphNode *node : *CGI) {
      auto F = node->getFunction();
      if (F && !F->isDeclaration())
        FunctionOrderList.push_back(F);
    }
    ++CGI;
  }

  std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
  return FunctionOrderList;
}

// Open and read the sample profile named by Filename, and set up reader
// state used by runOnModule. Returns false (disabling the pass) only when
// the profile file cannot be opened; a profile that opens but fails to
// parse is recorded in ProfileIsValid and handled later.
bool SampleProfileLoader::doInitialization(Module &M) {
  auto &Ctx = M.getContext();

  std::unique_ptr<SampleProfileReaderItaniumRemapper> RemapReader;
  auto ReaderOrErr =
      SampleProfileReader::create(Filename, Ctx, RemappingFilename);
  if (std::error_code EC = ReaderOrErr.getError()) {
    std::string Msg = "Could not open profile: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
    return false;
  }
  Reader = std::move(ReaderOrErr.get());
  // Restrict reading to profiles of functions present in this module.
  Reader->collectFuncsFrom(M);
  ProfileIsValid = (Reader->read() == sampleprof_error::success);
  PSL = Reader->getProfileSymbolList();

  // While profile-sample-accurate is on, ignore symbol list.
  ProfAccForSymsInList =
      ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
  if (ProfAccForSymsInList) {
    NamesInProfile.clear();
    if (auto NameTable = Reader->getNameTable())
      NamesInProfile.insert(NameTable->begin(), NameTable->end());
  }

  return true;
}

ModulePass *llvm::createSampleProfileLoaderPass() {
  return new SampleProfileLoaderLegacyPass();
}

ModulePass *llvm::createSampleProfileLoaderPass(StringRef Name) {
  return new SampleProfileLoaderLegacyPass(Name);
}

bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
                                      ProfileSummaryInfo *_PSI,
                                      CallGraph *CG) {
  GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
  if (!ProfileIsValid)
    return false;

  PSI = _PSI;
  if (M.getProfileSummary(/* IsCS */ false) == nullptr)
    M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
                        ProfileSummary::PSK_Sample);

  // Compute the total number of samples collected in this profile.
  for (const auto &I : Reader->getProfiles())
    TotalCollectedSamples += I.second.getTotalSamples();

  // Populate the symbol map.
  for (const auto &N_F : M.getValueSymbolTable()) {
    StringRef OrigName = N_F.getKey();
    Function *F = dyn_cast<Function>(N_F.getValue());
    if (F == nullptr)
      continue;
    SymbolMap[OrigName] = F;
    // Also map the name with any "."-suffix stripped, so profiles keyed by
    // the unsuffixed name still match this function.
    auto pos = OrigName.find('.');
    if (pos != StringRef::npos) {
      StringRef NewName = OrigName.substr(0, pos);
      auto r = SymbolMap.insert(std::make_pair(NewName, F));
      // Failing to insert means there is already an entry in SymbolMap,
      // thus there are multiple functions that are mapped to the same
      // stripped name. In this case of name conflicting, set the value
      // to nullptr to avoid confusion.
      if (!r.second)
        r.first->second = nullptr;
    }
  }

  bool retval = false;
  for (auto F : buildFunctionOrder(M, CG)) {
    assert(!F->isDeclaration());
    // Per-function state (Samples, weights, equivalence classes, ...) must
    // not leak between functions.
    clearFunctionData();
    retval |= runOnFunction(*F, AM);
  }

  // Account for cold calls not inlined....
  for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
       notInlinedCallInfo)
    updateProfileCallee(pair.first, pair.second.entryCount);

  return retval;
}

bool SampleProfileLoaderLegacyPass::runOnModule(Module &M) {
  ACT = &getAnalysis<AssumptionCacheTracker>();
  TTIWP = &getAnalysis<TargetTransformInfoWrapperPass>();
  TLIWP = &getAnalysis<TargetLibraryInfoWrapperPass>();
  ProfileSummaryInfo *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  // The legacy pass manager has no ModuleAnalysisManager or CallGraph to
  // forward; the loader falls back to module order and an owned ORE.
  return SampleLoader.runOnModule(M, nullptr, PSI, nullptr);
}

bool SampleProfileLoader::runOnFunction(Function &F,
                                        ModuleAnalysisManager *AM) {

  DILocation2SampleMap.clear();
  // By default the entry count is initialized to -1, which will be treated
  // conservatively by getEntryCount as the same as unknown (None). This is
  // to avoid newly added code to be treated as cold. If we have samples
  // this will be overwritten in emitAnnotations.
  uint64_t initialEntryCount = -1;

  ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
  if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
    // initialize all the function entry counts to 0. It means all the
    // functions without profile will be regarded as cold.
    initialEntryCount = 0;
    // profile-sample-accurate is a user assertion which has a higher precedence
    // than symbol list. When profile-sample-accurate is on, ignore symbol list.
    ProfAccForSymsInList = false;
  }

  // PSL -- profile symbol list includes all the symbols in sampled binary.
  // If ProfileAccurateForSymsInList is enabled, PSL is used to treat
  // old functions without samples as cold, without having to worry
  // about new and hot functions being mistakenly treated as cold.
  if (ProfAccForSymsInList) {
    // Initialize the entry count to 0 for functions in the list.
    if (PSL->contains(F.getName()))
      initialEntryCount = 0;

    // A function in the symbol list but without samples will be regarded as
    // cold. To minimize the potential negative performance impact it could
    // have, we want to be a little conservative here: if a function
    // shows up in the profile, no matter as an outline function, inline
    // instance or call target, treat the function as not being cold. This
    // will handle the cases such as most callsites of a function are inlined
    // in the sampled binary but not inlined in the current build (because of
    // source code drift, imprecise debug information, or the callsites are
    // all cold individually but not cold accumulatively...), so the outline
    // function showing up as cold in the sampled binary will actually not be
    // cold after the current build.
    StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
    // Seen anywhere in the profile: revert to the "unknown" entry count so
    // the function is not pessimized to cold.
    if (NamesInProfile.count(CanonName))
      initialEntryCount = -1;
  }

  F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
  std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
  if (AM) {
    auto &FAM =
        AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
            .getManager();
    ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  } else {
    // No analysis manager (legacy pass manager): create and own an ORE for
    // the duration of this function.
    OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
    ORE = OwnedORE.get();
  }
  Samples = Reader->getSamplesFor(F);
  if (Samples && !Samples->empty())
    return emitAnnotations(F);
  return false;
}

PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
                                               ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();

  // Callbacks handed to the loader so it can lazily query per-function
  // analyses through the new pass manager.
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
    return FAM.getResult<TargetIRAnalysis>(F);
  };
  auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  // Pass-level file names (set via the pipeline) override the global
  // command-line options when non-empty.
  SampleProfileLoader SampleLoader(
      ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
      ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
                                       : ProfileRemappingFileName,
      IsThinLTOPreLink, GetAssumptionCache, GetTTI, GetTLI);

  if (!SampleLoader.doInitialization(M))
    return PreservedAnalyses::all();

  ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
  CallGraph &CG = AM.getResult<CallGraphAnalysis>(M);
  if (!SampleLoader.runOnModule(M, &AM, PSI, &CG))
    return PreservedAnalyses::all();

  return PreservedAnalyses::none();
}