1 //===- Inliner.cpp - Code common to all inliners --------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the mechanics required to implement inlining without 11 // missing any calls and updating the call graph. The decisions of which calls 12 // are profitable to inline are implemented elsewhere. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "llvm/Transforms/IPO/Inliner.h" 17 #include "llvm/ADT/DenseMap.h" 18 #include "llvm/ADT/None.h" 19 #include "llvm/ADT/Optional.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/ADT/StringRef.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Analysis/AssumptionCache.h" 28 #include "llvm/Analysis/BasicAliasAnalysis.h" 29 #include "llvm/Analysis/BlockFrequencyInfo.h" 30 #include "llvm/Analysis/CGSCCPassManager.h" 31 #include "llvm/Analysis/CallGraph.h" 32 #include "llvm/Analysis/InlineCost.h" 33 #include "llvm/Analysis/LazyCallGraph.h" 34 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 35 #include "llvm/Analysis/ProfileSummaryInfo.h" 36 #include "llvm/Analysis/TargetLibraryInfo.h" 37 #include "llvm/Analysis/TargetTransformInfo.h" 38 #include "llvm/Transforms/Utils/Local.h" 39 #include "llvm/IR/Attributes.h" 40 #include "llvm/IR/BasicBlock.h" 41 #include "llvm/IR/CallSite.h" 42 #include "llvm/IR/DataLayout.h" 43 #include "llvm/IR/DebugLoc.h" 44 #include "llvm/IR/DerivedTypes.h" 45 #include "llvm/IR/DiagnosticInfo.h" 46 #include "llvm/IR/Function.h" 47 #include "llvm/IR/InstIterator.h" 48 #include "llvm/IR/Instruction.h" 49 #include "llvm/IR/Instructions.h" 50 #include "llvm/IR/IntrinsicInst.h" 51 #include "llvm/IR/Metadata.h" 52 #include "llvm/IR/Module.h" 53 #include "llvm/IR/PassManager.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/Pass.h" 57 #include "llvm/Support/Casting.h" 58 #include "llvm/Support/CommandLine.h" 59 #include "llvm/Support/Debug.h" 60 #include "llvm/Support/raw_ostream.h" 61 #include "llvm/Transforms/Utils/Cloning.h" 62 #include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h" 63 #include "llvm/Transforms/Utils/ModuleUtils.h" 64 #include <algorithm> 65 #include <cassert> 66 #include <functional> 67 #include <sstream> 68 #include <tuple> 69 #include <utility> 70 #include <vector> 71 72 using namespace llvm; 73 74 #define DEBUG_TYPE "inline" 75 76 STATISTIC(NumInlined, "Number of functions inlined"); 77 STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined"); 78 STATISTIC(NumDeleted, "Number of functions deleted because all callers found"); 79 STATISTIC(NumMergedAllocas, "Number of allocas merged together"); 80 81 // This weirdly named statistic tracks the number of times that, when attempting 82 // to inline a function A into B, we analyze the callers of B in order to see 83 // if those would be more profitable and blocked inline steps. 84 STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed"); 85 86 /// Flag to disable manual alloca merging. 87 /// 88 /// Merging of allocas was originally done as a stack-size saving technique 89 /// prior to LLVM's code generator having support for stack coloring based on 90 /// lifetime markers. It is now in the process of being removed. To experiment 91 /// with disabling it and relying fully on lifetime marker based stack 92 /// coloring, you can pass this flag to LLVM. 93 static cl::opt<bool> 94 DisableInlinedAllocaMerging("disable-inlined-alloca-merging", 95 cl::init(false), cl::Hidden); 96 97 namespace { 98 99 enum class InlinerFunctionImportStatsOpts { 100 No = 0, 101 Basic = 1, 102 Verbose = 2, 103 }; 104 105 } // end anonymous namespace 106 107 static cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats( 108 "inliner-function-import-stats", 109 cl::init(InlinerFunctionImportStatsOpts::No), 110 cl::values(clEnumValN(InlinerFunctionImportStatsOpts::Basic, "basic", 111 "basic statistics"), 112 clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose", 113 "printing of statistics for each inlined function")), 114 cl::Hidden, cl::desc("Enable inliner stats for imported functions")); 115 116 LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {} 117 118 LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime) 119 : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {} 120 121 /// For this class, we declare that we require and preserve the call graph. 122 /// If the derived class implements this method, it should 123 /// always explicitly call the implementation here. 124 void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const { 125 AU.addRequired<AssumptionCacheTracker>(); 126 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 127 AU.addRequired<TargetLibraryInfoWrapperPass>(); 128 getAAResultsAnalysisUsage(AU); 129 CallGraphSCCPass::getAnalysisUsage(AU); 130 } 131 132 using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>; 133 134 /// Look at all of the allocas that we inlined through this call site. If we 135 /// have already inlined other allocas through other calls into this function, 136 /// then we know that they have disjoint lifetimes and that we can merge them. 137 /// 138 /// There are many heuristics possible for merging these allocas, and the 139 /// different options have different tradeoffs. One thing that we *really* 140 /// don't want to hurt is SRoA: once inlining happens, often allocas are no 141 /// longer address taken and so they can be promoted. 142 /// 143 /// Our "solution" for that is to only merge allocas whose outermost type is an 144 /// array type. These are usually not promoted because someone is using a 145 /// variable index into them. These are also often the most important ones to 146 /// merge. 147 /// 148 /// A better solution would be to have real memory lifetime markers in the IR 149 /// and not have the inliner do any merging of allocas at all. This would 150 /// allow the backend to do proper stack slot coloring of all allocas that 151 /// *actually make it to the backend*, which is really what we want. 152 /// 153 /// Because we don't have this information, we do this simple and useful hack. 154 static void mergeInlinedArrayAllocas( 155 Function *Caller, InlineFunctionInfo &IFI, 156 InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory) { 157 SmallPtrSet<AllocaInst *, 16> UsedAllocas; 158 159 // When processing our SCC, check to see if CS was inlined from some other 160 // call site. For example, if we're processing "A" in this code: 161 // A() { B() } 162 // B() { x = alloca ... C() } 163 // C() { y = alloca ... } 164 // Assume that C was not inlined into B initially, and so we're processing A 165 // and decide to inline B into A. Doing this makes an alloca available for 166 // reuse and makes a callsite (C) available for inlining. When we process 167 // the C call site we don't want to do any alloca merging between X and Y 168 // because their scopes are not disjoint. We could make this smarter by 169 // keeping track of the inline history for each alloca in the 170 // InlinedArrayAllocas but this isn't likely to be a significant win. 171 if (InlineHistory != -1) // Only do merging for top-level call sites in SCC. 172 return; 173 174 // Loop over all the allocas we have so far and see if they can be merged with 175 // a previously inlined alloca. If not, remember that we had it. 176 for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size(); AllocaNo != e; 177 ++AllocaNo) { 178 AllocaInst *AI = IFI.StaticAllocas[AllocaNo]; 179 180 // Don't bother trying to merge array allocations (they will usually be 181 // canonicalized to be an allocation *of* an array), or allocations whose 182 // type is not itself an array (because we're afraid of pessimizing SRoA). 183 ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType()); 184 if (!ATy || AI->isArrayAllocation()) 185 continue; 186 187 // Get the list of all available allocas for this array type. 188 std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy]; 189 190 // Loop over the allocas in AllocasForType to see if we can reuse one. Note 191 // that we have to be careful not to reuse the same "available" alloca for 192 // multiple different allocas that we just inlined, we use the 'UsedAllocas' 193 // set to keep track of which "available" allocas are being used by this 194 // function. Also, AllocasForType can be empty of course! 195 bool MergedAwayAlloca = false; 196 for (AllocaInst *AvailableAlloca : AllocasForType) { 197 unsigned Align1 = AI->getAlignment(), 198 Align2 = AvailableAlloca->getAlignment(); 199 200 // The available alloca has to be in the right function, not in some other 201 // function in this SCC. 202 if (AvailableAlloca->getParent() != AI->getParent()) 203 continue; 204 205 // If the inlined function already uses this alloca then we can't reuse 206 // it. 207 if (!UsedAllocas.insert(AvailableAlloca).second) 208 continue; 209 210 // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare 211 // success! 212 LLVM_DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI 213 << "\n\t\tINTO: " << *AvailableAlloca << '\n'); 214 215 // Move affected dbg.declare calls immediately after the new alloca to 216 // avoid the situation when a dbg.declare precedes its alloca. 217 if (auto *L = LocalAsMetadata::getIfExists(AI)) 218 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L)) 219 for (User *U : MDV->users()) 220 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U)) 221 DDI->moveBefore(AvailableAlloca->getNextNode()); 222 223 AI->replaceAllUsesWith(AvailableAlloca); 224 225 if (Align1 != Align2) { 226 if (!Align1 || !Align2) { 227 const DataLayout &DL = Caller->getParent()->getDataLayout(); 228 unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType()); 229 230 Align1 = Align1 ? Align1 : TypeAlign; 231 Align2 = Align2 ? Align2 : TypeAlign; 232 } 233 234 if (Align1 > Align2) 235 AvailableAlloca->setAlignment(AI->getAlignment()); 236 } 237 238 AI->eraseFromParent(); 239 MergedAwayAlloca = true; 240 ++NumMergedAllocas; 241 IFI.StaticAllocas[AllocaNo] = nullptr; 242 break; 243 } 244 245 // If we already nuked the alloca, we're done with it. 246 if (MergedAwayAlloca) 247 continue; 248 249 // If we were unable to merge away the alloca either because there are no 250 // allocas of the right type available or because we reused them all 251 // already, remember that this alloca came from an inlined function and mark 252 // it used so we don't reuse it for other allocas from this inline 253 // operation. 254 AllocasForType.push_back(AI); 255 UsedAllocas.insert(AI); 256 } 257 } 258 259 /// If it is possible to inline the specified call site, 260 /// do so and update the CallGraph for this operation. 261 /// 262 /// This function also does some basic book-keeping to update the IR. The 263 /// InlinedArrayAllocas map keeps track of any allocas that are already 264 /// available from other functions inlined into the caller. If we are able to 265 /// inline this call site we attempt to reuse already available allocas or add 266 /// any new allocas to the set if not possible. 267 static bool InlineCallIfPossible( 268 CallSite CS, InlineFunctionInfo &IFI, 269 InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory, 270 bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter, 271 ImportedFunctionsInliningStatistics &ImportedFunctionsStats) { 272 Function *Callee = CS.getCalledFunction(); 273 Function *Caller = CS.getCaller(); 274 275 AAResults &AAR = AARGetter(*Callee); 276 277 // Try to inline the function. Get the list of static allocas that were 278 // inlined. 279 InlineResult IR = InlineFunction(CS, IFI, &AAR, InsertLifetime); 280 if (!IR) 281 return IR; 282 283 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 284 ImportedFunctionsStats.recordInline(*Caller, *Callee); 285 286 AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee); 287 288 if (!DisableInlinedAllocaMerging) 289 mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory); 290 291 return IR; // success 292 } 293 294 /// Return true if inlining of CS can block the caller from being 295 /// inlined which is proved to be more beneficial. \p IC is the 296 /// estimated inline cost associated with callsite \p CS. 297 /// \p TotalSecondaryCost will be set to the estimated cost of inlining the 298 /// caller if \p CS is suppressed for inlining. 299 static bool 300 shouldBeDeferred(Function *Caller, CallSite CS, InlineCost IC, 301 int &TotalSecondaryCost, 302 function_ref<InlineCost(CallSite CS)> GetInlineCost) { 303 // For now we only handle local or inline functions. 304 if (!Caller->hasLocalLinkage() && !Caller->hasLinkOnceODRLinkage()) 305 return false; 306 // Try to detect the case where the current inlining candidate caller (call 307 // it B) is a static or linkonce-ODR function and is an inlining candidate 308 // elsewhere, and the current candidate callee (call it C) is large enough 309 // that inlining it into B would make B too big to inline later. In these 310 // circumstances it may be best not to inline C into B, but to inline B into 311 // its callers. 312 // 313 // This only applies to static and linkonce-ODR functions because those are 314 // expected to be available for inlining in the translation units where they 315 // are used. Thus we will always have the opportunity to make local inlining 316 // decisions. Importantly the linkonce-ODR linkage covers inline functions 317 // and templates in C++. 318 // 319 // FIXME: All of this logic should be sunk into getInlineCost. It relies on 320 // the internal implementation of the inline cost metrics rather than 321 // treating them as truly abstract units etc. 322 TotalSecondaryCost = 0; 323 // The candidate cost to be imposed upon the current function. 324 int CandidateCost = IC.getCost() - 1; 325 // This bool tracks what happens if we do NOT inline C into B. 326 bool callerWillBeRemoved = Caller->hasLocalLinkage(); 327 // This bool tracks what happens if we DO inline C into B. 328 bool inliningPreventsSomeOuterInline = false; 329 for (User *U : Caller->users()) { 330 CallSite CS2(U); 331 332 // If this isn't a call to Caller (it could be some other sort 333 // of reference) skip it. Such references will prevent the caller 334 // from being removed. 335 if (!CS2 || CS2.getCalledFunction() != Caller) { 336 callerWillBeRemoved = false; 337 continue; 338 } 339 340 InlineCost IC2 = GetInlineCost(CS2); 341 ++NumCallerCallersAnalyzed; 342 if (!IC2) { 343 callerWillBeRemoved = false; 344 continue; 345 } 346 if (IC2.isAlways()) 347 continue; 348 349 // See if inlining of the original callsite would erase the cost delta of 350 // this callsite. We subtract off the penalty for the call instruction, 351 // which we would be deleting. 352 if (IC2.getCostDelta() <= CandidateCost) { 353 inliningPreventsSomeOuterInline = true; 354 TotalSecondaryCost += IC2.getCost(); 355 } 356 } 357 // If all outer calls to Caller would get inlined, the cost for the last 358 // one is set very low by getInlineCost, in anticipation that Caller will 359 // be removed entirely. We did not account for this above unless there 360 // is only one caller of Caller. 361 if (callerWillBeRemoved && !Caller->hasOneUse()) 362 TotalSecondaryCost -= InlineConstants::LastCallToStaticBonus; 363 364 if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) 365 return true; 366 367 return false; 368 } 369 370 #ifndef NDEBUG 371 static std::basic_ostream<char> &operator<<(std::basic_ostream<char> &R, 372 const ore::NV &Arg) { 373 return R << Arg.Val; 374 } 375 #endif 376 377 template <class RemarkT> 378 RemarkT &operator<<(RemarkT &&R, const InlineCost &IC) { 379 using namespace ore; 380 if (IC.isAlways()) { 381 R << "(cost=always)"; 382 } else if (IC.isNever()) { 383 R << "(cost=never)"; 384 } else { 385 R << "(cost=" << ore::NV("Cost", IC.getCost()) 386 << ", threshold=" << ore::NV("Threshold", IC.getThreshold()) << ")"; 387 } 388 if (const char *Reason = IC.getReason()) 389 R << ": " << ore::NV("Reason", Reason); 390 return R; 391 } 392 393 #ifndef NDEBUG 394 static std::string inlineCostStr(const InlineCost &IC) { 395 std::stringstream Remark; 396 Remark << IC; 397 return Remark.str(); 398 } 399 #endif 400 401 /// Return the cost only if the inliner should attempt to inline at the given 402 /// CallSite. If we return the cost, we will emit an optimisation remark later 403 /// using that cost, so we won't do so from this function. 404 static Optional<InlineCost> 405 shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost, 406 OptimizationRemarkEmitter &ORE) { 407 using namespace ore; 408 409 InlineCost IC = GetInlineCost(CS); 410 Instruction *Call = CS.getInstruction(); 411 Function *Callee = CS.getCalledFunction(); 412 Function *Caller = CS.getCaller(); 413 414 if (IC.isAlways()) { 415 LLVM_DEBUG(dbgs() << " Inlining " << inlineCostStr(IC) 416 << ", Call: " << *CS.getInstruction() << "\n"); 417 return IC; 418 } 419 420 if (IC.isNever()) { 421 LLVM_DEBUG(dbgs() << " NOT Inlining " << inlineCostStr(IC) 422 << ", Call: " << *CS.getInstruction() << "\n"); 423 ORE.emit([&]() { 424 return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline", Call) 425 << NV("Callee", Callee) << " not inlined into " 426 << NV("Caller", Caller) << " because it should never be inlined " 427 << IC; 428 }); 429 return IC; 430 } 431 432 if (!IC) { 433 LLVM_DEBUG(dbgs() << " NOT Inlining " << inlineCostStr(IC) 434 << ", Call: " << *CS.getInstruction() << "\n"); 435 ORE.emit([&]() { 436 return OptimizationRemarkMissed(DEBUG_TYPE, "TooCostly", Call) 437 << NV("Callee", Callee) << " not inlined into " 438 << NV("Caller", Caller) << " because too costly to inline " << IC; 439 }); 440 return IC; 441 } 442 443 int TotalSecondaryCost = 0; 444 if (shouldBeDeferred(Caller, CS, IC, TotalSecondaryCost, GetInlineCost)) { 445 LLVM_DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction() 446 << " Cost = " << IC.getCost() 447 << ", outer Cost = " << TotalSecondaryCost << '\n'); 448 ORE.emit([&]() { 449 return OptimizationRemarkMissed(DEBUG_TYPE, "IncreaseCostInOtherContexts", 450 Call) 451 << "Not inlining. Cost of inlining " << NV("Callee", Callee) 452 << " increases the cost of inlining " << NV("Caller", Caller) 453 << " in other contexts"; 454 }); 455 456 // IC does not bool() to false, so get an InlineCost that will. 457 // This will not be inspected to make an error message. 458 return None; 459 } 460 461 LLVM_DEBUG(dbgs() << " Inlining " << inlineCostStr(IC) 462 << ", Call: " << *CS.getInstruction() << '\n'); 463 return IC; 464 } 465 466 /// Return true if the specified inline history ID 467 /// indicates an inline history that includes the specified function. 468 static bool InlineHistoryIncludes( 469 Function *F, int InlineHistoryID, 470 const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) { 471 while (InlineHistoryID != -1) { 472 assert(unsigned(InlineHistoryID) < InlineHistory.size() && 473 "Invalid inline history ID"); 474 if (InlineHistory[InlineHistoryID].first == F) 475 return true; 476 InlineHistoryID = InlineHistory[InlineHistoryID].second; 477 } 478 return false; 479 } 480 481 bool LegacyInlinerBase::doInitialization(CallGraph &CG) { 482 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 483 ImportedFunctionsStats.setModuleInfo(CG.getModule()); 484 return false; // No changes to CallGraph. 485 } 486 487 bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) { 488 if (skipSCC(SCC)) 489 return false; 490 return inlineCalls(SCC); 491 } 492 493 static void emit_inlined_into(OptimizationRemarkEmitter &ORE, DebugLoc &DLoc, 494 const BasicBlock *Block, const Function &Callee, 495 const Function &Caller, const InlineCost &IC) { 496 ORE.emit([&]() { 497 bool AlwaysInline = IC.isAlways(); 498 StringRef RemarkName = AlwaysInline ? "AlwaysInline" : "Inlined"; 499 return OptimizationRemark(DEBUG_TYPE, RemarkName, DLoc, Block) 500 << ore::NV("Callee", &Callee) << " inlined into " 501 << ore::NV("Caller", &Caller) << " with " << IC; 502 }); 503 } 504 505 static bool 506 inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG, 507 std::function<AssumptionCache &(Function &)> GetAssumptionCache, 508 ProfileSummaryInfo *PSI, TargetLibraryInfo &TLI, 509 bool InsertLifetime, 510 function_ref<InlineCost(CallSite CS)> GetInlineCost, 511 function_ref<AAResults &(Function &)> AARGetter, 512 ImportedFunctionsInliningStatistics &ImportedFunctionsStats) { 513 SmallPtrSet<Function *, 8> SCCFunctions; 514 LLVM_DEBUG(dbgs() << "Inliner visiting SCC:"); 515 for (CallGraphNode *Node : SCC) { 516 Function *F = Node->getFunction(); 517 if (F) 518 SCCFunctions.insert(F); 519 LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE")); 520 } 521 522 // Scan through and identify all call sites ahead of time so that we only 523 // inline call sites in the original functions, not call sites that result 524 // from inlining other functions. 525 SmallVector<std::pair<CallSite, int>, 16> CallSites; 526 527 // When inlining a callee produces new call sites, we want to keep track of 528 // the fact that they were inlined from the callee. This allows us to avoid 529 // infinite inlining in some obscure cases. To represent this, we use an 530 // index into the InlineHistory vector. 531 SmallVector<std::pair<Function *, int>, 8> InlineHistory; 532 533 for (CallGraphNode *Node : SCC) { 534 Function *F = Node->getFunction(); 535 if (!F || F->isDeclaration()) 536 continue; 537 538 OptimizationRemarkEmitter ORE(F); 539 for (BasicBlock &BB : *F) 540 for (Instruction &I : BB) { 541 CallSite CS(cast<Value>(&I)); 542 // If this isn't a call, or it is a call to an intrinsic, it can 543 // never be inlined. 544 if (!CS || isa<IntrinsicInst>(I)) 545 continue; 546 547 // If this is a direct call to an external function, we can never inline 548 // it. If it is an indirect call, inlining may resolve it to be a 549 // direct call, so we keep it. 550 if (Function *Callee = CS.getCalledFunction()) 551 if (Callee->isDeclaration()) { 552 using namespace ore; 553 554 ORE.emit([&]() { 555 return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I) 556 << NV("Callee", Callee) << " will not be inlined into " 557 << NV("Caller", CS.getCaller()) 558 << " because its definition is unavailable" 559 << setIsVerbose(); 560 }); 561 continue; 562 } 563 564 CallSites.push_back(std::make_pair(CS, -1)); 565 } 566 } 567 568 LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n"); 569 570 // If there are no calls in this function, exit early. 571 if (CallSites.empty()) 572 return false; 573 574 // Now that we have all of the call sites, move the ones to functions in the 575 // current SCC to the end of the list. 576 unsigned FirstCallInSCC = CallSites.size(); 577 for (unsigned i = 0; i < FirstCallInSCC; ++i) 578 if (Function *F = CallSites[i].first.getCalledFunction()) 579 if (SCCFunctions.count(F)) 580 std::swap(CallSites[i--], CallSites[--FirstCallInSCC]); 581 582 InlinedArrayAllocasTy InlinedArrayAllocas; 583 InlineFunctionInfo InlineInfo(&CG, &GetAssumptionCache, PSI); 584 585 // Now that we have all of the call sites, loop over them and inline them if 586 // it looks profitable to do so. 587 bool Changed = false; 588 bool LocalChange; 589 do { 590 LocalChange = false; 591 // Iterate over the outer loop because inlining functions can cause indirect 592 // calls to become direct calls. 593 // CallSites may be modified inside so ranged for loop can not be used. 594 for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) { 595 CallSite CS = CallSites[CSi].first; 596 597 Function *Caller = CS.getCaller(); 598 Function *Callee = CS.getCalledFunction(); 599 600 // We can only inline direct calls to non-declarations. 601 if (!Callee || Callee->isDeclaration()) 602 continue; 603 604 Instruction *Instr = CS.getInstruction(); 605 606 bool IsTriviallyDead = isInstructionTriviallyDead(Instr, &TLI); 607 608 int InlineHistoryID; 609 if (!IsTriviallyDead) { 610 // If this call site was obtained by inlining another function, verify 611 // that the include path for the function did not include the callee 612 // itself. If so, we'd be recursively inlining the same function, 613 // which would provide the same callsites, which would cause us to 614 // infinitely inline. 615 InlineHistoryID = CallSites[CSi].second; 616 if (InlineHistoryID != -1 && 617 InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) 618 continue; 619 } 620 621 // FIXME for new PM: because of the old PM we currently generate ORE and 622 // in turn BFI on demand. With the new PM, the ORE dependency should 623 // just become a regular analysis dependency. 624 OptimizationRemarkEmitter ORE(Caller); 625 626 Optional<InlineCost> OIC = shouldInline(CS, GetInlineCost, ORE); 627 // If the policy determines that we should inline this function, 628 // delete the call instead. 629 if (!OIC || !*OIC) { 630 continue; 631 } 632 633 // If this call site is dead and it is to a readonly function, we should 634 // just delete the call instead of trying to inline it, regardless of 635 // size. This happens because IPSCCP propagates the result out of the 636 // call and then we're left with the dead call. 637 if (IsTriviallyDead) { 638 LLVM_DEBUG(dbgs() << " -> Deleting dead call: " << *Instr << "\n"); 639 // Update the call graph by deleting the edge from Callee to Caller. 640 CG[Caller]->removeCallEdgeFor(CS); 641 Instr->eraseFromParent(); 642 ++NumCallsDeleted; 643 } else { 644 // Get DebugLoc to report. CS will be invalid after Inliner. 645 DebugLoc DLoc = CS->getDebugLoc(); 646 BasicBlock *Block = CS.getParent(); 647 648 // Attempt to inline the function. 649 using namespace ore; 650 651 InlineResult IR = InlineCallIfPossible( 652 CS, InlineInfo, InlinedArrayAllocas, InlineHistoryID, 653 InsertLifetime, AARGetter, ImportedFunctionsStats); 654 if (!IR) { 655 ORE.emit([&]() { 656 return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, 657 Block) 658 << NV("Callee", Callee) << " will not be inlined into " 659 << NV("Caller", Caller) << ": " << NV("Reason", IR.message); 660 }); 661 continue; 662 } 663 ++NumInlined; 664 665 emit_inlined_into(ORE, DLoc, Block, *Callee, *Caller, *OIC); 666 667 // If inlining this function gave us any new call sites, throw them 668 // onto our worklist to process. They are useful inline candidates. 669 if (!InlineInfo.InlinedCalls.empty()) { 670 // Create a new inline history entry for this, so that we remember 671 // that these new callsites came about due to inlining Callee. 672 int NewHistoryID = InlineHistory.size(); 673 InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID)); 674 675 for (Value *Ptr : InlineInfo.InlinedCalls) 676 CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID)); 677 } 678 } 679 680 // If we inlined or deleted the last possible call site to the function, 681 // delete the function body now. 682 if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() && 683 // TODO: Can remove if in SCC now. 684 !SCCFunctions.count(Callee) && 685 // The function may be apparently dead, but if there are indirect 686 // callgraph references to the node, we cannot delete it yet, this 687 // could invalidate the CGSCC iterator. 688 CG[Callee]->getNumReferences() == 0) { 689 LLVM_DEBUG(dbgs() << " -> Deleting dead function: " 690 << Callee->getName() << "\n"); 691 CallGraphNode *CalleeNode = CG[Callee]; 692 693 // Remove any call graph edges from the callee to its callees. 694 CalleeNode->removeAllCalledFunctions(); 695 696 // Removing the node for callee from the call graph and delete it. 697 delete CG.removeFunctionFromModule(CalleeNode); 698 ++NumDeleted; 699 } 700 701 // Remove this call site from the list. If possible, use 702 // swap/pop_back for efficiency, but do not use it if doing so would 703 // move a call site to a function in this SCC before the 704 // 'FirstCallInSCC' barrier. 705 if (SCC.isSingular()) { 706 CallSites[CSi] = CallSites.back(); 707 CallSites.pop_back(); 708 } else { 709 CallSites.erase(CallSites.begin() + CSi); 710 } 711 --CSi; 712 713 Changed = true; 714 LocalChange = true; 715 } 716 } while (LocalChange); 717 718 return Changed; 719 } 720 721 bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) { 722 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); 723 ACT = &getAnalysis<AssumptionCacheTracker>(); 724 PSI = getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 725 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 726 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & { 727 return ACT->getAssumptionCache(F); 728 }; 729 return inlineCallsImpl(SCC, CG, GetAssumptionCache, PSI, TLI, InsertLifetime, 730 [this](CallSite CS) { return getInlineCost(CS); }, 731 LegacyAARGetter(*this), ImportedFunctionsStats); 732 } 733 734 /// Remove now-dead linkonce functions at the end of 735 /// processing to avoid breaking the SCC traversal. 736 bool LegacyInlinerBase::doFinalization(CallGraph &CG) { 737 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 738 ImportedFunctionsStats.dump(InlinerFunctionImportStats == 739 InlinerFunctionImportStatsOpts::Verbose); 740 return removeDeadFunctions(CG); 741 } 742 743 /// Remove dead functions that are not included in DNR (Do Not Remove) list. 744 bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG, 745 bool AlwaysInlineOnly) { 746 SmallVector<CallGraphNode *, 16> FunctionsToRemove; 747 SmallVector<Function *, 16> DeadFunctionsInComdats; 748 749 auto RemoveCGN = [&](CallGraphNode *CGN) { 750 // Remove any call graph edges from the function to its callees. 751 CGN->removeAllCalledFunctions(); 752 753 // Remove any edges from the external node to the function's call graph 754 // node. These edges might have been made irrelegant due to 755 // optimization of the program. 756 CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN); 757 758 // Removing the node for callee from the call graph and delete it. 759 FunctionsToRemove.push_back(CGN); 760 }; 761 762 // Scan for all of the functions, looking for ones that should now be removed 763 // from the program. Insert the dead ones in the FunctionsToRemove set. 764 for (const auto &I : CG) { 765 CallGraphNode *CGN = I.second.get(); 766 Function *F = CGN->getFunction(); 767 if (!F || F->isDeclaration()) 768 continue; 769 770 // Handle the case when this function is called and we only want to care 771 // about always-inline functions. This is a bit of a hack to share code 772 // between here and the InlineAlways pass. 773 if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline)) 774 continue; 775 776 // If the only remaining users of the function are dead constants, remove 777 // them. 778 F->removeDeadConstantUsers(); 779 780 if (!F->isDefTriviallyDead()) 781 continue; 782 783 // It is unsafe to drop a function with discardable linkage from a COMDAT 784 // without also dropping the other members of the COMDAT. 785 // The inliner doesn't visit non-function entities which are in COMDAT 786 // groups so it is unsafe to do so *unless* the linkage is local. 787 if (!F->hasLocalLinkage()) { 788 if (F->hasComdat()) { 789 DeadFunctionsInComdats.push_back(F); 790 continue; 791 } 792 } 793 794 RemoveCGN(CGN); 795 } 796 if (!DeadFunctionsInComdats.empty()) { 797 // Filter out the functions whose comdats remain alive. 798 filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats); 799 // Remove the rest. 800 for (Function *F : DeadFunctionsInComdats) 801 RemoveCGN(CG[F]); 802 } 803 804 if (FunctionsToRemove.empty()) 805 return false; 806 807 // Now that we know which functions to delete, do so. We didn't want to do 808 // this inline, because that would invalidate our CallGraph::iterator 809 // objects. :( 810 // 811 // Note that it doesn't matter that we are iterating over a non-stable order 812 // here to do this, it doesn't matter which order the functions are deleted 813 // in. 814 array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end()); 815 FunctionsToRemove.erase( 816 std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()), 817 FunctionsToRemove.end()); 818 for (CallGraphNode *CGN : FunctionsToRemove) { 819 delete CG.removeFunctionFromModule(CGN); 820 ++NumDeleted; 821 } 822 return true; 823 } 824 825 InlinerPass::~InlinerPass() { 826 if (ImportedFunctionsStats) { 827 assert(InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No); 828 ImportedFunctionsStats->dump(InlinerFunctionImportStats == 829 InlinerFunctionImportStatsOpts::Verbose); 830 } 831 } 832 833 PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC, 834 CGSCCAnalysisManager &AM, LazyCallGraph &CG, 835 CGSCCUpdateResult &UR) { 836 const ModuleAnalysisManager &MAM = 837 AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG).getManager(); 838 bool Changed = false; 839 840 assert(InitialC.size() > 0 && "Cannot handle an empty SCC!"); 841 Module &M = *InitialC.begin()->getFunction().getParent(); 842 ProfileSummaryInfo *PSI = MAM.getCachedResult<ProfileSummaryAnalysis>(M); 843 844 if (!ImportedFunctionsStats && 845 InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) { 846 ImportedFunctionsStats = 847 llvm::make_unique<ImportedFunctionsInliningStatistics>(); 848 ImportedFunctionsStats->setModuleInfo(M); 849 } 850 851 // We use a single common worklist for calls across the entire SCC. We 852 // process these in-order and append new calls introduced during inlining to 853 // the end. 854 // 855 // Note that this particular order of processing is actually critical to 856 // avoid very bad behaviors. Consider *highly connected* call graphs where 857 // each function contains a small amonut of code and a couple of calls to 858 // other functions. Because the LLVM inliner is fundamentally a bottom-up 859 // inliner, it can handle gracefully the fact that these all appear to be 860 // reasonable inlining candidates as it will flatten things until they become 861 // too big to inline, and then move on and flatten another batch. 862 // 863 // However, when processing call edges *within* an SCC we cannot rely on this 864 // bottom-up behavior. As a consequence, with heavily connected *SCCs* of 865 // functions we can end up incrementally inlining N calls into each of 866 // N functions because each incremental inlining decision looks good and we 867 // don't have a topological ordering to prevent explosions. 868 // 869 // To compensate for this, we don't process transitive edges made immediate 870 // by inlining until we've done one pass of inlining across the entire SCC. 871 // Large, highly connected SCCs still lead to some amount of code bloat in 872 // this model, but it is uniformly spread across all the functions in the SCC 873 // and eventually they all become too large to inline, rather than 874 // incrementally maknig a single function grow in a super linear fashion. 875 SmallVector<std::pair<CallSite, int>, 16> Calls; 876 877 FunctionAnalysisManager &FAM = 878 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG) 879 .getManager(); 880 881 // Populate the initial list of calls in this SCC. 882 for (auto &N : InitialC) { 883 auto &ORE = 884 FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction()); 885 // We want to generally process call sites top-down in order for 886 // simplifications stemming from replacing the call with the returned value 887 // after inlining to be visible to subsequent inlining decisions. 888 // FIXME: Using instructions sequence is a really bad way to do this. 889 // Instead we should do an actual RPO walk of the function body. 890 for (Instruction &I : instructions(N.getFunction())) 891 if (auto CS = CallSite(&I)) 892 if (Function *Callee = CS.getCalledFunction()) { 893 if (!Callee->isDeclaration()) 894 Calls.push_back({CS, -1}); 895 else if (!isa<IntrinsicInst>(I)) { 896 using namespace ore; 897 ORE.emit([&]() { 898 return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I) 899 << NV("Callee", Callee) << " will not be inlined into " 900 << NV("Caller", CS.getCaller()) 901 << " because its definition is unavailable" 902 << setIsVerbose(); 903 }); 904 } 905 } 906 } 907 if (Calls.empty()) 908 return PreservedAnalyses::all(); 909 910 // Capture updatable variables for the current SCC and RefSCC. 911 auto *C = &InitialC; 912 auto *RC = &C->getOuterRefSCC(); 913 914 // When inlining a callee produces new call sites, we want to keep track of 915 // the fact that they were inlined from the callee. This allows us to avoid 916 // infinite inlining in some obscure cases. To represent this, we use an 917 // index into the InlineHistory vector. 918 SmallVector<std::pair<Function *, int>, 16> InlineHistory; 919 920 // Track a set vector of inlined callees so that we can augment the caller 921 // with all of their edges in the call graph before pruning out the ones that 922 // got simplified away. 923 SmallSetVector<Function *, 4> InlinedCallees; 924 925 // Track the dead functions to delete once finished with inlining calls. We 926 // defer deleting these to make it easier to handle the call graph updates. 927 SmallVector<Function *, 4> DeadFunctions; 928 929 // Loop forward over all of the calls. Note that we cannot cache the size as 930 // inlining can introduce new calls that need to be processed. 931 for (int i = 0; i < (int)Calls.size(); ++i) { 932 // We expect the calls to typically be batched with sequences of calls that 933 // have the same caller, so we first set up some shared infrastructure for 934 // this caller. We also do any pruning we can at this layer on the caller 935 // alone. 936 Function &F = *Calls[i].first.getCaller(); 937 LazyCallGraph::Node &N = *CG.lookup(F); 938 if (CG.lookupSCC(N) != C) 939 continue; 940 if (F.hasFnAttribute(Attribute::OptimizeNone)) 941 continue; 942 943 LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"); 944 945 // Get a FunctionAnalysisManager via a proxy for this particular node. We 946 // do this each time we visit a node as the SCC may have changed and as 947 // we're going to mutate this particular function we want to make sure the 948 // proxy is in place to forward any invalidation events. We can use the 949 // manager we get here for looking up results for functions other than this 950 // node however because those functions aren't going to be mutated by this 951 // pass. 952 FunctionAnalysisManager &FAM = 953 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG) 954 .getManager(); 955 956 // Get the remarks emission analysis for the caller. 957 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F); 958 959 std::function<AssumptionCache &(Function &)> GetAssumptionCache = 960 [&](Function &F) -> AssumptionCache & { 961 return FAM.getResult<AssumptionAnalysis>(F); 962 }; 963 auto GetBFI = [&](Function &F) -> BlockFrequencyInfo & { 964 return FAM.getResult<BlockFrequencyAnalysis>(F); 965 }; 966 967 auto GetInlineCost = [&](CallSite CS) { 968 Function &Callee = *CS.getCalledFunction(); 969 auto &CalleeTTI = FAM.getResult<TargetIRAnalysis>(Callee); 970 return getInlineCost(CS, Params, CalleeTTI, GetAssumptionCache, {GetBFI}, 971 PSI, &ORE); 972 }; 973 974 // Now process as many calls as we have within this caller in the sequnece. 975 // We bail out as soon as the caller has to change so we can update the 976 // call graph and prepare the context of that new caller. 977 bool DidInline = false; 978 for (; i < (int)Calls.size() && Calls[i].first.getCaller() == &F; ++i) { 979 int InlineHistoryID; 980 CallSite CS; 981 std::tie(CS, InlineHistoryID) = Calls[i]; 982 Function &Callee = *CS.getCalledFunction(); 983 984 if (InlineHistoryID != -1 && 985 InlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) 986 continue; 987 988 // Check if this inlining may repeat breaking an SCC apart that has 989 // already been split once before. In that case, inlining here may 990 // trigger infinite inlining, much like is prevented within the inliner 991 // itself by the InlineHistory above, but spread across CGSCC iterations 992 // and thus hidden from the full inline history. 993 if (CG.lookupSCC(*CG.lookup(Callee)) == C && 994 UR.InlinedInternalEdges.count({&N, C})) { 995 LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node " 996 "previously split out of this SCC by inlining: " 997 << F.getName() << " -> " << Callee.getName() << "\n"); 998 continue; 999 } 1000 1001 Optional<InlineCost> OIC = shouldInline(CS, GetInlineCost, ORE); 1002 // Check whether we want to inline this callsite. 1003 if (!OIC || !*OIC) 1004 continue; 1005 1006 // Setup the data structure used to plumb customization into the 1007 // `InlineFunction` routine. 1008 InlineFunctionInfo IFI( 1009 /*cg=*/nullptr, &GetAssumptionCache, PSI, 1010 &FAM.getResult<BlockFrequencyAnalysis>(*(CS.getCaller())), 1011 &FAM.getResult<BlockFrequencyAnalysis>(Callee)); 1012 1013 // Get DebugLoc to report. CS will be invalid after Inliner. 1014 DebugLoc DLoc = CS->getDebugLoc(); 1015 BasicBlock *Block = CS.getParent(); 1016 1017 using namespace ore; 1018 1019 InlineResult IR = InlineFunction(CS, IFI); 1020 if (!IR) { 1021 ORE.emit([&]() { 1022 return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block) 1023 << NV("Callee", &Callee) << " will not be inlined into " 1024 << NV("Caller", &F) << ": " << NV("Reason", IR.message); 1025 }); 1026 continue; 1027 } 1028 DidInline = true; 1029 InlinedCallees.insert(&Callee); 1030 1031 emit_inlined_into(ORE, DLoc, Block, Callee, F, *OIC); 1032 1033 // Add any new callsites to defined functions to the worklist. 1034 if (!IFI.InlinedCallSites.empty()) { 1035 int NewHistoryID = InlineHistory.size(); 1036 InlineHistory.push_back({&Callee, InlineHistoryID}); 1037 for (CallSite &CS : reverse(IFI.InlinedCallSites)) 1038 if (Function *NewCallee = CS.getCalledFunction()) 1039 if (!NewCallee->isDeclaration()) 1040 Calls.push_back({CS, NewHistoryID}); 1041 } 1042 1043 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 1044 ImportedFunctionsStats->recordInline(F, Callee); 1045 1046 // Merge the attributes based on the inlining. 1047 AttributeFuncs::mergeAttributesForInlining(F, Callee); 1048 1049 // For local functions, check whether this makes the callee trivially 1050 // dead. In that case, we can drop the body of the function eagerly 1051 // which may reduce the number of callers of other functions to one, 1052 // changing inline cost thresholds. 1053 if (Callee.hasLocalLinkage()) { 1054 // To check this we also need to nuke any dead constant uses (perhaps 1055 // made dead by this operation on other functions). 1056 Callee.removeDeadConstantUsers(); 1057 if (Callee.use_empty() && !CG.isLibFunction(Callee)) { 1058 Calls.erase( 1059 std::remove_if(Calls.begin() + i + 1, Calls.end(), 1060 [&Callee](const std::pair<CallSite, int> &Call) { 1061 return Call.first.getCaller() == &Callee; 1062 }), 1063 Calls.end()); 1064 // Clear the body and queue the function itself for deletion when we 1065 // finish inlining and call graph updates. 1066 // Note that after this point, it is an error to do anything other 1067 // than use the callee's address or delete it. 1068 Callee.dropAllReferences(); 1069 assert(find(DeadFunctions, &Callee) == DeadFunctions.end() && 1070 "Cannot put cause a function to become dead twice!"); 1071 DeadFunctions.push_back(&Callee); 1072 } 1073 } 1074 } 1075 1076 // Back the call index up by one to put us in a good position to go around 1077 // the outer loop. 1078 --i; 1079 1080 if (!DidInline) 1081 continue; 1082 Changed = true; 1083 1084 // Add all the inlined callees' edges as ref edges to the caller. These are 1085 // by definition trivial edges as we always have *some* transitive ref edge 1086 // chain. While in some cases these edges are direct calls inside the 1087 // callee, they have to be modeled in the inliner as reference edges as 1088 // there may be a reference edge anywhere along the chain from the current 1089 // caller to the callee that causes the whole thing to appear like 1090 // a (transitive) reference edge that will require promotion to a call edge 1091 // below. 1092 for (Function *InlinedCallee : InlinedCallees) { 1093 LazyCallGraph::Node &CalleeN = *CG.lookup(*InlinedCallee); 1094 for (LazyCallGraph::Edge &E : *CalleeN) 1095 RC->insertTrivialRefEdge(N, E.getNode()); 1096 } 1097 1098 // At this point, since we have made changes we have at least removed 1099 // a call instruction. However, in the process we do some incremental 1100 // simplification of the surrounding code. This simplification can 1101 // essentially do all of the same things as a function pass and we can 1102 // re-use the exact same logic for updating the call graph to reflect the 1103 // change. 1104 LazyCallGraph::SCC *OldC = C; 1105 C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR); 1106 LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n"); 1107 RC = &C->getOuterRefSCC(); 1108 1109 // If this causes an SCC to split apart into multiple smaller SCCs, there 1110 // is a subtle risk we need to prepare for. Other transformations may 1111 // expose an "infinite inlining" opportunity later, and because of the SCC 1112 // mutation, we will revisit this function and potentially re-inline. If we 1113 // do, and that re-inlining also has the potentially to mutate the SCC 1114 // structure, the infinite inlining problem can manifest through infinite 1115 // SCC splits and merges. To avoid this, we capture the originating caller 1116 // node and the SCC containing the call edge. This is a slight over 1117 // approximation of the possible inlining decisions that must be avoided, 1118 // but is relatively efficient to store. 1119 // FIXME: This seems like a very heavyweight way of retaining the inline 1120 // history, we should look for a more efficient way of tracking it. 1121 if (C != OldC && llvm::any_of(InlinedCallees, [&](Function *Callee) { 1122 return CG.lookupSCC(*CG.lookup(*Callee)) == OldC; 1123 })) { 1124 LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, " 1125 "retaining this to avoid infinite inlining.\n"); 1126 UR.InlinedInternalEdges.insert({&N, OldC}); 1127 } 1128 InlinedCallees.clear(); 1129 } 1130 1131 // Now that we've finished inlining all of the calls across this SCC, delete 1132 // all of the trivially dead functions, updating the call graph and the CGSCC 1133 // pass manager in the process. 1134 // 1135 // Note that this walks a pointer set which has non-deterministic order but 1136 // that is OK as all we do is delete things and add pointers to unordered 1137 // sets. 1138 for (Function *DeadF : DeadFunctions) { 1139 // Get the necessary information out of the call graph and nuke the 1140 // function there. Also, cclear out any cached analyses. 1141 auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF)); 1142 FunctionAnalysisManager &FAM = 1143 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(DeadC, CG) 1144 .getManager(); 1145 FAM.clear(*DeadF, DeadF->getName()); 1146 AM.clear(DeadC, DeadC.getName()); 1147 auto &DeadRC = DeadC.getOuterRefSCC(); 1148 CG.removeDeadFunction(*DeadF); 1149 1150 // Mark the relevant parts of the call graph as invalid so we don't visit 1151 // them. 1152 UR.InvalidatedSCCs.insert(&DeadC); 1153 UR.InvalidatedRefSCCs.insert(&DeadRC); 1154 1155 // And delete the actual function from the module. 1156 M.getFunctionList().erase(DeadF); 1157 } 1158 1159 if (!Changed) 1160 return PreservedAnalyses::all(); 1161 1162 // Even if we change the IR, we update the core CGSCC data structures and so 1163 // can preserve the proxy to the function analysis manager. 1164 PreservedAnalyses PA; 1165 PA.preserve<FunctionAnalysisManagerCGSCCProxy>(); 1166 return PA; 1167 } 1168