//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to perform inlining without
// missing any calls and while keeping the call graph up to date. The
// decisions about which calls are profitable to inline are implemented
// elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed.
/// To experiment with disabling it and relying fully on lifetime marker based
/// stack coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

namespace {

enum class InlinerFunctionImportStatsOpts {
  No = 0,
  Basic = 1,
  Verbose = 2,
};

} // end anonymous namespace

static cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats(
    "inliner-function-import-stats",
    cl::init(InlinerFunctionImportStatsOpts::No),
    cl::values(clEnumValN(InlinerFunctionImportStatsOpts::Basic, "basic",
                          "basic statistics"),
               clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose",
                          "printing of statistics for each inlined function")),
    cl::Hidden, cl::desc("Enable inliner stats for imported functions"));

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should always explicitly
/// call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A.
  // Doing this makes an alloca available for
  // reuse and makes a call site (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas, but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged
  // with a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.
    // Note that we have to be careful not to reuse the same "available"
    // alloca for multiple different allocas that we just inlined; we use the
    // 'UsedAllocas' set to keep track of which "available" allocas are being
    // used by this function. Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      Align Align1 = AI->getAlign();
      Align Align2 = AvailableAlloca->getAlign();

      // The available alloca has to be in the right function, not in some
      // other function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and
      // declare success!
      LLVM_DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 > Align2)
        AvailableAlloca->setAlignment(AI->getAlign());

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca, either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and
    // mark it used so we don't reuse it for other allocas from this inline
    // operation.
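    // For illustration (a sketch, not from the original source): if two
    // disjoint, non-nested call sites each brought in an [8 x i32] alloca,
    //   %a = alloca [8 x i32]   ; from the first inlined callee
    //   %b = alloca [8 x i32]   ; from the second inlined callee
    // the merge loop above RAUWs %b into %a so both share one stack slot,
    // since their lifetimes cannot overlap.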
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site, do so and update the
/// CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID indicates an inline history
/// that includes the specified function.
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
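  // Each worklist entry pairs a call site with an inline-history index: -1
  // for call sites that existed before any inlining in this SCC, otherwise
  // the index of the InlineHistory entry (declared below) recording which
  // inlining produced the call site.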
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never
        // inline it. If it is an indirect call, inlining may resolve it to
        // be a direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones that call functions
  // in the current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause
    // indirect calls to become direct calls.
    // CallSites may be modified within the loop, so a range-based for loop
    // cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline history for the call site does not include the
        // callee itself. If it does, we'd be recursively inlining the same
        // function, which would produce the same call sites again and cause
        // us to inline infinitely.
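        // For example (an illustrative walk-through, not from the original
        // comments): inlining B into A appends InlineHistory entry 0 =
        // {B, -1} and tags B's inherited call sites with ID 0; inlining one
        // of those, C, appends entry 1 = {C, 0}. Walking entry 1's chain
        // visits C and then B, so a call back to either function is flagged
        // as recursive below.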
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand. With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determined that we should not inline this call site,
      // move on to the next one.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << " -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get the DebugLoc to report: CB will be invalid after inlining.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedInto(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new call sites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates in the inline candidates. This
          // could happen when a call site is simplified to reuse the return
          // value of another call site during function cloning; the other
          // call site would then be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << " -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the callee's node from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of processing to avoid
/// breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to optimization
    // of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node for removal from the call graph and deletion.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan over all of the functions, looking for ones that should now be
  // removed from the program. Insert the dead ones in the FunctionsToRemove
  // set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions.
    // This is a bit of a hack to share code between here and the
    // InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups, so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this; it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlinerPass::~InlinerPass() {
  if (ImportedFunctionsStats) {
    assert(InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No);
    ImportedFunctionsStats->dump(InlinerFunctionImportStats ==
                                 InlinerFunctionImportStatsOpts::Verbose);
  }
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        FunctionAnalysisManager &FAM, Module &M) {
  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC
    // pass, for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC
    // pass runs. It also uses just the default InlineParams.
    // In this case, we need to use the provided FAM, which is valid for the
    // duration of the inliner pass, and thus the lifetime of the owned
    // advisor. The one we would get from the MAM can be invalidated as a
    // result of the inliner's activity.
    OwnedDefaultAdvisor.emplace(FAM, getInlineParams());
    return *OwnedDefaultAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
  Advisor.onPassEntry();

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(); });

  if (!ImportedFunctionsStats &&
      InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) {
    ImportedFunctionsStats =
        std::make_unique<ImportedFunctionsInliningStatistics>();
    ImportedFunctionsStats->setModuleInfo(M);
  }

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they
  // become too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on
  // this bottom-up behavior. As a consequence, with heavily connected *SCCs*
  // of functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the
  // SCC and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a superlinear fashion.
  SmallVector<std::pair<CallBase *, int>, 16> Calls;

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned
    // value after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
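    // For instance (an illustrative sketch, not from the original comments):
    // after "%v = call i32 @f()" is inlined and %v folds to a constant, a
    // later "call void @g(i32 %v)" may simplify enough that inlining @g
    // becomes attractive; visiting call sites top-down lets the later
    // decision see that simplification.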
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push_back({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variables for the current SCC and RefSCC.
  auto *C = &InitialC;
  auto *RC = &C->getOuterRefSCC();

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones
  // that got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Loop forward over all of the calls. Note that we cannot cache the size as
  // inlining can introduce new calls that need to be processed.
  for (int I = 0; I < (int)Calls.size(); ++I) {
    // We expect the calls to typically be batched with sequences of calls
    // that have the same caller, so we first set up some shared
    // infrastructure for this caller. We also do any pruning we can at this
    // layer on the caller alone.
    Function &F = *Calls[I].first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C)
      continue;
    if (F.hasOptNone()) {
      setInlineRemark(*Calls[I].first, "optnone attribute");
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the
    // sequence. We bail out as soon as the caller has to change so we can
    // update the call graph and prepare the context of that new caller.
    bool DidInline = false;
    for (; I < (int)Calls.size() && Calls[I].first->getCaller() == &F; ++I) {
      auto &P = Calls[I];
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check whether this inlining risks breaking apart an SCC that has
      // already been split once before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC
      // iterations and thus hidden from the full inline history.
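      // A sketch of the failure mode (illustrative, not from the original
      // comments): with mutually recursive @a and @b in one SCC, inlining @b
      // into @a can split @b into its own SCC; a later iteration may then
      // inline @a back into @b, re-forming the SCC, and the two steps can
      // alternate indefinitely with both functions growing each round. The
      // check below refuses the repeated split.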
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      auto Advice = Advisor.getAdvice(*CB);
      // Check whether we want to inline this callsite.
      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

      // Set up the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR = InlineFunction(*CB, IFI);
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      // Add any new call sites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee)
            if (!NewCallee->isDeclaration())
              Calls.push_back({ICB, NewHistoryID});
        }
      }

      if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
        ImportedFunctionsStats->recordInline(F, Callee);

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions, check whether this makes the callee trivially
      // dead. In that case, we can drop the body of the function eagerly
      // which may reduce the number of callers of other functions to one,
      // changing inline cost thresholds.
      bool CalleeWasDeleted = false;
      if (Callee.hasLocalLinkage()) {
        // To check this we also need to nuke any dead constant uses (perhaps
        // made dead by this operation on other functions).
        Callee.removeDeadConstantUsers();
        if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
          Calls.erase(
              std::remove_if(Calls.begin() + I + 1, Calls.end(),
                             [&](const std::pair<CallBase *, int> &Call) {
                               return Call.first->getCaller() == &Callee;
                             }),
              Calls.end());
          // Clear the body and queue the function itself for deletion when
          // we finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(find(DeadFunctions, &Callee) == DeadFunctions.end() &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    // Back the call index up by one to put us in a good position to go around
    // the outer loop.
    --I;

    if (!DidInline)
      continue;
    Changed = true;

    // Add all the inlined callees' edges as ref edges to the caller. These
    // are by definition trivial edges as we always have *some* transitive
    // ref edge chain. While in some cases these edges are direct calls
    // inside the callee, they have to be modeled in the inliner as reference
    // edges as there may be a reference edge anywhere along the chain from
    // the current caller to the callee that causes the whole thing to appear
    // like a (transitive) reference edge that will require promotion to a
    // call edge below.
    for (Function *InlinedCallee : InlinedCallees) {
      LazyCallGraph::Node &CalleeN = *CG.lookup(*InlinedCallee);
      for (LazyCallGraph::Edge &E : *CalleeN)
        RC->insertTrivialRefEdge(N, E.getNode());
    }

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed
    // and as we're going to mutate this particular function we want to make
    // sure the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");
    RC = &C->getOuterRefSCC();

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the
    // SCC mutation, we will revisit this function and potentially re-inline.
    // If we do, and that re-inlining also has the potential to mutate the
    // SCC structure, the infinite inlining problem can manifest through
    // infinite SCC splits and merges. To avoid this, we capture the
    // originating caller node and the SCC containing the call edge. This is
    // a slight overapproximation of the possible inlining decisions that
    // must be avoided, but is relatively efficient to store. We use
    // C != OldC to know when a new SCC is generated and the original SCC may
    // be generated via merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // back into the same one as itself. The original SCC will then be added
    // into UR.CWorklist again, and we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history; we should look for a more efficient way of tracking it.
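    // In other words, the key {&N, OldC} records "caller node N inlined an
    // edge internal to SCC OldC"; a later attempt to inline an edge from N
    // to a callee still in that SCC is refused earlier in this function with
    // the "recursive SCC split" remark.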
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the
  // CGSCC pass manager in the process.
  //
  // Note that the deletion order doesn't matter here: all we do is delete
  // functions and add pointers to unordered sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // And delete the actual function from the module.
    // The Advisor may use Function pointers to efficiently index various
    // internal maps, e.g. for memoization. Function cleanup passes like
    // argument promotion create new functions. It is possible for a new
    // function to be allocated at the address of a deleted function. We could
    // index using names, but that's inefficient. Alternatively, we let the
    // Advisor free the functions when it sees fit.
    DeadF->getBasicBlockList().clear();
    M.getFunctionList().remove(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  // Even if we change the IR, we update the core CGSCC data structures and
  // so can preserve the proxy to the function analysis manager.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  return PA;
}

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool Debugging,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations),
      PM(Debugging), MPM(Debugging) {
  // Run the inliner first. The theory is that we are walking bottom-up and
  // so the callees have already been fully optimized, and we want to inline
  // them into the callers so that our optimizations can reflect that.
  // For the PreLinkThinLTO pass, we disable the hot-caller heuristic for
  // sample PGO because it makes profile annotation in the backend
  // inaccurate.
  PM.addPass(InlinerPass());
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode)) {
    M.getContext().emitError(
        "Could not set up Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater.
  // This will try to detect when we devirtualize indirect calls and iterate
  // the SCC passes in that case to try and catch knock-on inlining or
  // function attrs opportunities. Then we add it to the module pipeline by
  // walking the SCCs in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));
  auto Ret = MPM.run(M, MAM);

  IAA.clear();
  return Ret;
}
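
// Usage sketch (illustrative, not part of the original file): with the new
// pass manager this inliner is typically reached through the pass builder,
// e.g. by adding ModuleInlinerWrapperPass(getInlineParams()) to a
// ModulePassManager (assuming the header supplies defaults for the remaining
// constructor parameters), or from the opt tool via a pipeline string such
// as "-passes=inline" for the stand-alone CGSCC pass; the exact registered
// pass names live in PassRegistry.def and may differ across LLVM versions.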