1 //===- Inliner.cpp - Code common to all inliners --------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the mechanics required to implement inlining without 10 // missing any calls and updating the call graph. The decisions of which calls 11 // are profitable to inline are implemented elsewhere. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Transforms/IPO/Inliner.h" 16 #include "llvm/ADT/DenseMap.h" 17 #include "llvm/ADT/None.h" 18 #include "llvm/ADT/Optional.h" 19 #include "llvm/ADT/STLExtras.h" 20 #include "llvm/ADT/ScopeExit.h" 21 #include "llvm/ADT/SetVector.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallVector.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/ADT/StringRef.h" 26 #include "llvm/Analysis/AliasAnalysis.h" 27 #include "llvm/Analysis/AssumptionCache.h" 28 #include "llvm/Analysis/BasicAliasAnalysis.h" 29 #include "llvm/Analysis/BlockFrequencyInfo.h" 30 #include "llvm/Analysis/CGSCCPassManager.h" 31 #include "llvm/Analysis/CallGraph.h" 32 #include "llvm/Analysis/GlobalsModRef.h" 33 #include "llvm/Analysis/InlineAdvisor.h" 34 #include "llvm/Analysis/InlineCost.h" 35 #include "llvm/Analysis/LazyCallGraph.h" 36 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 37 #include "llvm/Analysis/ProfileSummaryInfo.h" 38 #include "llvm/Analysis/TargetLibraryInfo.h" 39 #include "llvm/Analysis/TargetTransformInfo.h" 40 #include "llvm/IR/Attributes.h" 41 #include "llvm/IR/BasicBlock.h" 42 #include "llvm/IR/DataLayout.h" 43 #include "llvm/IR/DebugLoc.h" 44 #include "llvm/IR/DerivedTypes.h" 45 #include "llvm/IR/DiagnosticInfo.h" 46 #include "llvm/IR/Function.h" 47 #include "llvm/IR/InstIterator.h" 48 #include "llvm/IR/Instruction.h" 49 #include "llvm/IR/Instructions.h" 50 #include "llvm/IR/IntrinsicInst.h" 51 #include "llvm/IR/Metadata.h" 52 #include "llvm/IR/Module.h" 53 #include "llvm/IR/PassManager.h" 54 #include "llvm/IR/User.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/Pass.h" 57 #include "llvm/Support/Casting.h" 58 #include "llvm/Support/CommandLine.h" 59 #include "llvm/Support/Debug.h" 60 #include "llvm/Support/raw_ostream.h" 61 #include "llvm/Transforms/IPO/AlwaysInliner.h" 62 #include "llvm/Transforms/Utils/CallPromotionUtils.h" 63 #include "llvm/Transforms/Utils/Cloning.h" 64 #include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h" 65 #include "llvm/Transforms/Utils/Local.h" 66 #include "llvm/Transforms/Utils/ModuleUtils.h" 67 #include <algorithm> 68 #include <cassert> 69 #include <functional> 70 #include <sstream> 71 #include <tuple> 72 #include <utility> 73 #include <vector> 74 75 using namespace llvm; 76 77 #define DEBUG_TYPE "inline" 78 79 STATISTIC(NumInlined, "Number of functions inlined"); 80 STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined"); 81 STATISTIC(NumDeleted, "Number of functions deleted because all callers found"); 82 STATISTIC(NumMergedAllocas, "Number of allocas merged together"); 83 84 /// Flag to disable manual alloca merging. 85 /// 86 /// Merging of allocas was originally done as a stack-size saving technique 87 /// prior to LLVM's code generator having support for stack coloring based on 88 /// lifetime markers. It is now in the process of being removed. To experiment 89 /// with disabling it and relying fully on lifetime marker based stack 90 /// coloring, you can pass this flag to LLVM. 91 static cl::opt<bool> 92 DisableInlinedAllocaMerging("disable-inlined-alloca-merging", 93 cl::init(false), cl::Hidden); 94 95 /// Flag to disable adding AlwaysInlinerPass to ModuleInlinerWrapperPass. 96 /// TODO: remove this once this has is baked in for long enough. 97 static cl::opt<bool> DisableAlwaysInlinerInModuleWrapper( 98 "disable-always-inliner-in-module-wrapper", cl::init(false), cl::Hidden); 99 100 namespace { 101 102 enum class InlinerFunctionImportStatsOpts { 103 No = 0, 104 Basic = 1, 105 Verbose = 2, 106 }; 107 108 } // end anonymous namespace 109 110 static cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats( 111 "inliner-function-import-stats", 112 cl::init(InlinerFunctionImportStatsOpts::No), 113 cl::values(clEnumValN(InlinerFunctionImportStatsOpts::Basic, "basic", 114 "basic statistics"), 115 clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose", 116 "printing of statistics for each inlined function")), 117 cl::Hidden, cl::desc("Enable inliner stats for imported functions")); 118 119 LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {} 120 121 LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime) 122 : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {} 123 124 /// For this class, we declare that we require and preserve the call graph. 125 /// If the derived class implements this method, it should 126 /// always explicitly call the implementation here. 127 void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const { 128 AU.addRequired<AssumptionCacheTracker>(); 129 AU.addRequired<ProfileSummaryInfoWrapperPass>(); 130 AU.addRequired<TargetLibraryInfoWrapperPass>(); 131 getAAResultsAnalysisUsage(AU); 132 CallGraphSCCPass::getAnalysisUsage(AU); 133 } 134 135 using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>; 136 137 /// Look at all of the allocas that we inlined through this call site. If we 138 /// have already inlined other allocas through other calls into this function, 139 /// then we know that they have disjoint lifetimes and that we can merge them. 140 /// 141 /// There are many heuristics possible for merging these allocas, and the 142 /// different options have different tradeoffs. One thing that we *really* 143 /// don't want to hurt is SRoA: once inlining happens, often allocas are no 144 /// longer address taken and so they can be promoted. 145 /// 146 /// Our "solution" for that is to only merge allocas whose outermost type is an 147 /// array type. These are usually not promoted because someone is using a 148 /// variable index into them. These are also often the most important ones to 149 /// merge. 150 /// 151 /// A better solution would be to have real memory lifetime markers in the IR 152 /// and not have the inliner do any merging of allocas at all. This would 153 /// allow the backend to do proper stack slot coloring of all allocas that 154 /// *actually make it to the backend*, which is really what we want. 155 /// 156 /// Because we don't have this information, we do this simple and useful hack. 157 static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI, 158 InlinedArrayAllocasTy &InlinedArrayAllocas, 159 int InlineHistory) { 160 SmallPtrSet<AllocaInst *, 16> UsedAllocas; 161 162 // When processing our SCC, check to see if the call site was inlined from 163 // some other call site. For example, if we're processing "A" in this code: 164 // A() { B() } 165 // B() { x = alloca ... C() } 166 // C() { y = alloca ... } 167 // Assume that C was not inlined into B initially, and so we're processing A 168 // and decide to inline B into A. Doing this makes an alloca available for 169 // reuse and makes a callsite (C) available for inlining. When we process 170 // the C call site we don't want to do any alloca merging between X and Y 171 // because their scopes are not disjoint. We could make this smarter by 172 // keeping track of the inline history for each alloca in the 173 // InlinedArrayAllocas but this isn't likely to be a significant win. 174 if (InlineHistory != -1) // Only do merging for top-level call sites in SCC. 175 return; 176 177 // Loop over all the allocas we have so far and see if they can be merged with 178 // a previously inlined alloca. If not, remember that we had it. 179 for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E; 180 ++AllocaNo) { 181 AllocaInst *AI = IFI.StaticAllocas[AllocaNo]; 182 183 // Don't bother trying to merge array allocations (they will usually be 184 // canonicalized to be an allocation *of* an array), or allocations whose 185 // type is not itself an array (because we're afraid of pessimizing SRoA). 186 ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType()); 187 if (!ATy || AI->isArrayAllocation()) 188 continue; 189 190 // Get the list of all available allocas for this array type. 191 std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy]; 192 193 // Loop over the allocas in AllocasForType to see if we can reuse one. Note 194 // that we have to be careful not to reuse the same "available" alloca for 195 // multiple different allocas that we just inlined, we use the 'UsedAllocas' 196 // set to keep track of which "available" allocas are being used by this 197 // function. Also, AllocasForType can be empty of course! 198 bool MergedAwayAlloca = false; 199 for (AllocaInst *AvailableAlloca : AllocasForType) { 200 Align Align1 = AI->getAlign(); 201 Align Align2 = AvailableAlloca->getAlign(); 202 203 // The available alloca has to be in the right function, not in some other 204 // function in this SCC. 205 if (AvailableAlloca->getParent() != AI->getParent()) 206 continue; 207 208 // If the inlined function already uses this alloca then we can't reuse 209 // it. 210 if (!UsedAllocas.insert(AvailableAlloca).second) 211 continue; 212 213 // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare 214 // success! 215 LLVM_DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI 216 << "\n\t\tINTO: " << *AvailableAlloca << '\n'); 217 218 // Move affected dbg.declare calls immediately after the new alloca to 219 // avoid the situation when a dbg.declare precedes its alloca. 220 if (auto *L = LocalAsMetadata::getIfExists(AI)) 221 if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L)) 222 for (User *U : MDV->users()) 223 if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U)) 224 DDI->moveBefore(AvailableAlloca->getNextNode()); 225 226 AI->replaceAllUsesWith(AvailableAlloca); 227 228 if (Align1 > Align2) 229 AvailableAlloca->setAlignment(AI->getAlign()); 230 231 AI->eraseFromParent(); 232 MergedAwayAlloca = true; 233 ++NumMergedAllocas; 234 IFI.StaticAllocas[AllocaNo] = nullptr; 235 break; 236 } 237 238 // If we already nuked the alloca, we're done with it. 239 if (MergedAwayAlloca) 240 continue; 241 242 // If we were unable to merge away the alloca either because there are no 243 // allocas of the right type available or because we reused them all 244 // already, remember that this alloca came from an inlined function and mark 245 // it used so we don't reuse it for other allocas from this inline 246 // operation. 247 AllocasForType.push_back(AI); 248 UsedAllocas.insert(AI); 249 } 250 } 251 252 /// If it is possible to inline the specified call site, 253 /// do so and update the CallGraph for this operation. 254 /// 255 /// This function also does some basic book-keeping to update the IR. The 256 /// InlinedArrayAllocas map keeps track of any allocas that are already 257 /// available from other functions inlined into the caller. If we are able to 258 /// inline this call site we attempt to reuse already available allocas or add 259 /// any new allocas to the set if not possible. 260 static InlineResult inlineCallIfPossible( 261 CallBase &CB, InlineFunctionInfo &IFI, 262 InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory, 263 bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter, 264 ImportedFunctionsInliningStatistics &ImportedFunctionsStats) { 265 Function *Callee = CB.getCalledFunction(); 266 Function *Caller = CB.getCaller(); 267 268 AAResults &AAR = AARGetter(*Callee); 269 270 // Try to inline the function. Get the list of static allocas that were 271 // inlined. 272 InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime); 273 if (!IR.isSuccess()) 274 return IR; 275 276 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 277 ImportedFunctionsStats.recordInline(*Caller, *Callee); 278 279 AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee); 280 281 if (!DisableInlinedAllocaMerging) 282 mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory); 283 284 return IR; // success 285 } 286 287 /// Return true if the specified inline history ID 288 /// indicates an inline history that includes the specified function. 289 static bool inlineHistoryIncludes( 290 Function *F, int InlineHistoryID, 291 const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) { 292 while (InlineHistoryID != -1) { 293 assert(unsigned(InlineHistoryID) < InlineHistory.size() && 294 "Invalid inline history ID"); 295 if (InlineHistory[InlineHistoryID].first == F) 296 return true; 297 InlineHistoryID = InlineHistory[InlineHistoryID].second; 298 } 299 return false; 300 } 301 302 bool LegacyInlinerBase::doInitialization(CallGraph &CG) { 303 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 304 ImportedFunctionsStats.setModuleInfo(CG.getModule()); 305 return false; // No changes to CallGraph. 306 } 307 308 bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) { 309 if (skipSCC(SCC)) 310 return false; 311 return inlineCalls(SCC); 312 } 313 314 static bool 315 inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG, 316 std::function<AssumptionCache &(Function &)> GetAssumptionCache, 317 ProfileSummaryInfo *PSI, 318 std::function<const TargetLibraryInfo &(Function &)> GetTLI, 319 bool InsertLifetime, 320 function_ref<InlineCost(CallBase &CB)> GetInlineCost, 321 function_ref<AAResults &(Function &)> AARGetter, 322 ImportedFunctionsInliningStatistics &ImportedFunctionsStats) { 323 SmallPtrSet<Function *, 8> SCCFunctions; 324 LLVM_DEBUG(dbgs() << "Inliner visiting SCC:"); 325 for (CallGraphNode *Node : SCC) { 326 Function *F = Node->getFunction(); 327 if (F) 328 SCCFunctions.insert(F); 329 LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE")); 330 } 331 332 // Scan through and identify all call sites ahead of time so that we only 333 // inline call sites in the original functions, not call sites that result 334 // from inlining other functions. 335 SmallVector<std::pair<CallBase *, int>, 16> CallSites; 336 337 // When inlining a callee produces new call sites, we want to keep track of 338 // the fact that they were inlined from the callee. This allows us to avoid 339 // infinite inlining in some obscure cases. To represent this, we use an 340 // index into the InlineHistory vector. 341 SmallVector<std::pair<Function *, int>, 8> InlineHistory; 342 343 for (CallGraphNode *Node : SCC) { 344 Function *F = Node->getFunction(); 345 if (!F || F->isDeclaration()) 346 continue; 347 348 OptimizationRemarkEmitter ORE(F); 349 for (BasicBlock &BB : *F) 350 for (Instruction &I : BB) { 351 auto *CB = dyn_cast<CallBase>(&I); 352 // If this isn't a call, or it is a call to an intrinsic, it can 353 // never be inlined. 354 if (!CB || isa<IntrinsicInst>(I)) 355 continue; 356 357 // If this is a direct call to an external function, we can never inline 358 // it. If it is an indirect call, inlining may resolve it to be a 359 // direct call, so we keep it. 360 if (Function *Callee = CB->getCalledFunction()) 361 if (Callee->isDeclaration()) { 362 using namespace ore; 363 364 setInlineRemark(*CB, "unavailable definition"); 365 ORE.emit([&]() { 366 return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I) 367 << NV("Callee", Callee) << " will not be inlined into " 368 << NV("Caller", CB->getCaller()) 369 << " because its definition is unavailable" 370 << setIsVerbose(); 371 }); 372 continue; 373 } 374 375 CallSites.push_back(std::make_pair(CB, -1)); 376 } 377 } 378 379 LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n"); 380 381 // If there are no calls in this function, exit early. 382 if (CallSites.empty()) 383 return false; 384 385 // Now that we have all of the call sites, move the ones to functions in the 386 // current SCC to the end of the list. 387 unsigned FirstCallInSCC = CallSites.size(); 388 for (unsigned I = 0; I < FirstCallInSCC; ++I) 389 if (Function *F = CallSites[I].first->getCalledFunction()) 390 if (SCCFunctions.count(F)) 391 std::swap(CallSites[I--], CallSites[--FirstCallInSCC]); 392 393 InlinedArrayAllocasTy InlinedArrayAllocas; 394 InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI); 395 396 // Now that we have all of the call sites, loop over them and inline them if 397 // it looks profitable to do so. 398 bool Changed = false; 399 bool LocalChange; 400 do { 401 LocalChange = false; 402 // Iterate over the outer loop because inlining functions can cause indirect 403 // calls to become direct calls. 404 // CallSites may be modified inside so ranged for loop can not be used. 405 for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) { 406 auto &P = CallSites[CSi]; 407 CallBase &CB = *P.first; 408 const int InlineHistoryID = P.second; 409 410 Function *Caller = CB.getCaller(); 411 Function *Callee = CB.getCalledFunction(); 412 413 // We can only inline direct calls to non-declarations. 414 if (!Callee || Callee->isDeclaration()) 415 continue; 416 417 bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller)); 418 419 if (!IsTriviallyDead) { 420 // If this call site was obtained by inlining another function, verify 421 // that the include path for the function did not include the callee 422 // itself. If so, we'd be recursively inlining the same function, 423 // which would provide the same callsites, which would cause us to 424 // infinitely inline. 425 if (InlineHistoryID != -1 && 426 inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) { 427 setInlineRemark(CB, "recursive"); 428 continue; 429 } 430 } 431 432 // FIXME for new PM: because of the old PM we currently generate ORE and 433 // in turn BFI on demand. With the new PM, the ORE dependency should 434 // just become a regular analysis dependency. 435 OptimizationRemarkEmitter ORE(Caller); 436 437 auto OIC = shouldInline(CB, GetInlineCost, ORE); 438 // If the policy determines that we should inline this function, 439 // delete the call instead. 440 if (!OIC) 441 continue; 442 443 // If this call site is dead and it is to a readonly function, we should 444 // just delete the call instead of trying to inline it, regardless of 445 // size. This happens because IPSCCP propagates the result out of the 446 // call and then we're left with the dead call. 447 if (IsTriviallyDead) { 448 LLVM_DEBUG(dbgs() << " -> Deleting dead call: " << CB << "\n"); 449 // Update the call graph by deleting the edge from Callee to Caller. 450 setInlineRemark(CB, "trivially dead"); 451 CG[Caller]->removeCallEdgeFor(CB); 452 CB.eraseFromParent(); 453 ++NumCallsDeleted; 454 } else { 455 // Get DebugLoc to report. CB will be invalid after Inliner. 456 DebugLoc DLoc = CB.getDebugLoc(); 457 BasicBlock *Block = CB.getParent(); 458 459 // Attempt to inline the function. 460 using namespace ore; 461 462 InlineResult IR = inlineCallIfPossible( 463 CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID, 464 InsertLifetime, AARGetter, ImportedFunctionsStats); 465 if (!IR.isSuccess()) { 466 setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " + 467 inlineCostStr(*OIC)); 468 ORE.emit([&]() { 469 return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, 470 Block) 471 << NV("Callee", Callee) << " will not be inlined into " 472 << NV("Caller", Caller) << ": " 473 << NV("Reason", IR.getFailureReason()); 474 }); 475 continue; 476 } 477 ++NumInlined; 478 479 emitInlinedInto(ORE, DLoc, Block, *Callee, *Caller, *OIC); 480 481 // If inlining this function gave us any new call sites, throw them 482 // onto our worklist to process. They are useful inline candidates. 483 if (!InlineInfo.InlinedCalls.empty()) { 484 // Create a new inline history entry for this, so that we remember 485 // that these new callsites came about due to inlining Callee. 486 int NewHistoryID = InlineHistory.size(); 487 InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID)); 488 489 #ifndef NDEBUG 490 // Make sure no dupplicates in the inline candidates. This could 491 // happen when a callsite is simpilfied to reusing the return value 492 // of another callsite during function cloning, thus the other 493 // callsite will be reconsidered here. 494 DenseSet<CallBase *> DbgCallSites; 495 for (auto &II : CallSites) 496 DbgCallSites.insert(II.first); 497 #endif 498 499 for (Value *Ptr : InlineInfo.InlinedCalls) { 500 #ifndef NDEBUG 501 assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0); 502 #endif 503 CallSites.push_back( 504 std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID)); 505 } 506 } 507 } 508 509 // If we inlined or deleted the last possible call site to the function, 510 // delete the function body now. 511 if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() && 512 // TODO: Can remove if in SCC now. 513 !SCCFunctions.count(Callee) && 514 // The function may be apparently dead, but if there are indirect 515 // callgraph references to the node, we cannot delete it yet, this 516 // could invalidate the CGSCC iterator. 517 CG[Callee]->getNumReferences() == 0) { 518 LLVM_DEBUG(dbgs() << " -> Deleting dead function: " 519 << Callee->getName() << "\n"); 520 CallGraphNode *CalleeNode = CG[Callee]; 521 522 // Remove any call graph edges from the callee to its callees. 523 CalleeNode->removeAllCalledFunctions(); 524 525 // Removing the node for callee from the call graph and delete it. 526 delete CG.removeFunctionFromModule(CalleeNode); 527 ++NumDeleted; 528 } 529 530 // Remove this call site from the list. If possible, use 531 // swap/pop_back for efficiency, but do not use it if doing so would 532 // move a call site to a function in this SCC before the 533 // 'FirstCallInSCC' barrier. 534 if (SCC.isSingular()) { 535 CallSites[CSi] = CallSites.back(); 536 CallSites.pop_back(); 537 } else { 538 CallSites.erase(CallSites.begin() + CSi); 539 } 540 --CSi; 541 542 Changed = true; 543 LocalChange = true; 544 } 545 } while (LocalChange); 546 547 return Changed; 548 } 549 550 bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) { 551 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); 552 ACT = &getAnalysis<AssumptionCacheTracker>(); 553 PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); 554 GetTLI = [&](Function &F) -> const TargetLibraryInfo & { 555 return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); 556 }; 557 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & { 558 return ACT->getAssumptionCache(F); 559 }; 560 return inlineCallsImpl( 561 SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime, 562 [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this), 563 ImportedFunctionsStats); 564 } 565 566 /// Remove now-dead linkonce functions at the end of 567 /// processing to avoid breaking the SCC traversal. 568 bool LegacyInlinerBase::doFinalization(CallGraph &CG) { 569 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 570 ImportedFunctionsStats.dump(InlinerFunctionImportStats == 571 InlinerFunctionImportStatsOpts::Verbose); 572 return removeDeadFunctions(CG); 573 } 574 575 /// Remove dead functions that are not included in DNR (Do Not Remove) list. 576 bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG, 577 bool AlwaysInlineOnly) { 578 SmallVector<CallGraphNode *, 16> FunctionsToRemove; 579 SmallVector<Function *, 16> DeadFunctionsInComdats; 580 581 auto RemoveCGN = [&](CallGraphNode *CGN) { 582 // Remove any call graph edges from the function to its callees. 583 CGN->removeAllCalledFunctions(); 584 585 // Remove any edges from the external node to the function's call graph 586 // node. These edges might have been made irrelegant due to 587 // optimization of the program. 588 CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN); 589 590 // Removing the node for callee from the call graph and delete it. 591 FunctionsToRemove.push_back(CGN); 592 }; 593 594 // Scan for all of the functions, looking for ones that should now be removed 595 // from the program. Insert the dead ones in the FunctionsToRemove set. 596 for (const auto &I : CG) { 597 CallGraphNode *CGN = I.second.get(); 598 Function *F = CGN->getFunction(); 599 if (!F || F->isDeclaration()) 600 continue; 601 602 // Handle the case when this function is called and we only want to care 603 // about always-inline functions. This is a bit of a hack to share code 604 // between here and the InlineAlways pass. 605 if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline)) 606 continue; 607 608 // If the only remaining users of the function are dead constants, remove 609 // them. 610 F->removeDeadConstantUsers(); 611 612 if (!F->isDefTriviallyDead()) 613 continue; 614 615 // It is unsafe to drop a function with discardable linkage from a COMDAT 616 // without also dropping the other members of the COMDAT. 617 // The inliner doesn't visit non-function entities which are in COMDAT 618 // groups so it is unsafe to do so *unless* the linkage is local. 619 if (!F->hasLocalLinkage()) { 620 if (F->hasComdat()) { 621 DeadFunctionsInComdats.push_back(F); 622 continue; 623 } 624 } 625 626 RemoveCGN(CGN); 627 } 628 if (!DeadFunctionsInComdats.empty()) { 629 // Filter out the functions whose comdats remain alive. 630 filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats); 631 // Remove the rest. 632 for (Function *F : DeadFunctionsInComdats) 633 RemoveCGN(CG[F]); 634 } 635 636 if (FunctionsToRemove.empty()) 637 return false; 638 639 // Now that we know which functions to delete, do so. We didn't want to do 640 // this inline, because that would invalidate our CallGraph::iterator 641 // objects. :( 642 // 643 // Note that it doesn't matter that we are iterating over a non-stable order 644 // here to do this, it doesn't matter which order the functions are deleted 645 // in. 646 array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end()); 647 FunctionsToRemove.erase( 648 std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()), 649 FunctionsToRemove.end()); 650 for (CallGraphNode *CGN : FunctionsToRemove) { 651 delete CG.removeFunctionFromModule(CGN); 652 ++NumDeleted; 653 } 654 return true; 655 } 656 657 InlinerPass::~InlinerPass() { 658 if (ImportedFunctionsStats) { 659 assert(InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No); 660 ImportedFunctionsStats->dump(InlinerFunctionImportStats == 661 InlinerFunctionImportStatsOpts::Verbose); 662 } 663 } 664 665 InlineAdvisor & 666 InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM, 667 FunctionAnalysisManager &FAM, Module &M) { 668 auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M); 669 if (!IAA) { 670 // It should still be possible to run the inliner as a stand-alone SCC pass, 671 // for test scenarios. In that case, we default to the 672 // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass 673 // runs. It also uses just the default InlineParams. 674 // In this case, we need to use the provided FAM, which is valid for the 675 // duration of the inliner pass, and thus the lifetime of the owned advisor. 676 // The one we would get from the MAM can be invalidated as a result of the 677 // inliner's activity. 678 OwnedDefaultAdvisor.emplace(FAM, getInlineParams()); 679 return *OwnedDefaultAdvisor; 680 } 681 assert(IAA->getAdvisor() && 682 "Expected a present InlineAdvisorAnalysis also have an " 683 "InlineAdvisor initialized"); 684 return *IAA->getAdvisor(); 685 } 686 687 PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC, 688 CGSCCAnalysisManager &AM, LazyCallGraph &CG, 689 CGSCCUpdateResult &UR) { 690 const auto &MAMProxy = 691 AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG); 692 bool Changed = false; 693 694 assert(InitialC.size() > 0 && "Cannot handle an empty SCC!"); 695 Module &M = *InitialC.begin()->getFunction().getParent(); 696 ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M); 697 698 FunctionAnalysisManager &FAM = 699 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG) 700 .getManager(); 701 702 InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M); 703 Advisor.onPassEntry(); 704 705 auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(); }); 706 707 if (!ImportedFunctionsStats && 708 InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) { 709 ImportedFunctionsStats = 710 std::make_unique<ImportedFunctionsInliningStatistics>(); 711 ImportedFunctionsStats->setModuleInfo(M); 712 } 713 714 // We use a single common worklist for calls across the entire SCC. We 715 // process these in-order and append new calls introduced during inlining to 716 // the end. 717 // 718 // Note that this particular order of processing is actually critical to 719 // avoid very bad behaviors. Consider *highly connected* call graphs where 720 // each function contains a small amonut of code and a couple of calls to 721 // other functions. Because the LLVM inliner is fundamentally a bottom-up 722 // inliner, it can handle gracefully the fact that these all appear to be 723 // reasonable inlining candidates as it will flatten things until they become 724 // too big to inline, and then move on and flatten another batch. 725 // 726 // However, when processing call edges *within* an SCC we cannot rely on this 727 // bottom-up behavior. As a consequence, with heavily connected *SCCs* of 728 // functions we can end up incrementally inlining N calls into each of 729 // N functions because each incremental inlining decision looks good and we 730 // don't have a topological ordering to prevent explosions. 731 // 732 // To compensate for this, we don't process transitive edges made immediate 733 // by inlining until we've done one pass of inlining across the entire SCC. 734 // Large, highly connected SCCs still lead to some amount of code bloat in 735 // this model, but it is uniformly spread across all the functions in the SCC 736 // and eventually they all become too large to inline, rather than 737 // incrementally maknig a single function grow in a super linear fashion. 738 SmallVector<std::pair<CallBase *, int>, 16> Calls; 739 740 // Populate the initial list of calls in this SCC. 741 for (auto &N : InitialC) { 742 auto &ORE = 743 FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction()); 744 // We want to generally process call sites top-down in order for 745 // simplifications stemming from replacing the call with the returned value 746 // after inlining to be visible to subsequent inlining decisions. 747 // FIXME: Using instructions sequence is a really bad way to do this. 748 // Instead we should do an actual RPO walk of the function body. 749 for (Instruction &I : instructions(N.getFunction())) 750 if (auto *CB = dyn_cast<CallBase>(&I)) 751 if (Function *Callee = CB->getCalledFunction()) { 752 if (!Callee->isDeclaration()) 753 Calls.push_back({CB, -1}); 754 else if (!isa<IntrinsicInst>(I)) { 755 using namespace ore; 756 setInlineRemark(*CB, "unavailable definition"); 757 ORE.emit([&]() { 758 return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I) 759 << NV("Callee", Callee) << " will not be inlined into " 760 << NV("Caller", CB->getCaller()) 761 << " because its definition is unavailable" 762 << setIsVerbose(); 763 }); 764 } 765 } 766 } 767 if (Calls.empty()) 768 return PreservedAnalyses::all(); 769 770 // Capture updatable variables for the current SCC and RefSCC. 771 auto *C = &InitialC; 772 auto *RC = &C->getOuterRefSCC(); 773 774 // When inlining a callee produces new call sites, we want to keep track of 775 // the fact that they were inlined from the callee. This allows us to avoid 776 // infinite inlining in some obscure cases. To represent this, we use an 777 // index into the InlineHistory vector. 778 SmallVector<std::pair<Function *, int>, 16> InlineHistory; 779 780 // Track a set vector of inlined callees so that we can augment the caller 781 // with all of their edges in the call graph before pruning out the ones that 782 // got simplified away. 783 SmallSetVector<Function *, 4> InlinedCallees; 784 785 // Track the dead functions to delete once finished with inlining calls. We 786 // defer deleting these to make it easier to handle the call graph updates. 787 SmallVector<Function *, 4> DeadFunctions; 788 789 // Loop forward over all of the calls. Note that we cannot cache the size as 790 // inlining can introduce new calls that need to be processed. 791 for (int I = 0; I < (int)Calls.size(); ++I) { 792 // We expect the calls to typically be batched with sequences of calls that 793 // have the same caller, so we first set up some shared infrastructure for 794 // this caller. We also do any pruning we can at this layer on the caller 795 // alone. 796 Function &F = *Calls[I].first->getCaller(); 797 LazyCallGraph::Node &N = *CG.lookup(F); 798 if (CG.lookupSCC(N) != C) 799 continue; 800 if (!Calls[I].first->getCalledFunction()->hasFnAttribute( 801 Attribute::AlwaysInline) && 802 F.hasOptNone()) { 803 setInlineRemark(*Calls[I].first, "optnone attribute"); 804 continue; 805 } 806 807 LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"); 808 809 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & { 810 return FAM.getResult<AssumptionAnalysis>(F); 811 }; 812 813 // Now process as many calls as we have within this caller in the sequence. 814 // We bail out as soon as the caller has to change so we can update the 815 // call graph and prepare the context of that new caller. 816 bool DidInline = false; 817 for (; I < (int)Calls.size() && Calls[I].first->getCaller() == &F; ++I) { 818 auto &P = Calls[I]; 819 CallBase *CB = P.first; 820 const int InlineHistoryID = P.second; 821 Function &Callee = *CB->getCalledFunction(); 822 823 if (InlineHistoryID != -1 && 824 inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) { 825 setInlineRemark(*CB, "recursive"); 826 continue; 827 } 828 829 // Check if this inlining may repeat breaking an SCC apart that has 830 // already been split once before. In that case, inlining here may 831 // trigger infinite inlining, much like is prevented within the inliner 832 // itself by the InlineHistory above, but spread across CGSCC iterations 833 // and thus hidden from the full inline history. 834 if (CG.lookupSCC(*CG.lookup(Callee)) == C && 835 UR.InlinedInternalEdges.count({&N, C})) { 836 LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node " 837 "previously split out of this SCC by inlining: " 838 << F.getName() << " -> " << Callee.getName() << "\n"); 839 setInlineRemark(*CB, "recursive SCC split"); 840 continue; 841 } 842 843 auto Advice = Advisor.getAdvice(*CB); 844 // Check whether we want to inline this callsite. 845 if (!Advice->isInliningRecommended()) { 846 Advice->recordUnattemptedInlining(); 847 continue; 848 } 849 850 // Setup the data structure used to plumb customization into the 851 // `InlineFunction` routine. 852 InlineFunctionInfo IFI( 853 /*cg=*/nullptr, GetAssumptionCache, PSI, 854 &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())), 855 &FAM.getResult<BlockFrequencyAnalysis>(Callee)); 856 857 InlineResult IR = 858 InlineFunction(*CB, IFI, &FAM.getResult<AAManager>(*CB->getCaller())); 859 if (!IR.isSuccess()) { 860 Advice->recordUnsuccessfulInlining(IR); 861 continue; 862 } 863 864 DidInline = true; 865 InlinedCallees.insert(&Callee); 866 ++NumInlined; 867 868 // Add any new callsites to defined functions to the worklist. 869 if (!IFI.InlinedCallSites.empty()) { 870 int NewHistoryID = InlineHistory.size(); 871 InlineHistory.push_back({&Callee, InlineHistoryID}); 872 873 for (CallBase *ICB : reverse(IFI.InlinedCallSites)) { 874 Function *NewCallee = ICB->getCalledFunction(); 875 if (!NewCallee) { 876 // Try to promote an indirect (virtual) call without waiting for 877 // the post-inline cleanup and the next DevirtSCCRepeatedPass 878 // iteration because the next iteration may not happen and we may 879 // miss inlining it. 880 if (tryPromoteCall(*ICB)) 881 NewCallee = ICB->getCalledFunction(); 882 } 883 if (NewCallee) 884 if (!NewCallee->isDeclaration()) 885 Calls.push_back({ICB, NewHistoryID}); 886 } 887 } 888 889 if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) 890 ImportedFunctionsStats->recordInline(F, Callee); 891 892 // Merge the attributes based on the inlining. 893 AttributeFuncs::mergeAttributesForInlining(F, Callee); 894 895 // For local functions, check whether this makes the callee trivially 896 // dead. In that case, we can drop the body of the function eagerly 897 // which may reduce the number of callers of other functions to one, 898 // changing inline cost thresholds. 899 bool CalleeWasDeleted = false; 900 if (Callee.hasLocalLinkage()) { 901 // To check this we also need to nuke any dead constant uses (perhaps 902 // made dead by this operation on other functions). 903 Callee.removeDeadConstantUsers(); 904 if (Callee.use_empty() && !CG.isLibFunction(Callee)) { 905 Calls.erase( 906 std::remove_if(Calls.begin() + I + 1, Calls.end(), 907 [&](const std::pair<CallBase *, int> &Call) { 908 return Call.first->getCaller() == &Callee; 909 }), 910 Calls.end()); 911 // Clear the body and queue the function itself for deletion when we 912 // finish inlining and call graph updates. 913 // Note that after this point, it is an error to do anything other 914 // than use the callee's address or delete it. 915 Callee.dropAllReferences(); 916 assert(find(DeadFunctions, &Callee) == DeadFunctions.end() && 917 "Cannot put cause a function to become dead twice!"); 918 DeadFunctions.push_back(&Callee); 919 CalleeWasDeleted = true; 920 } 921 } 922 if (CalleeWasDeleted) 923 Advice->recordInliningWithCalleeDeleted(); 924 else 925 Advice->recordInlining(); 926 } 927 928 // Back the call index up by one to put us in a good position to go around 929 // the outer loop. 930 --I; 931 932 if (!DidInline) 933 continue; 934 Changed = true; 935 936 // At this point, since we have made changes we have at least removed 937 // a call instruction. However, in the process we do some incremental 938 // simplification of the surrounding code. This simplification can 939 // essentially do all of the same things as a function pass and we can 940 // re-use the exact same logic for updating the call graph to reflect the 941 // change. 942 943 // Inside the update, we also update the FunctionAnalysisManager in the 944 // proxy for this particular SCC. We do this as the SCC may have changed and 945 // as we're going to mutate this particular function we want to make sure 946 // the proxy is in place to forward any invalidation events. 947 LazyCallGraph::SCC *OldC = C; 948 C = &updateCGAndAnalysisManagerForCGSCCPass(CG, *C, N, AM, UR, FAM); 949 LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n"); 950 RC = &C->getOuterRefSCC(); 951 952 // If this causes an SCC to split apart into multiple smaller SCCs, there 953 // is a subtle risk we need to prepare for. Other transformations may 954 // expose an "infinite inlining" opportunity later, and because of the SCC 955 // mutation, we will revisit this function and potentially re-inline. If we 956 // do, and that re-inlining also has the potentially to mutate the SCC 957 // structure, the infinite inlining problem can manifest through infinite 958 // SCC splits and merges. To avoid this, we capture the originating caller 959 // node and the SCC containing the call edge. This is a slight over 960 // approximation of the possible inlining decisions that must be avoided, 961 // but is relatively efficient to store. We use C != OldC to know when 962 // a new SCC is generated and the original SCC may be generated via merge 963 // in later iterations. 964 // 965 // It is also possible that even if no new SCC is generated 966 // (i.e., C == OldC), the original SCC could be split and then merged 967 // into the same one as itself. and the original SCC will be added into 968 // UR.CWorklist again, we want to catch such cases too. 969 // 970 // FIXME: This seems like a very heavyweight way of retaining the inline 971 // history, we should look for a more efficient way of tracking it. 972 if ((C != OldC || UR.CWorklist.count(OldC)) && 973 llvm::any_of(InlinedCallees, [&](Function *Callee) { 974 return CG.lookupSCC(*CG.lookup(*Callee)) == OldC; 975 })) { 976 LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, " 977 "retaining this to avoid infinite inlining.\n"); 978 UR.InlinedInternalEdges.insert({&N, OldC}); 979 } 980 InlinedCallees.clear(); 981 } 982 983 // Now that we've finished inlining all of the calls across this SCC, delete 984 // all of the trivially dead functions, updating the call graph and the CGSCC 985 // pass manager in the process. 986 // 987 // Note that this walks a pointer set which has non-deterministic order but 988 // that is OK as all we do is delete things and add pointers to unordered 989 // sets. 990 for (Function *DeadF : DeadFunctions) { 991 // Get the necessary information out of the call graph and nuke the 992 // function there. Also, clear out any cached analyses. 993 auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF)); 994 FAM.clear(*DeadF, DeadF->getName()); 995 AM.clear(DeadC, DeadC.getName()); 996 auto &DeadRC = DeadC.getOuterRefSCC(); 997 CG.removeDeadFunction(*DeadF); 998 999 // Mark the relevant parts of the call graph as invalid so we don't visit 1000 // them. 1001 UR.InvalidatedSCCs.insert(&DeadC); 1002 UR.InvalidatedRefSCCs.insert(&DeadRC); 1003 1004 // And delete the actual function from the module. 1005 // The Advisor may use Function pointers to efficiently index various 1006 // internal maps, e.g. for memoization. Function cleanup passes like 1007 // argument promotion create new functions. It is possible for a new 1008 // function to be allocated at the address of a deleted function. We could 1009 // index using names, but that's inefficient. Alternatively, we let the 1010 // Advisor free the functions when it sees fit. 1011 DeadF->getBasicBlockList().clear(); 1012 M.getFunctionList().remove(DeadF); 1013 1014 ++NumDeleted; 1015 } 1016 1017 if (!Changed) 1018 return PreservedAnalyses::all(); 1019 1020 // Even if we change the IR, we update the core CGSCC data structures and so 1021 // can preserve the proxy to the function analysis manager. 1022 PreservedAnalyses PA; 1023 PA.preserve<FunctionAnalysisManagerCGSCCProxy>(); 1024 return PA; 1025 } 1026 1027 ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params, 1028 bool Debugging, 1029 InliningAdvisorMode Mode, 1030 unsigned MaxDevirtIterations) 1031 : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations), 1032 PM(Debugging), MPM(Debugging) { 1033 // Run the inliner first. The theory is that we are walking bottom-up and so 1034 // the callees have already been fully optimized, and we want to inline them 1035 // into the callers so that our optimizations can reflect that. 1036 // For PreLinkThinLTO pass, we disable hot-caller heuristic for sample PGO 1037 // because it makes profile annotation in the backend inaccurate. 1038 PM.addPass(InlinerPass()); 1039 } 1040 1041 PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M, 1042 ModuleAnalysisManager &MAM) { 1043 auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M); 1044 if (!IAA.tryCreate(Params, Mode)) { 1045 M.getContext().emitError( 1046 "Could not setup Inlining Advisor for the requested " 1047 "mode and/or options"); 1048 return PreservedAnalyses::all(); 1049 } 1050 1051 if (!DisableAlwaysInlinerInModuleWrapper) 1052 MPM.addPass(AlwaysInlinerPass()); 1053 // We wrap the CGSCC pipeline in a devirtualization repeater. This will try 1054 // to detect when we devirtualize indirect calls and iterate the SCC passes 1055 // in that case to try and catch knock-on inlining or function attrs 1056 // opportunities. Then we add it to the module pipeline by walking the SCCs 1057 // in postorder (or bottom-up). 1058 // If MaxDevirtIterations is 0, we just don't use the devirtualization 1059 // wrapper. 1060 if (MaxDevirtIterations == 0) 1061 MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM))); 1062 else 1063 MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor( 1064 createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations))); 1065 auto Ret = MPM.run(M, MAM); 1066 1067 IAA.clear(); 1068 return Ret; 1069 } 1070