//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls while keeping the call graph up to date. The decisions
// about which calls are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);
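
// Illustrative invocation (a sketch; "inline" is the legacy inliner's
// registered pass name, and as a hidden option the flag only appears under
// --help-hidden):
//   opt -inline -disable-inlined-alloca-merging -S input.ll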

namespace {

enum class InlinerFunctionImportStatsOpts {
  No = 0,
  Basic = 1,
  Verbose = 2,
};

} // end anonymous namespace

static cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats(
    "inliner-function-import-stats",
    cl::init(InlinerFunctionImportStatsOpts::No),
    cl::values(clEnumValN(InlinerFunctionImportStatsOpts::Basic, "basic",
                          "basic statistics"),
               clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose",
                          "printing of statistics for each inlined function")),
    cl::Hidden, cl::desc("Enable inliner stats for imported functions"));

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should always explicitly
/// call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas, but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;
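
  // As a sketch of what the loop below achieves (hypothetical IR): after
  // inlining two different calls into the same caller, the two inlined
  // frames have disjoint lifetimes, so
  //   %a = alloca [16 x i32]   ; from the first inlined callee
  //   %b = alloca [16 x i32]   ; from the second inlined callee
  // can share one slot: %b's uses are RAUW'd onto %a and %b is erased,
  // saving 64 bytes of stack on a typical target.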
  // Loop over all the allocas we have so far and see if they can be merged
  // with a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.
    // Note that we have to be careful not to reuse the same "available"
    // alloca for multiple different allocas that we just inlined; we use the
    // 'UsedAllocas' set to keep track of which "available" allocas are being
    // used by this function. Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      unsigned Align1 = AI->getAlignment(),
               Align2 = AvailableAlloca->getAlignment();

      // The available alloca has to be in the right function, not in some
      // other function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and
      // declare success!
      LLVM_DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation where a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);
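
      // Keep the stricter of the two alignments on the surviving alloca. An
      // unspecified alignment (0) is first normalized to the ABI type
      // alignment so the comparison below is meaningful; for example,
      // merging an align-16 alloca into a default-aligned one must not
      // silently weaken the 16-byte requirement.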
      if (Align1 != Align2) {
        if (!Align1 || !Align2) {
          const DataLayout &DL = Caller->getParent()->getDataLayout();
          unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());

          Align1 = Align1 ? Align1 : TypeAlign;
          Align2 = Align2 ? Align2 : TypeAlign;
        }

        if (Align1 > Align2)
          AvailableAlloca->setAlignment(AI->getAlign());
      }

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca, either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and
    // mark it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site, do so and update the
/// CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID indicates an inline history
/// that includes the specified function.
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}
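
// For example, if inlining @b into @a records history entry 0 as {b, -1},
// and a call site exposed by that inlining is itself inlined, recording
// entry 1 as {c, 0}, then a candidate tagged with ID 1 walks 1 -> 0 -> -1:
// both c and b are considered to be on the inline path, and re-inlining
// either of them is rejected as recursive.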

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never
        // inline it. If it is an indirect call, inlining may resolve it to
        // be a direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);
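
  // For instance, with CallSites = [ext1, scc1, ext2, scc2] (where the sccN
  // sites call into this SCC), the swap loop above yields
  // [ext1, ext2 | scc2, scc1]: everything at or beyond FirstCallInSCC
  // targets the current SCC and is therefore visited after the plain calls.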

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // We iterate with an outer loop because inlining functions can cause
    // indirect calls to become direct calls. CallSites may be modified inside
    // the loop, so a ranged for loop cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline path for the function did not include the callee
        // itself. If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand. With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determines that we should not inline this call site,
      // move on to the next one.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get the DebugLoc to report: CB will be invalid after inlining.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedInto(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates in the inline candidates. This
          // could happen when a call site is simplified to reuse the return
          // value of another call site during function cloning; the other
          // call site would then be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of processing to avoid
/// breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}
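
// Illustrative invocation that exercises the statistics dump above (a
// sketch; "inline" is the legacy inliner's registered pass name):
//   opt -inline -inliner-function-import-stats=verbose -S imported.ll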

/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to optimization
    // of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node so the function can be removed from the call graph and
    // deleted.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan all of the functions, looking for ones that should now be removed
  // from the program. Insert the dead ones in the FunctionsToRemove set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups, so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that iterating over a non-stable order here is fine: it does not
  // matter in which order the functions are deleted.
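  // Sort and unique the node pointers first so that each queued node is
  // deleted exactly once, even if it somehow ended up in FunctionsToRemove
  // more than once.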
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlinerPass::~InlinerPass() {
  if (ImportedFunctionsStats) {
    assert(InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No);
    ImportedFunctionsStats->dump(InlinerFunctionImportStats ==
                                 InlinerFunctionImportStatsOpts::Verbose);
  }
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        Module &M) {
  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC
    // pass, for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass
    // runs. It also uses just the default InlineParams.
    OwnedDefaultAdvisor.emplace(getInlineParams());
    return *OwnedDefaultAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, M);
  Advisor.onPassEntry();

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(); });

  if (!ImportedFunctionsStats &&
      InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) {
    ImportedFunctionsStats =
        std::make_unique<ImportedFunctionsInliningStatistics>();
    ImportedFunctionsStats->setModuleInfo(M);
  }

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can gracefully handle the fact that these all appear to be
  // reasonable inlining candidates, as it will flatten things until they
  // become too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on
  // this bottom-up behavior. As a consequence, with heavily connected *SCCs*
  // of functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the
  // SCC and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a super-linear fashion.
  SmallVector<std::pair<CallBase *, int>, 16> Calls;
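
  // As a concrete (hypothetical) illustration of the blow-up described
  // above: in an SCC {f1, f2, f3, f4} where every function calls every
  // other, eagerly chasing each call edge made immediate by inlining would
  // let every fi absorb copies of all the others within a single visit, with
  // each copy exposing yet more internal edges. Deferring those transitive
  // edges to the next pass over the SCC bounds how much any one function
  // grows per iteration.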

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned
    // value after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push_back({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
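
  // A sketch of why the top-down order above pays off (hypothetical IR): if
  // a caller contains
  //   %r = call i32 @g()
  //   call void @h(i32 %r)
  // then inlining the @g site first lets any simplification of %r (say, to a
  // constant) be visible when the @h site is evaluated for inlining.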
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variables for the current SCC and RefSCC.
  auto *C = &InitialC;
  auto *RC = &C->getOuterRefSCC();

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones
  // that got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Loop forward over all of the calls. Note that we cannot cache the size as
  // inlining can introduce new calls that need to be processed.
  for (int I = 0; I < (int)Calls.size(); ++I) {
    // We expect the calls to typically be batched with sequences of calls
    // that have the same caller, so we first set up some shared
    // infrastructure for this caller. We also do any pruning we can at this
    // layer on the caller alone.
    Function &F = *Calls[I].first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C)
      continue;
    if (F.hasOptNone()) {
      setInlineRemark(*Calls[I].first, "optnone attribute");
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the
    // sequence. We bail out as soon as the caller has to change so we can
    // update the call graph and prepare the context of that new caller.
    bool DidInline = false;
    for (; I < (int)Calls.size() && Calls[I].first->getCaller() == &F; ++I) {
      auto &P = Calls[I];
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check whether this inlining would once again break apart an SCC that
      // has already been split before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      auto Advice = Advisor.getAdvice(*CB, FAM);
      // Check whether we want to inline this callsite.
      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

      // Set up the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR = InlineFunction(*CB, IFI);
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee)
            if (!NewCallee->isDeclaration())
              Calls.push_back({ICB, NewHistoryID});
        }
      }
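
      // A sketch of the promotion attempted above: if inlining exposed the
      // concrete target of an indirect site, e.g. an indirect
      //   call void %vfn(ptr %obj)
      // whose callee operand now resolves to a known function, tryPromoteCall
      // can rewrite it into a direct call, which then becomes an ordinary
      // candidate on this worklist instead of waiting for the next
      // devirtualization iteration.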

      if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
        ImportedFunctionsStats->recordInline(F, Callee);

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions, check whether this makes the callee trivially
      // dead. In that case, we can drop the body of the function eagerly,
      // which may reduce the number of callers of other functions to one,
      // changing inline cost thresholds.
      bool CalleeWasDeleted = false;
      if (Callee.hasLocalLinkage()) {
        // To check this we also need to nuke any dead constant uses (perhaps
        // made dead by this operation on other functions).
        Callee.removeDeadConstantUsers();
        if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
          Calls.erase(
              std::remove_if(Calls.begin() + I + 1, Calls.end(),
                             [&](const std::pair<CallBase *, int> &Call) {
                               return Call.first->getCaller() == &Callee;
                             }),
              Calls.end());
          // Clear the body and queue the function itself for deletion when
          // we finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(find(DeadFunctions, &Callee) == DeadFunctions.end() &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    // Back the call index up by one to put us in a good position to go around
    // the outer loop.
    --I;

    if (!DidInline)
      continue;
    Changed = true;

    // Add all the inlined callees' edges as ref edges to the caller. These
    // are by definition trivial edges as we always have *some* transitive
    // ref edge chain. While in some cases these edges are direct calls
    // inside the callee, they have to be modeled in the inliner as reference
    // edges as there may be a reference edge anywhere along the chain from
    // the current caller to the callee that causes the whole thing to appear
    // like a (transitive) reference edge that will require promotion to a
    // call edge below.
    for (Function *InlinedCallee : InlinedCallees) {
      LazyCallGraph::Node &CalleeN = *CG.lookup(*InlinedCallee);
      for (LazyCallGraph::Edge &E : *CalleeN)
        RC->insertTrivialRefEdge(N, E.getNode());
    }

    // At this point, since we have made changes, we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed,
    // and as we're going to mutate this particular function we want to make
    // sure the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");
    RC = &C->getOuterRefSCC();

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the
    // SCC mutation, we will revisit this function and potentially re-inline.
    // If we do, and that re-inlining also has the potential to mutate the
    // SCC structure, the infinite inlining problem can manifest through
    // infinite SCC splits and merges. To avoid this, we capture the
    // originating caller node and the SCC containing the call edge. This is
    // a slight overapproximation of the possible inlining decisions that
    // must be avoided, but is relatively efficient to store. We use
    // C != OldC to know when a new SCC is generated and the original SCC may
    // be generated via merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // back into the same one as itself, and the original SCC will be added
    // into UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history; we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the
  // CGSCC pass manager in the process.
  //
  // Note that the order in which the dead functions are processed does not
  // matter: all we do is delete things and add pointers to unordered sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // And delete the actual function from the module.
    // The Advisor may use Function pointers to efficiently index various
    // internal maps, e.g. for memoization. Function cleanup passes like
    // argument promotion create new functions. It is possible for a new
    // function to be allocated at the address of a deleted function. We could
    // index using names, but that's inefficient. Alternatively, we let the
    // Advisor free the functions when it sees fit.
    DeadF->getBasicBlockList().clear();
    M.getFunctionList().remove(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  return PA;
}
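
// Typical construction of the wrapper defined below (a sketch; the advisor
// mode enum comes from InlineAdvisor.h):
//   ModulePassManager MPM;
//   MPM.addPass(ModuleInlinerWrapperPass(getInlineParams(),
//                                        /*Debugging=*/false,
//                                        InliningAdvisorMode::Default,
//                                        /*MaxDevirtIterations=*/4));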

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool Debugging,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations),
      PM(Debugging), MPM(Debugging) {
  // Run the inliner first. The theory is that we are walking bottom-up and so
  // the callees have already been fully optimized, and we want to inline them
  // into the callers so that our optimizations can reflect that.
  // For the PreLinkThinLTO pass, we disable the hot-caller heuristic for
  // sample PGO because it makes profile annotation in the backend inaccurate.
  PM.addPass(InlinerPass());
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode)) {
    M.getContext().emitError(
        "Could not set up Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater. This will try
  // to detect when we devirtualize indirect calls and iterate the SCC passes
  // in that case to try and catch knock-on inlining or function attrs
  // opportunities. Then we add it to the module pipeline by walking the SCCs
  // in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));
  auto Ret = MPM.run(M, MAM);

  IAA.clear();
  return Ret;
}