//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls while keeping the call graph up to date. The decisions
// about which calls are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

namespace {

enum class InlinerFunctionImportStatsOpts {
  No = 0,
  Basic = 1,
  Verbose = 2,
};

} // end anonymous namespace

static cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats(
    "inliner-function-import-stats",
    cl::init(InlinerFunctionImportStatsOpts::No),
    cl::values(clEnumValN(InlinerFunctionImportStatsOpts::Basic, "basic",
                          "basic statistics"),
               clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose",
                          "printing of statistics for each inlined function")),
    cl::Hidden, cl::desc("Enable inliner stats for imported functions"));

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should always explicitly
/// call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
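///
/// For example, if two different call sites inlined into the same caller each
/// introduce a static "%buf = alloca [64 x i32]", the second inlined alloca
/// can be replaced (RAUW'd) with the first: the two inlined bodies never run
/// at the same time, so the buffers have disjoint lifetimes and can share one
/// stack slot.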
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas, but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged
  // with a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.
    // Note that we have to be careful not to reuse the same "available"
    // alloca for multiple different allocas that we just inlined; we use the
    // 'UsedAllocas' set to keep track of which "available" allocas are being
    // used by this function. Also, AllocasForType can be empty of course!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      unsigned Align1 = AI->getAlignment(),
               Align2 = AvailableAlloca->getAlignment();

      // The available alloca has to be in the right function, not in some
      // other function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      LLVM_DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      // If the replaced alloca required a stricter alignment than the
      // surviving one, propagate it. Implicit (zero) alignments are compared
      // using the ABI type alignment.
      if (Align1 != Align2) {
        if (!Align1 || !Align2) {
          const DataLayout &DL = Caller->getParent()->getDataLayout();
          unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());

          Align1 = Align1 ? Align1 : TypeAlign;
          Align2 = Align2 ? Align2 : TypeAlign;
        }

        if (Align1 > Align2)
          AvailableAlloca->setAlignment(MaybeAlign(AI->getAlignment()));
      }

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and
    // mark it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}
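// For example, with InlineHistory = {{B, -1}, {C, 0}}, a call site tagged with
// history ID 1 was created by inlining C, and that call to C was itself
// created by inlining B. inlineHistoryIncludes(B, 1, InlineHistory) walks the
// chain 1 -> 0 -> -1 and returns true, blocking a second round of inlining B.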
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never
        // inline it. If it is an indirect call, inlining may resolve it to
        // be a direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no call sites in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, &GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause
    // indirect calls to become direct calls.
    // CallSites may be modified inside, so a ranged for loop cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline path for the function did not include the callee
        // itself. If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand. With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determines that we should not inline this call site,
      // move on to the next one.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Caller to Callee.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get the DebugLoc to report; CB will be invalid after inlining.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedInto(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates in the inline candidates. This
          // could happen when a callsite is simplified to reuse the return
          // value of another callsite during function cloning; the other
          // callsite would then be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
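///
/// When \p AlwaysInlineOnly is set, only functions carrying the always_inline
/// attribute are considered for removal; this lets the always-inliner share
/// this code path.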
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node so it can be removed from the call graph and deleted.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be
  // removed from the program. Insert the dead ones in the FunctionsToRemove
  // set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups, so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this; it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlinerPass::~InlinerPass() {
  if (ImportedFunctionsStats) {
    assert(InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No);
    ImportedFunctionsStats->dump(InlinerFunctionImportStats ==
                                 InlinerFunctionImportStatsOpts::Verbose);
  }
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG).getManager();
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAM.getCachedResult<ProfileSummaryAnalysis>(M);

  if (!ImportedFunctionsStats &&
      InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) {
    ImportedFunctionsStats =
        std::make_unique<ImportedFunctionsInliningStatistics>();
    ImportedFunctionsStats->setModuleInfo(M);
  }

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they
  // become too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on
  // this bottom-up behavior. As a consequence, with heavily connected *SCCs*
  // of functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the
  // SCC and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a superlinear fashion.
  SmallVector<std::pair<CallBase *, int>, 16> Calls;

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned
    // value after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push_back({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variables for the current SCC and RefSCC.
  auto *C = &InitialC;
  auto *RC = &C->getOuterRefSCC();

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones
  // that got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Loop forward over all of the calls. Note that we cannot cache the size
  // as inlining can introduce new calls that need to be processed.
  for (int I = 0; I < (int)Calls.size(); ++I) {
    // We expect the calls to typically be batched with sequences of calls
    // that have the same caller, so we first set up some shared
    // infrastructure for this caller. We also do any pruning we can at this
    // layer on the caller alone.
    Function &F = *Calls[I].first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C)
      continue;
    if (F.hasOptNone()) {
      setInlineRemark(*Calls[I].first, "optnone attribute");
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n");

    // Get a FunctionAnalysisManager via a proxy for this particular node. We
    // do this each time we visit a node as the SCC may have changed and as
    // we're going to mutate this particular function we want to make sure
    // the proxy is in place to forward any invalidation events. We can use
    // the manager we get here for looking up results for functions other
    // than this node, however, because those functions aren't going to be
    // mutated by this pass.
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG).getManager();

    // Get the remarks emission analysis for the caller.
    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);

    std::function<AssumptionCache &(Function &)> GetAssumptionCache =
        [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };
    auto GetBFI = [&](Function &F) -> BlockFrequencyInfo & {
      return FAM.getResult<BlockFrequencyAnalysis>(F);
    };
    auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
      return FAM.getResult<TargetLibraryAnalysis>(F);
    };

    auto GetInlineCost = [&](CallBase &CB) {
      Function &Callee = *CB.getCalledFunction();
      auto &CalleeTTI = FAM.getResult<TargetIRAnalysis>(Callee);
      bool RemarksEnabled =
          Callee.getContext().getDiagHandlerPtr()->isMissedOptRemarkEnabled(
              DEBUG_TYPE);
      return getInlineCost(CB, Params, CalleeTTI, GetAssumptionCache, {GetBFI},
                           GetTLI, PSI, RemarksEnabled ? &ORE : nullptr);
    };

    // Now process as many calls as we have within this caller in the
    // sequence. We bail out as soon as the caller has to change so we can
    // update the call graph and prepare the context of that new caller.
    bool DidInline = false;
    for (; I < (int)Calls.size() && Calls[I].first->getCaller() == &F; ++I) {
      auto &P = Calls[I];
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check whether this inlining could again break apart an SCC that has
      // already been split once before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC
      // iterations and thus hidden from the full inline history.
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      auto OIC = shouldInline(*CB, GetInlineCost, ORE);
      // Check whether we want to inline this callsite.
      if (!OIC)
        continue;
      auto DoInline = [&]() -> InlineResult {
        // Set up the data structure used to plumb customization into the
        // `InlineFunction` routine.
        InlineFunctionInfo IFI(
            /*cg=*/nullptr, &GetAssumptionCache, PSI,
            &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
            &FAM.getResult<BlockFrequencyAnalysis>(Callee));

        InlineResult IR = InlineFunction(*CB, IFI);
        if (!IR.isSuccess())
          return IR;

        DidInline = true;
        InlinedCallees.insert(&Callee);
        ++NumInlined;

        // Add any new call sites to defined functions to the worklist.
        if (!IFI.InlinedCallSites.empty()) {
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back({&Callee, InlineHistoryID});

          for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
            Function *NewCallee = ICB->getCalledFunction();
            if (!NewCallee) {
              // Try to promote an indirect (virtual) call without waiting
              // for the post-inline cleanup and the next
              // DevirtSCCRepeatedPass iteration because the next iteration
              // may not happen and we may miss inlining it.
              if (tryPromoteCall(*ICB))
                NewCallee = ICB->getCalledFunction();
            }
            if (NewCallee)
              if (!NewCallee->isDeclaration())
                Calls.push_back({ICB, NewHistoryID});
          }
        }

        if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
          ImportedFunctionsStats->recordInline(F, Callee);

        // Merge the attributes based on the inlining.
        AttributeFuncs::mergeAttributesForInlining(F, Callee);

        // For local functions, check whether this makes the callee trivially
        // dead. In that case, we can drop the body of the function eagerly,
        // which may reduce the number of callers of other functions to one,
        // changing inline cost thresholds.
        if (Callee.hasLocalLinkage()) {
          // To check this we also need to nuke any dead constant uses
          // (perhaps made dead by this operation on other functions).
          Callee.removeDeadConstantUsers();
          if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
            Calls.erase(
                std::remove_if(Calls.begin() + I + 1, Calls.end(),
                               [&](const std::pair<CallBase *, int> &Call) {
                                 return Call.first->getCaller() == &Callee;
                               }),
                Calls.end());
            // Clear the body and queue the function itself for deletion when
            // we finish inlining and call graph updates.
            // Note that after this point, it is an error to do anything
            // other than use the callee's address or delete it.
            Callee.dropAllReferences();
            assert(find(DeadFunctions, &Callee) == DeadFunctions.end() &&
                   "Cannot cause a function to become dead twice!");
            DeadFunctions.push_back(&Callee);
          }
        }
        return IR;
      };
      // Capture the context of CB before inlining, as a successful inlining
      // may change that context, and we want to report success or failure in
      // the original context.
      auto DLoc = CB->getDebugLoc();
      auto *Block = CB->getParent();

      auto Outcome = DoInline();
      if (!Outcome.isSuccess()) {
        using namespace ore;
        setInlineRemark(*CB, std::string(Outcome.getFailureReason()) + "; " +
                                 inlineCostStr(*OIC));
        ORE.emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
                 << NV("Callee", &Callee) << " will not be inlined into "
                 << NV("Caller", &F) << ": "
                 << NV("Reason", Outcome.getFailureReason());
        });
        continue;
      }

      emitInlinedInto(ORE, DLoc, Block, Callee, F, *OIC);
    }

    // Back the call index up by one to put us in a good position to go
    // around the outer loop.
    --I;

    if (!DidInline)
      continue;
    Changed = true;

    // Add all the inlined callees' edges as ref edges to the caller. These
    // are by definition trivial edges as we always have *some* transitive
    // ref edge chain. While in some cases these edges are direct calls
    // inside the callee, they have to be modeled in the inliner as reference
    // edges as there may be a reference edge anywhere along the chain from
    // the current caller to the callee that causes the whole thing to appear
    // like a (transitive) reference edge that will require promotion to a
    // call edge below.
    for (Function *InlinedCallee : InlinedCallees) {
      LazyCallGraph::Node &CalleeN = *CG.lookup(*InlinedCallee);
      for (LazyCallGraph::Edge &E : *CalleeN)
        RC->insertTrivialRefEdge(N, E.getNode());
    }
    // At this point, since we have made changes, we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");
    RC = &C->getOuterRefSCC();

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the
    // SCC mutation, we will revisit this function and potentially re-inline.
    // If we do, and that re-inlining also has the potential to mutate the
    // SCC structure, the infinite inlining problem can manifest through
    // infinite SCC splits and merges. To avoid this, we capture the
    // originating caller node and the SCC containing the call edge. This is
    // a slight over-approximation of the possible inlining decisions that
    // must be avoided, but is relatively efficient to store. We use
    // C != OldC to know when a new SCC is generated and the original SCC may
    // be generated via merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // into the same one as itself, and the original SCC will be added into
    // UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history; we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();
  }

  // Now that we've finished inlining all of the calls across this SCC,
  // delete all of the trivially dead functions, updating the call graph and
  // the CGSCC pass manager in the process.
  //
  // Note that this walks a pointer set which has non-deterministic order but
  // that is OK as all we do is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(DeadC, CG)
            .getManager();
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't
    // visit them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);
    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  // Even if we change the IR, we update the core CGSCC data structures and
  // so can preserve the proxy to the function analysis manager.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  return PA;
}