//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to perform inlining without
// missing any calls and while keeping the call graph updated. The decisions
// about which calls are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

// This statistic tracks the number of times that, when attempting to inline a
// function A into B, we analyze the callers of B to see whether inlining B
// into them first would be more profitable and should block this inline step.
STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

namespace {

enum class InlinerFunctionImportStatsOpts {
  No = 0,
  Basic = 1,
  Verbose = 2,
};

} // end anonymous namespace

static cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats(
    "inliner-function-import-stats",
    cl::init(InlinerFunctionImportStatsOpts::No),
    cl::values(clEnumValN(InlinerFunctionImportStatsOpts::Basic, "basic",
                          "basic statistics"),
               clEnumValN(InlinerFunctionImportStatsOpts::Verbose, "verbose",
                          "printing of statistics for each inlined function")),
    cl::Hidden, cl::desc("Enable inliner stats for imported functions"));

/// Flag to add inline messages as callsite attributes 'inline-remark'.
static cl::opt<bool>
    InlineRemarkAttribute("inline-remark-attribute", cl::init(false),
                          cl::Hidden,
                          cl::desc("Enable adding an inline-remark attribute to"
                                   " call sites that the inliner processed but"
                                   " decided not to inline"));

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should always explicitly
/// call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, allocas are often no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
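///
/// As an illustrative sketch (hypothetical IR, not taken from this pass's
/// tests): if inlining two different calls into the same caller introduces
///
///   %a = alloca [8 x i32]   ; from the first inlined callee
///   %b = alloca [8 x i32]   ; from the second inlined callee
///
/// then the two allocas have disjoint lifetimes, and %b can simply be
/// replaced with %a, halving the stack space the backend must reserve for
/// them.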
static void mergeInlinedArrayAllocas(
    Function *Caller, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if CS was inlined from some other
  // call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas, but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged
  // with a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size(); AllocaNo != e;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one. Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function. Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      unsigned Align1 = AI->getAlignment(),
               Align2 = AvailableAlloca->getAlignment();

      // The available alloca has to be in the right function, not in some
      // other function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      LLVM_DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
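      // (The dbg.declare users are found by walking the metadata wrapper
      // chain: the alloca's LocalAsMetadata, its MetadataAsValue, and then
      // that value's users.)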
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 != Align2) {
        if (!Align1 || !Align2) {
          const DataLayout &DL = Caller->getParent()->getDataLayout();
          unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());

          Align1 = Align1 ? Align1 : TypeAlign;
          Align2 = Align2 ? Align2 : TypeAlign;
        }

        if (Align1 > Align2)
          AvailableAlloca->setAlignment(MaybeAlign(AI->getAlignment()));
      }

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca, either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and
    // mark it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult InlineCallIfPossible(
    CallSite CS, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CS, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if deferring the inlining of \p CS is warranted because
/// inlining the caller into its own callers first is expected to be more
/// beneficial. \p IC is the estimated inline cost associated with callsite
/// \p CS. \p TotalSecondaryCost will be set to the estimated cost of inlining
/// the caller if \p CS is suppressed for inlining.
static bool
shouldBeDeferred(Function *Caller, CallSite CS, InlineCost IC,
                 int &TotalSecondaryCost,
                 function_ref<InlineCost(CallSite CS)> GetInlineCost) {
  // For now we only handle local or inline functions.
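  // ("Inline functions" here means linkonce_odr definitions, which is what
  // C++ inline functions and function templates typically lower to; see the
  // longer comment below.)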
  if (!Caller->hasLocalLinkage() && !Caller->hasLinkOnceODRLinkage())
    return false;
  // If the cost of inlining CS is non-positive, it is not going to prevent the
  // caller from being inlined into its callers and hence we don't need to
  // defer.
  if (IC.getCost() <= 0)
    return false;
  // Try to detect the case where the current inlining candidate caller (call
  // it B) is a static or linkonce-ODR function and is an inlining candidate
  // elsewhere, and the current candidate callee (call it C) is large enough
  // that inlining it into B would make B too big to inline later. In these
  // circumstances it may be best not to inline C into B, but to inline B into
  // its callers.
  //
  // This only applies to static and linkonce-ODR functions because those are
  // expected to be available for inlining in the translation units where they
  // are used. Thus we will always have the opportunity to make local inlining
  // decisions. Importantly, the linkonce-ODR linkage covers inline functions
  // and templates in C++.
  //
  // FIXME: All of this logic should be sunk into getInlineCost. It relies on
  // the internal implementation of the inline cost metrics rather than
  // treating them as truly abstract units etc.
  TotalSecondaryCost = 0;
  // The candidate cost to be imposed upon the current function.
  int CandidateCost = IC.getCost() - 1;
  // If the caller has local linkage and can be inlined into all its callers,
  // we can apply a huge negative bonus to TotalSecondaryCost.
  bool ApplyLastCallBonus = Caller->hasLocalLinkage() && !Caller->hasOneUse();
  // This bool tracks what happens if we DO inline C into B.
  bool InliningPreventsSomeOuterInline = false;
  for (User *U : Caller->users()) {
    // If the caller will not be removed (either because it does not have
    // local linkage or because the LastCallToStaticBonus has already been
    // applied), then we can exit the loop early.
    if (!ApplyLastCallBonus && TotalSecondaryCost >= IC.getCost())
      return false;
    CallSite CS2(U);

    // If this isn't a call to Caller (it could be some other sort
    // of reference), skip it. Such references will prevent the caller
    // from being removed.
    if (!CS2 || CS2.getCalledFunction() != Caller) {
      ApplyLastCallBonus = false;
      continue;
    }

    InlineCost IC2 = GetInlineCost(CS2);
    ++NumCallerCallersAnalyzed;
    if (!IC2) {
      ApplyLastCallBonus = false;
      continue;
    }
    if (IC2.isAlways())
      continue;

    // See if inlining of the original callsite would erase the cost delta of
    // this callsite. We subtract off the penalty for the call instruction,
    // which we would be deleting.
    if (IC2.getCostDelta() <= CandidateCost) {
      InliningPreventsSomeOuterInline = true;
      TotalSecondaryCost += IC2.getCost();
    }
  }
  // If all outer calls to Caller would get inlined, the cost for the last
  // one is set very low by getInlineCost, in anticipation that Caller will
  // be removed entirely. We did not account for this above unless there
  // is only one caller of Caller.
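  // (LastCallToStaticBonus is a large constant credit, applied in
  // anticipation of the caller's body disappearing entirely once its last
  // remaining call site is inlined.)
  //
  // As a hedged numeric sketch of the decision below: suppose inlining C into
  // B costs 100. If B has two callers into which inlining B would cost 40
  // each, TotalSecondaryCost is 80 < 100 and we defer; if each would instead
  // cost 60, TotalSecondaryCost is 120 >= 100 and we do not defer.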
  if (ApplyLastCallBonus)
    TotalSecondaryCost -= InlineConstants::LastCallToStaticBonus;

  if (InliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost())
    return true;

  return false;
}

static std::basic_ostream<char> &operator<<(std::basic_ostream<char> &R,
                                            const ore::NV &Arg) {
  return R << Arg.Val;
}

template <class RemarkT>
RemarkT &operator<<(RemarkT &&R, const InlineCost &IC) {
  using namespace ore;
  if (IC.isAlways()) {
    R << "(cost=always)";
  } else if (IC.isNever()) {
    R << "(cost=never)";
  } else {
    R << "(cost=" << ore::NV("Cost", IC.getCost())
      << ", threshold=" << ore::NV("Threshold", IC.getThreshold()) << ")";
  }
  if (const char *Reason = IC.getReason())
    R << ": " << ore::NV("Reason", Reason);
  return R;
}

static std::string inlineCostStr(const InlineCost &IC) {
  std::stringstream Remark;
  Remark << IC;
  return Remark.str();
}

/// Return the cost only if the inliner should attempt to inline at the given
/// CallSite. If we return the cost, we will emit an optimization remark later
/// using that cost, so we won't do so from this function.
static Optional<InlineCost>
shouldInline(CallSite CS, function_ref<InlineCost(CallSite CS)> GetInlineCost,
             OptimizationRemarkEmitter &ORE) {
  using namespace ore;

  InlineCost IC = GetInlineCost(CS);
  Instruction *Call = CS.getInstruction();
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  if (IC.isAlways()) {
    LLVM_DEBUG(dbgs() << "    Inlining " << inlineCostStr(IC)
                      << ", Call: " << *CS.getInstruction() << "\n");
    return IC;
  }

  if (IC.isNever()) {
    LLVM_DEBUG(dbgs() << "    NOT Inlining " << inlineCostStr(IC)
                      << ", Call: " << *CS.getInstruction() << "\n");
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline", Call)
             << NV("Callee", Callee) << " not inlined into "
             << NV("Caller", Caller) << " because it should never be inlined "
             << IC;
    });
    return IC;
  }

  if (!IC) {
    LLVM_DEBUG(dbgs() << "    NOT Inlining " << inlineCostStr(IC)
                      << ", Call: " << *CS.getInstruction() << "\n");
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "TooCostly", Call)
             << NV("Callee", Callee) << " not inlined into "
             << NV("Caller", Caller) << " because too costly to inline " << IC;
    });
    return IC;
  }

  int TotalSecondaryCost = 0;
  if (shouldBeDeferred(Caller, CS, IC, TotalSecondaryCost, GetInlineCost)) {
    LLVM_DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction()
                      << " Cost = " << IC.getCost()
                      << ", outer Cost = " << TotalSecondaryCost << '\n');
    ORE.emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "IncreaseCostInOtherContexts",
                                      Call)
             << "Not inlining. Cost of inlining " << NV("Callee", Callee)
             << " increases the cost of inlining " << NV("Caller", Caller)
             << " in other contexts";
    });

    // IC is positive here and would convert to true, so return None instead
    // to signal that this call site should not be inlined right now. The
    // returned value will not be inspected to build an error message.
    return None;
  }

  LLVM_DEBUG(dbgs() << "    Inlining " << inlineCostStr(IC)
                    << ", Call: " << *CS.getInstruction() << '\n');
  return IC;
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
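/// For example (a hypothetical history, not tied to any particular test):
/// with InlineHistory = {{B, -1}, {C, 0}}, ID 1 describes a call site created
/// by inlining C, where C's own call site came from inlining B; the walk
/// checks C, then B, then stops at -1.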
static bool InlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static void emit_inlined_into(OptimizationRemarkEmitter &ORE, DebugLoc &DLoc,
                              const BasicBlock *Block, const Function &Callee,
                              const Function &Caller, const InlineCost &IC) {
  ORE.emit([&]() {
    bool AlwaysInline = IC.isAlways();
    StringRef RemarkName = AlwaysInline ? "AlwaysInline" : "Inlined";
    return OptimizationRemark(DEBUG_TYPE, RemarkName, DLoc, Block)
           << ore::NV("Callee", &Callee) << " inlined into "
           << ore::NV("Caller", &Caller) << " with " << IC;
  });
}

static void setInlineRemark(CallSite &CS, StringRef Message) {
  if (!InlineRemarkAttribute)
    return;

  Attribute Attr = Attribute::get(CS->getContext(), "inline-remark", Message);
  CS.addAttribute(AttributeList::FunctionIndex, Attr);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallSite CS)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        CallSite CS(cast<Value>(&I));
        // If this isn't a call, or if it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never
        // inline it. If it is an indirect call, inlining may resolve it to be
        // a direct call, so we keep it.
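        // (For example, a call through a function pointer has no known callee
        // here; if inlining elsewhere later replaces that pointer with a
        // specific function, the call becomes a direct-call candidate on a
        // subsequent iteration.)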
        if (Function *Callee = CS.getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(CS, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CS.getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no call sites in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, &GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate until no local change: inlining can cause indirect calls to
    // become direct calls, exposing new inlining opportunities.
    // CallSites may be modified as we go, so a range-based for loop cannot be
    // used here.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      Instruction *Instr = CS.getInstruction();

      bool IsTriviallyDead =
          isInstructionTriviallyDead(Instr, &GetTLI(*Caller));

      int InlineHistoryID;
      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline history for the call site does not include the
        // callee itself. If it does, we'd be recursively inlining the same
        // function, which would produce the same call sites again and cause
        // us to inline infinitely.
        InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CS, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand. With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      Optional<InlineCost> OIC = shouldInline(CS, GetInlineCost, ORE);
      // If shouldInline() returned no cost, this call site is deferred in
      // favor of inlining the caller into its own callers first.
      if (!OIC.hasValue()) {
        setInlineRemark(CS, "deferred");
        continue;
      }

      if (!OIC.getValue()) {
        // shouldInline() returned an inline cost that says this callsite
        // should not be inlined; record the reason.
        setInlineRemark(CS, inlineCostStr(*OIC));
        continue;
      }

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.
      // This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << *Instr << "\n");
        // Update the call graph by deleting the edge from Caller to Callee.
        setInlineRemark(CS, "trivially dead");
        CG[Caller]->removeCallEdgeFor(*cast<CallBase>(CS.getInstruction()));
        Instr->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get the DebugLoc to report: CS will be invalid after inlining.
        DebugLoc DLoc = CS->getDebugLoc();
        BasicBlock *Block = CS.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = InlineCallIfPossible(
            CS, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CS, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emit_inlined_into(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (Value *Ptr : InlineInfo.InlinedCalls)
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
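      // (Removal sketch: swap-with-back plus pop_back is O(1) but reorders
      // the list, while erase preserves the relative order, and thus the
      // barrier, at O(n) cost.)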
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [this](CallSite CS) { return getInlineCost(CS); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions from the module and the call graph. If
/// \p AlwaysInlineOnly is set, only functions marked always-inline are
/// considered for removal.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node so that it is removed from the call graph and deleted
    // below.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan all of the functions, looking for ones that should now be removed
  // from the program. Insert the dead ones into the FunctionsToRemove vector.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups, so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
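    // (COMDAT groups must be dropped atomically: if any member of a group is
    // still live, none of its members may be removed.)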
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that iterating over a non-stable order here is fine; it doesn't
  // matter in which order the functions are deleted.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlinerPass::~InlinerPass() {
  if (ImportedFunctionsStats) {
    assert(InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No);
    ImportedFunctionsStats->dump(InlinerFunctionImportStats ==
                                 InlinerFunctionImportStatsOpts::Verbose);
  }
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG).getManager();
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAM.getCachedResult<ProfileSummaryAnalysis>(M);

  if (!ImportedFunctionsStats &&
      InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) {
    ImportedFunctionsStats =
        std::make_unique<ImportedFunctionsInliningStatistics>();
    ImportedFunctionsStats->setModuleInfo(M);
  }

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can gracefully handle the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they become
  // too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on this
  // bottom-up behavior. As a consequence, with heavily connected *SCCs* of
  // functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the SCC
  // and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a superlinear fashion.
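  //
  // (As a small illustration, consider a hypothetical pair of mutually
  // recursive functions f and g: processing all of f's and g's original call
  // sites once before visiting any call site created by inlining keeps the
  // growth spread roughly evenly across f and g, instead of compounding in
  // whichever function happens to be visited first.)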
  SmallVector<std::pair<CallSite, int>, 16> Calls;

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto CS = CallSite(&I))
        if (Function *Callee = CS.getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push_back({CS, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(CS, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CS.getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variables for the current SCC and RefSCC.
  auto *C = &InitialC;
  auto *RC = &C->getOuterRefSCC();

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Loop forward over all of the calls. Note that we cannot cache the size as
  // inlining can introduce new calls that need to be processed.
  for (int i = 0; i < (int)Calls.size(); ++i) {
    // We expect the calls to typically be batched with sequences of calls that
    // have the same caller, so we first set up some shared infrastructure for
    // this caller. We also do any pruning we can at this layer on the caller
    // alone.
    Function &F = *Calls[i].first.getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C)
      continue;
    if (F.hasOptNone()) {
      setInlineRemark(Calls[i].first, "optnone attribute");
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n");

    // Get a FunctionAnalysisManager via a proxy for this particular node. We
    // do this each time we visit a node as the SCC may have changed and as
    // we're going to mutate this particular function we want to make sure the
    // proxy is in place to forward any invalidation events.
    // We can use the manager we get here for looking up results for functions
    // other than this node, however, because those functions aren't going to
    // be mutated by this pass.
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG)
            .getManager();

    // Get the remarks emission analysis for the caller.
    auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);

    std::function<AssumptionCache &(Function &)> GetAssumptionCache =
        [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };
    auto GetBFI = [&](Function &F) -> BlockFrequencyInfo & {
      return FAM.getResult<BlockFrequencyAnalysis>(F);
    };
    auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
      return FAM.getResult<TargetLibraryAnalysis>(F);
    };

    auto GetInlineCost = [&](CallSite CS) {
      Function &Callee = *CS.getCalledFunction();
      auto &CalleeTTI = FAM.getResult<TargetIRAnalysis>(Callee);
      bool RemarksEnabled =
          Callee.getContext().getDiagHandlerPtr()->isMissedOptRemarkEnabled(
              DEBUG_TYPE);
      return getInlineCost(cast<CallBase>(*CS.getInstruction()), Params,
                           CalleeTTI, GetAssumptionCache, {GetBFI}, GetTLI, PSI,
                           RemarksEnabled ? &ORE : nullptr);
    };

    // Now process as many calls as we have within this caller in the
    // sequence. We bail out as soon as the caller has to change so we can
    // update the call graph and prepare the context of that new caller.
    bool DidInline = false;
    for (; i < (int)Calls.size() && Calls[i].first.getCaller() == &F; ++i) {
      int InlineHistoryID;
      CallSite CS;
      std::tie(CS, InlineHistoryID) = Calls[i];
      Function &Callee = *CS.getCalledFunction();

      if (InlineHistoryID != -1 &&
          InlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        setInlineRemark(CS, "recursive");
        continue;
      }

      // Check if this inlining may repeat breaking an SCC apart that has
      // already been split once before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(CS, "recursive SCC split");
        continue;
      }

      Optional<InlineCost> OIC = shouldInline(CS, GetInlineCost, ORE);
      // Check whether we want to inline this callsite.
      if (!OIC.hasValue()) {
        setInlineRemark(CS, "deferred");
        continue;
      }

      if (!OIC.getValue()) {
        // shouldInline() returned an inline cost that says this callsite
        // should not be inlined; record the reason.
        setInlineRemark(CS, inlineCostStr(*OIC));
        continue;
      }

      // Set up the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, &GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CS.getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      // Get the DebugLoc to report: CS will be invalid after inlining.
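      // (The CallSite wraps the call instruction itself, which a successful
      // InlineFunction() erases, so anything needed for remarks must be
      // captured first.)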
      DebugLoc DLoc = CS->getDebugLoc();
      BasicBlock *Block = CS.getParent();

      using namespace ore;

      InlineResult IR = InlineFunction(CS, IFI);
      if (!IR.isSuccess()) {
        setInlineRemark(CS, std::string(IR.getFailureReason()) + "; " +
                                inlineCostStr(*OIC));
        ORE.emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc, Block)
                 << NV("Callee", &Callee) << " will not be inlined into "
                 << NV("Caller", &F) << ": "
                 << NV("Reason", IR.getFailureReason());
        });
        continue;
      }
      DidInline = true;
      InlinedCallees.insert(&Callee);

      ++NumInlined;

      emit_inlined_into(ORE, DLoc, Block, Callee, F, *OIC);

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});
        for (CallSite &CS : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = CS.getCalledFunction();
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration, because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(CS))
              NewCallee = CS.getCalledFunction();
          }
          if (NewCallee)
            if (!NewCallee->isDeclaration())
              Calls.push_back({CS, NewHistoryID});
        }
      }

      if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
        ImportedFunctionsStats->recordInline(F, Callee);

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions, check whether this makes the callee trivially
      // dead. In that case, we can drop the body of the function eagerly,
      // which may reduce the number of callers of other functions to one,
      // changing inline cost thresholds.
      if (Callee.hasLocalLinkage()) {
        // To check this we also need to nuke any dead constant uses (perhaps
        // made dead by this operation on other functions).
        Callee.removeDeadConstantUsers();
        if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
          Calls.erase(
              std::remove_if(Calls.begin() + i + 1, Calls.end(),
                             [&Callee](const std::pair<CallSite, int> &Call) {
                               return Call.first.getCaller() == &Callee;
                             }),
              Calls.end());
          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(find(DeadFunctions, &Callee) == DeadFunctions.end() &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
        }
      }
    }

    // Back the call index up by one to put us in a good position to go around
    // the outer loop.
    --i;

    if (!DidInline)
      continue;
    Changed = true;

    // Add all the inlined callees' edges as ref edges to the caller. These
    // are by definition trivial edges as we always have *some* transitive
    // ref edge chain.
    // While in some cases these edges are direct calls inside the
    // callee, they have to be modeled in the inliner as reference edges as
    // there may be a reference edge anywhere along the chain from the current
    // caller to the callee that causes the whole thing to appear like
    // a (transitive) reference edge that will require promotion to a call edge
    // below.
    for (Function *InlinedCallee : InlinedCallees) {
      LazyCallGraph::Node &CalleeN = *CG.lookup(*InlinedCallee);
      for (LazyCallGraph::Edge &E : *CalleeN)
        RC->insertTrivialRefEdge(N, E.getNode());
    }

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");
    RC = &C->getOuterRefSCC();

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If
    // we do, and that re-inlining also has the potential to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight
    // overapproximation of the possible inlining decisions that must be
    // avoided, but is relatively efficient to store. We use C != OldC to know
    // when a new SCC is generated and the original SCC may be generated via
    // merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged back
    // into the same one as itself, in which case the original SCC will be
    // added into UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history; we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that the order of this walk doesn't matter: all we do here is delete
  // things and add pointers to unordered sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
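    // (Clearing the function- and SCC-level analyses before mutating the
    // graph keeps the analysis managers from holding dangling pointers into
    // the function we are about to delete.)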
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FunctionAnalysisManager &FAM =
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(DeadC, CG)
            .getManager();
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);
    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  return PA;
}