//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph. The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InlineOrder.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ReplayInlineAdvisor.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed.
/// To experiment with disabling it and relying fully on lifetime marker based
/// stack coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

static cl::opt<int> IntraSCCCostMultiplier(
    "intra-scc-cost-multiplier", cl::init(2), cl::Hidden,
    cl::desc(
        "Cost multiplier to multiply onto inlined call sites where the "
        "new call was previously an intra-SCC call (not relevant when the "
        "original call was already intra-SCC). This can accumulate over "
        "multiple inlinings (e.g. if a call site already had a cost "
        "multiplier and one of its inlined calls was also subject to "
        "this, the inlined call would have the original multiplier "
        "multiplied by intra-scc-cost-multiplier). This is to prevent tons of "
        "inlining through a child SCC which can cause terrible compile times"));

/// A flag for testing, so we can print the content of the advisor when running
/// it as part of the default (e.g. -O3) pipeline.
static cl::opt<bool> KeepAdvisorForPrinting("keep-inline-advisor-for-printing",
                                            cl::init(false), cl::Hidden);

extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;

static cl::opt<std::string> CGSCCInlineReplayFile(
    "cgscc-inline-replay", cl::init(""), cl::value_desc("filename"),
    cl::desc(
        "Optimization remarks file containing inline remarks to be replayed "
        "by cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Scope> CGSCCInlineReplayScope(
    "cgscc-inline-replay-scope",
    cl::init(ReplayInlinerSettings::Scope::Function),
    cl::values(clEnumValN(ReplayInlinerSettings::Scope::Function, "Function",
                          "Replay on functions that have remarks associated "
                          "with them (default)"),
               clEnumValN(ReplayInlinerSettings::Scope::Module, "Module",
                          "Replay on the entire module")),
    cl::desc("Whether inline replay should be applied to the entire "
             "Module or just the Functions (default) that are present as "
             "callers in remarks during cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlinerSettings::Fallback> CGSCCInlineReplayFallback(
    "cgscc-inline-replay-fallback",
    cl::init(ReplayInlinerSettings::Fallback::Original),
    cl::values(
        clEnumValN(
            ReplayInlinerSettings::Fallback::Original, "Original",
            "All decisions not in replay are sent to the original advisor "
            "(default)"),
        clEnumValN(ReplayInlinerSettings::Fallback::AlwaysInline,
                   "AlwaysInline", "All decisions not in replay are inlined"),
        clEnumValN(ReplayInlinerSettings::Fallback::NeverInline, "NeverInline",
                   "All decisions not in replay are not inlined")),
    cl::desc(
        "How cgscc inline replay treats sites that don't come from the replay. "
        "Original: defers to original advisor, AlwaysInline: inline all sites "
        "not in replay, NeverInline: inline no sites not in replay"),
    cl::Hidden);

static cl::opt<CallSiteFormat::Format> CGSCCInlineReplayFormat(
    "cgscc-inline-replay-format",
    cl::init(CallSiteFormat::Format::LineColumnDiscriminator),
    cl::values(
        clEnumValN(CallSiteFormat::Format::Line, "Line", "<Line Number>"),
        clEnumValN(CallSiteFormat::Format::LineColumn, "LineColumn",
                   "<Line Number>:<Column Number>"),
        clEnumValN(CallSiteFormat::Format::LineDiscriminator,
                   "LineDiscriminator", "<Line Number>.<Discriminator>"),
        clEnumValN(CallSiteFormat::Format::LineColumnDiscriminator,
                   "LineColumnDiscriminator",
                   "<Line Number>:<Column Number>.<Discriminator> (default)")),
    cl::desc("How cgscc inline replay file is formatted"), cl::Hidden);

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should always explicitly
/// call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site. If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs. One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type. These are usually not promoted because someone is using a
/// variable index into them. These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all. This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A.
  // Doing this makes an alloca available for reuse and makes a callsite (C)
  // available for inlining. When we process the C call site we don't want to
  // do any alloca merging between X and Y because their scopes are not
  // disjoint. We could make this smarter by keeping track of the inline
  // history for each alloca in the InlinedArrayAllocas, but this isn't likely
  // to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged
  // with a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.
    // Note that we have to be careful not to reuse the same "available" alloca
    // for multiple different allocas that we just inlined; we use the
    // 'UsedAllocas' set to keep track of which "available" allocas are being
    // used by this function. Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      Align Align1 = AI->getAlign();
      Align Align2 = AvailableAlloca->getAlign();

      // The available alloca has to be in the right function, not in some
      // other function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare
      // success!
      LLVM_DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 > Align2)
        AvailableAlloca->setAlignment(AI->getAlign());

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and
    // mark it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
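  // Each entry pairs a call site with an index into the InlineHistory vector
  // declared below; an index of -1 marks a call site that was present in the
  // original function rather than one exposed by an earlier inline in this
  // run.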
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never
        // inline it. If it is an indirect call, inlining may resolve it to be
        // a direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause
    // indirect calls to become direct calls.
    // CallSites may be modified inside, so a ranged for loop cannot be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline history for the call site does not include the
        // callee itself. If it does, we'd be recursively inlining the same
        // function, which would provide the same callsites, which would cause
        // us to infinitely inline.
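        // For example, if inlining B into A exposed a new call to B, that new
        // call site's history chain already contains B, so the check below
        // skips it instead of inlining B over and over again.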
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand. With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determines that we should not inline this call site,
      // skip it.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get DebugLoc to report. CB will be invalid after Inliner.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedIntoBasedOnCost(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates in the inline candidates. This
          // could happen when a callsite is simplified to reuse the return
          // value of another callsite during function cloning, in which case
          // the other callsite will be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; this
          // could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Remove the node for the callee from the call graph and delete it.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be
  // removed from the program. Insert the dead ones in the FunctionsToRemove
  // set.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions.
    // This is a bit of a hack to share code between here and the InlineAlways
    // pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here to do this; it doesn't matter which order the functions are deleted
  // in.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        FunctionAnalysisManager &FAM, Module &M) {
  if (OwnedAdvisor)
    return *OwnedAdvisor;

  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC
    // pass, for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass
    // runs. It also uses just the default InlineParams.
    // In this case, we need to use the provided FAM, which is valid for the
    // duration of the inliner pass, and thus the lifetime of the owned
    // advisor. The one we would get from the MAM can be invalidated as a
    // result of the inliner's activity.
    OwnedAdvisor =
        std::make_unique<DefaultInlineAdvisor>(M, FAM, getInlineParams());

    if (!CGSCCInlineReplayFile.empty())
      OwnedAdvisor = getReplayInlineAdvisor(
          M, FAM, M.getContext(), std::move(OwnedAdvisor),
          ReplayInlinerSettings{CGSCCInlineReplayFile,
                                CGSCCInlineReplayScope,
                                CGSCCInlineReplayFallback,
                                {CGSCCInlineReplayFormat}},
          /*EmitRemarks=*/true);

    return *OwnedAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
  Advisor.onPassEntry();

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(&InitialC); });

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end. A PriorityInlineOrder can optionally be used here, in which case
  // a smaller callee has a higher priority to be inlined.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they
  // become too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on
  // this bottom-up behavior. As a consequence, with heavily connected *SCCs*
  // of functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the
  // SCC and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a super linear fashion.
  DefaultInlineOrder<std::pair<CallBase *, int>> Calls;

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned
    // value after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls.push({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls.empty())
    return PreservedAnalyses::all();

  // Capture updatable variable for the current SCC.
  auto *C = &InitialC;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones
  // that got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Track potentially dead non-local functions with comdats to see if they
  // can be deleted as a batch after inlining.
  SmallVector<Function *, 4> DeadFunctionsInComdats;

  // Loop forward over all of the calls.
  while (!Calls.empty()) {
    // We expect the calls to typically be batched with sequences of calls
    // that have the same caller, so we first set up some shared
    // infrastructure for this caller. We also do any pruning we can at this
    // layer on the caller alone.
    Function &F = *Calls.front().first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C) {
      Calls.pop();
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"
                      << "    Function size: " << F.getInstructionCount()
                      << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the
    // sequence. We bail out as soon as the caller has to change so we can
    // update the call graph and prepare the context of that new caller.
    bool DidInline = false;
    while (!Calls.empty() && Calls.front().first->getCaller() == &F) {
      auto P = Calls.pop();
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        LLVM_DEBUG(dbgs() << "Skipping inlining due to history: " << F.getName()
                          << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check whether this inlining may repeatedly break apart an SCC that
      // has already been split once before.
      // In that case, inlining here may trigger infinite inlining, much like
      // what is prevented within the inliner itself by the InlineHistory
      // above, but spread across CGSCC iterations and thus hidden from the
      // full inline history.
      LazyCallGraph::SCC *CalleeSCC = CG.lookupSCC(*CG.lookup(Callee));
      if (CalleeSCC == C && UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      std::unique_ptr<InlineAdvice> Advice =
          Advisor.getAdvice(*CB, OnlyMandatory);

      // Check whether we want to inline this callsite.
      if (!Advice)
        continue;

      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

      int CBCostMult =
          getStringFnAttrAsInt(
              *CB, InlineConstants::FunctionInlineCostMultiplierAttributeName)
              .getValueOr(1);

      // Set up the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR =
          InlineFunction(*CB, IFI, &FAM.getResult<AAManager>(*CB->getCaller()));
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      LLVM_DEBUG(dbgs() << "    Size after inlining: "
                        << F.getInstructionCount() << "\n");

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          assert(!(NewCallee && NewCallee->isIntrinsic()) &&
                 "Intrinsic calls should not be tracked.");
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee) {
            if (!NewCallee->isDeclaration()) {
              Calls.push({ICB, NewHistoryID});
              // Continually inlining through an SCC can result in huge
              // compile times and bloated code since we arbitrarily stop at
              // some point when the inliner decides it's not profitable to
              // inline anymore. We attempt to mitigate this by making these
              // calls exponentially more expensive.
              // This doesn't apply to calls in the same SCC since if we do
              // inline through the SCC the function will end up being
              // self-recursive, which the inliner bails out on, and inlining
              // within an SCC is necessary for performance.
              if (CalleeSCC != C &&
                  CalleeSCC == CG.lookupSCC(CG.get(*NewCallee))) {
                Attribute NewCBCostMult = Attribute::get(
                    M.getContext(),
                    InlineConstants::FunctionInlineCostMultiplierAttributeName,
                    itostr(CBCostMult * IntraSCCCostMultiplier));
                ICB->addFnAttr(NewCBCostMult);
              }
            }
          }
        }
      }

      // Merge the attributes based on the inlining.
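      // This keeps the caller's function-level attributes consistent with the
      // code that was just inlined into it from the callee.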
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions or discardable functions without comdats, check
      // whether this makes the callee trivially dead. In that case, we can
      // drop the body of the function eagerly, which may reduce the number of
      // callers of other functions to one, changing inline cost thresholds.
      // Non-local discardable functions with comdats are checked later on.
      bool CalleeWasDeleted = false;
      if (Callee.isDiscardableIfUnused() && Callee.hasZeroLiveUses() &&
          !CG.isLibFunction(Callee)) {
        if (Callee.hasLocalLinkage() || !Callee.hasComdat()) {
          Calls.erase_if([&](const std::pair<CallBase *, int> &Call) {
            return Call.first->getCaller() == &Callee;
          });
          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(!is_contained(DeadFunctions, &Callee) &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        } else {
          DeadFunctionsInComdats.push_back(&Callee);
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    if (!DidInline)
      continue;
    Changed = true;

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed
    // and as we're going to mutate this particular function we want to make
    // sure the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForCGSCCPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If
    // we do, and that re-inlining also has the potential to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight
    // over-approximation of the possible inlining decisions that must be
    // avoided, but is relatively efficient to store. We use C != OldC to know
    // when a new SCC is generated and the original SCC may be generated via
    // merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // into the same one as itself, and the original SCC will be added into
    // UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history; we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();

    // Invalidate analyses for this function now so that we don't have to
    // invalidate analyses for all functions in this SCC later.
    FAM.invalidate(F, PreservedAnalyses::none());
  }

  // We must ensure that we only delete functions with comdats if every
  // function in the comdat is going to be deleted.
  if (!DeadFunctionsInComdats.empty()) {
    filterDeadComdatFunctions(DeadFunctionsInComdats);
    for (auto *Callee : DeadFunctionsInComdats)
      Callee->dropAllReferences();
    DeadFunctions.append(DeadFunctionsInComdats);
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the
  // CGSCC pass manager in the process.
  //
  // Note that this walks a pointer set which has non-deterministic order but
  // that is OK as all we do is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // If the updated SCC was the one containing the deleted function, clear
    // it.
    if (&DeadC == UR.UpdatedC)
      UR.UpdatedC = nullptr;

    // And delete the actual function from the module.
    M.getFunctionList().erase(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  // We have already invalidated all analyses on modified functions.
  PA.preserveSet<AllAnalysesOn<Function>>();
  return PA;
}

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool MandatoryFirst,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations) {
  // Run the inliner first. The theory is that we are walking bottom-up and so
  // the callees have already been fully optimized, and we want to inline them
  // into the callers so that our optimizations can reflect that.
  // For the PreLinkThinLTO pass, we disable the hot-caller heuristic for
  // sample PGO because it makes profile annotation in the backend inaccurate.
  if (MandatoryFirst)
    PM.addPass(InlinerPass(/*OnlyMandatory*/ true));
  PM.addPass(InlinerPass());
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode,
                     {CGSCCInlineReplayFile,
                      CGSCCInlineReplayScope,
                      CGSCCInlineReplayFallback,
                      {CGSCCInlineReplayFormat}})) {
    M.getContext().emitError(
        "Could not set up Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater. This will try
  // to detect when we devirtualize indirect calls and iterate the SCC passes
  // in that case to try and catch knock-on inlining or function attrs
  // opportunities. Then we add it to the module pipeline by walking the SCCs
  // in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));

  MPM.addPass(std::move(AfterCGMPM));
  MPM.run(M, MAM);

  // Discard the InlineAdvisor; a subsequent inlining session should construct
  // its own.
  auto PA = PreservedAnalyses::all();
  if (!KeepAdvisorForPrinting)
    PA.abandon<InlineAdvisorAnalysis>();
  return PA;
}

void InlinerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<InlinerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  if (OnlyMandatory)
    OS << "<only-mandatory>";
}

void ModuleInlinerWrapperPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  // Print some info about passes added to the wrapper. This is, however,
  // incomplete, as the InlineAdvisorAnalysis part (which also depends on
  // Params and Mode) isn't included.
  if (!MPM.isEmpty()) {
    MPM.printPipeline(OS, MapClassName2PassName);
    OS << ",";
  }
  OS << "cgscc(";
  if (MaxDevirtIterations != 0)
    OS << "devirt<" << MaxDevirtIterations << ">(";
  PM.printPipeline(OS, MapClassName2PassName);
  if (MaxDevirtIterations != 0)
    OS << ")";
  OS << ")";
}