//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to perform inlining without
// missing any calls, while keeping the call graph up to date.  The decisions
// about which calls are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Inliner.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InlineOrder.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CallPromotionUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <sstream>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted,
          "Number of functions deleted because all callers were found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

/// Flag to disable manual alloca merging.
///
/// Merging of allocas was originally done as a stack-size saving technique
/// prior to LLVM's code generator having support for stack coloring based on
/// lifetime markers. It is now in the process of being removed. To experiment
/// with disabling it and relying fully on lifetime marker based stack
/// coloring, you can pass this flag to LLVM.
static cl::opt<bool>
    DisableInlinedAllocaMerging("disable-inlined-alloca-merging",
                                cl::init(false), cl::Hidden);

extern cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats;

static cl::opt<std::string> CGSCCInlineReplayFile(
    "cgscc-inline-replay", cl::init(""), cl::value_desc("filename"),
    cl::desc(
        "Optimization remarks file containing inline remarks to be replayed "
        "by cgscc inlining."),
    cl::Hidden);

static cl::opt<ReplayInlineScope> CGSCCInlineReplayScope(
    "cgscc-inline-replay-scope", cl::init(ReplayInlineScope::Function),
    cl::values(clEnumValN(ReplayInlineScope::Function, "Function",
                          "Replay on functions that have remarks associated "
                          "with them (default)"),
               clEnumValN(ReplayInlineScope::Module, "Module",
                          "Replay on the entire module")),
    cl::desc("Whether inline replay should be applied to the entire "
             "Module or just the Functions (default) that are present as "
             "callers in remarks during cgscc inlining."),
    cl::Hidden);

static cl::opt<bool> InlineEnablePriorityOrder(
    "inline-enable-priority-order", cl::Hidden, cl::init(false),
    cl::desc("Enable the priority inline order for the inliner"));
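
// As an illustrative (not authoritative) example of how the replay flags
// above fit together: remarks previously emitted with -pass-remarks-output
// can be replayed module-wide along these lines (file names hypothetical):
//   opt -passes='cgscc(inline)' -cgscc-inline-replay=remarks.opt.yaml \
//       -cgscc-inline-replay-scope=Module input.bc -o out.bc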

LegacyInlinerBase::LegacyInlinerBase(char &ID) : CallGraphSCCPass(ID) {}

LegacyInlinerBase::LegacyInlinerBase(char &ID, bool InsertLifetime)
    : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should
/// always explicitly call the implementation here.
void LegacyInlinerBase::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  getAAResultsAnalysisUsage(AU);
  CallGraphSCCPass::getAnalysisUsage(AU);
}

using InlinedArrayAllocasTy = DenseMap<ArrayType *, std::vector<AllocaInst *>>;

/// Look at all of the allocas that we inlined through this call site.  If we
/// have already inlined other allocas through other calls into this function,
/// then we know that they have disjoint lifetimes and that we can merge them.
///
/// There are many heuristics possible for merging these allocas, and the
/// different options have different tradeoffs.  One thing that we *really*
/// don't want to hurt is SRoA: once inlining happens, often allocas are no
/// longer address taken and so they can be promoted.
///
/// Our "solution" for that is to only merge allocas whose outermost type is an
/// array type.  These are usually not promoted because someone is using a
/// variable index into them.  These are also often the most important ones to
/// merge.
///
/// A better solution would be to have real memory lifetime markers in the IR
/// and not have the inliner do any merging of allocas at all.  This would
/// allow the backend to do proper stack slot coloring of all allocas that
/// *actually make it to the backend*, which is really what we want.
///
/// Because we don't have this information, we do this simple and useful hack.
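// As a concrete sketch of the hack described above (the IR names are
// illustrative): if two calls inlined into the same caller each introduce a
// static
//   %buf = alloca [64 x i32]
// then the second alloca can be replaced with the first, because the two
// inlined bodies can never be live at the same time within the caller.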
static void mergeInlinedArrayAllocas(Function *Caller, InlineFunctionInfo &IFI,
                                     InlinedArrayAllocasTy &InlinedArrayAllocas,
                                     int InlineHistory) {
  SmallPtrSet<AllocaInst *, 16> UsedAllocas;

  // When processing our SCC, check to see if the call site was inlined from
  // some other call site.  For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A.  Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining.  When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint.  We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1) // Only do merging for top-level call sites in SCC.
    return;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca.  If not, remember that we had it.
  for (unsigned AllocaNo = 0, E = IFI.StaticAllocas.size(); AllocaNo != E;
       ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst *> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.  Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function.  Also, AllocasForType can be empty of course!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {
      Align Align1 = AI->getAlign();
      Align Align2 = AvailableAlloca->getAlign();

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      LLVM_DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI
                        << "\n\t\tINTO: " << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 > Align2)
        AvailableAlloca->setAlignment(AI->getAlign());

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR.  The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller.  If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static InlineResult inlineCallIfPossible(
    CallBase &CB, InlineFunctionInfo &IFI,
    InlinedArrayAllocasTy &InlinedArrayAllocas, int InlineHistory,
    bool InsertLifetime, function_ref<AAResults &(Function &)> &AARGetter,
    ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  Function *Callee = CB.getCalledFunction();
  Function *Caller = CB.getCaller();

  AAResults &AAR = AARGetter(*Callee);

  // Try to inline the function.  Get the list of static allocas that were
  // inlined.
  InlineResult IR = InlineFunction(CB, IFI, &AAR, InsertLifetime);
  if (!IR.isSuccess())
    return IR;

  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.recordInline(*Caller, *Callee);

  AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);

  if (!DisableInlinedAllocaMerging)
    mergeInlinedArrayAllocas(Caller, IFI, InlinedArrayAllocas, InlineHistory);

  return IR; // success
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
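///
/// For example (an illustrative history, not taken from a real compilation),
/// with InlineHistory = {{B, -1}, {C, 0}}, ID 1 encodes "this call site was
/// exposed by inlining C, whose own call site was exposed by inlining B at a
/// top-level site".  Then inlineHistoryIncludes(B, 1, InlineHistory) walks
/// the chain 1 -> 0 -> -1, finds B at entry 0, and returns true, so B is not
/// inlined again along that chain.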
static bool inlineHistoryIncludes(
    Function *F, int InlineHistoryID,
    const SmallVectorImpl<std::pair<Function *, int>> &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool LegacyInlinerBase::doInitialization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.setModuleInfo(CG.getModule());
  return false; // No changes to CallGraph.
}

bool LegacyInlinerBase::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return inlineCalls(SCC);
}

static bool
inlineCallsImpl(CallGraphSCC &SCC, CallGraph &CG,
                std::function<AssumptionCache &(Function &)> GetAssumptionCache,
                ProfileSummaryInfo *PSI,
                std::function<const TargetLibraryInfo &(Function &)> GetTLI,
                bool InsertLifetime,
                function_ref<InlineCost(CallBase &CB)> GetInlineCost,
                function_ref<AAResults &(Function &)> AARGetter,
                ImportedFunctionsInliningStatistics &ImportedFunctionsStats) {
  SmallPtrSet<Function *, 8> SCCFunctions;
  LLVM_DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F)
      SCCFunctions.insert(F);
    LLVM_DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallBase *, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
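  // For instance, when a top-level call site (history ID -1) to a function B
  // is inlined and exposes new call sites, an entry {B, -1} is appended and
  // the exposed sites are queued with that entry's index as their history ID
  // (see NewHistoryID below); inlineHistoryIncludes then refuses to inline B
  // again along that chain.  (B here is a hypothetical function name.)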
  SmallVector<std::pair<Function *, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F || F->isDeclaration())
      continue;

    OptimizationRemarkEmitter ORE(F);
    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        auto *CB = dyn_cast<CallBase>(&I);
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CB || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CB->getCalledFunction())
          if (Callee->isDeclaration()) {
            using namespace ore;

            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
            continue;
          }

        CallSites.push_back(std::make_pair(CB, -1));
      }
  }

  LLVM_DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
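  // For example (with hypothetical functions): if CallSites is
  // [f->g, f->s, f->h] and only s is in SCCFunctions, the swap below
  // rearranges it to [f->g, f->h, f->s], so call sites whose callees are
  // outside the SCC are processed first.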
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned I = 0; I < FirstCallInSCC; ++I)
    if (Function *F = CallSites[I].first->getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[I--], CallSites[--FirstCallInSCC]);

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, GetAssumptionCache, PSI);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate with an outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    // CallSites may be modified inside the loop, so a ranged for loop cannot
    // be used.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      auto &P = CallSites[CSi];
      CallBase &CB = *P.first;
      const int InlineHistoryID = P.second;

      Function *Caller = CB.getCaller();
      Function *Callee = CB.getCalledFunction();

      // We can only inline direct calls to non-declarations.
      if (!Callee || Callee->isDeclaration())
        continue;

      bool IsTriviallyDead = isInstructionTriviallyDead(&CB, &GetTLI(*Caller));

      if (!IsTriviallyDead) {
        // If this call site was obtained by inlining another function, verify
        // that the inline history for the call site does not already include
        // the callee itself.  If it does, we would be recursively inlining
        // the same function, which would expose the same call sites again and
        // cause us to inline infinitely.
        if (InlineHistoryID != -1 &&
            inlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory)) {
          setInlineRemark(CB, "recursive");
          continue;
        }
      }

      // FIXME for new PM: because of the old PM we currently generate ORE and
      // in turn BFI on demand.  With the new PM, the ORE dependency should
      // just become a regular analysis dependency.
      OptimizationRemarkEmitter ORE(Caller);

      auto OIC = shouldInline(CB, GetInlineCost, ORE);
      // If the policy determined that we should not inline this call site,
      // move on to the next one.
      if (!OIC)
        continue;

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (IsTriviallyDead) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead call: " << CB << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        setInlineRemark(CB, "trivially dead");
        CG[Caller]->removeCallEdgeFor(CB);
        CB.eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // Get the DebugLoc to report; CB will be invalid after inlining.
        DebugLoc DLoc = CB.getDebugLoc();
        BasicBlock *Block = CB.getParent();

        // Attempt to inline the function.
        using namespace ore;

        InlineResult IR = inlineCallIfPossible(
            CB, InlineInfo, InlinedArrayAllocas, InlineHistoryID,
            InsertLifetime, AARGetter, ImportedFunctionsStats);
        if (!IR.isSuccess()) {
          setInlineRemark(CB, std::string(IR.getFailureReason()) + "; " +
                                  inlineCostStr(*OIC));
          ORE.emit([&]() {
            return OptimizationRemarkMissed(DEBUG_TYPE, "NotInlined", DLoc,
                                            Block)
                   << NV("Callee", Callee) << " will not be inlined into "
                   << NV("Caller", Caller) << ": "
                   << NV("Reason", IR.getFailureReason());
          });
          continue;
        }
        ++NumInlined;

        emitInlinedIntoBasedOnCost(ORE, DLoc, Block, *Callee, *Caller, *OIC);

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

#ifndef NDEBUG
          // Make sure there are no duplicates among the inline candidates.
          // Duplicates could appear when a call site is simplified to reuse
          // the return value of another call site during function cloning,
          // causing the other call site to be reconsidered here.
          DenseSet<CallBase *> DbgCallSites;
          for (auto &II : CallSites)
            DbgCallSites.insert(II.first);
#endif

          for (Value *Ptr : InlineInfo.InlinedCalls) {
#ifndef NDEBUG
            assert(DbgCallSites.count(dyn_cast<CallBase>(Ptr)) == 0);
#endif
            CallSites.push_back(
                std::make_pair(dyn_cast<CallBase>(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&
          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet, as
          // doing so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        LLVM_DEBUG(dbgs() << "    -> Deleting dead function: "
                          << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the callee's node from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin() + CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

bool LegacyInlinerBase::inlineCalls(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  ACT = &getAnalysis<AssumptionCacheTracker>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return ACT->getAssumptionCache(F);
  };
  return inlineCallsImpl(
      SCC, CG, GetAssumptionCache, PSI, GetTLI, InsertLifetime,
      [&](CallBase &CB) { return getInlineCost(CB); }, LegacyAARGetter(*this),
      ImportedFunctionsStats);
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool LegacyInlinerBase::doFinalization(CallGraph &CG) {
  if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No)
    ImportedFunctionsStats.dump(InlinerFunctionImportStats ==
                                InlinerFunctionImportStatsOpts::Verbose);
  return removeDeadFunctions(CG);
}

/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
bool LegacyInlinerBase::removeDeadFunctions(CallGraph &CG,
                                            bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode *, 16> FunctionsToRemove;
  SmallVector<Function *, 16> DeadFunctionsInComdats;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node for removal from the call graph and deletion.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones into FunctionsToRemove.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (F->hasComdat()) {
        DeadFunctionsInComdats.push_back(F);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Filter out the functions whose comdats remain alive.
    filterDeadComdatFunctions(CG.getModule(), DeadFunctionsInComdats);
    // Remove the rest.
    for (Function *F : DeadFunctionsInComdats)
      RemoveCGN(CG[F]);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that iterating here in a non-stable order is fine; it does not
  // matter in which order the functions are deleted.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(
      std::unique(FunctionsToRemove.begin(), FunctionsToRemove.end()),
      FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}

InlineAdvisor &
InlinerPass::getAdvisor(const ModuleAnalysisManagerCGSCCProxy::Result &MAM,
                        FunctionAnalysisManager &FAM, Module &M) {
  if (OwnedAdvisor)
    return *OwnedAdvisor;

  auto *IAA = MAM.getCachedResult<InlineAdvisorAnalysis>(M);
  if (!IAA) {
    // It should still be possible to run the inliner as a stand-alone SCC pass,
    // for test scenarios. In that case, we default to the
    // DefaultInlineAdvisor, which doesn't need to keep state between SCC pass
    // runs. It also uses just the default InlineParams.
    // In this case, we need to use the provided FAM, which is valid for the
    // duration of the inliner pass, and thus the lifetime of the owned advisor.
    // The one we would get from the MAM can be invalidated as a result of the
    // inliner's activity.
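    // (As an illustration, this fallback is taken when the inliner runs
    // stand-alone, e.g. via "opt -passes='cgscc(inline)'", where no wrapper
    // pass has computed a module-level InlineAdvisorAnalysis result.)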
    OwnedAdvisor =
        std::make_unique<DefaultInlineAdvisor>(M, FAM, getInlineParams());

    if (!CGSCCInlineReplayFile.empty())
      OwnedAdvisor = getReplayInlineAdvisor(
          M, FAM, M.getContext(), std::move(OwnedAdvisor),
          CGSCCInlineReplayFile, CGSCCInlineReplayScope,
          /*EmitRemarks=*/true);

    return *OwnedAdvisor;
  }
  assert(IAA->getAdvisor() &&
         "Expected a present InlineAdvisorAnalysis to also have an "
         "InlineAdvisor initialized");
  return *IAA->getAdvisor();
}

PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
                                   CGSCCAnalysisManager &AM, LazyCallGraph &CG,
                                   CGSCCUpdateResult &UR) {
  const auto &MAMProxy =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(InitialC, CG);
  bool Changed = false;

  assert(InitialC.size() > 0 && "Cannot handle an empty SCC!");
  Module &M = *InitialC.begin()->getFunction().getParent();
  ProfileSummaryInfo *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(M);

  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(InitialC, CG)
          .getManager();

  InlineAdvisor &Advisor = getAdvisor(MAMProxy, FAM, M);
  Advisor.onPassEntry();

  auto AdvisorOnExit = make_scope_exit([&] { Advisor.onPassExit(); });

  // We use a single common worklist for calls across the entire SCC. We
  // process these in-order and append new calls introduced during inlining to
  // the end. PriorityInlineOrder is optional here; when it is enabled, smaller
  // callees are given higher priority for inlining.
  //
  // Note that this particular order of processing is actually critical to
  // avoid very bad behaviors. Consider *highly connected* call graphs where
  // each function contains a small amount of code and a couple of calls to
  // other functions. Because the LLVM inliner is fundamentally a bottom-up
  // inliner, it can handle gracefully the fact that these all appear to be
  // reasonable inlining candidates as it will flatten things until they become
  // too big to inline, and then move on and flatten another batch.
  //
  // However, when processing call edges *within* an SCC we cannot rely on this
  // bottom-up behavior. As a consequence, with heavily connected *SCCs* of
  // functions we can end up incrementally inlining N calls into each of
  // N functions because each incremental inlining decision looks good and we
  // don't have a topological ordering to prevent explosions.
  //
  // To compensate for this, we don't process transitive edges made immediate
  // by inlining until we've done one pass of inlining across the entire SCC.
  // Large, highly connected SCCs still lead to some amount of code bloat in
  // this model, but it is uniformly spread across all the functions in the SCC
  // and eventually they all become too large to inline, rather than
  // incrementally making a single function grow in a super linear fashion.
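  //
  // To illustrate the two orders on a hypothetical worklist: the default
  // order pops calls in the order they were pushed, while with
  // -inline-enable-priority-order a call to a 10-instruction callee is popped
  // before a call to a 1000-instruction callee, even if the latter was queued
  // first.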
  std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>> Calls;
  if (InlineEnablePriorityOrder)
    Calls = std::make_unique<PriorityInlineOrder<InlineSizePriority>>();
  else
    Calls = std::make_unique<DefaultInlineOrder<std::pair<CallBase *, int>>>();
  assert(Calls != nullptr && "Expected an initialized InlineOrder");

  // Populate the initial list of calls in this SCC.
  for (auto &N : InitialC) {
    auto &ORE =
        FAM.getResult<OptimizationRemarkEmitterAnalysis>(N.getFunction());
    // We want to generally process call sites top-down in order for
    // simplifications stemming from replacing the call with the returned value
    // after inlining to be visible to subsequent inlining decisions.
    // FIXME: Using the instruction sequence is a really bad way to do this.
    // Instead we should do an actual RPO walk of the function body.
    for (Instruction &I : instructions(N.getFunction()))
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *Callee = CB->getCalledFunction()) {
          if (!Callee->isDeclaration())
            Calls->push({CB, -1});
          else if (!isa<IntrinsicInst>(I)) {
            using namespace ore;
            setInlineRemark(*CB, "unavailable definition");
            ORE.emit([&]() {
              return OptimizationRemarkMissed(DEBUG_TYPE, "NoDefinition", &I)
                     << NV("Callee", Callee) << " will not be inlined into "
                     << NV("Caller", CB->getCaller())
                     << " because its definition is unavailable"
                     << setIsVerbose();
            });
          }
        }
  }
  if (Calls->empty())
    return PreservedAnalyses::all();

  // Capture an updatable pointer to the current SCC.
  auto *C = &InitialC;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function *, int>, 16> InlineHistory;

  // Track a set vector of inlined callees so that we can augment the caller
  // with all of their edges in the call graph before pruning out the ones that
  // got simplified away.
  SmallSetVector<Function *, 4> InlinedCallees;

  // Track the dead functions to delete once finished with inlining calls. We
  // defer deleting these to make it easier to handle the call graph updates.
  SmallVector<Function *, 4> DeadFunctions;

  // Loop forward over all of the calls.
  while (!Calls->empty()) {
    // We expect the calls to typically be batched with sequences of calls that
    // have the same caller, so we first set up some shared infrastructure for
    // this caller. We also do any pruning we can at this layer on the caller
    // alone.
    Function &F = *Calls->front().first->getCaller();
    LazyCallGraph::Node &N = *CG.lookup(F);
    if (CG.lookupSCC(N) != C) {
      Calls->pop();
      continue;
    }

    LLVM_DEBUG(dbgs() << "Inlining calls in: " << F.getName() << "\n"
                      << "    Function size: " << F.getInstructionCount()
                      << "\n");

    auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
      return FAM.getResult<AssumptionAnalysis>(F);
    };

    // Now process as many calls as we have within this caller in the sequence.
    // We bail out as soon as the caller has to change so we can update the
    // call graph and prepare the context of that new caller.
    bool DidInline = false;
    while (!Calls->empty() && Calls->front().first->getCaller() == &F) {
      auto P = Calls->pop();
      CallBase *CB = P.first;
      const int InlineHistoryID = P.second;
      Function &Callee = *CB->getCalledFunction();

      if (InlineHistoryID != -1 &&
          inlineHistoryIncludes(&Callee, InlineHistoryID, InlineHistory)) {
        setInlineRemark(*CB, "recursive");
        continue;
      }

      // Check if this inlining may repeat breaking an SCC apart that has
      // already been split once before. In that case, inlining here may
      // trigger infinite inlining, much like is prevented within the inliner
      // itself by the InlineHistory above, but spread across CGSCC iterations
      // and thus hidden from the full inline history.
      if (CG.lookupSCC(*CG.lookup(Callee)) == C &&
          UR.InlinedInternalEdges.count({&N, C})) {
        LLVM_DEBUG(dbgs() << "Skipping inlining internal SCC edge from a node "
                             "previously split out of this SCC by inlining: "
                          << F.getName() << " -> " << Callee.getName() << "\n");
        setInlineRemark(*CB, "recursive SCC split");
        continue;
      }

      auto Advice = Advisor.getAdvice(*CB, OnlyMandatory);
      if (!Advice)
        continue;

      // Check whether we want to inline this callsite.
      if (!Advice->isInliningRecommended()) {
        Advice->recordUnattemptedInlining();
        continue;
      }

      // Set up the data structure used to plumb customization into the
      // `InlineFunction` routine.
      InlineFunctionInfo IFI(
          /*cg=*/nullptr, GetAssumptionCache, PSI,
          &FAM.getResult<BlockFrequencyAnalysis>(*(CB->getCaller())),
          &FAM.getResult<BlockFrequencyAnalysis>(Callee));

      InlineResult IR =
          InlineFunction(*CB, IFI, &FAM.getResult<AAManager>(*CB->getCaller()));
      if (!IR.isSuccess()) {
        Advice->recordUnsuccessfulInlining(IR);
        continue;
      }

      DidInline = true;
      InlinedCallees.insert(&Callee);
      ++NumInlined;

      LLVM_DEBUG(dbgs() << "    Size after inlining: "
                        << F.getInstructionCount() << "\n");

      // Add any new callsites to defined functions to the worklist.
      if (!IFI.InlinedCallSites.empty()) {
        int NewHistoryID = InlineHistory.size();
        InlineHistory.push_back({&Callee, InlineHistoryID});

        for (CallBase *ICB : reverse(IFI.InlinedCallSites)) {
          Function *NewCallee = ICB->getCalledFunction();
          assert(!(NewCallee && NewCallee->isIntrinsic()) &&
                 "Intrinsic calls should not be tracked.");
          if (!NewCallee) {
            // Try to promote an indirect (virtual) call without waiting for
            // the post-inline cleanup and the next DevirtSCCRepeatedPass
            // iteration because the next iteration may not happen and we may
            // miss inlining it.
            if (tryPromoteCall(*ICB))
              NewCallee = ICB->getCalledFunction();
          }
          if (NewCallee)
            if (!NewCallee->isDeclaration())
              Calls->push({ICB, NewHistoryID});
        }
      }

      // Merge the attributes based on the inlining.
      AttributeFuncs::mergeAttributesForInlining(F, Callee);

      // For local functions, check whether this makes the callee trivially
      // dead. In that case, we can drop the body of the function eagerly,
      // which may reduce the number of callers of other functions to one,
      // changing inline cost thresholds.
      bool CalleeWasDeleted = false;
      if (Callee.hasLocalLinkage()) {
        // To check this we also need to nuke any dead constant uses (perhaps
        // made dead by this operation on other functions).
        Callee.removeDeadConstantUsers();
        if (Callee.use_empty() && !CG.isLibFunction(Callee)) {
          Calls->erase_if([&](const std::pair<CallBase *, int> &Call) {
            return Call.first->getCaller() == &Callee;
          });
          // Clear the body and queue the function itself for deletion when we
          // finish inlining and call graph updates.
          // Note that after this point, it is an error to do anything other
          // than use the callee's address or delete it.
          Callee.dropAllReferences();
          assert(!is_contained(DeadFunctions, &Callee) &&
                 "Cannot cause a function to become dead twice!");
          DeadFunctions.push_back(&Callee);
          CalleeWasDeleted = true;
        }
      }
      if (CalleeWasDeleted)
        Advice->recordInliningWithCalleeDeleted();
      else
        Advice->recordInlining();
    }

    if (!DidInline)
      continue;
    Changed = true;

    // At this point, since we have made changes we have at least removed
    // a call instruction. However, in the process we do some incremental
    // simplification of the surrounding code. This simplification can
    // essentially do all of the same things as a function pass and we can
    // re-use the exact same logic for updating the call graph to reflect the
    // change.

    // Inside the update, we also update the FunctionAnalysisManager in the
    // proxy for this particular SCC. We do this as the SCC may have changed and
    // as we're going to mutate this particular function we want to make sure
    // the proxy is in place to forward any invalidation events.
    LazyCallGraph::SCC *OldC = C;
    C = &updateCGAndAnalysisManagerForCGSCCPass(CG, *C, N, AM, UR, FAM);
    LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n");

    // If this causes an SCC to split apart into multiple smaller SCCs, there
    // is a subtle risk we need to prepare for. Other transformations may
    // expose an "infinite inlining" opportunity later, and because of the SCC
    // mutation, we will revisit this function and potentially re-inline. If we
    // do, and that re-inlining also has the potential to mutate the SCC
    // structure, the infinite inlining problem can manifest through infinite
    // SCC splits and merges. To avoid this, we capture the originating caller
    // node and the SCC containing the call edge. This is a slight
    // over-approximation of the possible inlining decisions that must be
    // avoided, but is relatively efficient to store. We use C != OldC to know
    // when a new SCC is generated and the original SCC may be generated via
    // merge in later iterations.
    //
    // It is also possible that even if no new SCC is generated
    // (i.e., C == OldC), the original SCC could be split and then merged
    // into the same one as itself, and the original SCC will then be added
    // into UR.CWorklist again; we want to catch such cases too.
    //
    // FIXME: This seems like a very heavyweight way of retaining the inline
    // history, we should look for a more efficient way of tracking it.
    if ((C != OldC || UR.CWorklist.count(OldC)) &&
        llvm::any_of(InlinedCallees, [&](Function *Callee) {
          return CG.lookupSCC(*CG.lookup(*Callee)) == OldC;
        })) {
      LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, "
                           "retaining this to avoid infinite inlining.\n");
      UR.InlinedInternalEdges.insert({&N, OldC});
    }
    InlinedCallees.clear();
  }

  // Now that we've finished inlining all of the calls across this SCC, delete
  // all of the trivially dead functions, updating the call graph and the CGSCC
  // pass manager in the process.
  //
  // Note that this walks a pointer set which has non-deterministic order but
  // that is OK as all we do is delete things and add pointers to unordered
  // sets.
  for (Function *DeadF : DeadFunctions) {
    // Get the necessary information out of the call graph and nuke the
    // function there. Also, clear out any cached analyses.
    auto &DeadC = *CG.lookupSCC(*CG.lookup(*DeadF));
    FAM.clear(*DeadF, DeadF->getName());
    AM.clear(DeadC, DeadC.getName());
    auto &DeadRC = DeadC.getOuterRefSCC();
    CG.removeDeadFunction(*DeadF);

    // Mark the relevant parts of the call graph as invalid so we don't visit
    // them.
    UR.InvalidatedSCCs.insert(&DeadC);
    UR.InvalidatedRefSCCs.insert(&DeadRC);

    // If the updated SCC was the one containing the deleted function, clear it.
    if (&DeadC == UR.UpdatedC)
      UR.UpdatedC = nullptr;

    // And delete the actual function from the module.
    // The Advisor may use Function pointers to efficiently index various
    // internal maps, e.g. for memoization. Function cleanup passes like
    // argument promotion create new functions. It is possible for a new
    // function to be allocated at the address of a deleted function. We could
    // index using names, but that's inefficient. Alternatively, we let the
    // Advisor free the functions when it sees fit.
    DeadF->getBasicBlockList().clear();
    M.getFunctionList().remove(DeadF);

    ++NumDeleted;
  }

  if (!Changed)
    return PreservedAnalyses::all();

  // Even if we change the IR, we update the core CGSCC data structures and so
  // can preserve the proxy to the function analysis manager.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  return PA;
}

ModuleInlinerWrapperPass::ModuleInlinerWrapperPass(InlineParams Params,
                                                   bool MandatoryFirst,
                                                   InliningAdvisorMode Mode,
                                                   unsigned MaxDevirtIterations)
    : Params(Params), Mode(Mode), MaxDevirtIterations(MaxDevirtIterations),
      PM(), MPM() {
  // Run the inliner first. The theory is that we are walking bottom-up and so
  // the callees have already been fully optimized, and we want to inline them
  // into the callers so that our optimizations can reflect that.
  // For PreLinkThinLTO pass, we disable hot-caller heuristic for sample PGO
  // because it makes profile annotation in the backend inaccurate.
  if (MandatoryFirst)
    PM.addPass(InlinerPass(/*OnlyMandatory*/ true));
  PM.addPass(InlinerPass());
}

PreservedAnalyses ModuleInlinerWrapperPass::run(Module &M,
                                                ModuleAnalysisManager &MAM) {
  auto &IAA = MAM.getResult<InlineAdvisorAnalysis>(M);
  if (!IAA.tryCreate(Params, Mode, CGSCCInlineReplayFile,
                     CGSCCInlineReplayScope)) {
    M.getContext().emitError(
        "Could not set up Inlining Advisor for the requested "
        "mode and/or options");
    return PreservedAnalyses::all();
  }

  // We wrap the CGSCC pipeline in a devirtualization repeater. This will try
  // to detect when we devirtualize indirect calls and iterate the SCC passes
  // in that case to try and catch knock-on inlining or function attrs
  // opportunities. Then we add it to the module pipeline by walking the SCCs
  // in postorder (or bottom-up).
  // If MaxDevirtIterations is 0, we just don't use the devirtualization
  // wrapper.
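  // (For instance, assuming PM holds a single InlinerPass and MPM has not yet
  // been populated, the textual pipeline rendered by printPipeline below
  // would be "cgscc(devirt<4>(inline))" for MaxDevirtIterations == 4 and
  // "cgscc(inline)" when it is 0.)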
  if (MaxDevirtIterations == 0)
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(PM)));
  else
    MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
        createDevirtSCCRepeatedPass(std::move(PM), MaxDevirtIterations)));
  MPM.run(M, MAM);

  // Discard the InlineAdvisor; a subsequent inlining session should construct
  // its own.
  auto PA = PreservedAnalyses::all();
  PA.abandon<InlineAdvisorAnalysis>();
  return PA;
}

void InlinerPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<InlinerPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);
  if (OnlyMandatory)
    OS << "<only-mandatory>";
}

void ModuleInlinerWrapperPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  // Print some information about the passes added to the wrapper. This is,
  // however, incomplete, as the InlineAdvisorAnalysis part (which also depends
  // on Params and Mode) isn't included.
  if (!MPM.isEmpty()) {
    MPM.printPipeline(OS, MapClassName2PassName);
    OS << ",";
  }
  OS << "cgscc(";
  if (MaxDevirtIterations != 0)
    OS << "devirt<" << MaxDevirtIterations << ">(";
  PM.printPipeline(OS, MapClassName2PassName);
  if (MaxDevirtIterations != 0)
    OS << ")";
  OS << ")";
}