1 //===- Inliner.cpp - Code common to all inliners --------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// This file implements the mechanics of inlining: performing the inline
// without missing any calls and keeping the call graph up to date.  The
// decisions about which calls are profitable to inline are made elsewhere.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BasicAliasAnalysis.h"
21 #include "llvm/Analysis/CallGraph.h"
22 #include "llvm/Analysis/InlineCost.h"
23 #include "llvm/Analysis/TargetLibraryInfo.h"
24 #include "llvm/IR/CallSite.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/DiagnosticInfo.h"
27 #include "llvm/IR/Instructions.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/Module.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/raw_ostream.h"
32 #include "llvm/Transforms/IPO/InlinerPass.h"
33 #include "llvm/Transforms/Utils/Cloning.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 using namespace llvm;
36 
37 #define DEBUG_TYPE "inline"
38 
39 STATISTIC(NumInlined, "Number of functions inlined");
40 STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
41 STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
42 STATISTIC(NumMergedAllocas, "Number of allocas merged together");
43 
// This weirdly named statistic tracks the number of times that, when
// attempting to inline a function A into B, we analyze the callers of B to
// see whether inlining B into those callers instead would be more profitable,
// which would block this inline step.
47 STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");
48 
49 Inliner::Inliner(char &ID) : CallGraphSCCPass(ID), InsertLifetime(true) {}
50 
51 Inliner::Inliner(char &ID, bool InsertLifetime)
52     : CallGraphSCCPass(ID), InsertLifetime(InsertLifetime) {}
53 
54 /// For this class, we declare that we require and preserve the call graph.
55 /// If the derived class implements this method, it should
56 /// always explicitly call the implementation here.
57 void Inliner::getAnalysisUsage(AnalysisUsage &AU) const {
58   AU.addRequired<AssumptionCacheTracker>();
59   AU.addRequired<TargetLibraryInfoWrapperPass>();
60   getAAResultsAnalysisUsage(AU);
61   CallGraphSCCPass::getAnalysisUsage(AU);
62 }
63 
64 
/// Map from an array type to the allocas of that array type that earlier
/// inlines have made available for reuse in the caller.
typedef DenseMap<ArrayType *, std::vector<AllocaInst *>>
    InlinedArrayAllocasTy;
67 
68 /// If it is possible to inline the specified call site,
69 /// do so and update the CallGraph for this operation.
70 ///
71 /// This function also does some basic book-keeping to update the IR.  The
72 /// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller.  If we are able to
/// inline this call site, we try to reuse already-available allocas, and we
/// add any allocas that cannot be reused to the set.
76 static bool InlineCallIfPossible(Pass &P, CallSite CS, InlineFunctionInfo &IFI,
77                                  InlinedArrayAllocasTy &InlinedArrayAllocas,
78                                  int InlineHistory, bool InsertLifetime) {
79   Function *Callee = CS.getCalledFunction();
80   Function *Caller = CS.getCaller();
81 
  // We need to construct BasicAA manually in order to disable its use of
  // other function analyses.
84   BasicAAResult BAR(createLegacyPMBasicAAResult(P, *Callee));
85 
86   // Construct our own AA results for this function. We do this manually to
87   // work around the limitations of the legacy pass manager.
88   AAResults AAR(createLegacyPMAAResults(P, *Callee, BAR));
89 
90   // Try to inline the function.  Get the list of static allocas that were
91   // inlined.
92   if (!InlineFunction(CS, IFI, &AAR, InsertLifetime))
93     return false;
94 
95   AttributeFuncs::mergeAttributesForInlining(*Caller, *Callee);
96 
97   // Look at all of the allocas that we inlined through this call site.  If we
98   // have already inlined other allocas through other calls into this function,
99   // then we know that they have disjoint lifetimes and that we can merge them.
100   //
101   // There are many heuristics possible for merging these allocas, and the
102   // different options have different tradeoffs.  One thing that we *really*
103   // don't want to hurt is SRoA: once inlining happens, often allocas are no
104   // longer address taken and so they can be promoted.
105   //
106   // Our "solution" for that is to only merge allocas whose outermost type is an
107   // array type.  These are usually not promoted because someone is using a
108   // variable index into them.  These are also often the most important ones to
109   // merge.
110   //
111   // A better solution would be to have real memory lifetime markers in the IR
112   // and not have the inliner do any merging of allocas at all.  This would
113   // allow the backend to do proper stack slot coloring of all allocas that
114   // *actually make it to the backend*, which is really what we want.
115   //
116   // Because we don't have this information, we do this simple and useful hack.
117   //
118   SmallPtrSet<AllocaInst*, 16> UsedAllocas;
119 
120   // When processing our SCC, check to see if CS was inlined from some other
121   // call site.  For example, if we're processing "A" in this code:
122   //   A() { B() }
123   //   B() { x = alloca ... C() }
124   //   C() { y = alloca ... }
125   // Assume that C was not inlined into B initially, and so we're processing A
126   // and decide to inline B into A.  Doing this makes an alloca available for
127   // reuse and makes a callsite (C) available for inlining.  When we process
  // the C call site we don't want to do any alloca merging between x and y
129   // because their scopes are not disjoint.  We could make this smarter by
130   // keeping track of the inline history for each alloca in the
131   // InlinedArrayAllocas but this isn't likely to be a significant win.
132   if (InlineHistory != -1)  // Only do merging for top-level call sites in SCC.
133     return true;
134 
135   // Loop over all the allocas we have so far and see if they can be merged with
136   // a previously inlined alloca.  If not, remember that we had it.
137   for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
138        AllocaNo != e; ++AllocaNo) {
139     AllocaInst *AI = IFI.StaticAllocas[AllocaNo];
140 
141     // Don't bother trying to merge array allocations (they will usually be
142     // canonicalized to be an allocation *of* an array), or allocations whose
143     // type is not itself an array (because we're afraid of pessimizing SRoA).
144     ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
145     if (!ATy || AI->isArrayAllocation())
146       continue;
147 
148     // Get the list of all available allocas for this array type.
149     std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];
150 
    // Loop over the allocas in AllocasForType to see if we can reuse one.  Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function.  Also, AllocasForType can of course be empty!
156     bool MergedAwayAlloca = false;
157     for (AllocaInst *AvailableAlloca : AllocasForType) {
158 
159       unsigned Align1 = AI->getAlignment(),
160                Align2 = AvailableAlloca->getAlignment();
161 
      // The available alloca has to be in the right function, not in some other
      // function in this SCC.  Comparing parent blocks suffices because static
      // allocas always live in the entry block.
164       if (AvailableAlloca->getParent() != AI->getParent())
165         continue;
166 
167       // If the inlined function already uses this alloca then we can't reuse
168       // it.
169       if (!UsedAllocas.insert(AvailableAlloca).second)
170         continue;
171 
      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and
      // declare success!
174       DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI << "\n\t\tINTO: "
175                    << *AvailableAlloca << '\n');
176 
      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation where a dbg.declare precedes its alloca.
179       if (auto *L = LocalAsMetadata::getIfExists(AI))
180         if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
181           for (User *U : MDV->users())
182             if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
183               DDI->moveBefore(AvailableAlloca->getNextNode());
184 
185       AI->replaceAllUsesWith(AvailableAlloca);
186 
      // Keep the larger of the two alignments; an unspecified alignment is
      // resolved to the type's ABI alignment before comparing.
      if (Align1 != Align2) {
188         if (!Align1 || !Align2) {
189           const DataLayout &DL = Caller->getParent()->getDataLayout();
190           unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());
191 
192           Align1 = Align1 ? Align1 : TypeAlign;
193           Align2 = Align2 ? Align2 : TypeAlign;
194         }
195 
196         if (Align1 > Align2)
197           AvailableAlloca->setAlignment(AI->getAlignment());
198       }
199 
200       AI->eraseFromParent();
201       MergedAwayAlloca = true;
202       ++NumMergedAllocas;
203       IFI.StaticAllocas[AllocaNo] = nullptr;
204       break;
205     }
206 
207     // If we already nuked the alloca, we're done with it.
208     if (MergedAwayAlloca)
209       continue;
210 
211     // If we were unable to merge away the alloca either because there are no
212     // allocas of the right type available or because we reused them all
213     // already, remember that this alloca came from an inlined function and mark
214     // it used so we don't reuse it for other allocas from this inline
215     // operation.
216     AllocasForType.push_back(AI);
217     UsedAllocas.insert(AI);
218   }
219 
220   return true;
221 }
222 
223 static void emitAnalysis(CallSite CS, const Twine &Msg) {
224   Function *Caller = CS.getCaller();
225   LLVMContext &Ctx = Caller->getContext();
226   DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
227   emitOptimizationRemarkAnalysis(Ctx, DEBUG_TYPE, *Caller, DLoc, Msg);
228 }
229 
230 bool Inliner::shouldBeDeferred(Function *Caller, CallSite CS, InlineCost IC,
231                                int &TotalSecondaryCost) {
232 
233   // For now we only handle local or inline functions.
234   if (!Caller->hasLocalLinkage() && !Caller->hasLinkOnceODRLinkage())
235     return false;
236   // Try to detect the case where the current inlining candidate caller (call
237   // it B) is a static or linkonce-ODR function and is an inlining candidate
238   // elsewhere, and the current candidate callee (call it C) is large enough
239   // that inlining it into B would make B too big to inline later. In these
240   // circumstances it may be best not to inline C into B, but to inline B into
241   // its callers.
242   //
243   // This only applies to static and linkonce-ODR functions because those are
244   // expected to be available for inlining in the translation units where they
245   // are used. Thus we will always have the opportunity to make local inlining
246   // decisions. Importantly the linkonce-ODR linkage covers inline functions
247   // and templates in C++.
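  //
  // For example, if inlining C into B would consume the cost delta remaining
  // at B's outer call sites, those outer inlines of B would be blocked; we
  // defer when the total cost of inlining B into its callers is smaller than
  // the cost of inlining C into B.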
248   //
249   // FIXME: All of this logic should be sunk into getInlineCost. It relies on
250   // the internal implementation of the inline cost metrics rather than
251   // treating them as truly abstract units etc.
252   TotalSecondaryCost = 0;
253   // The candidate cost to be imposed upon the current function.
254   int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
255   // This bool tracks what happens if we do NOT inline C into B.
256   bool callerWillBeRemoved = Caller->hasLocalLinkage();
257   // This bool tracks what happens if we DO inline C into B.
258   bool inliningPreventsSomeOuterInline = false;
259   for (User *U : Caller->users()) {
260     CallSite CS2(U);
261 
    // If this isn't a call to Caller (it could be some other sort of
    // reference), skip it.  Such references will prevent the caller from
    // being removed.
265     if (!CS2 || CS2.getCalledFunction() != Caller) {
266       callerWillBeRemoved = false;
267       continue;
268     }
269 
270     InlineCost IC2 = getInlineCost(CS2);
271     ++NumCallerCallersAnalyzed;
272     if (!IC2) {
273       callerWillBeRemoved = false;
274       continue;
275     }
276     if (IC2.isAlways())
277       continue;
278 
    // See if inlining the callee at the original call site would erase the
    // cost delta of this call site. We subtract off the penalty for the call
281     // which we would be deleting.
282     if (IC2.getCostDelta() <= CandidateCost) {
283       inliningPreventsSomeOuterInline = true;
284       TotalSecondaryCost += IC2.getCost();
285     }
286   }
287   // If all outer calls to Caller would get inlined, the cost for the last
288   // one is set very low by getInlineCost, in anticipation that Caller will
289   // be removed entirely.  We did not account for this above unless there
290   // is only one caller of Caller.
291   if (callerWillBeRemoved && !Caller->use_empty())
292     TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;
293 
294   if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost())
295     return true;
296 
297   return false;
298 }
299 
300 /// Return true if the inliner should attempt to inline at the given CallSite.
301 bool Inliner::shouldInline(CallSite CS) {
302   InlineCost IC = getInlineCost(CS);
303 
304   if (IC.isAlways()) {
305     DEBUG(dbgs() << "    Inlining: cost=always"
306           << ", Call: " << *CS.getInstruction() << "\n");
307     emitAnalysis(CS, Twine(CS.getCalledFunction()->getName()) +
308                          " should always be inlined (cost=always)");
309     return true;
310   }
311 
312   if (IC.isNever()) {
313     DEBUG(dbgs() << "    NOT Inlining: cost=never"
314           << ", Call: " << *CS.getInstruction() << "\n");
315     emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
316                            " should never be inlined (cost=never)"));
317     return false;
318   }
319 
320   Function *Caller = CS.getCaller();
321   if (!IC) {
322     DEBUG(dbgs() << "    NOT Inlining: cost=" << IC.getCost()
323           << ", thres=" << (IC.getCostDelta() + IC.getCost())
324           << ", Call: " << *CS.getInstruction() << "\n");
325     emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
326                            " too costly to inline (cost=") +
327                          Twine(IC.getCost()) + ", threshold=" +
328                          Twine(IC.getCostDelta() + IC.getCost()) + ")");
329     return false;
330   }
331 
332   int TotalSecondaryCost = 0;
333   if (shouldBeDeferred(Caller, CS, IC, TotalSecondaryCost)) {
334     DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction()
335           << " Cost = " << IC.getCost()
336           << ", outer Cost = " << TotalSecondaryCost << '\n');
337     emitAnalysis(CS, Twine("Not inlining. Cost of inlining " +
338                            CS.getCalledFunction()->getName() +
339                            " increases the cost of inlining " +
340                            CS.getCaller()->getName() + " in other contexts"));
341     return false;
342   }
343 
344   DEBUG(dbgs() << "    Inlining: cost=" << IC.getCost()
345         << ", thres=" << (IC.getCostDelta() + IC.getCost())
346         << ", Call: " << *CS.getInstruction() << '\n');
347   emitAnalysis(
348       CS, CS.getCalledFunction()->getName() + Twine(" can be inlined into ") +
349               CS.getCaller()->getName() + " with cost=" + Twine(IC.getCost()) +
350               " (threshold=" + Twine(IC.getCostDelta() + IC.getCost()) + ")");
351   return true;
352 }
353 
354 /// Return true if the specified inline history ID
355 /// indicates an inline history that includes the specified function.
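///
/// For example, with InlineHistory = {(A, -1), (B, 0)}, history ID 1 denotes
/// the chain "inlined from B, which was itself inlined from A", so the
/// history includes both A and B.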
356 static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
357             const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) {
358   while (InlineHistoryID != -1) {
359     assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
360            "Invalid inline history ID");
361     if (InlineHistory[InlineHistoryID].first == F)
362       return true;
363     InlineHistoryID = InlineHistory[InlineHistoryID].second;
364   }
365   return false;
366 }
367 
368 bool Inliner::runOnSCC(CallGraphSCC &SCC) {
369   if (skipSCC(SCC))
370     return false;
371   return inlineCalls(SCC);
372 }
373 
374 bool Inliner::inlineCalls(CallGraphSCC &SCC) {
375   CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
376   ACT = &getAnalysis<AssumptionCacheTracker>();
377   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
378 
379   SmallPtrSet<Function*, 8> SCCFunctions;
380   DEBUG(dbgs() << "Inliner visiting SCC:");
381   for (CallGraphNode *Node : SCC) {
382     Function *F = Node->getFunction();
383     if (F) SCCFunctions.insert(F);
384     DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
385   }
386 
387   // Scan through and identify all call sites ahead of time so that we only
388   // inline call sites in the original functions, not call sites that result
389   // from inlining other functions.
390   SmallVector<std::pair<CallSite, int>, 16> CallSites;
391 
392   // When inlining a callee produces new call sites, we want to keep track of
393   // the fact that they were inlined from the callee.  This allows us to avoid
394   // infinite inlining in some obscure cases.  To represent this, we use an
395   // index into the InlineHistory vector.
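  // For example, inlining Callee at a top-level call site (history ID -1)
  // appends (Callee, -1) to this vector, and call sites cloned from Callee's
  // body are queued with that entry's index, so recursion through Callee can
  // later be detected via InlineHistoryIncludes.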
396   SmallVector<std::pair<Function*, int>, 8> InlineHistory;
397 
398   for (CallGraphNode *Node : SCC) {
399     Function *F = Node->getFunction();
400     if (!F) continue;
401 
402     for (BasicBlock &BB : *F)
403       for (Instruction &I : BB) {
404         CallSite CS(cast<Value>(&I));
405         // If this isn't a call, or it is a call to an intrinsic, it can
406         // never be inlined.
407         if (!CS || isa<IntrinsicInst>(I))
408           continue;
409 
410         // If this is a direct call to an external function, we can never inline
411         // it.  If it is an indirect call, inlining may resolve it to be a
412         // direct call, so we keep it.
413         if (Function *Callee = CS.getCalledFunction())
414           if (Callee->isDeclaration())
415             continue;
416 
417         CallSites.push_back(std::make_pair(CS, -1));
418       }
419   }
420 
421   DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");
422 
  // If there are no calls in this SCC, exit early.
424   if (CallSites.empty())
425     return false;
426 
427   // Now that we have all of the call sites, move the ones to functions in the
428   // current SCC to the end of the list.
429   unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        // Decrement i so we revisit the call site just swapped into slot i.
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);
434 
435 
436   InlinedArrayAllocasTy InlinedArrayAllocas;
437   InlineFunctionInfo InlineInfo(&CG, ACT);
438 
439   // Now that we have all of the call sites, loop over them and inline them if
440   // it looks profitable to do so.
441   bool Changed = false;
442   bool LocalChange;
443   do {
444     LocalChange = false;
    // Iterate in the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    // CallSites may be modified inside the loop, so a ranged for loop cannot
    // be used.
448     for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
449       CallSite CS = CallSites[CSi].first;
450 
451       Function *Caller = CS.getCaller();
452       Function *Callee = CS.getCalledFunction();
453 
454       // If this call site is dead and it is to a readonly function, we should
455       // just delete the call instead of trying to inline it, regardless of
456       // size.  This happens because IPSCCP propagates the result out of the
457       // call and then we're left with the dead call.
458       if (isInstructionTriviallyDead(CS.getInstruction(), &TLI)) {
459         DEBUG(dbgs() << "    -> Deleting dead call: "
460                      << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Caller to Callee.
462         CG[Caller]->removeCallEdgeFor(CS);
463         CS.getInstruction()->eraseFromParent();
464         ++NumCallsDeleted;
465       } else {
466         // We can only inline direct calls to non-declarations.
467         if (!Callee || Callee->isDeclaration()) continue;
468 
        // If this call site was obtained by inlining another function, verify
        // that the inline history for the call site does not include the
        // callee itself.  If it does, we'd be recursively inlining the same
        // function, which would provide the same call sites and cause us to
        // inline infinitely.
474         int InlineHistoryID = CallSites[CSi].second;
475         if (InlineHistoryID != -1 &&
476             InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
477           continue;
478 
479         LLVMContext &CallerCtx = Caller->getContext();
480 
        // Get the DebugLoc to report. CS will be invalid after inlining.
482         DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
483 
484         // If the policy determines that we should inline this function,
485         // try to do so.
486         if (!shouldInline(CS)) {
487           emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
488                                        Twine(Callee->getName() +
489                                              " will not be inlined into " +
490                                              Caller->getName()));
491           continue;
492         }
493 
494         // Attempt to inline the function.
495         if (!InlineCallIfPossible(*this, CS, InlineInfo, InlinedArrayAllocas,
496                                   InlineHistoryID, InsertLifetime)) {
497           emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
498                                        Twine(Callee->getName() +
499                                              " will not be inlined into " +
500                                              Caller->getName()));
501           continue;
502         }
503         ++NumInlined;
504 
505         // Report the inline decision.
506         emitOptimizationRemark(
507             CallerCtx, DEBUG_TYPE, *Caller, DLoc,
508             Twine(Callee->getName() + " inlined into " + Caller->getName()));
509 
510         // If inlining this function gave us any new call sites, throw them
511         // onto our worklist to process.  They are useful inline candidates.
512         if (!InlineInfo.InlinedCalls.empty()) {
513           // Create a new inline history entry for this, so that we remember
514           // that these new callsites came about due to inlining Callee.
515           int NewHistoryID = InlineHistory.size();
516           InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));
517 
518           for (Value *Ptr : InlineInfo.InlinedCalls)
519             CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
520         }
521       }
522 
523       // If we inlined or deleted the last possible call site to the function,
524       // delete the function body now.
525       if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
526           // TODO: Can remove if in SCC now.
527           !SCCFunctions.count(Callee) &&
528 
          // The function may be apparently dead, but if there are indirect
          // call graph references to the node, we cannot delete it yet, as
          // this could invalidate the CGSCC iterator.
532           CG[Callee]->getNumReferences() == 0) {
533         DEBUG(dbgs() << "    -> Deleting dead function: "
534               << Callee->getName() << "\n");
535         CallGraphNode *CalleeNode = CG[Callee];
536 
537         // Remove any call graph edges from the callee to its callees.
538         CalleeNode->removeAllCalledFunctions();
539 
        // Remove the callee's node from the call graph and delete it.
541         delete CG.removeFunctionFromModule(CalleeNode);
542         ++NumDeleted;
543       }
544 
545       // Remove this call site from the list.  If possible, use
546       // swap/pop_back for efficiency, but do not use it if doing so would
547       // move a call site to a function in this SCC before the
548       // 'FirstCallInSCC' barrier.
549       if (SCC.isSingular()) {
550         CallSites[CSi] = CallSites.back();
551         CallSites.pop_back();
552       } else {
553         CallSites.erase(CallSites.begin()+CSi);
554       }
555       --CSi;
556 
557       Changed = true;
558       LocalChange = true;
559     }
560   } while (LocalChange);
561 
562   return Changed;
563 }
564 
565 /// Remove now-dead linkonce functions at the end of
566 /// processing to avoid breaking the SCC traversal.
567 bool Inliner::doFinalization(CallGraph &CG) {
568   return removeDeadFunctions(CG);
569 }
570 
/// Remove dead functions that are not included in the DNR (Do Not Remove)
/// list.
572 bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
573   SmallVector<CallGraphNode*, 16> FunctionsToRemove;
574   SmallVector<CallGraphNode *, 16> DeadFunctionsInComdats;
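  // Net liveness count per comdat: each dead function in a group subtracts
  // one, and each member of the group found in the module adds one, so a
  // final count of zero means the entire group is dead.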
575   SmallDenseMap<const Comdat *, int, 16> ComdatEntriesAlive;
576 
577   auto RemoveCGN = [&](CallGraphNode *CGN) {
578     // Remove any call graph edges from the function to its callees.
579     CGN->removeAllCalledFunctions();
580 
581     // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
584     CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);
585 
    // Queue the node for removal from the call graph and deletion.
587     FunctionsToRemove.push_back(CGN);
588   };
589 
  // Scan all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove list.
592   for (const auto &I : CG) {
593     CallGraphNode *CGN = I.second.get();
594     Function *F = CGN->getFunction();
595     if (!F || F->isDeclaration())
596       continue;
597 
    // If we only care about always-inline functions, skip anything else.
    // This is a bit of a hack to share code between here and the
    // InlineAlways pass.
601     if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
602       continue;
603 
604     // If the only remaining users of the function are dead constants, remove
605     // them.
606     F->removeDeadConstantUsers();
607 
608     if (!F->isDefTriviallyDead())
609       continue;
610 
611     // It is unsafe to drop a function with discardable linkage from a COMDAT
612     // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities that are in COMDAT
    // groups, so it is unsafe to do so *unless* the linkage is local.
615     if (!F->hasLocalLinkage()) {
616       if (const Comdat *C = F->getComdat()) {
617         --ComdatEntriesAlive[C];
618         DeadFunctionsInComdats.push_back(CGN);
619         continue;
620       }
621     }
622 
623     RemoveCGN(CGN);
624   }
625   if (!DeadFunctionsInComdats.empty()) {
    // Count up all the entities in COMDAT groups.
627     auto ComdatGroupReferenced = [&](const Comdat *C) {
628       auto I = ComdatEntriesAlive.find(C);
629       if (I != ComdatEntriesAlive.end())
630         ++(I->getSecond());
631     };
632     for (const Function &F : CG.getModule())
633       if (const Comdat *C = F.getComdat())
634         ComdatGroupReferenced(C);
635     for (const GlobalVariable &GV : CG.getModule().globals())
636       if (const Comdat *C = GV.getComdat())
637         ComdatGroupReferenced(C);
638     for (const GlobalAlias &GA : CG.getModule().aliases())
639       if (const Comdat *C = GA.getComdat())
640         ComdatGroupReferenced(C);
641     for (CallGraphNode *CGN : DeadFunctionsInComdats) {
642       Function *F = CGN->getFunction();
643       const Comdat *C = F->getComdat();
644       int NumAlive = ComdatEntriesAlive[C];
645       // We can remove functions in a COMDAT group if the entire group is dead.
646       assert(NumAlive >= 0);
647       if (NumAlive > 0)
648         continue;
649 
650       RemoveCGN(CGN);
651     }
652   }
653 
654   if (FunctionsToRemove.empty())
655     return false;
656 
657   // Now that we know which functions to delete, do so.  We didn't want to do
658   // this inline, because that would invalidate our CallGraph::iterator
659   // objects. :(
660   //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here, since the functions can be deleted in any order.
664   array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
665   FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
666                                       FunctionsToRemove.end()),
667                           FunctionsToRemove.end());
668   for (CallGraphNode *CGN : FunctionsToRemove) {
669     delete CG.removeFunctionFromModule(CGN);
670     ++NumDeleted;
671   }
672   return true;
673 }
674