//===- CGSCCPassManager.cpp - Managing & running CGSCC passes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PassManagerImpl.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>

#define DEBUG_TYPE "cgscc"

using namespace llvm;

// Explicit template instantiations and specialization definitions for core
// template typedefs.
namespace llvm {

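// When set, hitting the devirtualization iteration limit becomes a fatal
// error (see maxDevirtIterationsReached below) rather than a silent stop;
// useful for catching unexpected extra iterations in tests.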
static cl::opt<bool> AbortOnMaxDevirtIterationsReached(
    "abort-on-max-devirt-iterations-reached",
    cl::desc("Abort when the max iterations for devirtualization CGSCC repeat "
             "pass is reached"));

// Explicit instantiations for the core proxy templates.
template class AllAnalysesOn<LazyCallGraph::SCC>;
template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                           LazyCallGraph &, CGSCCUpdateResult &>;
template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
                                         LazyCallGraph::SCC, LazyCallGraph &>;
template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;

/// Explicitly specialize the pass manager run method to handle call graph
/// updates.
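///
/// A typical pipeline reaches this specialization through the module adaptor;
/// a minimal sketch (SomeCGSCCPass is a placeholder, not a real pass):
///
///   CGSCCPassManager CGPM;
///   CGPM.addPass(SomeCGSCCPass());
///   ModulePassManager MPM;
///   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));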
template <>
PreservedAnalyses
PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
            CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
                                      CGSCCAnalysisManager &AM,
                                      LazyCallGraph &G, CGSCCUpdateResult &UR) {
  // Request PassInstrumentation from the analysis manager; we use it below to
  // run the instrumentation callbacks for each pass.
  PassInstrumentation PI =
      AM.getResult<PassInstrumentationAnalysis>(InitialC, G);

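  // Start from "all analyses preserved" and intersect in each pass's actual
  // preserved set as we go; the result is what this whole manager preserves.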
  PreservedAnalyses PA = PreservedAnalyses::all();

  if (DebugLogging)
    dbgs() << "Starting CGSCC pass manager run.\n";

  // The SCC may be refined while we are running passes over it, so set up
  // a pointer that we can update.
  LazyCallGraph::SCC *C = &InitialC;

  // Get Function analysis manager from its proxy.
  FunctionAnalysisManager &FAM =
      AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*C)->getManager();

  for (auto &Pass : Passes) {
    // Run the PassInstrumentation BeforePass callbacks first; if any callback
    // returns false, skip this pass's execution entirely.
    if (!PI.runBeforePass(*Pass, *C))
      continue;

    PreservedAnalyses PassPA;
    {
      TimeTraceScope TimeScope(Pass->name());
      PassPA = Pass->run(*C, AM, G, UR);
    }

    if (UR.InvalidatedSCCs.count(C))
      PI.runAfterPassInvalidated<LazyCallGraph::SCC>(*Pass, PassPA);
    else
      PI.runAfterPass<LazyCallGraph::SCC>(*Pass, *C, PassPA);

    // Update the SCC if necessary.
    C = UR.UpdatedC ? UR.UpdatedC : C;
    if (UR.UpdatedC) {
      // If C is updated, also create a proxy and update FAM inside the result.
      auto *ResultFAMCP =
          &AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);
      ResultFAMCP->updateFAM(FAM);
    }

    // If the CGSCC pass wasn't able to provide a valid updated SCC, the
    // current SCC may simply need to be skipped if invalid.
    if (UR.InvalidatedSCCs.count(C)) {
      LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
      break;
    }
    // Check that we didn't miss any update scenario.
    assert(C->begin() != C->end() && "Cannot have an empty SCC!");

    // Update the analysis manager as each pass runs and potentially
    // invalidates analyses.
    AM.invalidate(*C, PassPA);

    // Finally, we intersect the final preserved analyses to compute the
    // aggregate preserved set for this pass manager.
    PA.intersect(std::move(PassPA));

    // FIXME: Historically, the pass managers all called the LLVM context's
    // yield function here. We don't have a generic way to acquire the
    // context and it isn't yet clear what the right pattern is for yielding
    // in the new pass manager so it is currently omitted.
    // ...getContext().yield();
  }

  // Before we mark all of *this* SCC's analyses as preserved below, intersect
  // this with the cross-SCC preserved analysis set. This is used to allow
  // CGSCC passes to mutate ancestor SCCs and still trigger proper invalidation
  // for them.
  UR.CrossSCCPA.intersect(PA);

  // Invalidation was handled after each pass in the above loop for the current
  // SCC. Therefore, the remaining analysis results in the AnalysisManager are
  // preserved. We mark this with a set so that we don't need to inspect each
  // one individually.
  PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();

  if (DebugLogging)
    dbgs() << "Finished CGSCC pass manager run.\n";

  return PA;
}

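// Invalidation for the module-to-CGSCC proxy. This decides whether the cached
// SCC analyses survive a module-level invalidation event, and propagates the
// invalidation down into each SCC when they do.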
bool CGSCCAnalysisManagerModuleProxy::Result::invalidate(
    Module &M, const PreservedAnalyses &PA,
    ModuleAnalysisManager::Invalidator &Inv) {
  // If literally everything is preserved, we're done.
  if (PA.areAllPreserved())
    return false; // This is still a valid proxy.

  // If this proxy or the call graph is going to be invalidated, we also need
  // to clear all the keys coming from that analysis.
  //
  // We also directly invalidate the FAM's module proxy if necessary, and if
  // that proxy isn't preserved we can't preserve this proxy either. We rely on
  // it to handle module -> function analysis invalidation in the face of
  // structural changes and so if it's unavailable we conservatively clear the
  // entire SCC layer as well rather than trying to do invalidation ourselves.
  auto PAC = PA.getChecker<CGSCCAnalysisManagerModuleProxy>();
  if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Module>>()) ||
      Inv.invalidate<LazyCallGraphAnalysis>(M, PA) ||
      Inv.invalidate<FunctionAnalysisManagerModuleProxy>(M, PA)) {
    InnerAM->clear();

    // And the proxy itself should be marked as invalid so that we can observe
    // the new call graph. This isn't strictly necessary because we cheat
    // above, but is still useful.
    return true;
  }

  // Directly check if the relevant set is preserved so we can short circuit
  // invalidating SCCs below.
  bool AreSCCAnalysesPreserved =
      PA.allAnalysesInSetPreserved<AllAnalysesOn<LazyCallGraph::SCC>>();

  // Ok, we have a graph, so we can propagate the invalidation down into it.
  G->buildRefSCCs();
  for (auto &RC : G->postorder_ref_sccs())
    for (auto &C : RC) {
      Optional<PreservedAnalyses> InnerPA;

      // Check to see whether the preserved set needs to be adjusted based on
      // module-level analysis invalidation triggering deferred invalidation
      // for this SCC.
      if (auto *OuterProxy =
              InnerAM->getCachedResult<ModuleAnalysisManagerCGSCCProxy>(C))
        for (const auto &OuterInvalidationPair :
             OuterProxy->getOuterInvalidations()) {
          AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
          const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
          if (Inv.invalidate(OuterAnalysisID, M, PA)) {
            if (!InnerPA)
              InnerPA = PA;
            for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
              InnerPA->abandon(InnerAnalysisID);
          }
        }

      // Check if we needed a custom PA set. If so we'll need to run the inner
      // invalidation.
      if (InnerPA) {
        InnerAM->invalidate(C, *InnerPA);
        continue;
      }

      // Otherwise we only need to do invalidation if the original PA set didn't
      // preserve all SCC analyses.
      if (!AreSCCAnalysesPreserved)
        InnerAM->invalidate(C, PA);
    }

  // Return false to indicate that this result is still a valid proxy.
  return false;
}

template <>
CGSCCAnalysisManagerModuleProxy::Result
CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM) {
  // Force the Function analysis manager to also be available so that it can
  // be accessed in an SCC analysis and proxied onward to function passes.
  // FIXME: It is pretty awkward to just drop the result here and assert that
  // we can find it again later.
  (void)AM.getResult<FunctionAnalysisManagerModuleProxy>(M);

  return Result(*InnerAM, AM.getResult<LazyCallGraphAnalysis>(M));
}

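// The opaque key whose address uniquely identifies this analysis to the
// analysis manager infrastructure.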
AnalysisKey FunctionAnalysisManagerCGSCCProxy::Key;

FunctionAnalysisManagerCGSCCProxy::Result
FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C,
                                       CGSCCAnalysisManager &AM,
                                       LazyCallGraph &CG) {
  // Note: unconditionally requesting the module proxy here may actually create
  // it at this point. There are cases where this runs unnecessarily, but doing
  // so is cheap and keeping the assertion below in place is more valuable.
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG);
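  // Every function in an SCC lives in the same module, so any member function
  // gives us the module.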
  Module &M = *C.begin()->getFunction().getParent();
  bool ProxyExists =
      MAMProxy.cachedResultExists<FunctionAnalysisManagerModuleProxy>(M);
  assert(ProxyExists &&
         "The CGSCC pass manager requires that the FAM module proxy is run "
         "on the module prior to entering the CGSCC walk");
  (void)ProxyExists;

  // We just return an empty result. The caller will use the updateFAM interface
  // to correctly register the relevant FunctionAnalysisManager based on the
  // context in which this proxy is run.
  return Result();
}

bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
    LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
    CGSCCAnalysisManager::Invalidator &Inv) {
  // If literally everything is preserved, we're done.
  if (PA.areAllPreserved())
    return false; // This is still a valid proxy.

  // All updates to preserve valid results are done below, so we don't need to
  // invalidate this proxy.
  //
  // Note that in order to preserve this proxy, a module pass must ensure that
  // the FAM has been completely updated to handle the deletion of functions.
  // Specifically, any FAM-cached results for those functions need to have been
  // forcibly cleared. When preserved, this proxy will only invalidate results
  // cached on functions *still in the module* at the end of the module pass.
  auto PAC = PA.getChecker<FunctionAnalysisManagerCGSCCProxy>();
  if (!PAC.preserved() &&
      !PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) {
    for (LazyCallGraph::Node &N : C)
      FAM->clear(N.getFunction(), N.getFunction().getName());

    return false;
  }

  // Directly check if the relevant set is preserved.
  bool AreFunctionAnalysesPreserved =
      PA.allAnalysesInSetPreserved<AllAnalysesOn<Function>>();

  // Now walk all the functions to see if any inner analysis invalidation is
  // necessary.
  for (LazyCallGraph::Node &N : C) {
    Function &F = N.getFunction();
    Optional<PreservedAnalyses> FunctionPA;

    // Check to see whether the preserved set needs to be pruned based on
    // SCC-level analysis invalidation that triggers deferred invalidation
    // registered with the outer analysis manager proxy for this function.
    if (auto *OuterProxy =
            FAM->getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F))
      for (const auto &OuterInvalidationPair :
           OuterProxy->getOuterInvalidations()) {
        AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
        const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
        if (Inv.invalidate(OuterAnalysisID, C, PA)) {
          if (!FunctionPA)
            FunctionPA = PA;
          for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
            FunctionPA->abandon(InnerAnalysisID);
        }
      }

    // Check if we needed a custom PA set, and if so we'll need to run the
    // inner invalidation.
    if (FunctionPA) {
      FAM->invalidate(F, *FunctionPA);
      continue;
    }

    // Otherwise we only need to do invalidation if the original PA set didn't
    // preserve all function analyses.
    if (!AreFunctionAnalysesPreserved)
      FAM->invalidate(F, PA);
  }

  // Return false to indicate that this result is still a valid proxy.
  return false;
}

} // end namespace llvm

/// When a new SCC is created for the graph, we first update the
/// FunctionAnalysisManager in the proxy's result. Because function analysis
/// results may already be cached for the functions now in that SCC, two forms
/// of update are required.
///
/// First, a proxy from the SCC to the FunctionAnalysisManager needs to be
/// created so that any subsequent invalidation events to the SCC are
/// propagated to the function analysis results cached for functions within it.
///
/// Second, if any of the functions within the SCC have analysis results with
/// outer analysis dependencies, then those dependencies would point to the
/// *wrong* SCC's analysis result. We forcibly invalidate the necessary
/// function analyses so that they don't retain stale handles.
static void updateNewSCCFunctionAnalyses(LazyCallGraph::SCC &C,
                                         LazyCallGraph &G,
                                         CGSCCAnalysisManager &AM,
                                         FunctionAnalysisManager &FAM) {
  AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, G).updateFAM(FAM);

  // Now walk the functions in this SCC and invalidate any function analysis
  // results that might have outer dependencies on an SCC analysis.
  for (LazyCallGraph::Node &N : C) {
    Function &F = N.getFunction();

    auto *OuterProxy =
        FAM.getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F);
    if (!OuterProxy)
      // No outer analyses were queried, nothing to do.
      continue;

    // Forcibly abandon all the inner analyses with dependencies, but
    // invalidate nothing else.
    auto PA = PreservedAnalyses::all();
    for (const auto &OuterInvalidationPair :
         OuterProxy->getOuterInvalidations()) {
      const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
      for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
        PA.abandon(InnerAnalysisID);
    }

    // Now invalidate anything we found.
    FAM.invalidate(F, PA);
  }
}

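// Invoked by the devirtualization repetition wrapper when it exhausts its
// iteration budget. With the flag above set this becomes a hard error, which
// is useful for catching unexpected extra iterations in tests.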
void llvm::maxDevirtIterationsReached() {
  if (AbortOnMaxDevirtIterationsReached)
    report_fatal_error("Max devirtualization iterations reached");
}

/// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
/// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
/// added SCCs.
///
/// The range of new SCCs must be in postorder already. The SCC they were split
/// out of must be provided as \p C. The current node being mutated and
/// triggering updates must be passed as \p N.
///
/// This function returns the SCC containing \p N. This will be either \p C if
/// no new SCCs have been split out, or it will be the new SCC containing \p N.
template <typename SCCRangeT>
static LazyCallGraph::SCC *
incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
                       LazyCallGraph::Node &N, LazyCallGraph::SCC *C,
                       CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
  using SCC = LazyCallGraph::SCC;

  if (NewSCCRange.begin() == NewSCCRange.end())
    return C;

  // Add the current SCC to the worklist as its shape has changed.
  UR.CWorklist.insert(C);
  LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist:" << *C
                    << "\n");

  SCC *OldC = C;

  // Update the current SCC. Note that if we have new SCCs, this must actually
  // change the SCC.
  assert(C != &*NewSCCRange.begin() &&
         "Cannot insert new SCCs without changing current SCC!");
  C = &*NewSCCRange.begin();
  assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

  // If we had a cached FAM proxy originally, we will want to create more of
  // them for each SCC that was split off.
  FunctionAnalysisManager *FAM = nullptr;
  if (auto *FAMProxy =
          AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*OldC))
    FAM = &FAMProxy->getManager();

  // We need to propagate an invalidation call to all but the newly current SCC
  // because the outer pass manager won't do that for us after splitting them.
  // FIXME: We should accept a PreservedAnalysis from the CG updater so that if
  // there are preserved analyses we can avoid invalidating them here for
  // split-off SCCs.
  // We know however that this will preserve any FAM proxy so go ahead and mark
  // that.
  PreservedAnalyses PA;
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  AM.invalidate(*OldC, PA);

  // Ensure the now-current SCC's function analyses are updated.
  if (FAM)
    updateNewSCCFunctionAnalyses(*C, G, AM, *FAM);

  for (SCC &NewC : llvm::reverse(make_range(std::next(NewSCCRange.begin()),
                                            NewSCCRange.end()))) {
    assert(C != &NewC && "No need to re-visit the current SCC!");
    assert(OldC != &NewC && "Already handled the original SCC!");
    UR.CWorklist.insert(&NewC);
    LLVM_DEBUG(dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n");

    // Ensure new SCCs' function analyses are updated.
    if (FAM)
      updateNewSCCFunctionAnalyses(NewC, G, AM, *FAM);

    // Also propagate a normal invalidation to the new SCC as only the current
    // will get one from the pass manager infrastructure.
    AM.invalidate(NewC, PA);
  }
  return C;
}

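/// Core implementation shared by the two public entry points below: re-scan
/// the body of \p N's function after a pass has run over it and reconcile the
/// lazy call graph \p G, the analysis manager \p AM, and the update result
/// \p UR with any added, removed, promoted (ref -> call), or demoted
/// (call -> ref) edges. \p FunctionPass selects the stricter invariants that
/// hold when only a function pass (rather than a CGSCC pass) mutated the IR.
/// Returns the SCC that now contains \p N.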
static LazyCallGraph::SCC &updateCGAndAnalysisManagerForPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM, bool FunctionPass) {
  using Node = LazyCallGraph::Node;
  using Edge = LazyCallGraph::Edge;
  using SCC = LazyCallGraph::SCC;
  using RefSCC = LazyCallGraph::RefSCC;

  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
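  // The containers below track, in order: constants still to visit, constants
  // already visited, call/ref targets still present in the body, ref edges
  // that became calls, call edges that became refs, entirely new call and ref
  // edges, and nodes for functions not previously in the graph.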
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Node *, 16> RetainedEdges;
  SmallSetVector<Node *, 4> PromotedRefTargets;
  SmallSetVector<Node *, 4> DemotedCallTargets;
  SmallSetVector<Node *, 4> NewCallEdges;
  SmallSetVector<Node *, 4> NewRefEdges;
  SmallSetVector<Node *, 4> NewNodes;

  // First walk the function and handle all called functions. We do this first
  // because if there is a single call edge, whether there are ref edges is
  // irrelevant.
  for (Instruction &I : instructions(F)) {
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (Function *Callee = CB->getCalledFunction()) {
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
          Node *CalleeN = G.lookup(*Callee);
          if (!CalleeN) {
            CalleeN = &G.get(*Callee);
            NewNodes.insert(CalleeN);
          }
          Edge *E = N->lookup(*CalleeN);
          assert((E || !FunctionPass) &&
                 "No function transformations should introduce *new* "
                 "call edges! Any new calls should be modeled as "
                 "promoted existing ref edges!");
          bool Inserted = RetainedEdges.insert(CalleeN).second;
          (void)Inserted;
          assert(Inserted && "We should never visit a function twice.");
          if (!E)
            NewCallEdges.insert(CalleeN);
          else if (!E->isCall())
            PromotedRefTargets.insert(CalleeN);
        }
      } else {
        // We can miss devirtualization if an indirect call is created then
        // promoted before updateCGAndAnalysisManagerForPass runs.
        auto Entry = UR.IndirectVHs.find(CB);
        if (Entry == UR.IndirectVHs.end())
          UR.IndirectVHs.insert({CB, WeakTrackingVH(CB)});
        else if (!Entry->second)
          Entry->second = WeakTrackingVH(CB);
      }
    }
  }

  // Now walk all references.
  for (Instruction &I : instructions(F))
    for (Value *Op : I.operand_values())
      if (auto *OpC = dyn_cast<Constant>(Op))
        if (Visited.insert(OpC).second)
          Worklist.push_back(OpC);

  auto VisitRef = [&](Function &Referee) {
    Node *RefereeN = G.lookup(Referee);
    if (!RefereeN) {
      RefereeN = &G.get(Referee);
      NewNodes.insert(RefereeN);
    }
    Edge *E = N->lookup(*RefereeN);
    assert((E || !FunctionPass) &&
           "No function transformations should introduce *new* ref "
           "edges! Any new ref edges would require IPO which "
           "function passes aren't allowed to do!");
    bool Inserted = RetainedEdges.insert(RefereeN).second;
    (void)Inserted;
    assert(Inserted && "We should never visit a function twice.");
    if (!E)
      NewRefEdges.insert(RefereeN);
    else if (E->isCall())
      DemotedCallTargets.insert(RefereeN);
  };
  LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);

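  // Initialize nodes for any functions referenced here that weren't yet in
  // the graph.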
  for (Node *NewNode : NewNodes)
    G.initNode(*NewNode, *C);

  // Handle new ref edges.
  for (Node *RefTarget : NewRefEdges) {
    SCC &TargetC = *G.lookupSCC(*RefTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    (void)TargetRC;
    // TODO: This only allows trivial edges to be added for now.
    assert((RC == &TargetRC || RC->isAncestorOf(TargetRC)) &&
           "New ref edge is not trivial!");
    RC->insertTrivialRefEdge(N, *RefTarget);
  }

  // Handle new call edges.
  for (Node *CallTarget : NewCallEdges) {
    SCC &TargetC = *G.lookupSCC(*CallTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    (void)TargetRC;
    // TODO: This only allows trivial edges to be added for now.
    assert((RC == &TargetRC || RC->isAncestorOf(TargetRC)) &&
           "New call edge is not trivial!");
    // Add a trivial ref edge to be promoted later on alongside
    // PromotedRefTargets.
    RC->insertTrivialRefEdge(N, *CallTarget);
  }

  // Include synthetic reference edges to known, defined lib functions.
  for (auto *LibFn : G.getLibFunctions())
    // While the list of lib functions doesn't have repeats, don't re-visit
    // anything handled above.
    if (!Visited.count(LibFn))
      VisitRef(*LibFn);

  // First remove all of the edges that are no longer present in this function.
  // The first step makes these edges uniformly ref edges and accumulates them
  // into a separate data structure so removal doesn't invalidate anything.
  SmallVector<Node *, 4> DeadTargets;
  for (Edge &E : *N) {
    if (RetainedEdges.count(&E.getNode()))
      continue;

    SCC &TargetC = *G.lookupSCC(E.getNode());
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    if (&TargetRC == RC && E.isCall()) {
      if (C != &TargetC) {
        // For separate SCCs this is trivial.
        RC->switchTrivialInternalEdgeToRef(N, E.getNode());
      } else {
        // Now update the call graph.
        C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, E.getNode()),
                                   G, N, C, AM, UR);
      }
    }

    // Now that this is ready for actual removal, put it into our list.
    DeadTargets.push_back(&E.getNode());
  }
  // Remove the easy cases quickly and actually pull them out of our list.
  DeadTargets.erase(
      llvm::remove_if(DeadTargets,
                      [&](Node *TargetN) {
                        SCC &TargetC = *G.lookupSCC(*TargetN);
                        RefSCC &TargetRC = TargetC.getOuterRefSCC();

                        // We can't trivially remove internal targets, so skip
                        // those.
                        if (&TargetRC == RC)
                          return false;

                        RC->removeOutgoingEdge(N, *TargetN);
                        LLVM_DEBUG(dbgs() << "Deleting outgoing edge from '"
                                          << N << "' to '" << *TargetN
                                          << "'\n");
                        return true;
                      }),
      DeadTargets.end());

  // Now do a batch removal of the internal ref edges left.
  auto NewRefSCCs = RC->removeInternalRefEdge(N, DeadTargets);
  if (!NewRefSCCs.empty()) {
    // The old RefSCC is dead; mark it as such.
    UR.InvalidatedRefSCCs.insert(RC);

    // Note that we don't bother to invalidate analyses as ref-edge
    // connectivity is not really observable in any way and is intended
    // exclusively to be used for ordering of transforms rather than for
    // analysis conclusions.

    // Update RC to the "bottom".
    assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
    RC = &C->getOuterRefSCC();
    assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");

    // The RC worklist is in reverse postorder, so we enqueue the new ones in
    // RPO except for the one which contains the source node as that is the
    // "bottom" we will continue processing in the bottom-up walk.
    assert(NewRefSCCs.front() == RC &&
           "New current RefSCC not first in the returned list!");
    for (RefSCC *NewRC : llvm::reverse(make_range(std::next(NewRefSCCs.begin()),
                                                  NewRefSCCs.end()))) {
      assert(NewRC != RC && "Should not encounter the current RefSCC further "
                            "in the postorder list of new RefSCCs.");
      UR.RCWorklist.insert(NewRC);
      LLVM_DEBUG(dbgs() << "Enqueuing a new RefSCC in the update worklist: "
                        << *NewRC << "\n");
    }
  }

  // Next demote all the call edges that are now ref edges. This helps make
  // the SCCs small which should minimize the work below as we don't want to
  // form cycles that this would break.
  for (Node *RefTarget : DemotedCallTargets) {
    SCC &TargetC = *G.lookupSCC(*RefTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToRef(N, *RefTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
                        << "' to '" << *RefTarget << "'\n");
      continue;
    }

    // We are switching an internal call edge to a ref edge. This may split up
    // some SCCs.
    if (C != &TargetC) {
      // For separate SCCs this is trivial.
      RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
      continue;
    }

    // Now update the call graph.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
                               C, AM, UR);
  }

  // We added a ref edge earlier for new call edges; promote those to call
  // edges alongside PromotedRefTargets.
  for (Node *E : NewCallEdges)
    PromotedRefTargets.insert(E);

  // Now promote ref edges into call edges.
  for (Node *CallTarget : PromotedRefTargets) {
    SCC &TargetC = *G.lookupSCC(*CallTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToCall(N, *CallTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
                        << "' to '" << *CallTarget << "'\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Switch an internal ref edge to a call edge from '"
                      << N << "' to '" << *CallTarget << "'\n");

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event that SCCs have
    // moved before the current one in the post-order sequence.
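    // Track whether any of the merged-away SCCs carried a FAM proxy, and
    // remember where the current SCC sits within its RefSCC so we can detect
    // whether the merge reordered SCCs relative to it.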
    bool HasFunctionAnalysisProxy = false;
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    bool FormedCycle = RC->switchInternalEdgeToCall(
        N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
          for (SCC *MergedC : MergedSCCs) {
            assert(MergedC != &TargetC && "Cannot merge away the target SCC!");

            HasFunctionAnalysisProxy |=
                AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
                    *MergedC) != nullptr;

            // Mark that this SCC will no longer be valid.
            UR.InvalidatedSCCs.insert(MergedC);

            // FIXME: We should really do a 'clear' here to forcibly release
            // memory, but we don't have a good way of doing that and
            // preserving the function analyses.
            auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
            PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
            AM.invalidate(*MergedC, PA);
          }
        });

    // If we formed a cycle by creating this call, we need to update more data
    // structures.
    if (FormedCycle) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // If one of the invalidated SCCs had a cached proxy to a function
      // analysis manager, we need to create a proxy in the new current SCC as
      // the invalidated SCCs had their functions moved.
      if (HasFunctionAnalysisProxy)
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G).updateFAM(FAM);

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle. However, we have taken care to
      // update the proxies so it remains valid.
      auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
      PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
      AM.invalidate(*C, PA);
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    // If we have actually moved an SCC to be topologically "below" the current
    // one due to merging, we will need to revisit the current SCC after
    // visiting those moved SCCs.
    //
    // It is critical that we *do not* revisit the current SCC unless we
    // actually move SCCs in the process of merging because otherwise we may
    // form a cycle where an SCC is split apart, merged, split, merged and so
    // on infinitely.
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist: " << *C
                        << "\n");
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : llvm::reverse(make_range(RC->begin() + InitialSCCIndex,
                                                  RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        LLVM_DEBUG(dbgs() << "Enqueuing a newly earlier in post-order SCC: "
                          << MovedC << "\n");
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}

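// Public entry points: thin wrappers over the shared implementation above,
// invoked by the update machinery (e.g. the adaptor built by
// createCGSCCToFunctionPassAdaptor) after a pass mutates a function. The
// boolean selects the stricter update rules that apply to function passes.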
LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  return updateCGAndAnalysisManagerForPass(G, InitialC, N, AM, UR, FAM,
                                           /* FunctionPass */ true);
}

LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForCGSCCPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  return updateCGAndAnalysisManagerForPass(G, InitialC, N, AM, UR, FAM,
                                           /* FunctionPass */ false);
}