1 //===- CGSCCPassManager.cpp - Managing & running CGSCC passes -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/Analysis/CGSCCPassManager.h"
10 #include "llvm/ADT/ArrayRef.h"
11 #include "llvm/ADT/Optional.h"
12 #include "llvm/ADT/STLExtras.h"
13 #include "llvm/ADT/SetVector.h"
14 #include "llvm/ADT/SmallPtrSet.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/iterator_range.h"
17 #include "llvm/Analysis/LazyCallGraph.h"
18 #include "llvm/IR/Constant.h"
19 #include "llvm/IR/InstIterator.h"
20 #include "llvm/IR/Instruction.h"
21 #include "llvm/IR/PassManager.h"
22 #include "llvm/IR/PassManagerImpl.h"
23 #include "llvm/IR/ValueHandle.h"
24 #include "llvm/Support/Casting.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include "llvm/Support/TimeProfiler.h"
29 #include "llvm/Support/raw_ostream.h"
30 #include <algorithm>
31 #include <cassert>
32 #include <iterator>
33 
34 #define DEBUG_TYPE "cgscc"
35 
36 using namespace llvm;
37 
38 // Explicit template instantiations and specialization definitions for core
39 // template typedefs.
40 namespace llvm {
// Command-line escape hatch used by DevirtSCCRepeatedPass below: when another
// devirtualized call is found after the maximum iteration count has already
// been reached, abort compilation instead of silently stopping iteration.
static cl::opt<bool> AbortOnMaxDevirtIterationsReached(
    "abort-on-max-devirt-iterations-reached",
    cl::desc("Abort when the max iterations for devirtualization CGSCC repeat "
             "pass is reached"));

// Explicit instantiations for the core proxy templates.
template class AllAnalysesOn<LazyCallGraph::SCC>;
template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                           LazyCallGraph &, CGSCCUpdateResult &>;
template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
                                         LazyCallGraph::SCC, LazyCallGraph &>;
template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;
55 
/// Explicitly specialize the pass manager run method to handle call graph
/// updates.
///
/// Unlike the generic PassManager::run, this follows any refinement of the
/// current SCC recorded in \p UR between passes, refreshes the FAM proxy for
/// a refined SCC, and stops early if the current SCC has been invalidated.
template <>
PreservedAnalyses
PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
            CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
                                      CGSCCAnalysisManager &AM,
                                      LazyCallGraph &G, CGSCCUpdateResult &UR) {
  // Request PassInstrumentation from analysis manager, will use it to run
  // instrumenting callbacks for the passes later.
  PassInstrumentation PI =
      AM.getResult<PassInstrumentationAnalysis>(InitialC, G);

  PreservedAnalyses PA = PreservedAnalyses::all();

  // The SCC may be refined while we are running passes over it, so set up
  // a pointer that we can update.
  LazyCallGraph::SCC *C = &InitialC;

  // Get Function analysis manager from its proxy. Using the *cached* result is
  // intentional: the adaptor that entered the CGSCC walk is required to have
  // computed the proxy already.
  FunctionAnalysisManager &FAM =
      AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*C)->getManager();

  for (auto &Pass : Passes) {
    // Check the PassInstrumentation's BeforePass callbacks before running the
    // pass, skip its execution completely if asked to (callback returns false).
    if (!PI.runBeforePass(*Pass, *C))
      continue;

    PreservedAnalyses PassPA;
    {
      TimeTraceScope TimeScope(Pass->name());
      PassPA = Pass->run(*C, AM, G, UR);
    }

    // Report back to instrumentation. If the pass destroyed the SCC it ran on,
    // we must use the "invalidated" callback since *C can no longer be read.
    if (UR.InvalidatedSCCs.count(C))
      PI.runAfterPassInvalidated<LazyCallGraph::SCC>(*Pass, PassPA);
    else
      PI.runAfterPass<LazyCallGraph::SCC>(*Pass, *C, PassPA);

    // Update the SCC if necessary.
    C = UR.UpdatedC ? UR.UpdatedC : C;
    if (UR.UpdatedC) {
      // If C is updated, also create a proxy and update FAM inside the result.
      auto *ResultFAMCP =
          &AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);
      ResultFAMCP->updateFAM(FAM);
    }

    // If the CGSCC pass wasn't able to provide a valid updated SCC, the
    // current SCC may simply need to be skipped if invalid.
    if (UR.InvalidatedSCCs.count(C)) {
      LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
      break;
    }
    // Check that we didn't miss any update scenario.
    assert(C->begin() != C->end() && "Cannot have an empty SCC!");

    // Update the analysis manager as each pass runs and potentially
    // invalidates analyses.
    AM.invalidate(*C, PassPA);

    // Finally, we intersect the final preserved analyses to compute the
    // aggregate preserved set for this pass manager.
    PA.intersect(std::move(PassPA));
  }

  // Before we mark all of *this* SCC's analyses as preserved below, intersect
  // this with the cross-SCC preserved analysis set. This is used to allow
  // CGSCC passes to mutate ancestor SCCs and still trigger proper invalidation
  // for them.
  UR.CrossSCCPA.intersect(PA);

  // Invalidation was handled after each pass in the above loop for the current
  // SCC. Therefore, the remaining analysis results in the AnalysisManager are
  // preserved. We mark this with a set so that we don't need to inspect each
  // one individually.
  PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();

  return PA;
}
137 
/// Run the wrapped CGSCC pass over every SCC of the module's call graph in
/// bottom-up post-order. Worklists pick up RefSCCs/SCCs that are created or
/// refined while the pass runs, so the walk remains valid across call graph
/// mutations performed by the pass.
PreservedAnalyses
ModuleToPostOrderCGSCCPassAdaptor::run(Module &M, ModuleAnalysisManager &AM) {
  // Setup the CGSCC analysis manager from its proxy.
  CGSCCAnalysisManager &CGAM =
      AM.getResult<CGSCCAnalysisManagerModuleProxy>(M).getManager();

  // Get the call graph for this module.
  LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);

  // Get Function analysis manager from its proxy.
  FunctionAnalysisManager &FAM =
      AM.getCachedResult<FunctionAnalysisManagerModuleProxy>(M)->getManager();

  // We keep worklists to allow us to push more work onto the pass manager as
  // the passes are run.
  SmallPriorityWorklist<LazyCallGraph::RefSCC *, 1> RCWorklist;
  SmallPriorityWorklist<LazyCallGraph::SCC *, 1> CWorklist;

  // Keep sets for invalidated SCCs and RefSCCs that should be skipped when
  // iterating off the worklists.
  SmallPtrSet<LazyCallGraph::RefSCC *, 4> InvalidRefSCCSet;
  SmallPtrSet<LazyCallGraph::SCC *, 4> InvalidSCCSet;

  // Tracks (node, SCC) edges inlined within the current RefSCC; cleared once
  // we finish each RefSCC (see the end of the outer do-while below).
  SmallDenseSet<std::pair<LazyCallGraph::Node *, LazyCallGraph::SCC *>, 4>
      InlinedInternalEdges;

  CGSCCUpdateResult UR = {
      RCWorklist, CWorklist, InvalidRefSCCSet,         InvalidSCCSet,
      nullptr,    nullptr,   PreservedAnalyses::all(), InlinedInternalEdges,
      {}};

  // Request PassInstrumentation from analysis manager, will use it to run
  // instrumenting callbacks for the passes later.
  PassInstrumentation PI = AM.getResult<PassInstrumentationAnalysis>(M);

  PreservedAnalyses PA = PreservedAnalyses::all();
  CG.buildRefSCCs();
  for (auto RCI = CG.postorder_ref_scc_begin(),
            RCE = CG.postorder_ref_scc_end();
       RCI != RCE;) {
    assert(RCWorklist.empty() &&
           "Should always start with an empty RefSCC worklist");
    // The postorder_ref_sccs range we are walking is lazily constructed, so
    // we only push the first one onto the worklist. The worklist allows us
    // to capture *new* RefSCCs created during transformations.
    //
    // We really want to form RefSCCs lazily because that makes them cheaper
    // to update as the program is simplified and allows us to have greater
    // cache locality as forming a RefSCC touches all the parts of all the
    // functions within that RefSCC.
    //
    // We also eagerly increment the iterator to the next position because
    // the CGSCC passes below may delete the current RefSCC.
    RCWorklist.insert(&*RCI++);

    do {
      LazyCallGraph::RefSCC *RC = RCWorklist.pop_back_val();
      if (InvalidRefSCCSet.count(RC)) {
        LLVM_DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
        continue;
      }

      assert(CWorklist.empty() &&
             "Should always start with an empty SCC worklist");

      LLVM_DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
                        << "\n");

      // The top of the worklist may *also* be the same SCC we just ran over
      // (and invalidated for). Keep track of that last SCC we processed due
      // to SCC update to avoid redundant processing when an SCC is both just
      // updated itself and at the top of the worklist.
      LazyCallGraph::SCC *LastUpdatedC = nullptr;

      // Push the initial SCCs in reverse post-order as we'll pop off the
      // back and so see this in post-order.
      for (LazyCallGraph::SCC &C : llvm::reverse(*RC))
        CWorklist.insert(&C);

      do {
        LazyCallGraph::SCC *C = CWorklist.pop_back_val();
        // Due to call graph mutations, we may have invalid SCCs or SCCs from
        // other RefSCCs in the worklist. The invalid ones are dead and the
        // other RefSCCs should be queued above, so we just need to skip both
        // scenarios here.
        if (InvalidSCCSet.count(C)) {
          LLVM_DEBUG(dbgs() << "Skipping an invalid SCC...\n");
          continue;
        }
        if (LastUpdatedC == C) {
          LLVM_DEBUG(dbgs() << "Skipping redundant run on SCC: " << *C << "\n");
          continue;
        }
        if (&C->getOuterRefSCC() != RC) {
          LLVM_DEBUG(dbgs() << "Skipping an SCC that is now part of some other "
                               "RefSCC...\n");
          continue;
        }

        // Ensure we can proxy analysis updates from the CGSCC analysis manager
        // into the Function analysis manager by getting a proxy here.
        // This also needs to update the FunctionAnalysisManager, as this may be
        // the first time we see this SCC.
        CGAM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG).updateFAM(
            FAM);

        // Each time we visit a new SCC pulled off the worklist,
        // a transformation of a child SCC may have also modified this parent
        // and invalidated analyses. So we invalidate using the update record's
        // cross-SCC preserved set. This preserved set is intersected by any
        // CGSCC pass that handles invalidation (primarily pass managers) prior
        // to marking its SCC as preserved. That lets us track everything that
        // might need invalidation across SCCs without excessive invalidations
        // on a single SCC.
        //
        // This essentially allows SCC passes to freely invalidate analyses
        // of any ancestor SCC. If this becomes detrimental to successfully
        // caching analyses, we could force each SCC pass to manually
        // invalidate the analyses for any SCCs other than themselves which
        // are mutated. However, that seems to lose the robustness of the
        // pass-manager driven invalidation scheme.
        CGAM.invalidate(*C, UR.CrossSCCPA);

        do {
          // Check that we didn't miss any update scenario.
          assert(!InvalidSCCSet.count(C) && "Processing an invalid SCC!");
          assert(C->begin() != C->end() && "Cannot have an empty SCC!");
          assert(&C->getOuterRefSCC() == RC &&
                 "Processing an SCC in a different RefSCC!");

          // Record the refined SCC (if any) from the previous iteration before
          // clearing the update slots for this run of the pass.
          LastUpdatedC = UR.UpdatedC;
          UR.UpdatedRC = nullptr;
          UR.UpdatedC = nullptr;

          // Check the PassInstrumentation's BeforePass callbacks before
          // running the pass, skip its execution completely if asked to
          // (callback returns false).
          if (!PI.runBeforePass<LazyCallGraph::SCC>(*Pass, *C))
            continue;

          PreservedAnalyses PassPA;
          {
            TimeTraceScope TimeScope(Pass->name());
            PassPA = Pass->run(*C, CGAM, CG, UR);
          }

          if (UR.InvalidatedSCCs.count(C))
            PI.runAfterPassInvalidated<LazyCallGraph::SCC>(*Pass, PassPA);
          else
            PI.runAfterPass<LazyCallGraph::SCC>(*Pass, *C, PassPA);

          // Update the SCC and RefSCC if necessary.
          C = UR.UpdatedC ? UR.UpdatedC : C;
          RC = UR.UpdatedRC ? UR.UpdatedRC : RC;

          if (UR.UpdatedC) {
            // If we're updating the SCC, also update the FAM inside the proxy's
            // result.
            CGAM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, CG).updateFAM(
                FAM);
          }

          // If the CGSCC pass wasn't able to provide a valid updated SCC,
          // the current SCC may simply need to be skipped if invalid.
          if (UR.InvalidatedSCCs.count(C)) {
            LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
            break;
          }
          // Check that we didn't miss any update scenario.
          assert(C->begin() != C->end() && "Cannot have an empty SCC!");

          // We handle invalidating the CGSCC analysis manager's information
          // for the (potentially updated) SCC here. Note that any other SCCs
          // whose structure has changed should have been invalidated by
          // whatever was updating the call graph. This SCC gets invalidated
          // late as it contains the nodes that were actively being
          // processed.
          CGAM.invalidate(*C, PassPA);

          // Then intersect the preserved set so that invalidation of module
          // analyses will eventually occur when the module pass completes.
          // Also intersect with the cross-SCC preserved set to capture any
          // cross-SCC invalidation.
          UR.CrossSCCPA.intersect(PassPA);
          PA.intersect(std::move(PassPA));

          // The pass may have restructured the call graph and refined the
          // current SCC and/or RefSCC. We need to update our current SCC and
          // RefSCC pointers to follow these. Also, when the current SCC is
          // refined, re-run the SCC pass over the newly refined SCC in order
          // to observe the most precise SCC model available. This inherently
          // cannot cycle excessively as it only happens when we split SCCs
          // apart, at most converging on a DAG of single nodes.
          // FIXME: If we ever start having RefSCC passes, we'll want to
          // iterate there too.
          if (UR.UpdatedC)
            LLVM_DEBUG(dbgs()
                       << "Re-running SCC passes after a refinement of the "
                          "current SCC: "
                       << *UR.UpdatedC << "\n");

          // Note that both `C` and `RC` may at this point refer to deleted,
          // invalid SCC and RefSCCs respectively. But we will short circuit
          // the processing when we check them in the loop above.
        } while (UR.UpdatedC);
      } while (!CWorklist.empty());

      // We only need to keep internal inlined edge information within
      // a RefSCC, clear it to save on space and let the next time we visit
      // any of these functions have a fresh start.
      InlinedInternalEdges.clear();
    } while (!RCWorklist.empty());
  }

  // By definition we preserve the call graph, all SCC analyses, and the
  // analysis proxies by handling them above and in any nested pass managers.
  PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();
  PA.preserve<LazyCallGraphAnalysis>();
  PA.preserve<CGSCCAnalysisManagerModuleProxy>();
  PA.preserve<FunctionAnalysisManagerModuleProxy>();
  return PA;
}
360 
/// Run the wrapped CGSCC pass, repeating it (up to MaxIterations extra times)
/// whenever the run appears to have devirtualized an indirect call, so the
/// now-direct call edges can be observed by another run over the refined SCC.
/// Devirtualization is detected either explicitly, via value handles placed on
/// indirect call sites, or heuristically, via per-function call counts.
PreservedAnalyses DevirtSCCRepeatedPass::run(LazyCallGraph::SCC &InitialC,
                                             CGSCCAnalysisManager &AM,
                                             LazyCallGraph &CG,
                                             CGSCCUpdateResult &UR) {
  PreservedAnalyses PA = PreservedAnalyses::all();
  PassInstrumentation PI =
      AM.getResult<PassInstrumentationAnalysis>(InitialC, CG);

  // The SCC may be refined while we are running passes over it, so set up
  // a pointer that we can update.
  LazyCallGraph::SCC *C = &InitialC;

  // Struct to track the counts of direct and indirect calls in each function
  // of the SCC.
  struct CallCount {
    int Direct;
    int Indirect;
  };

  // Put value handles on all of the indirect calls and return the number of
  // direct calls for each function in the SCC.
  auto ScanSCC = [](LazyCallGraph::SCC &C,
                    SmallMapVector<Value *, WeakTrackingVH, 16> &CallHandles) {
    assert(CallHandles.empty() && "Must start with a clear set of handles.");

    SmallDenseMap<Function *, CallCount> CallCounts;
    CallCount CountLocal = {0, 0};
    for (LazyCallGraph::Node &N : C) {
      CallCount &Count =
          CallCounts.insert(std::make_pair(&N.getFunction(), CountLocal))
              .first->second;
      for (Instruction &I : instructions(N.getFunction()))
        if (auto *CB = dyn_cast<CallBase>(&I)) {
          if (CB->getCalledFunction()) {
            ++Count.Direct;
          } else {
            ++Count.Indirect;
            // Track the indirect call site; if the handle later resolves to a
            // call with a known callee, it was devirtualized.
            CallHandles.insert({CB, WeakTrackingVH(CB)});
          }
        }
    }

    return CallCounts;
  };

  UR.IndirectVHs.clear();
  // Populate the initial call handles and get the initial call counts.
  auto CallCounts = ScanSCC(*C, UR.IndirectVHs);

  for (int Iteration = 0;; ++Iteration) {
    if (!PI.runBeforePass<LazyCallGraph::SCC>(*Pass, *C))
      continue;

    PreservedAnalyses PassPA = Pass->run(*C, AM, CG, UR);

    if (UR.InvalidatedSCCs.count(C))
      PI.runAfterPassInvalidated<LazyCallGraph::SCC>(*Pass, PassPA);
    else
      PI.runAfterPass<LazyCallGraph::SCC>(*Pass, *C, PassPA);

    // If the SCC structure has changed, bail immediately and let the outer
    // CGSCC layer handle any iteration to reflect the refined structure.
    if (UR.UpdatedC && UR.UpdatedC != C) {
      PA.intersect(std::move(PassPA));
      break;
    }

    // If the CGSCC pass wasn't able to provide a valid updated SCC, the
    // current SCC may simply need to be skipped if invalid.
    if (UR.InvalidatedSCCs.count(C)) {
      LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
      break;
    }

    assert(C->begin() != C->end() && "Cannot have an empty SCC!");

    // Check whether any of the handles were devirtualized: a handle that now
    // points at a call with a known callee was an indirect call made direct.
    bool Devirt = llvm::any_of(UR.IndirectVHs, [](auto &P) -> bool {
      if (P.second) {
        if (CallBase *CB = dyn_cast<CallBase>(P.second)) {
          if (CB->getCalledFunction()) {
            LLVM_DEBUG(dbgs() << "Found devirtualized call: " << *CB << "\n");
            return true;
          }
        }
      }
      return false;
    });

    // Rescan to build up a new set of handles and count how many direct
    // calls remain. If we decide to iterate, this also sets up the input to
    // the next iteration.
    UR.IndirectVHs.clear();
    auto NewCallCounts = ScanSCC(*C, UR.IndirectVHs);

    // If we haven't found an explicit devirtualization already see if we
    // have decreased the number of indirect calls and increased the number
    // of direct calls for any function in the SCC. This can be fooled by all
    // manner of transformations such as DCE and other things, but seems to
    // work well in practice.
    if (!Devirt)
      // Iterate over the keys in NewCallCounts, if Function also exists in
      // CallCounts, make the check below.
      for (auto &Pair : NewCallCounts) {
        auto &CallCountNew = Pair.second;
        auto CountIt = CallCounts.find(Pair.first);
        if (CountIt != CallCounts.end()) {
          const auto &CallCountOld = CountIt->second;
          if (CallCountOld.Indirect > CallCountNew.Indirect &&
              CallCountOld.Direct < CallCountNew.Direct) {
            Devirt = true;
            break;
          }
        }
      }

    if (!Devirt) {
      PA.intersect(std::move(PassPA));
      break;
    }

    // Otherwise, if we've already hit our max, we're done.
    if (Iteration >= MaxIterations) {
      if (AbortOnMaxDevirtIterationsReached)
        report_fatal_error("Max devirtualization iterations reached");
      LLVM_DEBUG(
          dbgs() << "Found another devirtualization after hitting the max "
                    "number of repetitions ("
                 << MaxIterations << ") on SCC: " << *C << "\n");
      PA.intersect(std::move(PassPA));
      break;
    }

    LLVM_DEBUG(
        dbgs() << "Repeating an SCC pass after finding a devirtualization in: "
               << *C << "\n");

    // Move over the new call counts in preparation for iterating.
    CallCounts = std::move(NewCallCounts);

    // Update the analysis manager with each run and intersect the total set
    // of preserved analyses so we're ready to iterate.
    AM.invalidate(*C, PassPA);

    PA.intersect(std::move(PassPA));
  }

  // Note that we don't add any preserved entries here unlike a more normal
  // "pass manager" because we only handle invalidation *between* iterations,
  // not after the last iteration.
  return PA;
}
513 
/// Run the wrapped function pass over each function in the SCC, updating the
/// call graph after any run that does not preserve it. The current SCC may be
/// refined (split) as a result, in which case the walk follows the refined
/// SCC and skips nodes that were split into other SCCs.
PreservedAnalyses CGSCCToFunctionPassAdaptor::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM,
                                                  LazyCallGraph &CG,
                                                  CGSCCUpdateResult &UR) {
  // Setup the function analysis manager from its proxy.
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // Snapshot the node list up front: running passes may restructure the SCC,
  // so we must not iterate the SCC itself while mutating it.
  SmallVector<LazyCallGraph::Node *, 4> Nodes;
  for (LazyCallGraph::Node &N : C)
    Nodes.push_back(&N);

  // The SCC may get split while we are optimizing functions due to deleting
  // edges. If this happens, the current SCC can shift, so keep track of
  // a pointer we can overwrite.
  LazyCallGraph::SCC *CurrentC = &C;

  LLVM_DEBUG(dbgs() << "Running function passes across an SCC: " << C << "\n");

  PreservedAnalyses PA = PreservedAnalyses::all();
  for (LazyCallGraph::Node *N : Nodes) {
    // Skip nodes from other SCCs. These may have been split out during
    // processing. We'll eventually visit those SCCs and pick up the nodes
    // there.
    if (CG.lookupSCC(*N) != CurrentC)
      continue;

    Function &F = N->getFunction();

    PassInstrumentation PI = FAM.getResult<PassInstrumentationAnalysis>(F);
    if (!PI.runBeforePass<Function>(*Pass, F))
      continue;

    PreservedAnalyses PassPA;
    {
      TimeTraceScope TimeScope(Pass->name());
      PassPA = Pass->run(F, FAM);
    }

    PI.runAfterPass<Function>(*Pass, F, PassPA);

    // We know that the function pass couldn't have invalidated any other
    // function's analyses (that's the contract of a function pass), so
    // directly handle the function analysis manager's invalidation here.
    FAM.invalidate(F, EagerlyInvalidate ? PreservedAnalyses::none() : PassPA);

    // Then intersect the preserved set so that invalidation of module
    // analyses will eventually occur when the module pass completes.
    PA.intersect(std::move(PassPA));

    // If the call graph hasn't been preserved, update it based on this
    // function pass. This may also update the current SCC to point to
    // a smaller, more refined SCC.
    auto PAC = PA.getChecker<LazyCallGraphAnalysis>();
    if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
      CurrentC = &updateCGAndAnalysisManagerForFunctionPass(CG, *CurrentC, *N,
                                                            AM, UR, FAM);
      assert(CG.lookupSCC(*N) == CurrentC &&
             "Current SCC not updated to the SCC containing the current node!");
    }
  }

  // By definition we preserve the proxy. And we preserve all analyses on
  // Functions. This precludes *any* invalidation of function analyses by the
  // proxy, but that's OK because we've taken care to invalidate analyses in
  // the function analysis manager incrementally above.
  PA.preserveSet<AllAnalysesOn<Function>>();
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();

  // We've also ensured that we updated the call graph along the way.
  PA.preserve<LazyCallGraphAnalysis>();

  return PA;
}
588 
/// Handle invalidation events coming from the module layer: either clear the
/// whole inner CGSCC analysis manager (when the proxy, call graph, or FAM
/// proxy is invalidated), or propagate the preserved set down into each SCC,
/// honoring any deferred cross-manager invalidations registered with the
/// outer analysis manager proxy.
bool CGSCCAnalysisManagerModuleProxy::Result::invalidate(
    Module &M, const PreservedAnalyses &PA,
    ModuleAnalysisManager::Invalidator &Inv) {
  // If literally everything is preserved, we're done.
  if (PA.areAllPreserved())
    return false; // This is still a valid proxy.

  // If this proxy or the call graph is going to be invalidated, we also need
  // to clear all the keys coming from that analysis.
  //
  // We also directly invalidate the FAM's module proxy if necessary, and if
  // that proxy isn't preserved we can't preserve this proxy either. We rely on
  // it to handle module -> function analysis invalidation in the face of
  // structural changes and so if it's unavailable we conservatively clear the
  // entire SCC layer as well rather than trying to do invalidation ourselves.
  auto PAC = PA.getChecker<CGSCCAnalysisManagerModuleProxy>();
  if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Module>>()) ||
      Inv.invalidate<LazyCallGraphAnalysis>(M, PA) ||
      Inv.invalidate<FunctionAnalysisManagerModuleProxy>(M, PA)) {
    InnerAM->clear();

    // And the proxy itself should be marked as invalid so that we can observe
    // the new call graph. This isn't strictly necessary because we cheat
    // above, but is still useful.
    return true;
  }

  // Directly check if the relevant set is preserved so we can short circuit
  // invalidating SCCs below.
  bool AreSCCAnalysesPreserved =
      PA.allAnalysesInSetPreserved<AllAnalysesOn<LazyCallGraph::SCC>>();

  // Ok, we have a graph, so we can propagate the invalidation down into it.
  G->buildRefSCCs();
  for (auto &RC : G->postorder_ref_sccs())
    for (auto &C : RC) {
      // A custom preserved set for this SCC, built only when deferred
      // invalidation adjustments are needed.
      Optional<PreservedAnalyses> InnerPA;

      // Check to see whether the preserved set needs to be adjusted based on
      // module-level analysis invalidation triggering deferred invalidation
      // for this SCC.
      if (auto *OuterProxy =
              InnerAM->getCachedResult<ModuleAnalysisManagerCGSCCProxy>(C))
        for (const auto &OuterInvalidationPair :
             OuterProxy->getOuterInvalidations()) {
          AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
          const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
          if (Inv.invalidate(OuterAnalysisID, M, PA)) {
            if (!InnerPA)
              InnerPA = PA;
            for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
              InnerPA->abandon(InnerAnalysisID);
          }
        }

      // Check if we needed a custom PA set. If so we'll need to run the inner
      // invalidation.
      if (InnerPA) {
        InnerAM->invalidate(C, *InnerPA);
        continue;
      }

      // Otherwise we only need to do invalidation if the original PA set didn't
      // preserve all SCC analyses.
      if (!AreSCCAnalysesPreserved)
        InnerAM->invalidate(C, PA);
    }

  // Return false to indicate that this result is still a valid proxy.
  return false;
}
660 
661 template <>
662 CGSCCAnalysisManagerModuleProxy::Result
663 CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM) {
664   // Force the Function analysis manager to also be available so that it can
665   // be accessed in an SCC analysis and proxied onward to function passes.
666   // FIXME: It is pretty awkward to just drop the result here and assert that
667   // we can find it again later.
668   (void)AM.getResult<FunctionAnalysisManagerModuleProxy>(M);
669 
670   return Result(*InnerAM, AM.getResult<LazyCallGraphAnalysis>(M));
671 }
672 
// Out-of-line definition of the proxy's unique analysis identity key.
AnalysisKey FunctionAnalysisManagerCGSCCProxy::Key;

/// Produce the FAM proxy result for an SCC. The result itself is empty; the
/// real FunctionAnalysisManager is wired in afterwards via updateFAM by the
/// caller, because only the caller knows the correct FAM for its context.
FunctionAnalysisManagerCGSCCProxy::Result
FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C,
                                       CGSCCAnalysisManager &AM,
                                       LazyCallGraph &CG) {
  // Note: unconditionally requesting the module proxy here may compute it for
  // the first time. There are cases when this is being run unnecessarily, but
  // it is cheap and having the assertion in place is more valuable.
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG);
  Module &M = *C.begin()->getFunction().getParent();
  bool ProxyExists =
      MAMProxy.cachedResultExists<FunctionAnalysisManagerModuleProxy>(M);
  assert(ProxyExists &&
         "The CGSCC pass manager requires that the FAM module proxy is run "
         "on the module prior to entering the CGSCC walk");
  (void)ProxyExists;

  // We just return an empty result. The caller will use the updateFAM interface
  // to correctly register the relevant FunctionAnalysisManager based on the
  // context in which this proxy is run.
  return Result();
}
696 
/// Invalidation hook for the CGSCC-to-FunctionAnalysisManager proxy.
///
/// The proxy result itself is always reported as still valid (returns false
/// in all paths except the non-preserved case below). Instead of invalidating
/// the proxy, this propagates invalidation downward: it walks the functions
/// in \p C and invalidates their cached function analyses as dictated by
/// \p PA, including any deferred outer-analysis invalidations registered
/// through the outer proxy.
bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
    LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
    CGSCCAnalysisManager::Invalidator &Inv) {
  // If literally everything is preserved, we're done.
  if (PA.areAllPreserved())
    return false; // This is still a valid proxy.

  // All updates to preserve valid results are done below, so we don't need to
  // invalidate this proxy.
  //
  // Note that in order to preserve this proxy, a module pass must ensure that
  // the FAM has been completely updated to handle the deletion of functions.
  // Specifically, any FAM-cached results for those functions need to have been
  // forcibly cleared. When preserved, this proxy will only invalidate results
  // cached on functions *still in the module* at the end of the module pass.
  auto PAC = PA.getChecker<FunctionAnalysisManagerCGSCCProxy>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) {
    // Neither the proxy nor the SCC-analysis set was preserved: blanket
    // invalidation of all function analyses in this SCC, and report the proxy
    // itself invalid.
    for (LazyCallGraph::Node &N : C)
      FAM->invalidate(N.getFunction(), PA);

    return false;
  }

  // Directly check if the relevant set is preserved.
  bool AreFunctionAnalysesPreserved =
      PA.allAnalysesInSetPreserved<AllAnalysesOn<Function>>();

  // Now walk all the functions to see if any inner analysis invalidation is
  // necessary.
  for (LazyCallGraph::Node &N : C) {
    Function &F = N.getFunction();
    // Lazily built per-function copy of PA, pruned by deferred invalidations.
    Optional<PreservedAnalyses> FunctionPA;

    // Check to see whether the preserved set needs to be pruned based on
    // SCC-level analysis invalidation that triggers deferred invalidation
    // registered with the outer analysis manager proxy for this function.
    if (auto *OuterProxy =
            FAM->getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F))
      for (const auto &OuterInvalidationPair :
           OuterProxy->getOuterInvalidations()) {
        AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
        const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
        if (Inv.invalidate(OuterAnalysisID, C, PA)) {
          // The outer analysis this function result depends on is going away;
          // force the dependent inner analyses out regardless of what PA says.
          if (!FunctionPA)
            FunctionPA = PA;
          for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
            FunctionPA->abandon(InnerAnalysisID);
        }
      }

    // Check if we needed a custom PA set, and if so we'll need to run the
    // inner invalidation.
    if (FunctionPA) {
      FAM->invalidate(F, *FunctionPA);
      continue;
    }

    // Otherwise we only need to do invalidation if the original PA set didn't
    // preserve all function analyses.
    if (!AreFunctionAnalysesPreserved)
      FAM->invalidate(F, PA);
  }

  // Return false to indicate that this result is still a valid proxy.
  return false;
}
763 
764 } // end namespace llvm
765 
766 /// When a new SCC is created for the graph we first update the
767 /// FunctionAnalysisManager in the Proxy's result.
768 /// As there might be function analysis results cached for the functions now in
769 /// that SCC, two forms of  updates are required.
770 ///
771 /// First, a proxy from the SCC to the FunctionAnalysisManager needs to be
772 /// created so that any subsequent invalidation events to the SCC are
773 /// propagated to the function analysis results cached for functions within it.
774 ///
775 /// Second, if any of the functions within the SCC have analysis results with
776 /// outer analysis dependencies, then those dependencies would point to the
777 /// *wrong* SCC's analysis result. We forcibly invalidate the necessary
778 /// function analyses so that they don't retain stale handles.
779 static void updateNewSCCFunctionAnalyses(LazyCallGraph::SCC &C,
780                                          LazyCallGraph &G,
781                                          CGSCCAnalysisManager &AM,
782                                          FunctionAnalysisManager &FAM) {
783   AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, G).updateFAM(FAM);
784 
785   // Now walk the functions in this SCC and invalidate any function analysis
786   // results that might have outer dependencies on an SCC analysis.
787   for (LazyCallGraph::Node &N : C) {
788     Function &F = N.getFunction();
789 
790     auto *OuterProxy =
791         FAM.getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F);
792     if (!OuterProxy)
793       // No outer analyses were queried, nothing to do.
794       continue;
795 
796     // Forcibly abandon all the inner analyses with dependencies, but
797     // invalidate nothing else.
798     auto PA = PreservedAnalyses::all();
799     for (const auto &OuterInvalidationPair :
800          OuterProxy->getOuterInvalidations()) {
801       const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
802       for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
803         PA.abandon(InnerAnalysisID);
804     }
805 
806     // Now invalidate anything we found.
807     FAM.invalidate(F, PA);
808   }
809 }
810 
/// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
/// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
/// added SCCs.
///
/// The range of new SCCs must be in postorder already. The SCC they were split
/// out of must be provided as \p C. The current node being mutated and
/// triggering updates must be passed as \p N.
///
/// This function returns the SCC containing \p N. This will be either \p C if
/// no new SCCs have been split out, or it will be the new SCC containing \p N.
template <typename SCCRangeT>
static LazyCallGraph::SCC *
incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
                       LazyCallGraph::Node &N, LazyCallGraph::SCC *C,
                       CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR) {
  using SCC = LazyCallGraph::SCC;

  // Nothing was split out: N's SCC is unchanged.
  if (NewSCCRange.empty())
    return C;

  // Add the current SCC to the worklist as its shape has changed.
  UR.CWorklist.insert(C);
  LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist:" << *C
                    << "\n");

  SCC *OldC = C;

  // Update the current SCC. Note that if we have new SCCs, this must actually
  // change the SCC.
  assert(C != &*NewSCCRange.begin() &&
         "Cannot insert new SCCs without changing current SCC!");
  C = &*NewSCCRange.begin();
  assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

  // If we had a cached FAM proxy originally, we will want to create more of
  // them for each SCC that was split off.
  FunctionAnalysisManager *FAM = nullptr;
  if (auto *FAMProxy =
          AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*OldC))
    FAM = &FAMProxy->getManager();

  // We need to propagate an invalidation call to all but the newly current SCC
  // because the outer pass manager won't do that for us after splitting them.
  // FIXME: We should accept a PreservedAnalysis from the CG updater so that if
  // there are preserved analysis we can avoid invalidating them here for
  // split-off SCCs.
  // We know however that this will preserve any FAM proxy so go ahead and mark
  // that.
  auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  AM.invalidate(*OldC, PA);

  // Ensure the now-current SCC's function analyses are updated.
  if (FAM)
    updateNewSCCFunctionAnalyses(*C, G, AM, *FAM);

  // Process the remaining split-off SCCs (everything but the new current one,
  // which NewSCCRange.begin() points at) in reverse postorder, enqueueing each
  // for a later visit.
  for (SCC &NewC : llvm::reverse(llvm::drop_begin(NewSCCRange))) {
    assert(C != &NewC && "No need to re-visit the current SCC!");
    assert(OldC != &NewC && "Already handled the original SCC!");
    UR.CWorklist.insert(&NewC);
    LLVM_DEBUG(dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n");

    // Ensure new SCCs' function analyses are updated.
    if (FAM)
      updateNewSCCFunctionAnalyses(NewC, G, AM, *FAM);

    // Also propagate a normal invalidation to the new SCC as only the current
    // will get one from the pass manager infrastructure.
    AM.invalidate(NewC, PA);
  }
  return C;
}
883 
/// Core implementation shared by the function-pass and CGSCC-pass entry
/// points below.
///
/// Re-scans the body of \p N's function, diffs the call and reference edges
/// found against those recorded in \p G, and applies the resulting insertions,
/// removals, promotions (ref -> call), and demotions (call -> ref) while
/// keeping the analysis manager \p AM and the update result \p UR consistent
/// with the new graph shape. Returns the (possibly different) SCC that now
/// contains \p N. When \p FunctionPass is true, asserts that no *new* call or
/// ref edges were introduced, as function passes are not allowed to do IPO.
static LazyCallGraph::SCC &updateCGAndAnalysisManagerForPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM, bool FunctionPass) {
  using Node = LazyCallGraph::Node;
  using Edge = LazyCallGraph::Edge;
  using SCC = LazyCallGraph::SCC;
  using RefSCC = LazyCallGraph::RefSCC;

  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Node *, 16> RetainedEdges;
  SmallSetVector<Node *, 4> PromotedRefTargets;
  SmallSetVector<Node *, 4> DemotedCallTargets;
  SmallSetVector<Node *, 4> NewCallEdges;
  SmallSetVector<Node *, 4> NewRefEdges;

  // First walk the function and handle all called functions. We do this first
  // because if there is a single call edge, whether there are ref edges is
  // irrelevant.
  for (Instruction &I : instructions(F)) {
    if (auto *CB = dyn_cast<CallBase>(&I)) {
      if (Function *Callee = CB->getCalledFunction()) {
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
          Node *CalleeN = G.lookup(*Callee);
          assert(CalleeN &&
                 "Visited function should already have an associated node");
          Edge *E = N->lookup(*CalleeN);
          assert((E || !FunctionPass) &&
                 "No function transformations should introduce *new* "
                 "call edges! Any new calls should be modeled as "
                 "promoted existing ref edges!");
          bool Inserted = RetainedEdges.insert(CalleeN).second;
          (void)Inserted;
          assert(Inserted && "We should never visit a function twice.");
          if (!E)
            NewCallEdges.insert(CalleeN);
          else if (!E->isCall())
            PromotedRefTargets.insert(CalleeN);
        }
      } else {
        // We can miss devirtualization if an indirect call is created then
        // promoted before updateCGAndAnalysisManagerForPass runs.
        // Track the indirect call site with a weak tracking handle so that a
        // later devirtualization of it can be detected.
        auto *Entry = UR.IndirectVHs.find(CB);
        if (Entry == UR.IndirectVHs.end())
          UR.IndirectVHs.insert({CB, WeakTrackingVH(CB)});
        else if (!Entry->second)
          Entry->second = WeakTrackingVH(CB);
      }
    }
  }

  // Now walk all references.
  for (Instruction &I : instructions(F))
    for (Value *Op : I.operand_values())
      if (auto *OpC = dyn_cast<Constant>(Op))
        if (Visited.insert(OpC).second)
          Worklist.push_back(OpC);

  // Classify a referenced function: retained, newly referenced, or a call
  // edge that the function body no longer supports (demoted to ref).
  auto VisitRef = [&](Function &Referee) {
    Node *RefereeN = G.lookup(Referee);
    assert(RefereeN &&
           "Visited function should already have an associated node");
    Edge *E = N->lookup(*RefereeN);
    assert((E || !FunctionPass) &&
           "No function transformations should introduce *new* ref "
           "edges! Any new ref edges would require IPO which "
           "function passes aren't allowed to do!");
    bool Inserted = RetainedEdges.insert(RefereeN).second;
    (void)Inserted;
    assert(Inserted && "We should never visit a function twice.");
    if (!E)
      NewRefEdges.insert(RefereeN);
    else if (E->isCall())
      DemotedCallTargets.insert(RefereeN);
  };
  LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);

  // Handle new ref edges.
  for (Node *RefTarget : NewRefEdges) {
    SCC &TargetC = *G.lookupSCC(*RefTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    (void)TargetRC;
    // TODO: This only allows trivial edges to be added for now.
#ifdef EXPENSIVE_CHECKS
    assert((RC == &TargetRC ||
           RC->isAncestorOf(TargetRC)) && "New ref edge is not trivial!");
#endif
    RC->insertTrivialRefEdge(N, *RefTarget);
  }

  // Handle new call edges.
  for (Node *CallTarget : NewCallEdges) {
    SCC &TargetC = *G.lookupSCC(*CallTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    (void)TargetRC;
    // TODO: This only allows trivial edges to be added for now.
#ifdef EXPENSIVE_CHECKS
    assert((RC == &TargetRC ||
           RC->isAncestorOf(TargetRC)) && "New call edge is not trivial!");
#endif
    // Add a trivial ref edge to be promoted later on alongside
    // PromotedRefTargets.
    RC->insertTrivialRefEdge(N, *CallTarget);
  }

  // Include synthetic reference edges to known, defined lib functions.
  for (auto *LibFn : G.getLibFunctions())
    // While the list of lib functions doesn't have repeats, don't re-visit
    // anything handled above.
    if (!Visited.count(LibFn))
      VisitRef(*LibFn);

  // First remove all of the edges that are no longer present in this function.
  // The first step makes these edges uniformly ref edges and accumulates them
  // into a separate data structure so removal doesn't invalidate anything.
  SmallVector<Node *, 4> DeadTargets;
  for (Edge &E : *N) {
    if (RetainedEdges.count(&E.getNode()))
      continue;

    SCC &TargetC = *G.lookupSCC(E.getNode());
    RefSCC &TargetRC = TargetC.getOuterRefSCC();
    if (&TargetRC == RC && E.isCall()) {
      if (C != &TargetC) {
        // For separate SCCs this is trivial.
        RC->switchTrivialInternalEdgeToRef(N, E.getNode());
      } else {
        // Now update the call graph.
        C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, E.getNode()),
                                   G, N, C, AM, UR);
      }
    }

    // Now that this is ready for actual removal, put it into our list.
    DeadTargets.push_back(&E.getNode());
  }
  // Remove the easy cases quickly and actually pull them out of our list.
  llvm::erase_if(DeadTargets, [&](Node *TargetN) {
    SCC &TargetC = *G.lookupSCC(*TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // We can't trivially remove internal targets, so skip
    // those.
    if (&TargetRC == RC)
      return false;

    LLVM_DEBUG(dbgs() << "Deleting outgoing edge from '" << N << "' to '"
                      << *TargetN << "'\n");
    RC->removeOutgoingEdge(N, *TargetN);
    return true;
  });

  // Now do a batch removal of the internal ref edges left.
  auto NewRefSCCs = RC->removeInternalRefEdge(N, DeadTargets);
  if (!NewRefSCCs.empty()) {
    // The old RefSCC is dead, mark it as such.
    UR.InvalidatedRefSCCs.insert(RC);

    // Note that we don't bother to invalidate analyses as ref-edge
    // connectivity is not really observable in any way and is intended
    // exclusively to be used for ordering of transforms rather than for
    // analysis conclusions.

    // Update RC to the "bottom".
    assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
    RC = &C->getOuterRefSCC();
    assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");

    // The RC worklist is in reverse postorder, so we enqueue the new ones in
    // RPO except for the one which contains the source node as that is the
    // "bottom" we will continue processing in the bottom-up walk.
    assert(NewRefSCCs.front() == RC &&
           "New current RefSCC not first in the returned list!");
    for (RefSCC *NewRC : llvm::reverse(llvm::drop_begin(NewRefSCCs))) {
      assert(NewRC != RC && "Should not encounter the current RefSCC further "
                            "in the postorder list of new RefSCCs.");
      UR.RCWorklist.insert(NewRC);
      LLVM_DEBUG(dbgs() << "Enqueuing a new RefSCC in the update worklist: "
                        << *NewRC << "\n");
    }
  }

  // Next demote all the call edges that are now ref edges. This helps make
  // the SCCs small which should minimize the work below as we don't want to
  // form cycles that this would break.
  for (Node *RefTarget : DemotedCallTargets) {
    SCC &TargetC = *G.lookupSCC(*RefTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
#ifdef EXPENSIVE_CHECKS
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
#endif
      RC->switchOutgoingEdgeToRef(N, *RefTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
                        << "' to '" << *RefTarget << "'\n");
      continue;
    }

    // We are switching an internal call edge to a ref edge. This may split up
    // some SCCs.
    if (C != &TargetC) {
      // For separate SCCs this is trivial.
      RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
      continue;
    }

    // Now update the call graph.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
                               C, AM, UR);
  }

  // We added a ref edge earlier for new call edges, promote those to call edges
  // alongside PromotedRefTargets.
  for (Node *E : NewCallEdges)
    PromotedRefTargets.insert(E);

  // Now promote ref edges into call edges.
  for (Node *CallTarget : PromotedRefTargets) {
    SCC &TargetC = *G.lookupSCC(*CallTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
#ifdef EXPENSIVE_CHECKS
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
#endif
      RC->switchOutgoingEdgeToCall(N, *CallTarget);
      LLVM_DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
                        << "' to '" << *CallTarget << "'\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Switch an internal ref edge to a call edge from '"
                      << N << "' to '" << *CallTarget << "'\n");

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
    // before the current one in the post-order sequence.
    bool HasFunctionAnalysisProxy = false;
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    bool FormedCycle = RC->switchInternalEdgeToCall(
        N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
          for (SCC *MergedC : MergedSCCs) {
            assert(MergedC != &TargetC && "Cannot merge away the target SCC!");

            HasFunctionAnalysisProxy |=
                AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
                    *MergedC) != nullptr;

            // Mark that this SCC will no longer be valid.
            UR.InvalidatedSCCs.insert(MergedC);

            // FIXME: We should really do a 'clear' here to forcibly release
            // memory, but we don't have a good way of doing that and
            // preserving the function analyses.
            auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
            PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
            AM.invalidate(*MergedC, PA);
          }
        });

    // If we formed a cycle by creating this call, we need to update more data
    // structures.
    if (FormedCycle) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // If one of the invalidated SCCs had a cached proxy to a function
      // analysis manager, we need to create a proxy in the new current SCC as
      // the invalidated SCCs had their functions moved.
      if (HasFunctionAnalysisProxy)
        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G).updateFAM(FAM);

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle. However, we have taken care to
      // update the proxies so it remains valid.
      auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
      PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
      AM.invalidate(*C, PA);
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    // If we have actually moved an SCC to be topologically "below" the current
    // one due to merging, we will need to revisit the current SCC after
    // visiting those moved SCCs.
    //
    // It is critical that we *do not* revisit the current SCC unless we
    // actually move SCCs in the process of merging because otherwise we may
    // form a cycle where an SCC is split apart, merged, split, merged and so
    // on infinitely.
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      LLVM_DEBUG(dbgs() << "Enqueuing the existing SCC in the worklist: " << *C
                        << "\n");
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : llvm::reverse(make_range(RC->begin() + InitialSCCIndex,
                                                  RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        LLVM_DEBUG(dbgs() << "Enqueuing a newly earlier in post-order SCC: "
                          << MovedC << "\n");
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}
1218 
1219 LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
1220     LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
1221     CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1222     FunctionAnalysisManager &FAM) {
1223   return updateCGAndAnalysisManagerForPass(G, InitialC, N, AM, UR, FAM,
1224                                            /* FunctionPass */ true);
1225 }
1226 LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForCGSCCPass(
1227     LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
1228     CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1229     FunctionAnalysisManager &FAM) {
1230   return updateCGAndAnalysisManagerForPass(G, InitialC, N, AM, UR, FAM,
1231                                            /* FunctionPass */ false);
1232 }
1233