//===- CGSCCPassManager.cpp - Managing & running CGSCC passes -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InstIterator.h"

using namespace llvm;

// Explicit template instantiations and specialization definitions for core
// template typedefs.
namespace llvm {

// Explicit instantiations for the core proxy templates.
template class AllAnalysesOn<LazyCallGraph::SCC>;
template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                           LazyCallGraph &, CGSCCUpdateResult &>;
template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
                                         LazyCallGraph::SCC, LazyCallGraph &>;
template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
                                         LazyCallGraph::SCC, LazyCallGraph &>;
template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;

/// Explicitly specialize the pass manager run method to handle call graph
/// updates.
template <>
PreservedAnalyses
PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
            CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
                                      CGSCCAnalysisManager &AM,
                                      LazyCallGraph &G, CGSCCUpdateResult &UR) {
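  // Start with everything preserved and intersect in the set each pass
  // actually preserves as we go.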
  PreservedAnalyses PA = PreservedAnalyses::all();

  if (DebugLogging)
    dbgs() << "Starting CGSCC pass manager run.\n";

  // The SCC may be refined while we are running passes over it, so set up
  // a pointer that we can update.
  LazyCallGraph::SCC *C = &InitialC;

  for (auto &Pass : Passes) {
    if (DebugLogging)
      dbgs() << "Running pass: " << Pass->name() << " on " << *C << "\n";

    PreservedAnalyses PassPA = Pass->run(*C, AM, G, UR);

    // If the pass refined or replaced the current SCC, chase the updated SCC
    // so that the remaining passes and the invalidation below operate on it.
    C = UR.UpdatedC ? UR.UpdatedC : C;

    // Check that we didn't miss any update scenario.
    assert(!UR.InvalidatedSCCs.count(C) && "Processing an invalid SCC!");
    assert(C->begin() != C->end() && "Cannot have an empty SCC!");

    // Update the analysis manager as each pass runs and potentially
    // invalidates analyses.
    AM.invalidate(*C, PassPA);

    // Finally, we intersect the final preserved analyses to compute the
    // aggregate preserved set for this pass manager.
    PA.intersect(std::move(PassPA));

    // FIXME: Historically, the pass managers all called the LLVM context's
    // yield function here. We don't have a generic way to acquire the
    // context and it isn't yet clear what the right pattern is for yielding
    // in the new pass manager so it is currently omitted.
    // ...getContext().yield();
  }

  // Invalidation was handled after each pass in the above loop for the
  // current SCC. Therefore, the remaining analysis results in the
  // AnalysisManager are preserved. We mark this with a set so that we don't
  // need to inspect each one individually.
  PA.preserve<AllAnalysesOn<LazyCallGraph::SCC>>();

  if (DebugLogging)
    dbgs() << "Finished CGSCC pass manager run.\n";

  return PA;
}

bool CGSCCAnalysisManagerModuleProxy::Result::invalidate(
    Module &M, const PreservedAnalyses &PA,
    ModuleAnalysisManager::Invalidator &Inv) {
  // If this proxy or the call graph itself is going to be invalidated, we
  // also need to clear everything cached for that graph in the SCC analysis
  // manager.
  //
  // We also directly invalidate the FAM's module proxy if necessary, and if
  // that proxy isn't preserved we can't preserve this proxy either. We rely
  // on it to handle module -> function analysis invalidation in the face of
  // structural changes, so if it's unavailable we conservatively clear the
  // entire SCC layer as well rather than trying to do invalidation ourselves.
  if (!PA.preserved<CGSCCAnalysisManagerModuleProxy>() ||
      Inv.invalidate<LazyCallGraphAnalysis>(M, PA) ||
      Inv.invalidate<FunctionAnalysisManagerModuleProxy>(M, PA)) {
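    // Clear every cached SCC analysis result; any of them may reference the
    // call graph that is about to be recomputed.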
    InnerAM->clear();

    // The proxy itself should also be marked as invalid so that it is rebuilt
    // around the new call graph. This isn't strictly necessary because we
    // already cleared the inner analysis manager above, but it keeps the
    // proxy's state consistent.
    return true;
  }

  // Ok, we have a graph, so we can propagate the invalidation down into it.
  for (auto &RC : G->postorder_ref_sccs())
    for (auto &C : RC)
      InnerAM->invalidate(C, PA);

  // Return false to indicate that this result is still a valid proxy.
  return false;
}

template <>
CGSCCAnalysisManagerModuleProxy::Result
CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM) {
  // Force the Function analysis manager to also be available so that it can
  // be accessed in an SCC analysis and proxied onward to function passes.
  // FIXME: It is pretty awkward to just drop the result here and assert that
  // we can find it again later.
  (void)AM.getResult<FunctionAnalysisManagerModuleProxy>(M);

  return Result(*InnerAM, AM.getResult<LazyCallGraphAnalysis>(M));
}

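// The static key whose address identifies this analysis to the analysis
// manager.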
AnalysisKey FunctionAnalysisManagerCGSCCProxy::Key;

FunctionAnalysisManagerCGSCCProxy::Result
FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C,
                                       CGSCCAnalysisManager &AM,
                                       LazyCallGraph &CG) {
  // Collect the FunctionAnalysisManager from the Module layer and use that to
  // build the proxy result.
  //
  // This allows us to rely on the FunctionAnalysisManagerModuleProxy to
  // invalidate the function analyses.
  auto &MAM = AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG).getManager();
  Module &M = *C.begin()->getFunction().getParent();
  auto *FAMProxy = MAM.getCachedResult<FunctionAnalysisManagerModuleProxy>(M);
  assert(FAMProxy && "The CGSCC pass manager requires that the FAM module "
                     "proxy is run on the module prior to entering the CGSCC "
                     "walk.");

  // Note that we special-case invalidation handling of this proxy in the CGSCC
  // analysis manager's Module proxy. This avoids the need to do anything
  // special here to recompute all of this if ever the FAM's module proxy goes
  // away.
  return Result(FAMProxy->getManager());
}

bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
    LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
    CGSCCAnalysisManager::Invalidator &Inv) {
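  // Forward the invalidation event to each function in the SCC so that stale
  // function analyses are cleared.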
  for (LazyCallGraph::Node &N : C)
    FAM->invalidate(N.getFunction(), PA);

  // This proxy doesn't need to handle invalidation itself. Instead, the
  // module-level CGSCC proxy handles it above by ensuring that if the
  // module-level FAM proxy becomes invalid the entire SCC layer, which
  // includes this proxy, is cleared.
  return false;
}

} // End llvm namespace

namespace {
/// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
/// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
/// added SCCs.
///
/// The range of new SCCs must be in postorder already. The SCC they were split
/// out of must be provided as \p C. The current node being mutated and
/// triggering updates must be passed as \p N.
///
/// This function returns the SCC containing \p N. This will be either \p C if
/// no new SCCs have been split out, or it will be the new SCC containing \p N.
template <typename SCCRangeT>
LazyCallGraph::SCC *
incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
                       LazyCallGraph::Node &N, LazyCallGraph::SCC *C,
                       CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
                       bool DebugLogging = false) {
  typedef LazyCallGraph::SCC SCC;

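  // If no new SCCs were split out, the current SCC is unchanged.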
  if (NewSCCRange.begin() == NewSCCRange.end())
    return C;

  // The current SCC has changed its shape, so invalidate its analyses and
  // add it to the worklist.
  AM.invalidate(*C, PreservedAnalyses::none());
  UR.CWorklist.insert(C);
  if (DebugLogging)
    dbgs() << "Enqueuing the existing SCC in the worklist: " << *C << "\n";

  SCC *OldC = C;
  (void)OldC;

  // Update the current SCC. Note that if we have new SCCs, this must actually
  // change the SCC.
  assert(C != &*NewSCCRange.begin() &&
         "Cannot insert new SCCs without changing current SCC!");
  C = &*NewSCCRange.begin();
  assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

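  // Enqueue the remaining new SCCs in reverse order: the worklist is popped
  // from the back, so this yields them in postorder.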
  for (SCC &NewC :
       reverse(make_range(std::next(NewSCCRange.begin()), NewSCCRange.end()))) {
    assert(C != &NewC && "No need to re-visit the current SCC!");
    assert(OldC != &NewC && "Already handled the original SCC!");
    UR.CWorklist.insert(&NewC);
    if (DebugLogging)
      dbgs() << "Enqueuing a newly formed SCC: " << NewC << "\n";
  }
  return C;
}
} // end anonymous namespace

LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, bool DebugLogging) {
  typedef LazyCallGraph::Node Node;
  typedef LazyCallGraph::Edge Edge;
  typedef LazyCallGraph::SCC SCC;
  typedef LazyCallGraph::RefSCC RefSCC;

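  // Both the current SCC and RefSCC may be refined while we apply updates
  // below, so track them through pointers we can adjust.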
  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Function *, 16> RetainedEdges;
  SmallSetVector<Function *, 4> PromotedRefTargets;
  SmallSetVector<Function *, 4> DemotedCallTargets;

  // First walk the function and handle all called functions. We do this first
  // because if there is a single call edge, whether there are ref edges is
  // irrelevant.
  for (Instruction &I : instructions(F))
    if (auto CS = CallSite(&I))
      if (Function *Callee = CS.getCalledFunction())
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
          const Edge *E = N.lookup(*Callee);
          // FIXME: We should really handle adding new calls. While it will
          // make downstream usage more complex, there is no fundamental
          // limitation and it will allow passes within the CGSCC to be a bit
          // more flexible in what transforms they can do. Until then, we
          // verify that new calls haven't been introduced.
          assert(E && "No function transformations should introduce *new* "
                      "call edges! Any new calls should be modeled as "
                      "promoted existing ref edges!");
          RetainedEdges.insert(Callee);
          if (!E->isCall())
            PromotedRefTargets.insert(Callee);
        }

  // Now walk all references.
  for (Instruction &I : instructions(F))
    for (Value *Op : I.operand_values())
      if (Constant *C = dyn_cast<Constant>(Op))
        if (Visited.insert(C).second)
          Worklist.push_back(C);

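  // Visit the collected constants transitively to discover every function
  // this function references.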
  LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Referee) {
    const Edge *E = N.lookup(Referee);
    // FIXME: Similarly to new calls, we also currently preclude
    // introducing new references. See above for details.
    assert(E && "No function transformations should introduce *new* ref "
                "edges! Any new ref edges would require IPO which "
                "function passes aren't allowed to do!");
    RetainedEdges.insert(&Referee);
    if (E->isCall())
      DemotedCallTargets.insert(&Referee);
  });

  // First remove all of the edges that are no longer present in this function.
  // We have to build a list of dead targets first and then remove them as the
  // data structures will all be invalidated by removing them.
  SmallVector<PointerIntPair<Node *, 1, Edge::Kind>, 4> DeadTargets;
  for (Edge &E : N)
    if (!RetainedEdges.count(&E.getFunction()))
      DeadTargets.push_back({E.getNode(), E.getKind()});
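  // Dead edges to other RefSCCs are simple outgoing-edge removals; dead edges
  // within this RefSCC may split SCCs and RefSCCs and need careful updates.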
  for (auto DeadTarget : DeadTargets) {
    Node &TargetN = *DeadTarget.getPointer();
    bool IsCall = DeadTarget.getInt() == Edge::Call;
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    if (&TargetRC != RC) {
      RC->removeOutgoingEdge(N, TargetN);
      if (DebugLogging)
        dbgs() << "Deleting outgoing edge from '" << N << "' to '" << TargetN
               << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Deleting internal " << (IsCall ? "call" : "ref")
             << " edge from '" << N << "' to '" << TargetN << "'\n";

    if (IsCall)
      C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G, N,
                                 C, AM, UR, DebugLogging);

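    // Removing an internal ref edge may split this RefSCC into several new
    // RefSCCs that all need to be enqueued for the bottom-up walk.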
    auto NewRefSCCs = RC->removeInternalRefEdge(N, TargetN);
    if (!NewRefSCCs.empty()) {
      // Note that we don't bother to invalidate analyses as ref-edge
      // connectivity is not really observable in any way and is intended
      // exclusively to be used for ordering of transforms rather than for
      // analysis conclusions.

      // The RC worklist is in reverse postorder, so we first enqueue the
      // current RefSCC as it will remain the parent of all split RefSCCs, then
      // we enqueue the new ones in RPO except for the one which contains the
      // source node as that is the "bottom" we will continue processing in the
      // bottom-up walk.
      UR.RCWorklist.insert(RC);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing RefSCC in the update worklist: "
               << *RC << "\n";
      // Update the RC to the "bottom".
      assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
      RC = &C->getOuterRefSCC();
      assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");
      for (RefSCC *NewRC : reverse(NewRefSCCs))
        if (NewRC != RC) {
          UR.RCWorklist.insert(NewRC);
          if (DebugLogging)
            dbgs() << "Enqueuing a new RefSCC in the update worklist: "
                   << *NewRC << "\n";
        }
    }
  }

  // Next demote all the call edges that are now ref edges. Doing demotions
  // first keeps the SCCs as small as possible, minimizing the work below; we
  // don't want the promotions to form cycles that these demotions would then
  // have to break apart.
  for (Function *RefTarget : DemotedCallTargets) {
    Node &TargetN = *G.lookup(*RefTarget);
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToRef(N, TargetN);
      if (DebugLogging)
        dbgs() << "Switch outgoing call edge to a ref edge from '" << N
               << "' to '" << TargetN << "'\n";
      continue;
    }

    // Otherwise we are switching an internal call edge to a ref edge. This
    // may split up some SCCs.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G, N, C,
                               AM, UR, DebugLogging);
  }

  // Now promote ref edges into call edges.
  for (Function *CallTarget : PromotedRefTargets) {
    Node &TargetN = *G.lookup(*CallTarget);
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToCall(N, TargetN);
      if (DebugLogging)
        dbgs() << "Switch outgoing ref edge to a call edge from '" << N
               << "' to '" << TargetN << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Switch an internal ref edge to a call edge from '" << N
             << "' to '" << TargetN << "'\n";

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
    // before the current one in the post-order sequence.
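    // Capture the SCC's current position in the postorder sequence first, as
    // the merge below can move SCCs around within the RefSCC.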
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, TargetN);
    if (!InvalidatedSCCs.empty()) {
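      // The switch merged one or more SCCs into the target SCC, which now
      // contains N; make it the current SCC.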
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle.
      AM.invalidate(*C, PreservedAnalyses::none());

      for (SCC *InvalidatedC : InvalidatedSCCs) {
        assert(InvalidatedC != C && "Cannot invalidate the current SCC!");
        UR.InvalidatedSCCs.insert(InvalidatedC);

        // Also clear any cached analyses for the SCCs that are dead. This
        // isn't really necessary for correctness but can release memory.
        AM.clear(*InvalidatedC);
      }
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing SCC in the worklist: " << *C << "\n";
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : reverse(make_range(RC->begin() + InitialSCCIndex,
                                            RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        if (DebugLogging)
          dbgs() << "Enqueuing an SCC that is newly earlier in post-order: "
                 << MovedC << "\n";
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}