//===- CGSCCPassManager.cpp - Managing & running CGSCC passes ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InstIterator.h"

using namespace llvm;

// Explicit template instantiations and specialization definitions for core
// template typedefs.
namespace llvm {

// Explicit instantiations for the core proxy templates.
template class AllAnalysesOn<LazyCallGraph::SCC>;
template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
                           LazyCallGraph &, CGSCCUpdateResult &>;
template class InnerAnalysisManagerProxy<CGSCCAnalysisManager, Module>;
template class OuterAnalysisManagerProxy<ModuleAnalysisManager,
                                         LazyCallGraph::SCC, LazyCallGraph &>;
template class OuterAnalysisManagerProxy<CGSCCAnalysisManager, Function>;

/// Explicitly specialize the pass manager run method to handle call graph
/// updates.
template <>
PreservedAnalyses
PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
            CGSCCUpdateResult &>::run(LazyCallGraph::SCC &InitialC,
                                      CGSCCAnalysisManager &AM,
                                      LazyCallGraph &G, CGSCCUpdateResult &UR) {
  PreservedAnalyses PA = PreservedAnalyses::all();

  if (DebugLogging)
    dbgs() << "Starting CGSCC pass manager run.\n";

  // The SCC may be refined while we are running passes over it, so set up
  // a pointer that we can update.
  LazyCallGraph::SCC *C = &InitialC;

  for (auto &Pass : Passes) {
    if (DebugLogging)
      dbgs() << "Running pass: " << Pass->name() << " on " << *C << "\n";

    PreservedAnalyses PassPA = Pass->run(*C, AM, G, UR);

    // Update the SCC if necessary.
    C = UR.UpdatedC ? UR.UpdatedC : C;

    // Check that we didn't miss any update scenario.
    assert(!UR.InvalidatedSCCs.count(C) && "Processing an invalid SCC!");
    assert(C->begin() != C->end() && "Cannot have an empty SCC!");

    // Update the analysis manager as each pass runs and potentially
    // invalidates analyses.
    AM.invalidate(*C, PassPA);

    // Finally, we intersect the final preserved analyses to compute the
    // aggregate preserved set for this pass manager.
    PA.intersect(std::move(PassPA));

    // FIXME: Historically, the pass managers all called the LLVM context's
    // yield function here. We don't have a generic way to acquire the
    // context and it isn't yet clear what the right pattern is for yielding
    // in the new pass manager so it is currently omitted.
    // ...getContext().yield();
  }

  // Invalidation was handled after each pass in the above loop for the
  // current SCC. Therefore, the remaining analysis results in the
  // AnalysisManager are preserved. We mark this with a set so that we don't
  // need to inspect each one individually.
  PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();

  if (DebugLogging)
    dbgs() << "Finished CGSCC pass manager run.\n";

  return PA;
}
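
// Usage note: this specialized run method is what executes when a CGSCC
// pipeline is nested inside a module pipeline. A minimal sketch of how that
// nesting is typically set up, assuming the analysis managers have already
// been created and cross-registered (MySCCPass is a hypothetical pass; the
// adaptor is the one declared in CGSCCPassManager.h):
//
//   CGSCCPassManager CGPM;
//   CGPM.addPass(MySCCPass());
//   ModulePassManager MPM;
//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
//   MPM.run(M, MAM);
//
// The adaptor obtains the LazyCallGraph from the module analysis manager,
// walks its RefSCCs and SCCs in post-order, and invokes this run method once
// per SCC, threading a CGSCCUpdateResult through so that graph mutations made
// by passes are observed by the outer walk.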

bool CGSCCAnalysisManagerModuleProxy::Result::invalidate(
    Module &M, const PreservedAnalyses &PA,
    ModuleAnalysisManager::Invalidator &Inv) {
  // If literally everything is preserved, we're done.
  if (PA.areAllPreserved())
    return false; // This is still a valid proxy.

  // If this proxy or the call graph is going to be invalidated, we also need
  // to clear all the keys coming from that analysis.
  //
  // We also directly invalidate the FAM's module proxy if necessary, and if
  // that proxy isn't preserved we can't preserve this proxy either. We rely on
  // it to handle module -> function analysis invalidation in the face of
  // structural changes and so if it's unavailable we conservatively clear the
  // entire SCC layer as well rather than trying to do invalidation ourselves.
  auto PAC = PA.getChecker<CGSCCAnalysisManagerModuleProxy>();
  if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Module>>()) ||
      Inv.invalidate<LazyCallGraphAnalysis>(M, PA) ||
      Inv.invalidate<FunctionAnalysisManagerModuleProxy>(M, PA)) {
    InnerAM->clear();

    // And the proxy itself should be marked as invalid so that we can observe
    // the new call graph. This isn't strictly necessary because we cheat
    // above, but is still useful.
    return true;
  }

  // Directly check if the relevant set is preserved so we can short circuit
  // invalidating SCCs below.
  bool AreSCCAnalysesPreserved =
      PA.allAnalysesInSetPreserved<AllAnalysesOn<LazyCallGraph::SCC>>();

  // Ok, we have a graph, so we can propagate the invalidation down into it.
  G->buildRefSCCs();
  for (auto &RC : G->postorder_ref_sccs())
    for (auto &C : RC) {
      Optional<PreservedAnalyses> InnerPA;

      // Check to see whether the preserved set needs to be adjusted based on
      // module-level analysis invalidation triggering deferred invalidation
      // for this SCC.
      if (auto *OuterProxy =
              InnerAM->getCachedResult<ModuleAnalysisManagerCGSCCProxy>(C))
        for (const auto &OuterInvalidationPair :
             OuterProxy->getOuterInvalidations()) {
          AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
          const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
          if (Inv.invalidate(OuterAnalysisID, M, PA)) {
            if (!InnerPA)
              InnerPA = PA;
            for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
              InnerPA->abandon(InnerAnalysisID);
          }
        }

      // Check if we needed a custom PA set. If so we'll need to run the inner
      // invalidation.
      if (InnerPA) {
        InnerAM->invalidate(C, *InnerPA);
        continue;
      }

      // Otherwise we only need to do invalidation if the original PA set
      // didn't preserve all SCC analyses.
      if (!AreSCCAnalysesPreserved)
        InnerAM->invalidate(C, PA);
    }

  // Return false to indicate that this result is still a valid proxy.
  return false;
}
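
// Note on the deferred-invalidation loop above: the outer-invalidation pairs
// it consumes are whatever an SCC analysis recorded on its
// ModuleAnalysisManagerCGSCCProxy result. A rough sketch of how such a
// dependency is typically registered from inside an SCC analysis (the
// analysis names here are hypothetical, and this assumes the outer proxy
// result's registerOuterAnalysisInvalidation API):
//
//   auto &OuterProxy = AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG);
//   OuterProxy.registerOuterAnalysisInvalidation<SomeModuleAnalysis,
//                                                MySCCAnalysis>();
//
// With that in place, invalidating SomeModuleAnalysis at the module layer
// causes MySCCAnalysis to be abandoned for the relevant SCCs in the loop
// above.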

template <>
CGSCCAnalysisManagerModuleProxy::Result
CGSCCAnalysisManagerModuleProxy::run(Module &M, ModuleAnalysisManager &AM) {
  // Force the Function analysis manager to also be available so that it can
  // be accessed in an SCC analysis and proxied onward to function passes.
  // FIXME: It is pretty awkward to just drop the result here and assert that
  // we can find it again later.
  (void)AM.getResult<FunctionAnalysisManagerModuleProxy>(M);

  return Result(*InnerAM, AM.getResult<LazyCallGraphAnalysis>(M));
}

AnalysisKey FunctionAnalysisManagerCGSCCProxy::Key;

FunctionAnalysisManagerCGSCCProxy::Result
FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C,
                                       CGSCCAnalysisManager &AM,
                                       LazyCallGraph &CG) {
  // Collect the FunctionAnalysisManager from the Module layer and use that to
  // build the proxy result.
  //
  // This allows us to rely on the FunctionAnalysisManagerModuleProxy to
  // invalidate the function analyses.
  auto &MAM = AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG).getManager();
  Module &M = *C.begin()->getFunction().getParent();
  auto *FAMProxy = MAM.getCachedResult<FunctionAnalysisManagerModuleProxy>(M);
  assert(FAMProxy && "The CGSCC pass manager requires that the FAM module "
                     "proxy is run on the module prior to entering the CGSCC "
                     "walk.");

  // Note that we special-case invalidation handling of this proxy in the CGSCC
  // analysis manager's Module proxy. This avoids the need to do anything
  // special here to recompute all of this if ever the FAM's module proxy goes
  // away.
  return Result(FAMProxy->getManager());
}

bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
    LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
    CGSCCAnalysisManager::Invalidator &Inv) {
  for (LazyCallGraph::Node &N : C)
    FAM->invalidate(N.getFunction(), PA);

  // This proxy doesn't need to handle invalidation itself. Instead, the
  // module-level CGSCC proxy handles it above by ensuring that if the
  // module-level FAM proxy becomes invalid the entire SCC layer, which
  // includes this proxy, is cleared.
  return false;
}

} // End llvm namespace
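
// Usage note: the function analysis manager proxied above is how a CGSCC pass
// reaches function-level analyses for the functions in its SCC. A minimal
// sketch (SomeFunctionAnalysis is a hypothetical function analysis):
//
//   FunctionAnalysisManager &FAM =
//       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
//   for (LazyCallGraph::Node &N : C) {
//     auto &AR = FAM.getResult<SomeFunctionAnalysis>(N.getFunction());
//     (void)AR;
//   }
//
// Because the proxy simply forwards the module-level FAM, results computed
// this way are cached and invalidated through the same machinery as results
// computed by function passes.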

namespace {
/// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
/// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
/// added SCCs.
///
/// The range of new SCCs must be in postorder already. The SCC they were split
/// out of must be provided as \p C. The current node being mutated and
/// triggering updates must be passed as \p N.
///
/// This function returns the SCC containing \p N. This will be either \p C if
/// no new SCCs have been split out, or it will be the new SCC containing \p N.
template <typename SCCRangeT>
LazyCallGraph::SCC *
incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
                       LazyCallGraph::Node &N, LazyCallGraph::SCC *C,
                       CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
                       bool DebugLogging = false) {
  typedef LazyCallGraph::SCC SCC;

  if (NewSCCRange.begin() == NewSCCRange.end())
    return C;

  // Add the current SCC to the worklist as its shape has changed.
  UR.CWorklist.insert(C);
  if (DebugLogging)
    dbgs() << "Enqueuing the existing SCC in the worklist:" << *C << "\n";

  SCC *OldC = C;
  (void)OldC;

  // Update the current SCC. Note that if we have new SCCs, this must actually
  // change the SCC.
  assert(C != &*NewSCCRange.begin() &&
         "Cannot insert new SCCs without changing current SCC!");
  C = &*NewSCCRange.begin();
  assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

  for (SCC &NewC :
       reverse(make_range(std::next(NewSCCRange.begin()), NewSCCRange.end()))) {
    assert(C != &NewC && "No need to re-visit the current SCC!");
    assert(OldC != &NewC && "Already handled the original SCC!");
    UR.CWorklist.insert(&NewC);
    if (DebugLogging)
      dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n";
  }
  return C;
}
}

LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
    LazyCallGraph &G, LazyCallGraph::SCC &InitialC, LazyCallGraph::Node &N,
    CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, bool DebugLogging) {
  typedef LazyCallGraph::Node Node;
  typedef LazyCallGraph::Edge Edge;
  typedef LazyCallGraph::SCC SCC;
  typedef LazyCallGraph::RefSCC RefSCC;

  RefSCC &InitialRC = InitialC.getOuterRefSCC();
  SCC *C = &InitialC;
  RefSCC *RC = &InitialRC;
  Function &F = N.getFunction();

  // Walk the function body and build up the set of retained, promoted, and
  // demoted edges.
  SmallVector<Constant *, 16> Worklist;
  SmallPtrSet<Constant *, 16> Visited;
  SmallPtrSet<Node *, 16> RetainedEdges;
  SmallSetVector<Node *, 4> PromotedRefTargets;
  SmallSetVector<Node *, 4> DemotedCallTargets;

  // First walk the function and handle all called functions. We do this first
  // because if there is a single call edge, whether there are ref edges is
  // irrelevant.
  for (Instruction &I : instructions(F))
    if (auto CS = CallSite(&I))
      if (Function *Callee = CS.getCalledFunction())
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
          Node &CalleeN = *G.lookup(*Callee);
          Edge *E = N->lookup(CalleeN);
          // FIXME: We should really handle adding new calls. While it will
          // make downstream usage more complex, there is no fundamental
          // limitation and it will allow passes within the CGSCC to be a bit
          // more flexible in what transforms they can do. Until then, we
          // verify that new calls haven't been introduced.
          assert(E && "No function transformations should introduce *new* "
                      "call edges! Any new calls should be modeled as "
                      "promoted existing ref edges!");
          RetainedEdges.insert(&CalleeN);
          if (!E->isCall())
            PromotedRefTargets.insert(&CalleeN);
        }

  // Now walk all references.
  for (Instruction &I : instructions(F))
    for (Value *Op : I.operand_values())
      if (Constant *C = dyn_cast<Constant>(Op))
        if (Visited.insert(C).second)
          Worklist.push_back(C);

  LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Referee) {
    Node &RefereeN = *G.lookup(Referee);
    Edge *E = N->lookup(RefereeN);
    // FIXME: Similarly to new calls, we also currently preclude
    // introducing new references. See above for details.
    assert(E && "No function transformations should introduce *new* ref "
                "edges! Any new ref edges would require IPO which "
                "function passes aren't allowed to do!");
    RetainedEdges.insert(&RefereeN);
    if (E->isCall())
      DemotedCallTargets.insert(&RefereeN);
  });
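
  // A concrete way to read the classification above (explanatory note): if
  // the pass turned a direct call to some function @f into a mere use of its
  // address, the pre-existing call edge from N to f is now only a reference
  // and lands in DemotedCallTargets; conversely, if an indirect call through a
  // function pointer that already appeared as a reference was resolved to a
  // direct call to @g, the pre-existing ref edge from N to g lands in
  // PromotedRefTargets. Edges to functions no longer mentioned at all are
  // absent from RetainedEdges and are removed below.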

  // First remove all of the edges that are no longer present in this function.
  // We have to build a list of dead targets first and then remove them as the
  // data structures will all be invalidated by removing them.
  SmallVector<PointerIntPair<Node *, 1, Edge::Kind>, 4> DeadTargets;
  for (Edge &E : *N)
    if (!RetainedEdges.count(&E.getNode()))
      DeadTargets.push_back({&E.getNode(), E.getKind()});
  for (auto DeadTarget : DeadTargets) {
    Node &TargetN = *DeadTarget.getPointer();
    bool IsCall = DeadTarget.getInt() == Edge::Call;
    SCC &TargetC = *G.lookupSCC(TargetN);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    if (&TargetRC != RC) {
      RC->removeOutgoingEdge(N, TargetN);
      if (DebugLogging)
        dbgs() << "Deleting outgoing edge from '" << N << "' to '" << TargetN
               << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Deleting internal " << (IsCall ? "call" : "ref")
             << " edge from '" << N << "' to '" << TargetN << "'\n";

    if (IsCall) {
      if (C != &TargetC) {
        // For separate SCCs this is trivial.
        RC->switchTrivialInternalEdgeToRef(N, TargetN);
      } else {
        // Otherwise we may end up re-structuring the call graph. First,
        // invalidate any SCC analyses. We have to do this before we split
        // functions into new SCCs and lose track of where their analyses are
        // cached.
        // FIXME: We should accept a more precise preserved set here. For
        // example, it might be possible to preserve some function analyses
        // even as the SCC structure is changed.
        AM.invalidate(*C, PreservedAnalyses::none());
        // Now update the call graph.
        C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G,
                                   N, C, AM, UR, DebugLogging);
      }
    }

    auto NewRefSCCs = RC->removeInternalRefEdge(N, TargetN);
    if (!NewRefSCCs.empty()) {
      // Note that we don't bother to invalidate analyses as ref-edge
      // connectivity is not really observable in any way and is intended
      // exclusively to be used for ordering of transforms rather than for
      // analysis conclusions.

      // The RC worklist is in reverse postorder, so we first enqueue the
      // current RefSCC as it will remain the parent of all split RefSCCs, then
      // we enqueue the new ones in RPO except for the one which contains the
      // source node as that is the "bottom" we will continue processing in the
      // bottom-up walk.
      UR.RCWorklist.insert(RC);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing RefSCC in the update worklist: "
               << *RC << "\n";
      // Update the RC to the "bottom".
      assert(G.lookupSCC(N) == C && "Changed the SCC when splitting RefSCCs!");
      RC = &C->getOuterRefSCC();
      assert(G.lookupRefSCC(N) == RC && "Failed to update current RefSCC!");
      assert(NewRefSCCs.front() == RC &&
             "New current RefSCC not first in the returned list!");
      for (RefSCC *NewRC : reverse(
               make_range(std::next(NewRefSCCs.begin()), NewRefSCCs.end()))) {
        assert(NewRC != RC && "Should not encounter the current RefSCC further "
                              "in the postorder list of new RefSCCs.");
        UR.RCWorklist.insert(NewRC);
        if (DebugLogging)
          dbgs() << "Enqueuing a new RefSCC in the update worklist: " << *NewRC
                 << "\n";
      }
    }
  }

  // Next demote all the call edges that are now ref edges. This helps make
  // the SCCs small which should minimize the work below as we don't want to
  // form cycles that this would break.
  for (Node *RefTarget : DemotedCallTargets) {
    SCC &TargetC = *G.lookupSCC(*RefTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToRef(N, *RefTarget);
      if (DebugLogging)
        dbgs() << "Switch outgoing call edge to a ref edge from '" << N
               << "' to '" << *RefTarget << "'\n";
      continue;
    }

    // We are switching an internal call edge to a ref edge. This may split up
    // some SCCs.
    if (C != &TargetC) {
      // For separate SCCs this is trivial.
      RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
      continue;
    }

    // Otherwise we may end up re-structuring the call graph. First, invalidate
    // any SCC analyses. We have to do this before we split functions into new
    // SCCs and lose track of where their analyses are cached.
    // FIXME: We should accept a more precise preserved set here. For example,
    // it might be possible to preserve some function analyses even as the SCC
    // structure is changed.
    AM.invalidate(*C, PreservedAnalyses::none());
    // Now update the call graph.
    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
                               C, AM, UR, DebugLogging);
  }

  // Now promote ref edges into call edges.
  for (Node *CallTarget : PromotedRefTargets) {
    SCC &TargetC = *G.lookupSCC(*CallTarget);
    RefSCC &TargetRC = TargetC.getOuterRefSCC();

    // The easy case is when the target RefSCC is not this RefSCC. This is
    // only supported when the target RefSCC is a child of this RefSCC.
    if (&TargetRC != RC) {
      assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
      RC->switchOutgoingEdgeToCall(N, *CallTarget);
      if (DebugLogging)
        dbgs() << "Switch outgoing ref edge to a call edge from '" << N
               << "' to '" << *CallTarget << "'\n";
      continue;
    }
    if (DebugLogging)
      dbgs() << "Switch an internal ref edge to a call edge from '" << N
             << "' to '" << *CallTarget << "'\n";

    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
    // before the current one in the post-order sequence.
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
    auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, *CallTarget);
    if (!InvalidatedSCCs.empty()) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");

      // Any analyses cached for this SCC are no longer precise as the shape
      // has changed by introducing this cycle.
      AM.invalidate(*C, PreservedAnalyses::none());

      for (SCC *InvalidatedC : InvalidatedSCCs) {
        assert(InvalidatedC != C && "Cannot invalidate the current SCC!");
        UR.InvalidatedSCCs.insert(InvalidatedC);

        // Also clear any cached analyses for the SCCs that are dead. This
        // isn't really necessary for correctness but can release memory.
        AM.clear(*InvalidatedC);
      }
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    if (InitialSCCIndex < NewSCCIndex) {
      // Put our current SCC back onto the worklist as we'll visit other SCCs
      // that are now definitively ordered prior to the current one in the
      // post-order sequence, and may end up observing more precise context to
      // optimize the current SCC.
      UR.CWorklist.insert(C);
      if (DebugLogging)
        dbgs() << "Enqueuing the existing SCC in the worklist: " << *C << "\n";
      // Enqueue in reverse order as we pop off the back of the worklist.
      for (SCC &MovedC : reverse(make_range(RC->begin() + InitialSCCIndex,
                                            RC->begin() + NewSCCIndex))) {
        UR.CWorklist.insert(&MovedC);
        if (DebugLogging)
          dbgs() << "Enqueuing a newly earlier in post-order SCC: " << MovedC
                 << "\n";
      }
    }
  }

  assert(!UR.InvalidatedSCCs.count(C) && "Invalidated the current SCC!");
  assert(!UR.InvalidatedRefSCCs.count(RC) && "Invalidated the current RefSCC!");
  assert(&C->getOuterRefSCC() == RC && "Current SCC not in current RefSCC!");

  // Record the current RefSCC and SCC for higher layers of the CGSCC pass
  // manager now that all the updates have been applied.
  if (RC != &InitialRC)
    UR.UpdatedRC = RC;
  if (C != &InitialC)
    UR.UpdatedC = C;

  return *C;
}
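
// Usage note: this update routine is meant to be driven from the adaptor that
// runs function passes underneath the CGSCC manager. A simplified sketch of
// that calling pattern (details and the node worklist handling elided; see
// the CGSCCToFunctionPassAdaptor in CGSCCPassManager.h):
//
//   for (LazyCallGraph::Node *N : Nodes) {
//     PreservedAnalyses PassPA = FunctionPass.run(N->getFunction(), FAM);
//     FAM.invalidate(N->getFunction(), PassPA);
//     CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
//         CG, *CurrentC, *N, AM, UR, DebugLogging);
//   }
//
// i.e. after each function pass mutates a function body, the call graph and
// the SCC-level analysis caches are reconciled before the walk continues.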