//===- FunctionAttrs.cpp - Pass which marks function attributes ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements interprocedural passes which walk the
/// call-graph deducing and/or propagating function attributes.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "functionattrs"

STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReturned, "Number of arguments marked returned");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");

namespace {
typedef SmallSetVector<Function *, 8> SCCNodeSet;
}

namespace {
/// The three kinds of memory access relevant to 'readonly' and
/// 'readnone' attributes.
enum MemoryAccessKind {
  MAK_ReadNone = 0,
  MAK_ReadOnly = 1,
  MAK_MayWrite = 2
};
}

static MemoryAccessKind checkFunctionMemoryAccess(Function &F, AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  // Non-exact function definitions may not be selected at link time, and an
  // alternative version that writes to memory may be selected. See the comment
  // on GlobalValue::isDefinitionExact for more details.
  if (!F.hasExactDefinition()) {
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    // Conservatively assume it writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    CallSite CS(cast<Value>(I));
    if (CS) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles. Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!CS.hasOperandBundles() && CS.getCalledFunction() &&
          SCCNodes.count(CS.getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);

      // If the call doesn't access memory, we're done.
      if (!(MRB & MRI_ModRef))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, give up.
        if (MRB & MRI_Mod)
          return MAK_MayWrite;
        // If it reads, note it.
        if (MRB & MRI_Ref)
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
           CI != CE; ++CI) {
        Value *Arg = *CI;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        AAMDNodes AAInfo;
        I->getAAMetadata(AAInfo);
        MemoryLocation Loc(Arg, MemoryLocation::UnknownSize, AAInfo);

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (MRB & MRI_Mod)
          // Writes non-local memory. Give up.
          return MAK_MayWrite;
        if (MRB & MRI_Ref)
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously! Check if they
    // read or write memory.
    if (I->mayWriteToMemory())
      // Writes memory. Just give up.
      return MAK_MayWrite;

    // If this instruction may read memory, remember that.
    ReadsMemory |= I->mayReadFromMemory();
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}

/// Deduce readonly/readnone attributes for the SCC.
template <typename AARGetterT>
static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT AARGetter) {
  // Check if any of the functions in the SCC read or write memory. If they
  // write memory then they can't be marked readnone or readonly.
  bool ReadsMemory = false;
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);

    switch (checkFunctionMemoryAccess(*F, AAR, SCCNodes)) {
    case MAK_MayWrite:
      return false;
    case MAK_ReadOnly:
      ReadsMemory = true;
      break;
    case MAK_ReadNone:
      // Nothing to do!
      break;
    }
  }

  // Success! Functions in this SCC do not access memory, or only read memory.
  // Give them the appropriate attribute.
  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->doesNotAccessMemory())
      // Already perfect!
      continue;

    if (F->onlyReadsMemory() && ReadsMemory)
      // No change.
      continue;

    MadeChange = true;

    // Clear out any existing attributes.
    AttrBuilder B;
    B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
    F->removeAttributes(
        AttributeSet::FunctionIndex,
        AttributeSet::get(F->getContext(), AttributeSet::FunctionIndex, B));

    // Add in the new attribute.
    F->addAttribute(AttributeSet::FunctionIndex,
                    ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);

    if (ReadsMemory)
      ++NumReadOnly;
    else
      ++NumReadNone;
  }

  return MadeChange;
}

namespace {
/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  Argument *Definition;
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert.
  typedef std::map<Argument *, ArgumentGraphNode> ArgumentMapTy;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator iterator;

  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes)
      : Captured(false), SCCNodes(SCCNodes) {}

  void tooManyUses() override { Captured = true; }

  bool captured(const Use *U) override {
    CallSite CS(U->getUser());
    if (!CS.getInstruction()) {
      Captured = true;
      return true;
    }

    Function *F = CS.getCalledFunction();
    if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
      Captured = true;
      return true;
    }

    // Note: the callee and the two successor blocks *follow* the argument
    // operands. This means there is no need to adjust UseIndex to account for
    // these.

    unsigned UseIndex =
        std::distance(const_cast<const Use *>(CS.arg_begin()), U);

    assert(UseIndex < CS.data_operands_size() &&
           "Indirect function calls should have been filtered above!");

    if (UseIndex >= CS.getNumArgOperands()) {
      // Data operand, but not an argument operand -- must be a bundle operand
      assert(CS.hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand bundle
      // use. In this case it does not matter if the callee is within our SCC
      // or not -- we've been captured in some unknown way, and we have to be
      // conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  bool Captured; // True only if certainly captured (used outside our SCC).
  SmallVector<Argument *, 4> Uses; // Uses within our SCC.

  const SCCNodeSet &SCCNodes;
};
}

namespace llvm {
template <> struct GraphTraits<ArgumentGraphNode *> {
  typedef ArgumentGraphNode NodeType;
  typedef ArgumentGraphNode *NodeRef;
  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator ChildIteratorType;

  static inline NodeType *getEntryNode(NodeType *A) { return A; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->Uses.begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->Uses.end();
  }
};
template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeType *getEntryNode(ArgumentGraph *AG) {
    return AG->getEntryNode();
  }
  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }
  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};
}

/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {

  SmallVector<Use *, 32> Worklist;
  SmallSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands. This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to an operand bundle use, these cannot participate in the optimistic
      // SCC analysis. Instead, we model the operand bundle uses as arguments
      // in a call to a function external to the SCC.
      if (IsOperandBundleUse ||
          !SCCNodes.count(&*std::next(F->arg_begin(), UseIndex))) {

        // The accessors used on CallSite here do the right thing for calls and
        // invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}

/// Deduce returned attributes for the SCC.
static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  AttrBuilder B;
  B.addAttribute(Attribute::Returned);

  // Check each function in turn, determining if an argument is always returned.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    if (F->getReturnType()->isVoidTy())
      continue;

    auto FindRetArg = [&]() -> Value * {
      Value *RetArg = nullptr;
      for (BasicBlock &BB : *F)
        if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
          // Note that stripPointerCasts should look through functions with
          // returned arguments.
          Value *RetVal = Ret->getReturnValue()->stripPointerCasts();
          if (!isa<Argument>(RetVal) || RetVal->getType() != F->getReturnType())
            return nullptr;

          if (!RetArg)
            RetArg = RetVal;
          else if (RetArg != RetVal)
            return nullptr;
        }

      return RetArg;
    };

    if (Value *RetArg = FindRetArg()) {
      auto *A = cast<Argument>(RetArg);
      A->addAttr(AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
      ++NumReturned;
      Changed = true;
    }
  }

  return Changed;
}

/// Deduce nocapture attributes for the SCC.
static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  ArgumentGraph AG;

  AttrBuilder B;
  B.addAttribute(Attribute::NoCapture);

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          A->addAttr(AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
          ++NumNoCapture;
          Changed = true;
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(
                AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
            ++NumNoCapture;
            Changed = true;
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (Argument *Use : Tracker.Uses) {
              Node->Uses.push_back(AG[Use]);
              if (Use != &*A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone without doing an SCC?
        // Note that we don't allow any calls at all here, or else our result
        // will be dependent on the iteration order through the functions in the
        // SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
        if (R != Attribute::None) {
          AttrBuilder B;
          B.addAttribute(R);
          A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
          Changed = true;
          R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        }
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made. If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // eg. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ++NumNoCapture;
        Changed = true;
      }
      continue;
    }

    bool SCCCaptured = false;
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (ArgumentGraphNode *I : ArgumentSCC) {
      ArgumentSCCNodes.insert(I->Definition);
    }

    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (ArgumentGraphNode *Use : N->Uses) {
        Argument *A = Use->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
      ++NumNoCapture;
      Changed = true;
    }

    // We also want to compute readonly/readnone. With a small number of false
    // negatives, we can assume that any pointer which is captured isn't going
    // to be provably readonly or readnone, since by definition we can't
    // analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    Attribute::AttrKind ReadAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
      if (K == Attribute::ReadNone)
        continue;
      if (K == Attribute::ReadOnly) {
        ReadAttr = Attribute::ReadOnly;
        continue;
      }
      ReadAttr = K;
      break;
    }

    if (ReadAttr != Attribute::None) {
      AttrBuilder B, R;
      B.addAttribute(ReadAttr);
      R.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        // Clear out existing readonly/readnone attributes
        A->removeAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, R));
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ReadAttr == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(RVI);
        if (CS.paramHasAttr(0, Attribute::NoAlias))
          break;
        if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
          break;
      } // fall-through
      default:
        return false; // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}

/// Deduce noalias attributes for the SCC.
static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) {
  // Check each function in turn, determining which functions return noalias
  // pointers.
  for (Function *F : SCCNodes) {
    // Already noalias.
    if (F->doesNotAlias(0))
      continue;

    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate noalias return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    if (!isFunctionMallocLike(F, SCCNodes))
      return false;
  }

  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->doesNotAlias(0) || !F->getReturnType()->isPointerTy())
      continue;

    F->setDoesNotAlias(0);
    ++NumNoAlias;
    MadeChange = true;
  }

  return MadeChange;
}

/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good.
    if (isKnownNonNull(RetVal))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::Select: {
      SelectInst *SI = cast<SelectInst>(RVI);
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(RVI);
      Function *Callee = CS.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise.
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null.
    };
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}

/// Deduce nonnull attributes for the SCC.
static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
  // Speculatively assume that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  bool MadeChange = false;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                        Attribute::NonNull))
      continue;

    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC
        DEBUG(dbgs() << "Eagerly marking " << F->getName() << " as nonnull\n");
        F->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
        ++NumNonNullReturn;
        MadeChange = true;
      }
      continue;
    }
    // At least one function returns something which could be null, can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  if (SCCReturnsNonNull) {
    for (Function *F : SCCNodes) {
      if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                          Attribute::NonNull) ||
          !F->getReturnType()->isPointerTy())
        continue;

      DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
      F->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
      ++NumNonNullReturn;
      MadeChange = true;
    }
  }

  return MadeChange;
}

/// Remove the convergent attribute from all functions in the SCC if every
/// callsite within the SCC is not convergent (except for calls to functions
/// within the SCC). Returns true if changes were made.
static bool removeConvergentAttrs(const SCCNodeSet &SCCNodes) {
  // For every function in SCC, ensure that either
  //  * it is not convergent, or
  //  * we can remove its convergent attribute.
  bool HasConvergentFn = false;
  for (Function *F : SCCNodes) {
    if (!F->isConvergent()) continue;
    HasConvergentFn = true;

    // Can't remove convergent from function declarations.
    if (F->isDeclaration()) return false;

    // Can't remove convergent if any of our functions has a convergent call
    // to a function not in the SCC.
    for (Instruction &I : instructions(*F)) {
      CallSite CS(&I);
      // Bail if CS is a convergent call to a function not in the SCC.
      if (CS && CS.isConvergent() &&
          SCCNodes.count(CS.getCalledFunction()) == 0)
        return false;
    }
  }

  // If the SCC doesn't have any convergent functions, we have nothing to do.
  if (!HasConvergentFn) return false;

  // If we got here, all of the calls the SCC makes to functions not in the SCC
  // are non-convergent. Therefore all of the SCC's functions can also be made
  // non-convergent. We'll remove the attr from the callsites in
  // InstCombineCalls.
  for (Function *F : SCCNodes) {
    if (!F->isConvergent()) continue;

    DEBUG(dbgs() << "Removing convergent attr from fn " << F->getName()
                 << "\n");
    F->setNotConvergent();
  }
  return true;
}

static bool setDoesNotRecurse(Function &F) {
  if (F.doesNotRecurse())
    return false;
  F.setDoesNotRecurse();
  ++NumNoRecurse;
  return true;
}

static bool addNoRecurseAttrs(const SCCNodeSet &SCCNodes) {
  // Try and identify functions that do not recurse.

  // If the SCC contains multiple nodes we know for sure there is recursion.
  if (SCCNodes.size() != 1)
    return false;

  Function *F = *SCCNodes.begin();
  if (!F || F->isDeclaration() || F->doesNotRecurse())
    return false;

  // If all of the calls in F are identifiable and are to norecurse functions, F
  // is norecurse. This check also detects self-recursion as F is not currently
  // marked norecurse, so any call from F to F will not be marked norecurse.
  for (Instruction &I : instructions(*F))
    if (auto CS = CallSite(&I)) {
      Function *Callee = CS.getCalledFunction();
      if (!Callee || Callee == F || !Callee->doesNotRecurse())
        // Function calls a potentially recursive function.
        return false;
    }

  // Every call was to a non-recursive function other than this function, and
  // we have no indirect recursion as the SCC size is one. This function cannot
  // recurse.
  return setDoesNotRecurse(*F);
}

PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();

  // We pass a lambda into functions to wire them up to the analysis manager
  // for getting function analyses.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };

  // Fill SCCNodes with the elements of the SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool HasUnknownCall = false;
  for (LazyCallGraph::Node &N : C) {
    Function &F = N.getFunction();
    if (F.hasFnAttribute(Attribute::OptimizeNone)) {
      // Treat any function we're trying not to optimize as if it were an
      // indirect call and omit it from the node set used below.
      HasUnknownCall = true;
      continue;
    }
    // Track whether any functions in this SCC have an unknown call edge.
    // Note: if this is ever a performance hit, we can common it with
    // subsequent routines which also do scans over the instructions of the
    // function.
    if (!HasUnknownCall)
      for (Instruction &I : instructions(F))
        if (auto CS = CallSite(&I))
          if (!CS.getCalledFunction()) {
            HasUnknownCall = true;
            break;
          }

    SCCNodes.insert(&F);
  }

  bool Changed = false;
  Changed |= addArgumentReturnedAttrs(SCCNodes);
  Changed |= addReadAttrs(SCCNodes, AARGetter);
  Changed |= addArgumentAttrs(SCCNodes);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!HasUnknownCall) {
    Changed |= addNoAliasAttrs(SCCNodes);
    Changed |= addNonNullAttrs(SCCNodes);
    Changed |= removeConvergentAttrs(SCCNodes);
    Changed |= addNoRecurseAttrs(SCCNodes);
  }

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}

namespace {
struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid
  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};
}

char PostOrderFunctionAttrsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                    "Deduce function attributes", false, false)

Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
  return new PostOrderFunctionAttrsLegacyPass();
}

template <typename AARGetterT>
static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {
  bool Changed = false;

  // Fill SCCNodes with the elements of the SCC. Used for quickly looking up
  // whether a given CallGraphNode is in this SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool ExternalNode = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->hasFnAttribute(Attribute::OptimizeNone)) {
      // External node or function we're trying not to optimize - we both avoid
      // transforming them and avoid leveraging information they provide.
      ExternalNode = true;
      continue;
    }

    SCCNodes.insert(F);
  }

  Changed |= addArgumentReturnedAttrs(SCCNodes);
  Changed |= addReadAttrs(SCCNodes, AARGetter);
  Changed |= addArgumentAttrs(SCCNodes);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!ExternalNode) {
    Changed |= addNoAliasAttrs(SCCNodes);
    Changed |= addNonNullAttrs(SCCNodes);
    Changed |= removeConvergentAttrs(SCCNodes);
    Changed |= addNoRecurseAttrs(SCCNodes);
  }

  return Changed;
}

bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;

  // We compute dedicated AA results for each function in the SCC as needed. We
  // use a lambda referencing external objects so that they live long enough to
  // be queried, but we re-use them each time.
  Optional<BasicAAResult> BAR;
  Optional<AAResults> AAR;
  auto AARGetter = [&](Function &F) -> AAResults & {
    BAR.emplace(createLegacyPMBasicAAResult(*this, F));
    AAR.emplace(createLegacyPMAAResults(*this, F, *BAR));
    return *AAR;
  };

  return runImpl(SCC, AARGetter);
}

namespace {
struct ReversePostOrderFunctionAttrsLegacyPass : public ModulePass {
  static char ID; // Pass identification, replacement for typeid
  ReversePostOrderFunctionAttrsLegacyPass() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
    AU.addPreserved<CallGraphWrapperPass>();
  }
};
}

char ReversePostOrderFunctionAttrsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrsLegacyPass,
                      "rpo-functionattrs",
                      "Deduce function attributes in RPO", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrsLegacyPass,
                    "rpo-functionattrs",
                    "Deduce function attributes in RPO", false, false)

Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrsLegacyPass();
}

static bool addNoRecurseAttrsTopDown(Function &F) {
  // We check the preconditions for the function prior to calling this to avoid
  // the cost of building up a reversible post-order list. We assert them here
  // to make sure none of the invariants this relies on were violated.
  assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
  assert(!F.doesNotRecurse() &&
         "This function has already been deduced as norecurse!");
  assert(F.hasInternalLinkage() &&
         "Can only do top-down deduction for internal linkage functions!");

  // If F is internal and all of its uses are calls from non-recursive
  // functions, then none of its calls could in fact recurse without going
  // through a function marked norecurse, and so we can mark this function too
  // as norecurse. Note that the uses must actually be calls -- otherwise
  // a pointer to this function could be returned from a norecurse function but
  // this function could be recursively (indirectly) called. Note that this
  // also detects if F is directly recursive as F is not yet marked as
  // a norecurse function.
  for (auto *U : F.users()) {
    auto *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    CallSite CS(I);
    if (!CS || !CS.getParent()->getParent()->doesNotRecurse())
      return false;
  }
  return setDoesNotRecurse(F);
}

static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
  // We only have a post-order SCC traversal (because SCCs are inherently
  // discovered in post-order), so we accumulate them in a vector and then walk
  // it in reverse. This is simpler than using the RPO iterator infrastructure
  // because we need to combine SCC detection and the PO walk of the call
  // graph. We can also cheat egregiously because we're primarily interested in
  // synthesizing norecurse and so we only need to save the single-function
  // SCCs, as SCCs with multiple functions in them will clearly be recursive.
  SmallVector<Function *, 16> Worklist;
  for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
    if (I->size() != 1)
      continue;

    Function *F = I->front()->getFunction();
    if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
        F->hasInternalLinkage())
      Worklist.push_back(F);
  }

  bool Changed = false;
  for (auto *F : reverse(Worklist))
    Changed |= addNoRecurseAttrsTopDown(*F);

  return Changed;
}

bool ReversePostOrderFunctionAttrsLegacyPass::runOnModule(Module &M) {
  if (skipModule(M))
    return false;

  auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  return deduceFunctionAttributeInRPO(M, CG);
}

PreservedAnalyses
ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto &CG = AM.getResult<CallGraphAnalysis>(M);

  bool Changed = deduceFunctionAttributeInRPO(M, CG);

  // CallGraphAnalysis holds AssertingVH and must be invalidated eagerly so
  // that other passes don't delete stuff from under it.
  // FIXME: We need to invalidate this to avoid PR28400. Is there a better
  // solution?
  AM.invalidate<CallGraphAnalysis>(M);

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<CallGraphAnalysis>();
  return PA;
}