1 //===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 /// 10 /// \file 11 /// This file implements interprocedural passes which walk the 12 /// call-graph deducing and/or propagating function attributes. 13 /// 14 //===----------------------------------------------------------------------===// 15 16 #include "llvm/Transforms/IPO.h" 17 #include "llvm/ADT/SCCIterator.h" 18 #include "llvm/ADT/SetVector.h" 19 #include "llvm/ADT/SmallSet.h" 20 #include "llvm/ADT/Statistic.h" 21 #include "llvm/ADT/StringSwitch.h" 22 #include "llvm/Analysis/AliasAnalysis.h" 23 #include "llvm/Analysis/AssumptionCache.h" 24 #include "llvm/Analysis/BasicAliasAnalysis.h" 25 #include "llvm/Analysis/CallGraph.h" 26 #include "llvm/Analysis/CallGraphSCCPass.h" 27 #include "llvm/Analysis/CaptureTracking.h" 28 #include "llvm/Analysis/TargetLibraryInfo.h" 29 #include "llvm/Analysis/ValueTracking.h" 30 #include "llvm/IR/GlobalVariable.h" 31 #include "llvm/IR/InstIterator.h" 32 #include "llvm/IR/IntrinsicInst.h" 33 #include "llvm/IR/LLVMContext.h" 34 #include "llvm/Support/Debug.h" 35 #include "llvm/Support/raw_ostream.h" 36 #include "llvm/Analysis/TargetLibraryInfo.h" 37 using namespace llvm; 38 39 #define DEBUG_TYPE "functionattrs" 40 41 STATISTIC(NumReadNone, "Number of functions marked readnone"); 42 STATISTIC(NumReadOnly, "Number of functions marked readonly"); 43 STATISTIC(NumNoCapture, "Number of arguments marked nocapture"); 44 STATISTIC(NumReadNoneArg, "Number of arguments marked readnone"); 45 STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly"); 46 STATISTIC(NumNoAlias, "Number of function returns marked noalias"); 47 STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull"); 48 
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");

namespace {
// The set of live (non-external, optimizable) functions in the current SCC;
// used to recognize intra-SCC calls, which are analyzed optimistically.
typedef SmallSetVector<Function *, 8> SCCNodeSet;
}

namespace {
/// Legacy-PM pass that walks the call graph in post-order (callees before
/// callers) and deduces function, argument, and return attributes per SCC.
struct PostOrderFunctionAttrs : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid
  PostOrderFunctionAttrs() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Attribute deduction only annotates IR; it never changes the CFG.
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    addUsedAAAnalyses(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

private:
  // Cached by runOnSCC from TargetLibraryInfoWrapperPass.
  TargetLibraryInfo *TLI;
};
}

char PostOrderFunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrs, "functionattrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrs, "functionattrs",
                    "Deduce function attributes", false, false)

Pass *llvm::createPostOrderFunctionAttrsPass() { return new PostOrderFunctionAttrs(); }

namespace {
/// The three kinds of memory access relevant to 'readonly' and
/// 'readnone' attributes.
enum MemoryAccessKind {
  MAK_ReadNone = 0,
  MAK_ReadOnly = 1,
  MAK_MayWrite = 2
};
}

/// Classify the externally visible memory behavior of \p F.
///
/// Calls back into \p SCCNodes (without operand bundles) are treated
/// optimistically, since the whole SCC is classified together.
static MemoryAccessKind checkFunctionMemoryAccess(Function &F, AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  // Definitions with weak linkage may be overridden at linktime with
  // something that writes memory, so treat them like declarations.
  if (F.isDeclaration() || F.mayBeOverridden()) {
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    // Conservatively assume it writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    CallSite CS(cast<Value>(I));
    if (CS) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles. Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!CS.hasOperandBundles() && CS.getCalledFunction() &&
          SCCNodes.count(CS.getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);

      // If the call doesn't access memory, we're done.
      if (!(MRB & MRI_ModRef))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, give up.
        if (MRB & MRI_Mod)
          return MAK_MayWrite;
        // If it reads, note it.
        if (MRB & MRI_Ref)
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
           CI != CE; ++CI) {
        Value *Arg = *CI;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        AAMDNodes AAInfo;
        I->getAAMetadata(AAInfo);
        MemoryLocation Loc(Arg, MemoryLocation::UnknownSize, AAInfo);

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (MRB & MRI_Mod)
          // Writes non-local memory. Give up.
          return MAK_MayWrite;
        if (MRB & MRI_Ref)
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously! Check if they
    // read or write memory.
    if (I->mayWriteToMemory())
      // Writes memory. Just give up.
      return MAK_MayWrite;

    // If this instruction may read memory, remember that.
    ReadsMemory |= I->mayReadFromMemory();
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}

/// Deduce readonly/readnone attributes for the SCC.
template <typename AARGetterT>
static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT AARGetter) {
  // Check if any of the functions in the SCC read or write memory. If they
  // write memory then they can't be marked readnone or readonly.
  bool ReadsMemory = false;
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);

    switch (checkFunctionMemoryAccess(*F, AAR, SCCNodes)) {
    case MAK_MayWrite:
      // One writer poisons the whole SCC: nothing can be marked.
      return false;
    case MAK_ReadOnly:
      ReadsMemory = true;
      break;
    case MAK_ReadNone:
      // Nothing to do!
      break;
    }
  }

  // Success! Functions in this SCC do not access memory, or only read memory.
  // Give them the appropriate attribute.
  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->doesNotAccessMemory())
      // Already perfect!
      continue;

    if (F->onlyReadsMemory() && ReadsMemory)
      // No change.
      continue;

    MadeChange = true;

    // Clear out any existing attributes.
    AttrBuilder B;
    B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
    F->removeAttributes(
        AttributeSet::FunctionIndex,
        AttributeSet::get(F->getContext(), AttributeSet::FunctionIndex, B));

    // Add in the new attribute.
    F->addAttribute(AttributeSet::FunctionIndex,
                    ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);

    if (ReadsMemory)
      ++NumReadOnly;
    else
      ++NumReadNone;
  }

  return MadeChange;
}

namespace {
/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  Argument *Definition;
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert (hence std::map, not DenseMap).
  typedef std::map<Argument *, ArgumentGraphNode> ArgumentMapTy;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator iterator;

  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  // Lazily creates the node for \p A and links it under the synthetic root.
  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes)
      : Captured(false), SCCNodes(SCCNodes) {}

  void tooManyUses() override { Captured = true; }

  bool captured(const Use *U) override {
    CallSite CS(U->getUser());
    if (!CS.getInstruction()) {
      // Captured by a non-call user (e.g. a store): a real capture.
      Captured = true;
      return true;
    }

    Function *F = CS.getCalledFunction();
    if (!F || F->isDeclaration() || F->mayBeOverridden() ||
        !SCCNodes.count(F)) {
      // Indirect, external, or overridable callee: conservatively captured.
      Captured = true;
      return true;
    }

    // Note: the callee and the two successor blocks *follow* the argument
    // operands. This means there is no need to adjust UseIndex to account for
    // these.
    unsigned UseIndex =
        std::distance(const_cast<const Use *>(CS.arg_begin()), U);

    assert(UseIndex < CS.data_operands_size() &&
           "Indirect function calls should have been filtered above!");

    if (UseIndex >= CS.getNumArgOperands()) {
      // Data operand, but not a argument operand -- must be a bundle operand
      assert(CS.hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand bundle
      // use. In this case it does not matter if the callee is within our SCC
      // or not -- we've been captured in some unknown way, and we have to be
      // conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      // The pointer was passed through the varargs ellipsis; no formal
      // Argument corresponds to it, so we cannot track it further.
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    // Record the corresponding formal argument of the intra-SCC callee for
    // the later Argument-SCC analysis.
    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  bool Captured; // True only if certainly captured (used outside our SCC).
  SmallVector<Argument *, 4> Uses; // Uses within our SCC.

  const SCCNodeSet &SCCNodes;
};
}

namespace llvm {
// GraphTraits specializations so scc_iterator can walk the ArgumentGraph.
template <> struct GraphTraits<ArgumentGraphNode *> {
  typedef ArgumentGraphNode NodeType;
  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator ChildIteratorType;

  static inline NodeType *getEntryNode(NodeType *A) { return A; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->Uses.begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->Uses.end();
  }
};
template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeType *getEntryNode(ArgumentGraph *AG) {
    return AG->getEntryNode();
  }
  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }
  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};
}

/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
///
/// Walks the transitive uses of \p A with a worklist; uses flowing into
/// arguments contained in \p SCCNodes are treated optimistically.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {

  SmallVector<Use *, 32> Worklist;
  SmallSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      // A void call cannot propagate the pointer through its result.
      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        // Indirect call: fall back on the call site's attributes alone.
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands. This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to a operand bundle use, these cannot participate in the optimistic SCC
      // analysis. Instead, we model the operand bundle uses as arguments in
      // call to a function external to the SCC.
      if (!SCCNodes.count(&*std::next(F->arg_begin(), UseIndex)) ||
          IsOperandBundleUse) {

        // The accessors used on CallSite here do the right thing for calls and
        // invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      // Comparing or returning the pointer neither reads nor writes through it.
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}

/// Deduce nocapture attributes for the SCC.
static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  ArgumentGraph AG;

  AttrBuilder B;
  B.addAttribute(Attribute::NoCapture);

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // Definitions with weak linkage may be overridden at linktime with
    // something that captures pointers, so treat them like declarations.
    if (F->isDeclaration() || F->mayBeOverridden())
      continue;

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          // Attribute index is ArgNo + 1 because index 0 is the return value.
          A->addAttr(AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
          ++NumNoCapture;
          Changed = true;
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(
                AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
            ++NumNoCapture;
            Changed = true;
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (SmallVectorImpl<Argument *>::iterator
                     UI = Tracker.Uses.begin(),
                     UE = Tracker.Uses.end();
                 UI != UE; ++UI) {
              Node->Uses.push_back(AG[*UI]);
              if (*UI != A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone without doing an SCC?
        // Note that we don't allow any calls at all here, or else our result
        // will be dependent on the iteration order through the functions in the
        // SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
        if (R != Attribute::None) {
          AttrBuilder B;
          B.addAttribute(R);
          A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
          Changed = true;
          R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        }
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made. If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // eg. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ++NumNoCapture;
        Changed = true;
      }
      continue;
    }

    // If any node in this multi-node SCC was already decided (empty Uses)
    // and lacks nocapture, the whole SCC is captured.
    bool SCCCaptured = false;
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end(); I != E; ++I) {
      ArgumentSCCNodes.insert((*I)->Definition);
    }

    // Any flow out of the SCC to an argument not known nocapture captures the
    // whole SCC.
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (SmallVectorImpl<ArgumentGraphNode *>::iterator UI = N->Uses.begin(),
                                                          UE = N->Uses.end();
           UI != UE; ++UI) {
        Argument *A = (*UI)->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
      ++NumNoCapture;
      Changed = true;
    }

    // We also want to compute readonly/readnone. With a small number of false
    // negatives, we can assume that any pointer which is captured isn't going
    // to be provably readonly or readnone, since by definition we can't
    // analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    Attribute::AttrKind ReadAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
      if (K == Attribute::ReadNone)
        continue;
      if (K == Attribute::ReadOnly) {
        // Weaken the SCC-wide result from readnone to readonly.
        ReadAttr = Attribute::ReadOnly;
        continue;
      }
      // Attribute::None: no deduction possible for the whole SCC.
      ReadAttr = K;
      break;
    }

    if (ReadAttr != Attribute::None) {
      AttrBuilder B, R;
      B.addAttribute(ReadAttr);
      R.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        // Clear out existing readonly/readnone attributes
        A->removeAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, R));
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ReadAttr == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  // Worklist of values that flow into some return; processed set-style so each
  // value is visited once.
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(I->getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  // Note: FlowsToReturn may grow during iteration; indexing handles that.
  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      // Only null and undef constants are compatible with "malloc-like".
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    // Returning an argument aliases a pointer the caller already sees.
    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(RVI);
        // Index 0 is the return value's attribute slot.
        if (CS.paramHasAttr(0, Attribute::NoAlias))
          break;
        // Calls back into the SCC are assumed malloc-like (optimistic).
        if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
          break;
      } // fall-through
      default:
        return false; // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}

/// Deduce noalias attributes for the SCC.
772 static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) { 773 // Check each function in turn, determining which functions return noalias 774 // pointers. 775 for (Function *F : SCCNodes) { 776 // Already noalias. 777 if (F->doesNotAlias(0)) 778 continue; 779 780 // Definitions with weak linkage may be overridden at linktime, so 781 // treat them like declarations. 782 if (F->isDeclaration() || F->mayBeOverridden()) 783 return false; 784 785 // We annotate noalias return values, which are only applicable to 786 // pointer types. 787 if (!F->getReturnType()->isPointerTy()) 788 continue; 789 790 if (!isFunctionMallocLike(F, SCCNodes)) 791 return false; 792 } 793 794 bool MadeChange = false; 795 for (Function *F : SCCNodes) { 796 if (F->doesNotAlias(0) || !F->getReturnType()->isPointerTy()) 797 continue; 798 799 F->setDoesNotAlias(0); 800 ++NumNoAlias; 801 MadeChange = true; 802 } 803 804 return MadeChange; 805 } 806 807 /// Tests whether this function is known to not return null. 808 /// 809 /// Requires that the function returns a pointer. 810 /// 811 /// Returns true if it believes the function will not return a null, and sets 812 /// \p Speculative based on whether the returned conclusion is a speculative 813 /// conclusion due to SCC calls. 
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            const TargetLibraryInfo &TLI, bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  // Worklist of values flowing into some return; grows during iteration.
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good
    if (isKnownNonNull(RetVal, &TLI))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::Select: {
      SelectInst *SI = cast<SelectInst>(RVI);
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(RVI);
      Function *Callee = CS.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null
    };
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}

/// Deduce nonnull attributes for the SCC.
static bool addNonNullAttrs(const SCCNodeSet &SCCNodes,
                            const TargetLibraryInfo &TLI) {
  // Speculate that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  bool MadeChange = false;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                        Attribute::NonNull))
      continue;

    // Definitions with weak linkage may be overridden at linktime, so
    // treat them like declarations.
    if (F->isDeclaration() || F->mayBeOverridden())
      return false;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, TLI, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC
        DEBUG(dbgs() << "Eagerly marking " << F->getName() << " as nonnull\n");
        F->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
        ++NumNonNullReturn;
        MadeChange = true;
      }
      continue;
    }
    // At least one function returns something which could be null, can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  // If every member held (at least speculatively), commit the speculative
  // conclusions for the remaining pointer-returning members.
962 Function *Callee = CS.getCalledFunction(); 963 if (!Callee || (SCCNodes.count(Callee) == 0 && Callee->isConvergent())) 964 return false; 965 } 966 967 return true; 968 }; 969 970 // We can remove the convergent attr from functions in the SCC if they all 971 // can be made non-convergent (because they call only non-convergent 972 // functions, other than each other). 973 if (!llvm::all_of(SCCNodes, CanRemoveConvergent)) 974 return false; 975 976 // If we got here, all of the SCC's callees are non-convergent. Therefore all 977 // of the SCC's functions can be marked as non-convergent. 978 for (Function *F : SCCNodes) { 979 if (F->isConvergent()) 980 DEBUG(dbgs() << "Removing convergent attr from " << F->getName() << "\n"); 981 F->setNotConvergent(); 982 } 983 return true; 984 } 985 986 static bool setDoesNotRecurse(Function &F) { 987 if (F.doesNotRecurse()) 988 return false; 989 F.setDoesNotRecurse(); 990 ++NumNoRecurse; 991 return true; 992 } 993 994 static bool addNoRecurseAttrs(const CallGraphSCC &SCC) { 995 // Try and identify functions that do not recurse. 996 997 // If the SCC contains multiple nodes we know for sure there is recursion. 998 if (!SCC.isSingular()) 999 return false; 1000 1001 const CallGraphNode *CGN = *SCC.begin(); 1002 Function *F = CGN->getFunction(); 1003 if (!F || F->isDeclaration() || F->doesNotRecurse()) 1004 return false; 1005 1006 // If all of the calls in F are identifiable and are to norecurse functions, F 1007 // is norecurse. This check also detects self-recursion as F is not currently 1008 // marked norecurse, so any called from F to F will not be marked norecurse. 1009 if (std::all_of(CGN->begin(), CGN->end(), 1010 [](const CallGraphNode::CallRecord &CR) { 1011 Function *F = CR.second->getFunction(); 1012 return F && F->doesNotRecurse(); 1013 })) 1014 // Function calls a potentially recursive function. 1015 return setDoesNotRecurse(*F); 1016 1017 // Nothing else we can deduce usefully during the postorder traversal. 
1018 return false; 1019 } 1020 1021 bool PostOrderFunctionAttrs::runOnSCC(CallGraphSCC &SCC) { 1022 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 1023 bool Changed = false; 1024 1025 // We compute dedicated AA results for each function in the SCC as needed. We 1026 // use a lambda referencing external objects so that they live long enough to 1027 // be queried, but we re-use them each time. 1028 Optional<BasicAAResult> BAR; 1029 Optional<AAResults> AAR; 1030 auto AARGetter = [&](Function &F) -> AAResults & { 1031 BAR.emplace(createLegacyPMBasicAAResult(*this, F)); 1032 AAR.emplace(createLegacyPMAAResults(*this, F, *BAR)); 1033 return *AAR; 1034 }; 1035 1036 // Fill SCCNodes with the elements of the SCC. Used for quickly looking up 1037 // whether a given CallGraphNode is in this SCC. Also track whether there are 1038 // any external or opt-none nodes that will prevent us from optimizing any 1039 // part of the SCC. 1040 SCCNodeSet SCCNodes; 1041 bool ExternalNode = false; 1042 for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { 1043 Function *F = (*I)->getFunction(); 1044 if (!F || F->hasFnAttribute(Attribute::OptimizeNone)) { 1045 // External node or function we're trying not to optimize - we both avoid 1046 // transform them and avoid leveraging information they provide. 1047 ExternalNode = true; 1048 continue; 1049 } 1050 1051 SCCNodes.insert(F); 1052 } 1053 1054 Changed |= addReadAttrs(SCCNodes, AARGetter); 1055 Changed |= addArgumentAttrs(SCCNodes); 1056 1057 // If we have no external nodes participating in the SCC, we can deduce some 1058 // more precise attributes as well. 1059 if (!ExternalNode) { 1060 Changed |= addNoAliasAttrs(SCCNodes); 1061 Changed |= addNonNullAttrs(SCCNodes, *TLI); 1062 Changed |= removeConvergentAttrs(SCCNodes); 1063 } 1064 1065 Changed |= addNoRecurseAttrs(SCC); 1066 return Changed; 1067 } 1068 1069 namespace { 1070 /// A pass to do RPO deduction and propagation of function attributes. 
1071 /// 1072 /// This pass provides a general RPO or "top down" propagation of 1073 /// function attributes. For a few (rare) cases, we can deduce significantly 1074 /// more about function attributes by working in RPO, so this pass 1075 /// provides the compliment to the post-order pass above where the majority of 1076 /// deduction is performed. 1077 // FIXME: Currently there is no RPO CGSCC pass structure to slide into and so 1078 // this is a boring module pass, but eventually it should be an RPO CGSCC pass 1079 // when such infrastructure is available. 1080 struct ReversePostOrderFunctionAttrs : public ModulePass { 1081 static char ID; // Pass identification, replacement for typeid 1082 ReversePostOrderFunctionAttrs() : ModulePass(ID) { 1083 initializeReversePostOrderFunctionAttrsPass(*PassRegistry::getPassRegistry()); 1084 } 1085 1086 bool runOnModule(Module &M) override; 1087 1088 void getAnalysisUsage(AnalysisUsage &AU) const override { 1089 AU.setPreservesCFG(); 1090 AU.addRequired<CallGraphWrapperPass>(); 1091 } 1092 }; 1093 } 1094 1095 char ReversePostOrderFunctionAttrs::ID = 0; 1096 INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrs, "rpo-functionattrs", 1097 "Deduce function attributes in RPO", false, false) 1098 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) 1099 INITIALIZE_PASS_END(ReversePostOrderFunctionAttrs, "rpo-functionattrs", 1100 "Deduce function attributes in RPO", false, false) 1101 1102 Pass *llvm::createReversePostOrderFunctionAttrsPass() { 1103 return new ReversePostOrderFunctionAttrs(); 1104 } 1105 1106 static bool addNoRecurseAttrsTopDown(Function &F) { 1107 // We check the preconditions for the function prior to calling this to avoid 1108 // the cost of building up a reversible post-order list. We assert them here 1109 // to make sure none of the invariants this relies on were violated. 
1110 assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!"); 1111 assert(!F.doesNotRecurse() && 1112 "This function has already been deduced as norecurs!"); 1113 assert(F.hasInternalLinkage() && 1114 "Can only do top-down deduction for internal linkage functions!"); 1115 1116 // If F is internal and all of its uses are calls from a non-recursive 1117 // functions, then none of its calls could in fact recurse without going 1118 // through a function marked norecurse, and so we can mark this function too 1119 // as norecurse. Note that the uses must actually be calls -- otherwise 1120 // a pointer to this function could be returned from a norecurse function but 1121 // this function could be recursively (indirectly) called. Note that this 1122 // also detects if F is directly recursive as F is not yet marked as 1123 // a norecurse function. 1124 for (auto *U : F.users()) { 1125 auto *I = dyn_cast<Instruction>(U); 1126 if (!I) 1127 return false; 1128 CallSite CS(I); 1129 if (!CS || !CS.getParent()->getParent()->doesNotRecurse()) 1130 return false; 1131 } 1132 return setDoesNotRecurse(F); 1133 } 1134 1135 bool ReversePostOrderFunctionAttrs::runOnModule(Module &M) { 1136 // We only have a post-order SCC traversal (because SCCs are inherently 1137 // discovered in post-order), so we accumulate them in a vector and then walk 1138 // it in reverse. This is simpler than using the RPO iterator infrastructure 1139 // because we need to combine SCC detection and the PO walk of the call 1140 // graph. We can also cheat egregiously because we're primarily interested in 1141 // synthesizing norecurse and so we can only save the singular SCCs as SCCs 1142 // with multiple functions in them will clearly be recursive. 
1143 auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); 1144 SmallVector<Function *, 16> Worklist; 1145 for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) { 1146 if (I->size() != 1) 1147 continue; 1148 1149 Function *F = I->front()->getFunction(); 1150 if (F && !F->isDeclaration() && !F->doesNotRecurse() && 1151 F->hasInternalLinkage()) 1152 Worklist.push_back(F); 1153 } 1154 1155 bool Changed = false; 1156 for (auto *F : reverse(Worklist)) 1157 Changed |= addNoRecurseAttrsTopDown(*F); 1158 1159 return Changed; 1160 } 1161