//===- FunctionAttrs.cpp - Pass which marks function attributes ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements interprocedural passes which walk the
/// call-graph deducing and/or propagating function attributes.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "functionattrs"

STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");

namespace {
typedef SmallSetVector<Function *, 8> SCCNodeSet;
}

namespace {
struct PostOrderFunctionAttrs : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid
  PostOrderFunctionAttrs() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

private:
  TargetLibraryInfo *TLI;
};
}

char PostOrderFunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrs, "functionattrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrs, "functionattrs",
                    "Deduce function attributes", false, false)

Pass *llvm::createPostOrderFunctionAttrsPass() {
  return new PostOrderFunctionAttrs();
}

namespace {
/// The three kinds of memory access relevant to 'readonly' and
/// 'readnone' attributes.
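///
/// As an illustrative sketch (hypothetical IR, not drawn from any test in this
/// file), the deduction below would classify the following two functions as
/// MAK_ReadNone and MAK_ReadOnly respectively:
///   define i32 @f(i32 %x) {        ; touches no memory -> readnone
///     ret i32 %x
///   }
///   define i32 @g(i32* %p) {       ; reads but never writes -> readonly
///     %v = load i32, i32* %p
///     ret i32 %v
///   }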
enum MemoryAccessKind {
  MAK_ReadNone = 0,
  MAK_ReadOnly = 1,
  MAK_MayWrite = 2
};
}

static MemoryAccessKind checkFunctionMemoryAccess(Function &F, AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  // Definitions with weak linkage may be overridden at linktime with
  // something that writes memory, so treat them like declarations.
  if (F.isDeclaration() || F.mayBeOverridden()) {
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    // Conservatively assume it writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    CallSite CS(cast<Value>(I));
    if (CS) {
      // Ignore calls to functions in the same SCC.
      if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);

      // If the call doesn't access memory, we're done.
      if (!(MRB & MRI_ModRef))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, give up.
        if (MRB & MRI_Mod)
          return MAK_MayWrite;
        // If it reads, note it.
        if (MRB & MRI_Ref)
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
           CI != CE; ++CI) {
        Value *Arg = *CI;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        AAMDNodes AAInfo;
        I->getAAMetadata(AAInfo);
        MemoryLocation Loc(Arg, MemoryLocation::UnknownSize, AAInfo);

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (MRB & MRI_Mod)
          // Writes non-local memory. Give up.
          return MAK_MayWrite;
        if (MRB & MRI_Ref)
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously! Check if they
    // read or write memory.
    if (I->mayWriteToMemory())
      // Writes memory. Just give up.
      return MAK_MayWrite;

    // If this instruction may read memory, remember that.
    ReadsMemory |= I->mayReadFromMemory();
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}

/// Deduce readonly/readnone attributes for the SCC.
template <typename AARGetterT>
static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT AARGetter) {
  // Check if any of the functions in the SCC read or write memory. If they
  // write memory then they can't be marked readnone or readonly.
  bool ReadsMemory = false;
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);

    switch (checkFunctionMemoryAccess(*F, AAR, SCCNodes)) {
    case MAK_MayWrite:
      return false;
    case MAK_ReadOnly:
      ReadsMemory = true;
      break;
    case MAK_ReadNone:
      // Nothing to do!
      break;
    }
  }

  // Success! Functions in this SCC do not access memory, or only read memory.
  // Give them the appropriate attribute.
  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->doesNotAccessMemory())
      // Already perfect!
      continue;

    if (F->onlyReadsMemory() && ReadsMemory)
      // No change.
      continue;

    MadeChange = true;

    // Clear out any existing attributes.
    AttrBuilder B;
    B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
    F->removeAttributes(
        AttributeSet::FunctionIndex,
        AttributeSet::get(F->getContext(), AttributeSet::FunctionIndex, B));

    // Add in the new attribute.
    F->addAttribute(AttributeSet::FunctionIndex,
                    ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);

    if (ReadsMemory)
      ++NumReadOnly;
    else
      ++NumReadNone;
  }

  return MadeChange;
}

namespace {
/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  Argument *Definition;
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert.
  typedef std::map<Argument *, ArgumentGraphNode> ArgumentMapTy;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator iterator;

  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes)
      : Captured(false), SCCNodes(SCCNodes) {}

  void tooManyUses() override { Captured = true; }

  bool captured(const Use *U) override {
    CallSite CS(U->getUser());
    if (!CS.getInstruction()) {
      Captured = true;
      return true;
    }

    Function *F = CS.getCalledFunction();
    if (!F || F->isDeclaration() || F->mayBeOverridden() ||
        !SCCNodes.count(F)) {
      Captured = true;
      return true;
    }

    // Note: the callee and the two successor blocks *follow* the argument
    // operands. This means there is no need to adjust UseIndex to account for
    // these.

    unsigned UseIndex =
        std::distance(const_cast<const Use *>(CS.arg_begin()), U);

    assert(UseIndex < CS.data_operands_size() &&
           "Indirect function calls should have been filtered above!");

    if (UseIndex >= CS.getNumArgOperands()) {
      // Data operand, but not an argument operand -- must be a bundle operand.
      assert(CS.hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand bundle
      // use. In this case it does not matter if the callee is within our SCC
      // or not -- we've been captured in some unknown way, and we have to be
      // conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  bool Captured; // True only if certainly captured (used outside our SCC).
  SmallVector<Argument *, 4> Uses; // Uses within our SCC.

  const SCCNodeSet &SCCNodes;
};
}

namespace llvm {
template <> struct GraphTraits<ArgumentGraphNode *> {
  typedef ArgumentGraphNode NodeType;
  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator ChildIteratorType;

  static inline NodeType *getEntryNode(NodeType *A) { return A; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->Uses.begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->Uses.end();
  }
};
template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeType *getEntryNode(ArgumentGraph *AG) {
    return AG->getEntryNode();
  }
  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }
  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};
}

/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
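///
/// A hedged illustration (hypothetical IR, names invented): an argument such
/// as %p below, whose transitive uses are only GEPs, loads, and compares,
/// comes back as Attribute::ReadOnly, while an argument passed to a call that
/// may write memory comes back as Attribute::None.
///   define i32 @first(i32* %p) {
///     %v = load i32, i32* %p
///     ret i32 %v
///   }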
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {

  SmallVector<Use *, 32> Worklist;
  SmallSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value
      // isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands. This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to an operand bundle use, these cannot participate in the optimistic
      // SCC analysis. Instead, we model the operand bundle uses as arguments
      // in a call to a function external to the SCC.
      if (!SCCNodes.count(&*std::next(F->arg_begin(), UseIndex)) ||
          IsOperandBundleUse) {

        // The accessors used on CallSite here do the right thing for calls and
        // invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}
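// An illustrative sketch for the nocapture deduction below (hypothetical
// C-level code; 'escaped' stands for some invented global): in
//   static void keep(int *p) { escaped = p; }   // p escapes: not nocapture
//   static void bump(int *p) { ++*p; }          // p never escapes: nocapture
// only bump's parameter can be marked nocapture.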

/// Deduce nocapture attributes for the SCC.
static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  ArgumentGraph AG;

  AttrBuilder B;
  B.addAttribute(Attribute::NoCapture);

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // Definitions with weak linkage may be overridden at linktime with
    // something that captures pointers, so treat them like declarations.
    if (F->isDeclaration() || F->mayBeOverridden())
      continue;

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          A->addAttr(AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
          ++NumNoCapture;
          Changed = true;
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(
                AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
            ++NumNoCapture;
            Changed = true;
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (SmallVectorImpl<Argument *>::iterator
                     UI = Tracker.Uses.begin(),
                     UE = Tracker.Uses.end();
                 UI != UE; ++UI) {
              Node->Uses.push_back(AG[*UI]);
              if (*UI != A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone without doing an SCC?
        // Note that we don't allow any calls at all here, or else our result
        // will be dependent on the iteration order through the functions in
        // the SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
        if (R != Attribute::None) {
          AttrBuilder B;
          B.addAttribute(R);
          A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
          Changed = true;
          R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        }
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made. If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.
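  //
  // In the loop below, each argument-SCC is handled in four steps: skip the
  // synthetic root, handle the trivial self-recursive singleton, give up if
  // any member is already known to be captured or flows into an argument
  // outside this argument-SCC that is not itself nocapture, and otherwise
  // mark every member nocapture and try to propagate readonly/readnone across
  // the whole argument-SCC.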

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // e.g. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ++NumNoCapture;
        Changed = true;
      }
      continue;
    }

    bool SCCCaptured = false;
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end(); I != E; ++I) {
      ArgumentSCCNodes.insert((*I)->Definition);
    }

    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (SmallVectorImpl<ArgumentGraphNode *>::iterator UI = N->Uses.begin(),
                                                          UE = N->Uses.end();
           UI != UE; ++UI) {
        Argument *A = (*UI)->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
      ++NumNoCapture;
      Changed = true;
    }

    // We also want to compute readonly/readnone. With a small number of false
    // negatives, we can assume that any pointer which is captured isn't going
    // to be provably readonly or readnone, since by definition we can't
    // analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    Attribute::AttrKind ReadAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
      if (K == Attribute::ReadNone)
        continue;
      if (K == Attribute::ReadOnly) {
        ReadAttr = Attribute::ReadOnly;
        continue;
      }
      ReadAttr = K;
      break;
    }

    if (ReadAttr != Attribute::None) {
      AttrBuilder B, R;
      B.addAttribute(ReadAttr);
      R.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        // Clear out existing readonly/readnone attributes.
        A->removeAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, R));
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ReadAttr == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(I->getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(RVI);
        if (CS.paramHasAttr(0, Attribute::NoAlias))
          break;
        if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
          break;
      } // fall-through
      default:
        return false; // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}

/// Deduce noalias attributes for the SCC.
static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) {
  // Check each function in turn, determining which functions return noalias
  // pointers.
  for (Function *F : SCCNodes) {
    // Already noalias.
    if (F->doesNotAlias(0))
      continue;

    // Definitions with weak linkage may be overridden at linktime, so
    // treat them like declarations.
    if (F->isDeclaration() || F->mayBeOverridden())
      return false;

    // We annotate noalias return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    if (!isFunctionMallocLike(F, SCCNodes))
      return false;
  }

  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->doesNotAlias(0) || !F->getReturnType()->isPointerTy())
      continue;

    F->setDoesNotAlias(0);
    ++NumNoAlias;
    MadeChange = true;
  }

  return MadeChange;
}

/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
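///
/// A hedged illustration (hypothetical IR): a function like @local below is
/// reported non-null directly, whereas a function whose return value comes
/// from a call to another member of the SCC is only reported non-null
/// speculatively.
///   define i32* @local() {
///     %m = alloca i32        ; isKnownNonNull succeeds on the alloca
///     ret i32* %m
///   }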
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            const TargetLibraryInfo &TLI, bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good.
    if (isKnownNonNull(RetVal, &TLI))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::Select: {
      SelectInst *SI = cast<SelectInst>(RVI);
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(RVI);
      Function *Callee = CS.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise.
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null.
    }
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}

/// Deduce nonnull attributes for the SCC.
static bool addNonNullAttrs(const SCCNodeSet &SCCNodes,
                            const TargetLibraryInfo &TLI) {
  // Speculatively assume that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  bool MadeChange = false;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                        Attribute::NonNull))
      continue;

    // Definitions with weak linkage may be overridden at linktime, so
    // treat them like declarations.
    if (F->isDeclaration() || F->mayBeOverridden())
      return false;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, TLI, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC.
        DEBUG(dbgs() << "Eagerly marking " << F->getName() << " as nonnull\n");
        F->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
        ++NumNonNullReturn;
        MadeChange = true;
      }
      continue;
    }
    // At least one function returns something which could be null; we can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  if (SCCReturnsNonNull) {
    for (Function *F : SCCNodes) {
      if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                          Attribute::NonNull) ||
          !F->getReturnType()->isPointerTy())
        continue;

      DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
      F->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
      ++NumNonNullReturn;
      MadeChange = true;
    }
  }

  return MadeChange;
}

static bool setDoesNotRecurse(Function &F) {
  if (F.doesNotRecurse())
    return false;
  F.setDoesNotRecurse();
  ++NumNoRecurse;
  return true;
}

static bool addNoRecurseAttrs(const CallGraphSCC &SCC) {
  // Try and identify functions that do not recurse.

  // If the SCC contains multiple nodes we know for sure there is recursion.
  if (!SCC.isSingular())
    return false;

  const CallGraphNode *CGN = *SCC.begin();
  Function *F = CGN->getFunction();
  if (!F || F->isDeclaration() || F->doesNotRecurse())
    return false;

  // If all of the calls in F are identifiable and are to norecurse functions,
  // F is norecurse. This check also detects self-recursion, as F is not
  // currently marked norecurse, so any call from F to F will not be counted
  // as a call to a norecurse function.
  if (std::all_of(CGN->begin(), CGN->end(),
                  [](const CallGraphNode::CallRecord &CR) {
                    Function *F = CR.second->getFunction();
                    return F && F->doesNotRecurse();
                  }))
    // Every call in F is to an identifiable norecurse function, so F cannot
    // recurse.
    return setDoesNotRecurse(*F);

  // Nothing else we can deduce usefully during the postorder traversal.
  return false;
}

bool PostOrderFunctionAttrs::runOnSCC(CallGraphSCC &SCC) {
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  bool Changed = false;

  // We compute dedicated AA results for each function in the SCC as needed. We
  // use a lambda referencing external objects so that they live long enough to
  // be queried, but we re-use them each time.
  Optional<BasicAAResult> BAR;
  Optional<AAResults> AAR;
  auto AARGetter = [&](Function &F) -> AAResults & {
    BAR.emplace(createLegacyPMBasicAAResult(*this, F));
    AAR.emplace(createLegacyPMAAResults(*this, F, *BAR));
    return *AAR;
  };

  // Fill SCCNodes with the elements of the SCC. Used for quickly looking up
  // whether a given CallGraphNode is in this SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool ExternalNode = false;
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F || F->hasFnAttribute(Attribute::OptimizeNone)) {
      // External node or a function we're trying not to optimize - we both
      // avoid transforming it and avoid leveraging any information it
      // provides.
      ExternalNode = true;
      continue;
    }

    SCCNodes.insert(F);
  }

  Changed |= addReadAttrs(SCCNodes, AARGetter);
  Changed |= addArgumentAttrs(SCCNodes);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
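  // A note on this gating: addReadAttrs and addArgumentAttrs treat calls to
  // functions outside SCCNodes conservatively, so they stay sound even when
  // some SCC members were skipped; the noalias and nonnull deductions instead
  // speculate about every member of the SCC, so they only run when the whole
  // SCC was collected.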
  if (!ExternalNode) {
    Changed |= addNoAliasAttrs(SCCNodes);
    Changed |= addNonNullAttrs(SCCNodes, *TLI);
  }

  Changed |= addNoRecurseAttrs(SCC);
  return Changed;
}

namespace {
/// A pass to do RPO deduction and propagation of function attributes.
///
/// This pass provides a general RPO or "top down" propagation of
/// function attributes. For a few (rare) cases, we can deduce significantly
/// more about function attributes by working in RPO, so this pass
/// provides the complement to the post-order pass above where the majority of
/// deduction is performed.
// FIXME: Currently there is no RPO CGSCC pass structure to slide into and so
// this is a boring module pass, but eventually it should be an RPO CGSCC pass
// when such infrastructure is available.
struct ReversePostOrderFunctionAttrs : public ModulePass {
  static char ID; // Pass identification, replacement for typeid
  ReversePostOrderFunctionAttrs() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
  }
};
}

char ReversePostOrderFunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrs, "rpo-functionattrs",
                      "Deduce function attributes in RPO", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrs, "rpo-functionattrs",
                    "Deduce function attributes in RPO", false, false)

Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrs();
}

static bool addNoRecurseAttrsTopDown(Function &F) {
  // We check the preconditions for the function prior to calling this to avoid
  // the cost of building up a reversible post-order list. We assert them here
  // to make sure none of the invariants this relies on were violated.
  assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
  assert(!F.doesNotRecurse() &&
         "This function has already been deduced as norecurse!");
  assert(F.hasInternalLinkage() &&
         "Can only do top-down deduction for internal linkage functions!");

  // If F is internal and all of its uses are calls from non-recursive
  // functions, then none of its calls could in fact recurse without going
  // through a function marked norecurse, and so we can mark this function too
  // as norecurse. Note that the uses must actually be calls -- otherwise
  // a pointer to this function could be returned from a norecurse function but
  // this function could be recursively (indirectly) called. Note that this
  // also detects if F is directly recursive as F is not yet marked as
  // a norecurse function.
  for (auto *U : F.users()) {
    auto *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    CallSite CS(I);
    if (!CS || !CS.getParent()->getParent()->doesNotRecurse())
      return false;
  }
  return setDoesNotRecurse(F);
}

bool ReversePostOrderFunctionAttrs::runOnModule(Module &M) {
  // We only have a post-order SCC traversal (because SCCs are inherently
  // discovered in post-order), so we accumulate them in a vector and then walk
  // it in reverse. This is simpler than using the RPO iterator infrastructure
  // because we need to combine SCC detection and the PO walk of the call
  // graph. We can also cheat egregiously because we're primarily interested in
  // synthesizing norecurse, and so we need only save the singular SCCs, as
  // SCCs with multiple functions in them will clearly be recursive.
  auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  SmallVector<Function *, 16> Worklist;
  for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
    if (I->size() != 1)
      continue;

    Function *F = I->front()->getFunction();
    if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
        F->hasInternalLinkage())
      Worklist.push_back(F);
  }

  bool Changed = false;
  for (auto *F : reverse(Worklist))
    Changed |= addNoRecurseAttrsTopDown(*F);

  return Changed;
}
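
// Usage note: both passes are registered above under the legacy pass manager
// as "functionattrs" and "rpo-functionattrs", so they can typically be
// exercised in isolation with, for example:
//   opt -functionattrs -S input.ll
//   opt -rpo-functionattrs -S input.ll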