//===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements interprocedural passes which walk the
/// call-graph deducing and/or propagating function attributes.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "functionattrs"

STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");

namespace {
typedef SmallSetVector<Function *, 8> SCCNodeSet;
}

namespace {
/// The three kinds of memory access relevant to 'readonly' and
/// 'readnone' attributes.
enum MemoryAccessKind {
  MAK_ReadNone = 0,
  MAK_ReadOnly = 1,
  MAK_MayWrite = 2
};
}

static MemoryAccessKind checkFunctionMemoryAccess(Function &F, AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  // Non-exact function definitions may not be selected at link time, and an
  // alternative version that writes to memory may be selected. See the
  // comment on GlobalValue::isDefinitionExact for more details.
  if (!F.hasExactDefinition()) {
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    // Conservatively assume it writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
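    // Calls get special handling below: calls back into this SCC are treated
    // optimistically, while other calls are classified via their alias
    // analysis mod/ref behavior. Anything not handled here falls through to
    // the generic mayReadFromMemory/mayWriteToMemory checks at the bottom of
    // the loop.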
    CallSite CS(cast<Value>(I));
    if (CS) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles. Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!CS.hasOperandBundles() && CS.getCalledFunction() &&
          SCCNodes.count(CS.getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);

      // If the call doesn't access memory, we're done.
      if (!(MRB & MRI_ModRef))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, give up.
        if (MRB & MRI_Mod)
          return MAK_MayWrite;
        // If it reads, note it.
        if (MRB & MRI_Ref)
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
           CI != CE; ++CI) {
        Value *Arg = *CI;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        AAMDNodes AAInfo;
        I->getAAMetadata(AAInfo);
        MemoryLocation Loc(Arg, MemoryLocation::UnknownSize, AAInfo);

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (MRB & MRI_Mod)
          // Writes non-local memory. Give up.
          return MAK_MayWrite;
        if (MRB & MRI_Ref)
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously! Check if they
    // read or write memory.
    if (I->mayWriteToMemory())
      // Writes memory. Just give up.
      return MAK_MayWrite;

    // If this instruction may read memory, remember that.
    ReadsMemory |= I->mayReadFromMemory();
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}

/// Deduce readonly/readnone attributes for the SCC.
template <typename AARGetterT>
static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT AARGetter) {
  // Check if any of the functions in the SCC read or write memory. If they
  // write memory then they can't be marked readnone or readonly.
  bool ReadsMemory = false;
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);

    switch (checkFunctionMemoryAccess(*F, AAR, SCCNodes)) {
    case MAK_MayWrite:
      return false;
    case MAK_ReadOnly:
      ReadsMemory = true;
      break;
    case MAK_ReadNone:
      // Nothing to do!
      break;
    }
  }

  // Success! Functions in this SCC do not access memory, or only read memory.
  // Give them the appropriate attribute.
  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->doesNotAccessMemory())
      // Already perfect!
      continue;

    if (F->onlyReadsMemory() && ReadsMemory)
      // No change.
      continue;

    MadeChange = true;

    // Clear out any existing attributes.
    AttrBuilder B;
    B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
    F->removeAttributes(
        AttributeSet::FunctionIndex,
        AttributeSet::get(F->getContext(), AttributeSet::FunctionIndex, B));

    // Add in the new attribute.
    F->addAttribute(AttributeSet::FunctionIndex,
                    ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);

    if (ReadsMemory)
      ++NumReadOnly;
    else
      ++NumReadNone;
  }

  return MadeChange;
}

namespace {
/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  Argument *Definition;
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert.
  typedef std::map<Argument *, ArgumentGraphNode> ArgumentMapTy;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator iterator;

  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes)
      : Captured(false), SCCNodes(SCCNodes) {}

  void tooManyUses() override { Captured = true; }

  bool captured(const Use *U) override {
    CallSite CS(U->getUser());
    if (!CS.getInstruction()) {
      Captured = true;
      return true;
    }

    Function *F = CS.getCalledFunction();
    if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
      Captured = true;
      return true;
    }

    // Note: the callee and the two successor blocks *follow* the argument
    // operands. This means there is no need to adjust UseIndex to account for
    // these.
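    // std::distance from arg_begin() over the call's use list therefore
    // yields a valid data operand index for U, as asserted below.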

    unsigned UseIndex =
        std::distance(const_cast<const Use *>(CS.arg_begin()), U);

    assert(UseIndex < CS.data_operands_size() &&
           "Indirect function calls should have been filtered above!");

    if (UseIndex >= CS.getNumArgOperands()) {
      // Data operand, but not an argument operand -- must be a bundle operand.
      assert(CS.hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand
      // bundle use. In this case it does not matter if the callee is within
      // our SCC or not -- we've been captured in some unknown way, and we
      // have to be conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  bool Captured; // True only if certainly captured (used outside our SCC).
  SmallVector<Argument *, 4> Uses; // Uses within our SCC.

  const SCCNodeSet &SCCNodes;
};
}

namespace llvm {
template <> struct GraphTraits<ArgumentGraphNode *> {
  typedef ArgumentGraphNode NodeType;
  typedef SmallVectorImpl<ArgumentGraphNode *>::iterator ChildIteratorType;

  static inline NodeType *getEntryNode(NodeType *A) { return A; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->Uses.begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->Uses.end();
  }
};
template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeType *getEntryNode(ArgumentGraph *AG) {
    return AG->getEntryNode();
  }
  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }
  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};
}

/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {

  SmallVector<Use *, 32> Worklist;
  SmallSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value
      // isn't.
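      // Propagate the uses of the derived pointer so they are analyzed in
      // the same way as direct uses of the argument.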
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands. This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow
      // corresponding to an operand bundle use, these cannot participate in
      // the optimistic SCC analysis. Instead, we model the operand bundle
      // uses as arguments in a call to a function external to the SCC.
      if (!SCCNodes.count(&*std::next(F->arg_begin(), UseIndex)) ||
          IsOperandBundleUse) {

        // The accessors used on CallSite here do the right thing for calls
        // and invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}

/// Deduce nocapture attributes for the SCC.
static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  ArgumentGraph AG;

  AttrBuilder B;
  B.addAttribute(Attribute::NoCapture);

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
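    // (Being readonly means the function cannot stash an argument pointer
    // anywhere in memory, and with no return value and no unwinding there is
    // no other way for the pointer to become visible to the caller.)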
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          A->addAttr(AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
          ++NumNoCapture;
          Changed = true;
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(
                AttributeSet::get(F->getContext(), A->getArgNo() + 1, B));
            ++NumNoCapture;
            Changed = true;
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (SmallVectorImpl<Argument *>::iterator
                     UI = Tracker.Uses.begin(),
                     UE = Tracker.Uses.end();
                 UI != UE; ++UI) {
              Node->Uses.push_back(AG[*UI]);
              if (*UI != &*A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone without doing an SCC?
        // Note that we don't allow any calls at all here, or else our result
        // will be dependent on the iteration order through the functions in
        // the SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
        if (R != Attribute::None) {
          AttrBuilder B;
          B.addAttribute(R);
          A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
          Changed = true;
          R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        }
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already
  // been made. If the definition doesn't have a 'nocapture' attribute by now,
  // it captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // eg. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ++NumNoCapture;
        Changed = true;
      }
      continue;
    }

    bool SCCCaptured = false;
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end(); I != E; ++I) {
      ArgumentSCCNodes.insert((*I)->Definition);
    }

    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (SmallVectorImpl<ArgumentGraphNode *>::iterator UI = N->Uses.begin(),
                                                          UE = N->Uses.end();
           UI != UE; ++UI) {
        Argument *A = (*UI)->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
      ++NumNoCapture;
      Changed = true;
    }

    // We also want to compute readonly/readnone. With a small number of false
    // negatives, we can assume that any pointer which is captured isn't going
    // to be provably readonly or readnone, since by definition we can't
    // analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary
    // memory. Also, a readonly/readnone pointer may be returned, but
    // returning a pointer is capturing it.

    Attribute::AttrKind ReadAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
      if (K == Attribute::ReadNone)
        continue;
      if (K == Attribute::ReadOnly) {
        ReadAttr = Attribute::ReadOnly;
        continue;
      }
      ReadAttr = K;
      break;
    }

    if (ReadAttr != Attribute::None) {
      AttrBuilder B, R;
      B.addAttribute(ReadAttr);
      R.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        // Clear out existing readonly/readnone attributes.
        A->removeAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, R));
        A->addAttr(AttributeSet::get(A->getContext(), A->getArgNo() + 1, B));
        ReadAttr == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        Changed = true;
      }
    }
  }

  return Changed;
}

/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
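///
/// For example, a hypothetical allocation wrapper such as
///   int *make_node(void) { return (int *)malloc(sizeof(int)); }
/// is malloc-like: every return value is either null or a fresh allocation
/// that nothing else aliases, so its return can be marked noalias.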
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(I->getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(RVI);
        if (CS.paramHasAttr(0, Attribute::NoAlias))
          break;
        if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
          break;
      } // fall-through
      default:
        return false; // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}

/// Deduce noalias attributes for the SCC.
static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) {
  // Check each function in turn, determining which functions return noalias
  // pointers.
  for (Function *F : SCCNodes) {
    // Already noalias.
    if (F->doesNotAlias(0))
      continue;

    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate noalias return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    if (!isFunctionMallocLike(F, SCCNodes))
      return false;
  }

  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->doesNotAlias(0) || !F->getReturnType()->isPointerTy())
      continue;

    F->setDoesNotAlias(0);
    ++NumNoAlias;
    MadeChange = true;
  }

  return MadeChange;
}

/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
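///
/// A speculative "true" is only acted upon by the caller once every function
/// in the SCC has been shown to return nonnull (at least speculatively); see
/// addNonNullAttrs below.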
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            const TargetLibraryInfo &TLI, bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good.
    if (isKnownNonNull(RetVal, &TLI))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::Select: {
      SelectInst *SI = cast<SelectInst>(RVI);
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(RVI);
      Function *Callee = CS.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise.
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null.
    };
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}

/// Deduce nonnull attributes for the SCC.
static bool addNonNullAttrs(const SCCNodeSet &SCCNodes,
                            const TargetLibraryInfo &TLI) {
  // Speculate that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  bool MadeChange = false;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                        Attribute::NonNull))
      continue;

    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, TLI, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC.
        DEBUG(dbgs() << "Eagerly marking " << F->getName() << " as nonnull\n");
        F->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
        ++NumNonNullReturn;
        MadeChange = true;
      }
      continue;
    }
    // At least one function returns something which could be null, can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  if (SCCReturnsNonNull) {
    for (Function *F : SCCNodes) {
      if (F->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                          Attribute::NonNull) ||
          !F->getReturnType()->isPointerTy())
        continue;

      DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
      F->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
      ++NumNonNullReturn;
      MadeChange = true;
    }
  }

  return MadeChange;
}

/// Remove the convergent attribute from all functions in the SCC if every
/// callsite within the SCC is not convergent (except for calls to functions
/// within the SCC). Returns true if changes were made.
static bool removeConvergentAttrs(const SCCNodeSet &SCCNodes) {
  // For every function in the SCC, ensure that either
  //  * it is not convergent, or
  //  * we can remove its convergent attribute.
  bool HasConvergentFn = false;
  for (Function *F : SCCNodes) {
    if (!F->isConvergent()) continue;
    HasConvergentFn = true;

    // Can't remove convergent from function declarations.
    if (F->isDeclaration()) return false;

    // Can't remove convergent if any of our functions has a convergent call
    // to a function not in the SCC.
    for (Instruction &I : instructions(*F)) {
      CallSite CS(&I);
      // Bail if CS is a convergent call to a function not in the SCC.
      if (CS && CS.isConvergent() &&
          SCCNodes.count(CS.getCalledFunction()) == 0)
        return false;
    }
  }

  // If the SCC doesn't have any convergent functions, we have nothing to do.
  if (!HasConvergentFn) return false;

  // If we got here, all of the calls the SCC makes to functions not in the
  // SCC are non-convergent. Therefore all of the SCC's functions can also be
  // made non-convergent. We'll remove the attr from the callsites in
  // InstCombineCalls.
  for (Function *F : SCCNodes) {
    if (!F->isConvergent()) continue;

    DEBUG(dbgs() << "Removing convergent attr from fn " << F->getName()
                 << "\n");
    F->setNotConvergent();
  }
  return true;
}

static bool setDoesNotRecurse(Function &F) {
  if (F.doesNotRecurse())
    return false;
  F.setDoesNotRecurse();
  ++NumNoRecurse;
  return true;
}

static bool addNoRecurseAttrs(const SCCNodeSet &SCCNodes) {
  // Try and identify functions that do not recurse.

  // If the SCC contains multiple nodes we know for sure there is recursion.
  if (SCCNodes.size() != 1)
    return false;

  Function *F = *SCCNodes.begin();
  if (!F || F->isDeclaration() || F->doesNotRecurse())
    return false;

  // If all of the calls in F are identifiable and are to norecurse functions,
  // F is norecurse. This check also detects self-recursion: F is not
  // currently marked norecurse, so any call from F to F will fail this check.
  for (Instruction &I : instructions(*F))
    if (auto CS = CallSite(&I)) {
      Function *Callee = CS.getCalledFunction();
      if (!Callee || Callee == F || !Callee->doesNotRecurse())
        // Function calls a potentially recursive function.
        return false;
    }

  // Every call was to a non-recursive function other than this function, and
  // we have no indirect recursion as the SCC size is one. This function
  // cannot recurse.
  return setDoesNotRecurse(*F);
}

PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM) {
  Module &M = *C.begin()->getFunction().getParent();
  const ModuleAnalysisManager &MAM =
      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C).getManager();
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();

  // FIXME: Need some way to make it more reasonable to assume that this is
  // always cached.
  TargetLibraryInfo &TLI = *MAM.getCachedResult<TargetLibraryAnalysis>(M);

  // We pass a lambda into functions to wire them up to the analysis manager
  // for getting function analyses.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };

  // Fill SCCNodes with the elements of the SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool HasUnknownCall = false;
  for (LazyCallGraph::Node &N : C) {
    Function &F = N.getFunction();
    if (F.hasFnAttribute(Attribute::OptimizeNone)) {
      // Treat any function we're trying not to optimize as if it were an
      // indirect call and omit it from the node set used below.
      HasUnknownCall = true;
      continue;
    }
    // Track whether any functions in this SCC have an unknown call edge.
    // Note: if this is ever a performance hit, we can common it with
    // subsequent routines which also do scans over the instructions of the
    // function.
    if (!HasUnknownCall)
      for (Instruction &I : instructions(F))
        if (auto CS = CallSite(&I))
          if (!CS.getCalledFunction()) {
            HasUnknownCall = true;
            break;
          }

    SCCNodes.insert(&F);
  }

  bool Changed = false;
  Changed |= addReadAttrs(SCCNodes, AARGetter);
  Changed |= addArgumentAttrs(SCCNodes);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!HasUnknownCall) {
    Changed |= addNoAliasAttrs(SCCNodes);
    Changed |= addNonNullAttrs(SCCNodes, TLI);
    Changed |= removeConvergentAttrs(SCCNodes);
    Changed |= addNoRecurseAttrs(SCCNodes);
  }

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}
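
// The legacy pass manager pass below performs the same per-SCC deductions as
// the pass above, but obtains its analyses through the CallGraphSCCPass
// infrastructure rather than the new pass manager's analysis managers.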
namespace {
struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid
  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

private:
  TargetLibraryInfo *TLI;
};
}

char PostOrderFunctionAttrsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                    "Deduce function attributes", false, false)

Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
  return new PostOrderFunctionAttrsLegacyPass();
}

bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  bool Changed = false;

  // We compute dedicated AA results for each function in the SCC as needed.
  // We use a lambda referencing external objects so that they live long
  // enough to be queried, but we re-use them each time.
  Optional<BasicAAResult> BAR;
  Optional<AAResults> AAR;
  auto AARGetter = [&](Function &F) -> AAResults & {
    BAR.emplace(createLegacyPMBasicAAResult(*this, F));
    AAR.emplace(createLegacyPMAAResults(*this, F, *BAR));
    return *AAR;
  };

  // Fill SCCNodes with the elements of the SCC. Used for quickly looking up
  // whether a given CallGraphNode is in this SCC. Also track whether there
  // are any external or opt-none nodes that will prevent us from optimizing
  // any part of the SCC.
  SCCNodeSet SCCNodes;
  bool ExternalNode = false;
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F || F->hasFnAttribute(Attribute::OptimizeNone)) {
      // External node or function we're trying not to optimize - we both
      // avoid transforming them and avoid leveraging information they
      // provide.
      ExternalNode = true;
      continue;
    }

    SCCNodes.insert(F);
  }

  Changed |= addReadAttrs(SCCNodes, AARGetter);
  Changed |= addArgumentAttrs(SCCNodes);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!ExternalNode) {
    Changed |= addNoAliasAttrs(SCCNodes);
    Changed |= addNonNullAttrs(SCCNodes, *TLI);
    Changed |= removeConvergentAttrs(SCCNodes);
    Changed |= addNoRecurseAttrs(SCCNodes);
  }

  return Changed;
}

namespace {
/// A pass to do RPO deduction and propagation of function attributes.
///
/// This pass provides a general RPO or "top down" propagation of
/// function attributes. For a few (rare) cases, we can deduce significantly
/// more about function attributes by working in RPO, so this pass
/// provides the complement to the post-order pass above where the majority
/// of deduction is performed.
// FIXME: Currently there is no RPO CGSCC pass structure to slide into and so
// this is a boring module pass, but eventually it should be an RPO CGSCC pass
// when such infrastructure is available.
struct ReversePostOrderFunctionAttrs : public ModulePass {
  static char ID; // Pass identification, replacement for typeid
  ReversePostOrderFunctionAttrs() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
  }
};
}

char ReversePostOrderFunctionAttrs::ID = 0;
INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrs, "rpo-functionattrs",
                      "Deduce function attributes in RPO", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrs, "rpo-functionattrs",
                    "Deduce function attributes in RPO", false, false)

Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrs();
}

static bool addNoRecurseAttrsTopDown(Function &F) {
  // We check the preconditions for the function prior to calling this to
  // avoid the cost of building up a reversible post-order list. We assert
  // them here to make sure none of the invariants this relies on were
  // violated.
  assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
  assert(!F.doesNotRecurse() &&
         "This function has already been deduced as norecurse!");
  assert(F.hasInternalLinkage() &&
         "Can only do top-down deduction for internal linkage functions!");

  // If F is internal and all of its uses are calls from non-recursive
  // functions, then none of its calls could in fact recurse without going
  // through a function marked norecurse, and so we can mark this function too
  // as norecurse. Note that the uses must actually be calls -- otherwise
  // a pointer to this function could be returned from a norecurse function
  // but this function could be recursively (indirectly) called. Note that
  // this also detects if F is directly recursive as F is not yet marked as
  // a norecurse function.
  for (auto *U : F.users()) {
    auto *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    CallSite CS(I);
    if (!CS || !CS.getParent()->getParent()->doesNotRecurse())
      return false;
  }
  return setDoesNotRecurse(F);
}

bool ReversePostOrderFunctionAttrs::runOnModule(Module &M) {
  // We only have a post-order SCC traversal (because SCCs are inherently
  // discovered in post-order), so we accumulate them in a vector and then
  // walk it in reverse. This is simpler than using the RPO iterator
  // infrastructure because we need to combine SCC detection and the PO walk
  // of the call graph. We can also cheat egregiously because we're primarily
  // interested in synthesizing norecurse and so we need only save the
  // singleton SCCs, as SCCs with multiple functions in them will clearly be
  // recursive.
  auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  SmallVector<Function *, 16> Worklist;
  for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
    if (I->size() != 1)
      continue;

    Function *F = I->front()->getFunction();
    if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
        F->hasInternalLinkage())
      Worklist.push_back(F);
  }

  bool Changed = false;
  for (auto *F : reverse(Worklist))
    Changed |= addNoRecurseAttrsTopDown(*F);

  return Changed;
}