//===- FunctionAttrs.cpp - Pass which marks function attributes ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements interprocedural passes which walk the
/// call-graph deducing and/or propagating function attributes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include <cassert>
#include <iterator>
#include <map>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "functionattrs"

STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumWriteOnly, "Number of functions marked writeonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReturned, "Number of arguments marked returned");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");
STATISTIC(NumNoUnwind, "Number of functions marked as nounwind");

// FIXME: This is disabled by default to avoid exposing security vulnerabilities
// in C/C++ code compiled by clang:
// http://lists.llvm.org/pipermail/cfe-dev/2017-January/052066.html
static cl::opt<bool> EnableNonnullArgPropagation(
    "enable-nonnull-arg-prop", cl::Hidden,
    cl::desc("Try to propagate nonnull argument attributes from callsites to "
             "caller functions."));

static cl::opt<bool> DisableNoUnwindInference(
    "disable-nounwind-inference", cl::Hidden,
    cl::desc("Stop inferring nounwind attribute during function-attrs pass"));

namespace {

using SCCNodeSet = SmallSetVector<Function *, 8>;

} // end anonymous namespace

/// Returns the memory access attribute for function F using AAR for AA results,
/// where SCCNodes is the current SCC.
///
/// If ThisBody is true, this function may examine the function body and will
/// return a result pertaining to this copy of the function. If it is false, the
/// result will be based only on AA results for the function declaration; it
/// will be assumed that some other (perhaps less optimized) version of the
/// function may be selected at link time.
static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
                                                  AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  if (!ThisBody) {
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    if (AliasAnalysis::doesNotReadMemory(MRB))
      return MAK_WriteOnly;

    // Conservatively assume it reads and writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    CallSite CS(cast<Value>(I));
    if (CS) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles. Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!CS.hasOperandBundles() && CS.getCalledFunction() &&
          SCCNodes.count(CS.getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);
      ModRefInfo MRI = createModRefInfo(MRB);

      // If the call doesn't access memory, we're done.
      if (isNoModRef(MRI))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, note it.
        if (isModSet(MRI))
          WritesMemory = true;
        // If it reads, note it.
        if (isRefSet(MRI))
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
           CI != CE; ++CI) {
        Value *Arg = *CI;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        AAMDNodes AAInfo;
        I->getAAMetadata(AAInfo);
        MemoryLocation Loc(Arg, LocationSize::unknown(), AAInfo);

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (isModSet(MRI))
          // Writes non-local memory.
          WritesMemory = true;
        if (isRefSet(MRI))
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously! Check if they
    // read or write memory.
    //
    // If this instruction may write to memory, remember that.
    WritesMemory |= I->mayWriteToMemory();

    // If this instruction may read memory, remember that.
    ReadsMemory |= I->mayReadFromMemory();
  }

  if (WritesMemory) {
    if (!ReadsMemory)
      return MAK_WriteOnly;
    else
      return MAK_MayWrite;
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}

MemoryAccessKind llvm::computeFunctionBodyMemoryAccess(Function &F,
                                                       AAResults &AAR) {
  return checkFunctionMemoryAccess(F, /*ThisBody=*/true, AAR, {});
}
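
// Illustrative example (not part of the original source): for a function such
// as
//
//   static int G;
//   int sum(const int *P) { return *P + G; }
//
// the scan above finds only non-volatile loads of non-local memory, so the
// body is classified MAK_ReadOnly. If the only accesses were to a non-escaping
// alloca, pointsToConstantMemory(..., /*OrLocal=*/true) would let the scan
// skip them and the result would be MAK_ReadNone instead.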

/// Deduce readonly/readnone attributes for the SCC.
template <typename AARGetterT>
static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter) {
  // Check if any of the functions in the SCC read or write memory. If they
  // write memory then they can't be marked readnone or readonly.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);

    // Non-exact function definitions may not be selected at link time, and an
    // alternative version that writes to memory may be selected. See the
    // comment on GlobalValue::isDefinitionExact for more details.
    switch (checkFunctionMemoryAccess(*F, F->hasExactDefinition(),
                                      AAR, SCCNodes)) {
    case MAK_MayWrite:
      return false;
    case MAK_ReadOnly:
      ReadsMemory = true;
      break;
    case MAK_WriteOnly:
      WritesMemory = true;
      break;
    case MAK_ReadNone:
      // Nothing to do!
      break;
    }
  }

  // Success! Functions in this SCC do not access memory, or only read memory.
  // Give them the appropriate attribute.
  bool MadeChange = false;

  assert(!(ReadsMemory && WritesMemory) &&
         "Function marked read-only and write-only");
  for (Function *F : SCCNodes) {
    if (F->doesNotAccessMemory())
      // Already perfect!
      continue;

    if (F->onlyReadsMemory() && ReadsMemory)
      // No change.
      continue;

    if (F->doesNotReadMemory() && WritesMemory)
      continue;

    MadeChange = true;

    // Clear out any existing attributes.
    F->removeFnAttr(Attribute::ReadOnly);
    F->removeFnAttr(Attribute::ReadNone);
    F->removeFnAttr(Attribute::WriteOnly);

    if (!WritesMemory && !ReadsMemory) {
      // Clear out any "access range attributes" if readnone was deduced.
      F->removeFnAttr(Attribute::ArgMemOnly);
      F->removeFnAttr(Attribute::InaccessibleMemOnly);
      F->removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }

    // Add in the new attribute.
    if (WritesMemory && !ReadsMemory)
      F->addFnAttr(Attribute::WriteOnly);
    else
      F->addFnAttr(ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);

    if (WritesMemory && !ReadsMemory)
      ++NumWriteOnly;
    else if (ReadsMemory)
      ++NumReadOnly;
    else
      ++NumReadNone;
  }

  return MadeChange;
}
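
// Illustrative example (not part of the original source): given the
// two-function SCC
//
//   int even(int N) { return N == 0 ? 1 : odd(N - 1); }
//   int odd(int N)  { return N == 0 ? 0 : even(N - 1); }
//
// the mutually recursive calls are ignored (both callees are in SCCNodes),
// nothing else touches memory, and both functions are marked readnone. A
// single store to a global anywhere in the SCC would instead produce
// MAK_MayWrite and block readonly/readnone for the whole SCC.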

namespace {

/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  Argument *Definition;
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert.
  using ArgumentMapTy = std::map<Argument *, ArgumentGraphNode>;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  using iterator = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes) : SCCNodes(SCCNodes) {}

  void tooManyUses() override { Captured = true; }

  bool captured(const Use *U) override {
    CallSite CS(U->getUser());
    if (!CS.getInstruction()) {
      Captured = true;
      return true;
    }

    Function *F = CS.getCalledFunction();
    if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
      Captured = true;
      return true;
    }

    // Note: the callee and the two successor blocks *follow* the argument
    // operands. This means there is no need to adjust UseIndex to account for
    // these.

    unsigned UseIndex =
        std::distance(const_cast<const Use *>(CS.arg_begin()), U);

    assert(UseIndex < CS.data_operands_size() &&
           "Indirect function calls should have been filtered above!");

    if (UseIndex >= CS.getNumArgOperands()) {
      // Data operand, but not an argument operand -- must be a bundle operand.
      assert(CS.hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand bundle
      // use. In this case it does not matter if the callee is within our SCC
      // or not -- we've been captured in some unknown way, and we have to be
      // conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  // True only if certainly captured (used outside our SCC).
  bool Captured = false;

  // Uses within our SCC.
  SmallVector<Argument *, 4> Uses;

  const SCCNodeSet &SCCNodes;
};

} // end anonymous namespace
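
// Illustrative example (not part of the original source): for
//
//   void f(int *X) { g(X); }   // f and g in the same SCC
//
// CaptureTracking reports the call "g(X)" as a potential capture, but the
// tracker above records g's corresponding parameter in Uses instead of
// setting Captured, deferring the decision to the argument-SCC analysis in
// addArgumentAttrs() below. A call to a function outside the SCC, an indirect
// call, or an operand bundle use sets Captured immediately.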

namespace llvm {

template <> struct GraphTraits<ArgumentGraphNode *> {
  using NodeRef = ArgumentGraphNode *;
  using ChildIteratorType = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  static NodeRef getEntryNode(NodeRef A) { return A; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Uses.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Uses.end(); }
};

template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeRef getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); }

  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }

  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};

} // end namespace llvm

/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {
  SmallVector<Use *, 32> Worklist;
  SmallPtrSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands. This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to an operand bundle use, these cannot participate in the optimistic
      // SCC analysis. Instead, we model the operand bundle uses as arguments
      // in a call to a function external to the SCC.
      if (IsOperandBundleUse ||
          !SCCNodes.count(&*std::next(F->arg_begin(), UseIndex))) {

        // The accessors used on CallSite here do the right thing for calls and
        // invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}
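
// Illustrative example (not part of the original source): for the argument %P
// in
//
//   define i32 @f(i32* %P) {
//     %G = getelementptr i32, i32* %P, i64 1
//     %V = load i32, i32* %G
//     ret i32 %V
//   }
//
// the worklist walk follows %P through the GEP to the non-volatile load, sets
// IsRead, and returns Attribute::ReadOnly. A store through %P, or a volatile
// load, would return Attribute::None instead.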

/// Deduce returned attributes for the SCC.
static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  // Check each function in turn, determining if an argument is always returned.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    if (F->getReturnType()->isVoidTy())
      continue;

    // There is nothing to do if an argument is already marked as 'returned'.
    if (llvm::any_of(F->args(),
                     [](const Argument &Arg) { return Arg.hasReturnedAttr(); }))
      continue;

    auto FindRetArg = [&]() -> Value * {
      Value *RetArg = nullptr;
      for (BasicBlock &BB : *F)
        if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
          // Note that stripPointerCasts should look through functions with
          // returned arguments.
          Value *RetVal = Ret->getReturnValue()->stripPointerCasts();
          if (!isa<Argument>(RetVal) || RetVal->getType() != F->getReturnType())
            return nullptr;

          if (!RetArg)
            RetArg = RetVal;
          else if (RetArg != RetVal)
            return nullptr;
        }

      return RetArg;
    };

    if (Value *RetArg = FindRetArg()) {
      auto *A = cast<Argument>(RetArg);
      A->addAttr(Attribute::Returned);
      ++NumReturned;
      Changed = true;
    }
  }

  return Changed;
}
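
// Illustrative example (not part of the original source): in
//
//   define i8* @copy_like(i8* %Dst, i8* %Src) {
//     ...
//     ret i8* %Dst
//   }
//
// every return yields %Dst, so FindRetArg() returns it and the first
// parameter is marked 'returned'. If different returns yielded different
// arguments, FindRetArg() would return nullptr and nothing would change.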

/// If a callsite has arguments that are also arguments to the parent function,
/// try to propagate attributes from the callsite's arguments to the parent's
/// arguments. This may be important because inlining can cause information
/// loss when attribute knowledge disappears with the inlined call.
static bool addArgumentAttrsFromCallsites(Function &F) {
  if (!EnableNonnullArgPropagation)
    return false;

  bool Changed = false;

  // For an argument attribute to transfer from a callsite to the parent, the
  // call must be guaranteed to execute every time the parent is called.
  // Conservatively, just check for calls in the entry block that are
  // guaranteed to execute.
  // TODO: This could be enhanced by testing if the callsite post-dominates the
  // entry block or by doing simple forward walks or backward walks to the
  // callsite.
  BasicBlock &Entry = F.getEntryBlock();
  for (Instruction &I : Entry) {
    if (auto CS = CallSite(&I)) {
      if (auto *CalledFunc = CS.getCalledFunction()) {
        for (auto &CSArg : CalledFunc->args()) {
          if (!CSArg.hasNonNullAttr())
            continue;

          // If the non-null callsite argument operand is an argument to 'F'
          // (the caller) and the call is guaranteed to execute, then the value
          // must be non-null throughout 'F'.
          auto *FArg = dyn_cast<Argument>(CS.getArgOperand(CSArg.getArgNo()));
          if (FArg && !FArg->hasNonNullAttr()) {
            FArg->addAttr(Attribute::NonNull);
            Changed = true;
          }
        }
      }
    }
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
  }

  return Changed;
}
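
// Illustrative example (not part of the original source): if a caller begins
// with
//
//   void caller(int *P) { callee(P); ... }  // callee's parameter is nonnull
//
// and the call is reached unconditionally from the entry block, then P must be
// non-null on every well-defined execution of 'caller', so 'caller' can mark
// its own parameter nonnull. This is gated behind -enable-nonnull-arg-prop;
// see the FIXME near the flag's definition above.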

/// Deduce nocapture attributes for the SCC.
static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  ArgumentGraph AG;

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    Changed |= addArgumentAttrsFromCallsites(*F);

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          A->addAttr(Attribute::NoCapture);
          ++NumNoCapture;
          Changed = true;
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(Attribute::NoCapture);
            ++NumNoCapture;
            Changed = true;
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (Argument *Use : Tracker.Uses) {
              Node->Uses.push_back(AG[Use]);
              if (Use != &*A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone without doing an SCC?
        // Note that we don't allow any calls at all here, or else our result
        // will be dependent on the iteration order through the functions in
        // the SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
        if (R != Attribute::None) {
          A->addAttr(R);
          Changed = true;
          R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        }
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made. If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // e.g. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(Attribute::NoCapture);
        ++NumNoCapture;
        Changed = true;
      }
      continue;
    }

    bool SCCCaptured = false;
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (ArgumentGraphNode *I : ArgumentSCC) {
      ArgumentSCCNodes.insert(I->Definition);
    }

    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (ArgumentGraphNode *Use : N->Uses) {
        Argument *A = Use->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(Attribute::NoCapture);
      ++NumNoCapture;
      Changed = true;
    }

    // We also want to compute readonly/readnone. With a small number of false
    // negatives, we can assume that any pointer which is captured isn't going
    // to be provably readonly or readnone, since by definition we can't
    // analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    Attribute::AttrKind ReadAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
      if (K == Attribute::ReadNone)
        continue;
      if (K == Attribute::ReadOnly) {
        ReadAttr = Attribute::ReadOnly;
        continue;
      }
      ReadAttr = K;
      break;
    }

    if (ReadAttr != Attribute::None) {
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        // Clear out existing readonly/readnone attributes.
        A->removeAttr(Attribute::ReadOnly);
        A->removeAttr(Attribute::ReadNone);
        A->addAttr(ReadAttr);
        ReadAttr == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
        Changed = true;
      }
    }
  }

  return Changed;
}
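
// Illustrative example (not part of the original source): in the mutually
// recursive pair
//
//   void f(int *X) { if (...) g(X); }
//   void g(int *Y) { if (...) f(Y); }
//
// X flows only into g's Y and vice versa, so {X, Y} forms a single argument
// SCC with no use escaping it; the loop above then marks both arguments
// nocapture. If either function also passed its pointer to a function outside
// the SCC, the whole argument SCC would be treated as captured.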

/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(RVI);
        if (CS.hasRetAttr(Attribute::NoAlias))
          break;
        if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
          break;
        LLVM_FALLTHROUGH;
      }
      default:
        return false; // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, /*ReturnCaptures=*/false,
                             /*StoreCaptures=*/false))
      return false;
  }

  return true;
}

/// Deduce noalias attributes for the SCC.
static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) {
  // Check each function in turn, determining which functions return noalias
  // pointers.
  for (Function *F : SCCNodes) {
    // Already noalias.
    if (F->returnDoesNotAlias())
      continue;

    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate noalias return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    if (!isFunctionMallocLike(F, SCCNodes))
      return false;
  }

  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->returnDoesNotAlias() ||
        !F->getReturnType()->isPointerTy())
      continue;

    F->setReturnDoesNotAlias();
    ++NumNoAlias;
    MadeChange = true;
  }

  return MadeChange;
}
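
// Illustrative example (not part of the original source): a wrapper such as
//
//   int *make_buffer(unsigned N) { return (int *)malloc(N * sizeof(int)); }
//
// is "malloc-like": the returned pointer flows only from a call whose return
// is already annotated noalias (as malloc typically is), so the wrapper's own
// return is marked noalias as well. Returning an argument, or a pointer that
// may be captured before the return, defeats the deduction.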

/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  auto &DL = F->getParent()->getDataLayout();

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good.
    if (isKnownNonZero(RetVal, DL))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::Select: {
      SelectInst *SI = cast<SelectInst>(RVI);
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(RVI);
      Function *Callee = CS.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise.
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null.
    }
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}

/// Deduce nonnull attributes for the SCC.
static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
  // Speculate that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  bool MadeChange = false;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                        Attribute::NonNull))
      continue;

    // We can infer and propagate function attributes only when we know that
    // the definition we'll get at link time is *exactly* the definition we
    // see now. For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC.
        LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
                          << " as nonnull\n");
        F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
        ++NumNonNullReturn;
        MadeChange = true;
      }
      continue;
    }
    // At least one function returns something which could be null; we can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  if (SCCReturnsNonNull) {
    for (Function *F : SCCNodes) {
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                          Attribute::NonNull) ||
          !F->getReturnType()->isPointerTy())
        continue;

      LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
      F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
      ++NumNonNullReturn;
      MadeChange = true;
    }
  }

  return MadeChange;
}
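
// Illustrative example (not part of the original source): in
//
//   static int G;
//   int *f(bool B) { return B ? &G : f(!B); }
//
// &G is locally known non-null and the self-call is speculatively assumed to
// return nonnull, so once every function in the SCC checks out, all their
// returns are marked nonnull together. A single path that can return a null
// pointer anywhere in the SCC clears SCCReturnsNonNull and blocks the
// speculative marking (eagerly proven functions are still marked).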

namespace {

/// Collects a set of attribute inference requests and performs them all in one
/// go on a single SCC Node. Inference involves scanning function bodies
/// looking for instructions that violate attribute assumptions.
/// As soon as all the bodies are fine we are free to set the attribute.
/// Customization of inference for individual attributes is performed by
/// providing a handful of predicates for each attribute.
class AttributeInferer {
public:
  /// Describes a request for inference of a single attribute.
  struct InferenceDescriptor {

    /// Returns true if this function does not have to be handled.
    /// General intent for this predicate is to provide an optimization
    /// for functions that do not need this attribute inference at all
    /// (say, for functions that already have the attribute).
    std::function<bool(const Function &)> SkipFunction;

    /// Returns true if this instruction violates attribute assumptions.
    std::function<bool(Instruction &)> InstrBreaksAttribute;

    /// Sets the inferred attribute for this function.
    std::function<void(Function &)> SetAttribute;

    /// Attribute we derive.
    Attribute::AttrKind AKind;

    /// If true, only "exact" definitions can be used to infer this attribute.
    /// See GlobalValue::isDefinitionExact.
    bool RequiresExactDefinition;

    InferenceDescriptor(Attribute::AttrKind AK,
                        std::function<bool(const Function &)> SkipFunc,
                        std::function<bool(Instruction &)> InstrScan,
                        std::function<void(Function &)> SetAttr,
                        bool ReqExactDef)
        : SkipFunction(SkipFunc), InstrBreaksAttribute(InstrScan),
          SetAttribute(SetAttr), AKind(AK),
          RequiresExactDefinition(ReqExactDef) {}
  };

private:
  SmallVector<InferenceDescriptor, 4> InferenceDescriptors;

public:
  void registerAttrInference(InferenceDescriptor AttrInference) {
    InferenceDescriptors.push_back(AttrInference);
  }

  bool run(const SCCNodeSet &SCCNodes);
};

/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
bool AttributeInferer::run(const SCCNodeSet &SCCNodes) {
  SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
  // Go through all the functions in SCC and check corresponding attribute
  // assumptions for each of them. Attributes that are invalid for this SCC
  // will be removed from InferInSCC.
  for (Function *F : SCCNodes) {

    // No attributes whose assumptions are still valid - done.
    if (InferInSCC.empty())
      return false;

    // Check if our attributes ever need scanning/can be scanned.
    llvm::erase_if(InferInSCC, [F](const InferenceDescriptor &ID) {
      if (ID.SkipFunction(*F))
        return false;

      // Remove from further inference (invalidate) when visiting a function
      // that has no instructions to scan/has an unsuitable definition.
      return F->isDeclaration() ||
             (ID.RequiresExactDefinition && !F->hasExactDefinition());
    });

    // For each attribute still in InferInSCC that doesn't explicitly skip F,
    // set up the F instructions scan to verify assumptions of the attribute.
    SmallVector<InferenceDescriptor, 4> InferInThisFunc;
    llvm::copy_if(
        InferInSCC, std::back_inserter(InferInThisFunc),
        [F](const InferenceDescriptor &ID) { return !ID.SkipFunction(*F); });

    if (InferInThisFunc.empty())
      continue;

    // Start instruction scan.
    for (Instruction &I : instructions(*F)) {
      llvm::erase_if(InferInThisFunc, [&](const InferenceDescriptor &ID) {
        if (!ID.InstrBreaksAttribute(I))
          return false;
        // Remove attribute from further inference on any other functions
        // because attribute assumptions have just been violated.
        llvm::erase_if(InferInSCC, [&ID](const InferenceDescriptor &D) {
          return D.AKind == ID.AKind;
        });
        // Remove attribute from the rest of current instruction scan.
        return true;
      });

      if (InferInThisFunc.empty())
        break;
    }
  }

  if (InferInSCC.empty())
    return false;

  bool Changed = false;
  for (Function *F : SCCNodes)
    // At this point InferInSCC contains only inference requests whose
    // assumptions were either:
    //   - explicitly skipped for this function, or
    //   - verified against every scanned instruction.
    // Hence we just go and force the attribute for all non-skipped functions.
    for (auto &ID : InferInSCC) {
      if (ID.SkipFunction(*F))
        continue;
      Changed = true;
      ID.SetAttribute(*F);
    }
  return Changed;
}

} // end anonymous namespace
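
// Illustrative sketch (not part of the original source): a client registers an
// inference request roughly like this, shown here for a hypothetical "NoFoo"
// attribute (Attribute::NoFoo does not exist; it stands in for a real kind):
//
//   AttributeInferer AI;
//   AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
//       Attribute::NoFoo,
//       // Skip functions that already carry the attribute.
//       [](const Function &F) { return F.hasFnAttribute(Attribute::NoFoo); },
//       // Return true if I violates the NoFoo assumption.
//       [](Instruction &I) { return /* violates NoFoo? */ false; },
//       // Commit the attribute once the whole SCC checks out.
//       [](Function &F) { F.addFnAttr(Attribute::NoFoo); },
//       /* RequiresExactDefinition= */ true});
//   bool Changed = AI.run(SCCNodes);
//
// inferAttrsFromFunctionBodies() below does exactly this for the Convergent
// and NoUnwind attributes.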

/// Helper for non-Convergent inference predicate InstrBreaksAttribute.
static bool InstrBreaksNonConvergent(Instruction &I,
                                     const SCCNodeSet &SCCNodes) {
  const CallSite CS(&I);
  // Breaks non-convergent assumption if CS is a convergent call to a function
  // not in the SCC.
  return CS && CS.isConvergent() && SCCNodes.count(CS.getCalledFunction()) == 0;
}

/// Helper for NoUnwind inference predicate InstrBreaksAttribute.
static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes) {
  if (!I.mayThrow())
    return false;
  if (const auto *CI = dyn_cast<CallInst>(&I)) {
    if (Function *Callee = CI->getCalledFunction()) {
      // I is a may-throw call to a function inside our SCC. This doesn't
      // invalidate our current working assumption that the SCC is no-throw; we
      // just have to scan that other function.
      if (SCCNodes.count(Callee) > 0)
        return false;
    }
  }
  return true;
}

/// Infer attributes from all functions in the SCC by scanning every
/// instruction for compliance with the attribute assumptions. Currently it
/// does:
///   - removal of Convergent attribute
///   - addition of NoUnwind attribute
///
/// Returns true if any changes to function attributes were made.
static bool inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes) {

  AttributeInferer AI;

  // Request to remove the convergent attribute from all functions in the SCC
  // if every callsite within the SCC is not convergent (except for calls
  // to functions within the SCC).
  // Note: Removal of the attr from the callsites will happen in
  // InstCombineCalls separately.
  AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
      Attribute::Convergent,
      // Skip non-convergent functions.
      [](const Function &F) { return !F.isConvergent(); },
      // Instructions that break non-convergent assumption.
      [SCCNodes](Instruction &I) {
        return InstrBreaksNonConvergent(I, SCCNodes);
      },
      [](Function &F) {
        LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
                          << "\n");
        F.setNotConvergent();
      },
      /* RequiresExactDefinition= */ false});

  if (!DisableNoUnwindInference)
    // Request to infer nounwind attribute for all the functions in the SCC if
    // every callsite within the SCC is not throwing (except for calls to
    // functions within the SCC). Note that nounwind attribute suffers from
    // derefinement - results may change depending on how functions are
    // optimized. Thus it can be inferred only from exact definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoUnwind,
        // Skip non-throwing functions.
        [](const Function &F) { return F.doesNotThrow(); },
        // Instructions that break non-throwing assumption.
        [SCCNodes](Instruction &I) {
          return InstrBreaksNonThrowing(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nounwind attr to fn " << F.getName() << "\n");
          F.setDoesNotThrow();
          ++NumNoUnwind;
        },
        /* RequiresExactDefinition= */ true});

  // Perform all the requested attribute inference actions.
  return AI.run(SCCNodes);
}
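
// Illustrative example (not part of the original source): in an SCC where
// every instruction satisfies !I.mayThrow() -- plain arithmetic plus direct
// calls that stay within the SCC -- the NoUnwind descriptor survives the scan
// and every function in the SCC is marked nounwind. One call to an external
// function that is not itself marked nounwind breaks the assumption and
// removes the descriptor for the entire SCC.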

static bool setDoesNotRecurse(Function &F) {
  if (F.doesNotRecurse())
    return false;
  F.setDoesNotRecurse();
  ++NumNoRecurse;
  return true;
}

static bool addNoRecurseAttrs(const SCCNodeSet &SCCNodes) {
  // Try and identify functions that do not recurse.

  // If the SCC contains multiple nodes we know for sure there is recursion.
  if (SCCNodes.size() != 1)
    return false;

  Function *F = *SCCNodes.begin();
  if (!F || F->isDeclaration() || F->doesNotRecurse())
    return false;

  // If all of the calls in F are identifiable and are to norecurse functions,
  // F is norecurse. This check also detects self-recursion as F is not
  // currently marked norecurse, so any call from F to F will not be marked
  // norecurse.
  for (auto &BB : *F)
    for (auto &I : BB.instructionsWithoutDebug())
      if (auto CS = CallSite(&I)) {
        Function *Callee = CS.getCalledFunction();
        if (!Callee || Callee == F || !Callee->doesNotRecurse())
          // Function calls a potentially recursive function.
          return false;
      }

  // Every call was to a non-recursive function other than this function, and
  // we have no indirect recursion as the SCC size is one. This function cannot
  // recurse.
  return setDoesNotRecurse(*F);
}
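
// Illustrative example (not part of the original source):
//
//   void leaf()   { /* no calls at all */ }    // trivially norecurse
//   void parent() { leaf(); leaf(); }          // all callees norecurse
//
// Because SCCs are visited bottom-up, 'leaf' is marked norecurse before
// 'parent' is examined, letting the deduction propagate up the call graph in
// a single walk. Any indirect call, or a direct call from 'parent' to itself,
// blocks the deduction for that function.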

template <typename AARGetterT>
static bool deriveAttrsInPostOrder(SCCNodeSet &SCCNodes, AARGetterT &&AARGetter,
                                   bool HasUnknownCall) {
  bool Changed = false;

  // Bail if the SCC only contains optnone functions.
  if (SCCNodes.empty())
    return Changed;

  Changed |= addArgumentReturnedAttrs(SCCNodes);
  Changed |= addReadAttrs(SCCNodes, AARGetter);
  Changed |= addArgumentAttrs(SCCNodes);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!HasUnknownCall) {
    Changed |= addNoAliasAttrs(SCCNodes);
    Changed |= addNonNullAttrs(SCCNodes);
    Changed |= inferAttrsFromFunctionBodies(SCCNodes);
    Changed |= addNoRecurseAttrs(SCCNodes);
  }

  return Changed;
}

PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM,
                                                  LazyCallGraph &CG,
                                                  CGSCCUpdateResult &) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // We pass a lambda into functions to wire them up to the analysis manager
  // for getting function analyses.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };

  // Fill SCCNodes with the elements of the SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool HasUnknownCall = false;
  for (LazyCallGraph::Node &N : C) {
    Function &F = N.getFunction();
    if (F.hasFnAttribute(Attribute::OptimizeNone) ||
        F.hasFnAttribute(Attribute::Naked)) {
      // Treat any function we're trying not to optimize as if it were an
      // indirect call and omit it from the node set used below.
      HasUnknownCall = true;
      continue;
    }
    // Track whether any functions in this SCC have an unknown call edge.
    // Note: if this is ever a performance hit, we can common it with
    // subsequent routines which also do scans over the instructions of the
    // function.
    if (!HasUnknownCall)
      for (Instruction &I : instructions(F))
        if (auto CS = CallSite(&I))
          if (!CS.getCalledFunction()) {
            HasUnknownCall = true;
            break;
          }

    SCCNodes.insert(&F);
  }

  if (deriveAttrsInPostOrder(SCCNodes, AARGetter, HasUnknownCall))
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}
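
// Illustrative usage (not part of the original source): under the new pass
// manager this pass runs on each call-graph SCC in bottom-up order, e.g.
//
//   opt -passes='cgscc(function-attrs)' -S input.ll
//
// while the legacy pipeline equivalent is 'opt -functionattrs'. The legacy
// spelling comes from the INITIALIZE_PASS registration below; the new-PM
// spelling is assumed from PassRegistry.def and may differ across LLVM
// versions.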

namespace {

struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char PostOrderFunctionAttrsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                    "Deduce function attributes", false, false)

Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
  return new PostOrderFunctionAttrsLegacyPass();
}

template <typename AARGetterT>
static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {

  // Fill SCCNodes with the elements of the SCC. Used for quickly looking up
  // whether a given CallGraphNode is in this SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool ExternalNode = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->hasFnAttribute(Attribute::OptimizeNone) ||
        F->hasFnAttribute(Attribute::Naked)) {
      // External node or function we're trying not to optimize - we both avoid
      // transforming them and avoid leveraging information they provide.
      ExternalNode = true;
      continue;
    }

    SCCNodes.insert(F);
  }

  return deriveAttrsInPostOrder(SCCNodes, AARGetter, ExternalNode);
}

bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return runImpl(SCC, LegacyAARGetter(*this));
}

namespace {

struct ReversePostOrderFunctionAttrsLegacyPass : public ModulePass {
  // Pass identification, replacement for typeid
  static char ID;

  ReversePostOrderFunctionAttrsLegacyPass() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
    AU.addPreserved<CallGraphWrapperPass>();
  }
};

} // end anonymous namespace

char ReversePostOrderFunctionAttrsLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrsLegacyPass,
                      "rpo-functionattrs", "Deduce function attributes in RPO",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrsLegacyPass,
                    "rpo-functionattrs", "Deduce function attributes in RPO",
                    false, false)

Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrsLegacyPass();
}

static bool addNoRecurseAttrsTopDown(Function &F) {
  // We check the preconditions for the function prior to calling this to avoid
  // the cost of building up a reversible post-order list. We assert them here
  // to make sure none of the invariants this relies on were violated.
  assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
  assert(!F.doesNotRecurse() &&
         "This function has already been deduced as norecurse!");
  assert(F.hasInternalLinkage() &&
         "Can only do top-down deduction for internal linkage functions!");

  // If F is internal and all of its uses are calls from non-recursive
  // functions, then none of its calls could in fact recurse without going
  // through a function marked norecurse, and so we can mark this function too
  // as norecurse. Note that the uses must actually be calls -- otherwise
  // a pointer to this function could be returned from a norecurse function but
  // this function could be recursively (indirectly) called. Note that this
  // also detects if F is directly recursive as F is not yet marked as
  // a norecurse function.
  for (auto *U : F.users()) {
    auto *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    CallSite CS(I);
    if (!CS || !CS.getParent()->getParent()->doesNotRecurse())
      return false;
  }
  return setDoesNotRecurse(F);
}

static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
  // We only have a post-order SCC traversal (because SCCs are inherently
  // discovered in post-order), so we accumulate them in a vector and then walk
  // it in reverse. This is simpler than using the RPO iterator infrastructure
  // because we need to combine SCC detection and the PO walk of the call
  // graph. We can also cheat egregiously because we're primarily interested in
  // synthesizing norecurse and so we can only save the singular SCCs as SCCs
  // with multiple functions in them will clearly be recursive.
  SmallVector<Function *, 16> Worklist;
  for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
    if (I->size() != 1)
      continue;

    Function *F = I->front()->getFunction();
    if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
        F->hasInternalLinkage())
      Worklist.push_back(F);
  }

  bool Changed = false;
  for (auto *F : llvm::reverse(Worklist))
    Changed |= addNoRecurseAttrsTopDown(*F);

  return Changed;
}

bool ReversePostOrderFunctionAttrsLegacyPass::runOnModule(Module &M) {
  if (skipModule(M))
    return false;

  auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  return deduceFunctionAttributeInRPO(M, CG);
}

PreservedAnalyses
ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto &CG = AM.getResult<CallGraphAnalysis>(M);

  if (!deduceFunctionAttributeInRPO(M, CG))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<CallGraphAnalysis>();
  return PA;
}